CLOUDSTACK-9972: Enhance listVolume API to include physical size and … (#2158)

* CLOUDSTACK-9972: Enhance listVolume API to include physical size and utilization.
Also fixed pool, cluster and pod info

* CLOUDSTACK-9972: Fix volume_view and duplicate API constant

* CLOUDSTACK-9972: Backport Do not allow vms to be deployed on hosts that are in disabled pod

* CLOUDSTACK-9972: Fix localization missing keys

* CLOUDSTACK-9972: Fix sql path
Abhinandan Prateek 2017-11-05 21:44:43 +05:30 committed by Rohit Yadav
parent 8c515df3e4
commit 4627fb2cd7
50 changed files with 1140 additions and 119 deletions
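For orientation: the VolumeResponse changes below add physicalsize, virtualsize and a derived utilization string to the Full (admin) response view, populated for QCOW2, VHD and OVA volumes. What follows is a minimal, self-contained sketch of the derivation performed in ViewResponseHelper.createVolumeResponse; the class name and sample sizes are illustrative only.

import java.text.DecimalFormat;

// Sketch: utilization = physical size / virtual size, formatted "0.00",
// mirroring what the Full response view computes for each volume.
final class VolumeUtilizationSketch {
    static String utilization(long physicalSize, long virtualSize) {
        DecimalFormat df = new DecimalFormat("0.00");
        return df.format((double) physicalSize / virtualSize);
    }

    public static void main(String[] args) {
        long virtualSize = 20L * 1024 * 1024 * 1024;  // 20 GiB provisioned
        long physicalSize = 5L * 1024 * 1024 * 1024;  // 5 GiB actually on disk
        System.out.println(utilization(physicalSize, virtualSize)); // prints 0.25
    }
}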

View File

@ -1,4 +1,3 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
@ -15,30 +14,17 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.Volume;
@LogLevel(Log4jLevel.Trace)
public class GetFileStatsCommand extends Command {
protected GetFileStatsCommand() {
}
String paths;
public GetFileStatsCommand(Volume volume) {
paths = volume.getPath();
}
public String getPaths() {
return paths;
}
public class BadCommand extends Command {
@Override
public boolean executeInSequence() {
// TODO Auto-generated method stub
return false;
}
public BadCommand(){
super();
}
}

View File

@ -20,5 +20,10 @@ public interface VolumeStats {
/**
 * @return the allocated (virtual) size of the volume in bytes
 */
public long getBytesUsed();
long getVirtualSize();
/**
 * @return the bytes actually consumed on disk
 */
long getPhysicalSize();
}

View File

@ -85,6 +85,7 @@ public class ApiConstants {
public static final String DEVICE_ID = "deviceid";
public static final String DISK_OFFERING_ID = "diskofferingid";
public static final String DISK_SIZE = "disksize";
public static final String UTILIZATION = "utilization";
public static final String DRIVER = "driver";
public static final String ROOT_DISK_SIZE = "rootdisksize";
public static final String DISPLAY_NAME = "displayname";
@ -205,6 +206,7 @@ public class ApiConstants {
public static final String SSHKEY_ENABLED = "sshkeyenabled";
public static final String PATH = "path";
public static final String POD_ID = "podid";
public static final String POD_NAME = "podname";
public static final String POD_IDS = "podids";
public static final String POLICY_ID = "policyid";
public static final String PORT = "port";

View File

@ -26,6 +26,7 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListTaggedResourcesCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject.ResponseView;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.DiskOfferingResponse;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.ListResponse;
@ -63,6 +64,9 @@ public class ListVolumesCmd extends BaseListTaggedResourcesCmd {
@Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "the pod id the disk volume belongs to")
private Long podId;
@Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "the cluster id the disk volume belongs to", authorized = {RoleType.Admin})
private Long clusterId;
@Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "the type of disk volume")
private String type;
@ -98,6 +102,10 @@ public class ListVolumesCmd extends BaseListTaggedResourcesCmd {
return hostId;
}
public Long getClusterId() {
return clusterId;
}
public Long getId() {
return id;
}

View File

@ -228,9 +228,36 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co
String chainInfo;
@SerializedName(ApiConstants.SNAPSHOT_QUIESCEVM)
@Param(description = "need quiesce vm or not when taking snapshot", since="4.3")
@Param(description = "need quiesce vm or not when taking snapshot", since = "4.3")
private boolean needQuiescevm;
@SerializedName(ApiConstants.PHYSICAL_SIZE)
@Param(description = "the bytes actually consumed on disk")
private Long physicalsize;
@SerializedName(ApiConstants.VIRTUAL_SIZE)
@Param(description = "the bytes allocated")
private Long virtualsize;
@SerializedName(ApiConstants.UTILIZATION)
@Param(description = "the disk utilization")
private String utilization;
@SerializedName(ApiConstants.CLUSTER_ID)
@Param(description = "cluster id of the volume")
private String clusterid;
@SerializedName(ApiConstants.CLUSTER_NAME)
@Param(description = "cluster name where the volume is allocated")
private String clustername;
@SerializedName(ApiConstants.POD_ID)
@Param(description = "pod id of the volume")
private String podid;
@SerializedName(ApiConstants.POD_NAME)
@Param(description = "pod name of the volume")
private String podname;
public String getPath() {
return path;
@ -301,7 +328,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co
this.virtualMachineState = virtualMachineState;
}
public void setProvisioningType(String provisioningType){
public void setProvisioningType(String provisioningType) {
this.provisioningType = provisioningType;
}
@ -649,4 +676,61 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co
public Boolean getDisplayVolume() {
return displayVolume;
}
public Long getPhysicalsize() {
return physicalsize;
}
public void setPhysicalsize(Long physicalsize) {
this.physicalsize = physicalsize;
}
public Long getVirtualsize() {
return virtualsize;
}
public void setVirtualsize(Long virtualsize) {
this.virtualsize = virtualsize;
}
public String getUtilization() {
return utilization;
}
public void setUtilization(String utilization) {
this.utilization = utilization;
}
public String getClusterId() {
return clusterid;
}
public void setClusterId(String clusterid) {
this.clusterid = clusterid;
}
public String getClusterName() {
return clustername;
}
public void setClusterName(String clustername) {
this.clustername = clustername;
}
public String getPodId() {
return podid;
}
public void setPodId(String podid) {
this.podid = podid;
}
public String getPodName() {
return podname;
}
public void setPodName(String podname) {
this.podname = podname;
}
}

View File

@ -1,41 +0,0 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.VolumeStats;
@LogLevel(Log4jLevel.Trace)
public class GetFileStatsAnswer extends Answer implements VolumeStats {
long size;
protected GetFileStatsAnswer() {
}
public GetFileStatsAnswer(GetFileStatsCommand cmd, long value) {
super(cmd);
size = value;
}
@Override
public long getBytesUsed() {
return size;
}
}

View File

@ -25,6 +25,10 @@ import com.cloud.agent.api.LogLevel.Log4jLevel;
@LogLevel(Log4jLevel.Trace)
public class GetVmDiskStatsCommand extends Command {
public String getString() {
return "GetVmDiskStatsCommand [vmNames=" + vmNames + ", hostGuid=" + hostGuid + ", hostName=" + hostName + "]";
}
List<String> vmNames;
String hostGuid;
String hostName;

View File

@ -0,0 +1,73 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import java.util.HashMap;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.Storage.StoragePoolType;
@LogLevel(Log4jLevel.Trace)
public class GetVolumeStatsAnswer extends Answer {
String poolUuid;
StoragePoolType poolType;
HashMap<String, VolumeStatsEntry> volumeStats;
public GetVolumeStatsAnswer(GetVolumeStatsCommand cmd, String details, HashMap<String, VolumeStatsEntry> volumeStats) {
super(cmd, true, details);
this.poolUuid = cmd.getPoolUuid();
this.poolType = cmd.getPoolType();
this.volumeStats = volumeStats;
}
protected GetVolumeStatsAnswer() {
//no-args constructor for json serialization-deserialization
}
public String getPoolUuid() {
return poolUuid;
}
public void setPoolUuid(String poolUuid) {
this.poolUuid = poolUuid;
}
public StoragePoolType getPoolType() {
return poolType;
}
public void setPoolType(StoragePoolType poolType) {
this.poolType = poolType;
}
public HashMap<String, VolumeStatsEntry> getVolumeStats() {
return volumeStats;
}
public void setVolumeStats(HashMap<String, VolumeStatsEntry> volumeStats) {
this.volumeStats = volumeStats;
}
public String getString() {
return "GetVolumeStatsAnswer [poolUuid=" + poolUuid + ", poolType=" + poolType + ", volumeStats=" + volumeStats + "]";
}
}

View File

@ -0,0 +1,75 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import java.util.List;
import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.Storage.StoragePoolType;
@LogLevel(Log4jLevel.Trace)
public class GetVolumeStatsCommand extends Command {
List<String> volumeUuids;
StoragePoolType poolType;
String poolUuid;
protected GetVolumeStatsCommand() {
}
public GetVolumeStatsCommand(StoragePoolType poolType, String storeUuid, List<String> volumeUuids) {
this.volumeUuids = volumeUuids;
this.poolType = poolType;
this.poolUuid = storeUuid;
}
public List<String> getVolumeUuids() {
return volumeUuids;
}
public void setVolumeUuids(List<String> volumeUuids) {
this.volumeUuids = volumeUuids;
}
public StoragePoolType getPoolType() {
return poolType;
}
public void setPoolType(StoragePoolType poolType) {
this.poolType = poolType;
}
public String getPoolUuid() {
return poolUuid;
}
public void setPoolUuid(String storeUuid) {
this.poolUuid = storeUuid;
}
@Override
public boolean executeInSequence() {
return false;
}
public String getString() {
return "GetVolumeStatsCommand [volumeUuids=" + volumeUuids + ", poolType=" + poolType + ", poolUuid=" + poolUuid + "]";
}
}

View File

@ -0,0 +1,64 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
import com.cloud.storage.VolumeStats;
public class VolumeStatsEntry implements VolumeStats {
String volumeUuid;
long physicalsize = 0;
long virtualSize = 0;
public VolumeStatsEntry(String volumeUuid, long physicalsize, long virtualSize) {
this.volumeUuid = volumeUuid;
this.physicalsize = physicalsize;
this.virtualSize = virtualSize;
}
public String getVolumeUuid() {
return volumeUuid;
}
public void setVolumeUuid(String volumeUuid) {
this.volumeUuid = volumeUuid;
}
public long getPhysicalSize() {
return physicalsize;
}
public void setPhysicalSize(long size) {
this.physicalsize = size;
}
public long getVirtualSize() {
return virtualSize;
}
public void setVirtualSize(long virtualSize) {
this.virtualSize = virtualSize;
}
@Override
public String toString() {
return "VolumeStatsEntry [volumeUuid=" + volumeUuid + ", size=" + physicalsize + ", virtualSize=" + virtualSize + "]";
}
}

View File

@ -47,6 +47,7 @@ import com.google.gson.JsonSerializer;
import com.google.gson.stream.JsonReader;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.BadCommand;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.SecStorageFirewallCfgCommand.PortConfig;
import com.cloud.exception.UnsupportedVersionException;
@ -249,6 +250,8 @@ public class Request {
JsonReader jsonReader = new JsonReader(reader);
jsonReader.setLenient(true);
_cmds = s_gson.fromJson(jsonReader, (Type)Command[].class);
} catch (JsonParseException e) {
_cmds = new Command[] { new BadCommand() };
} catch (RuntimeException e) {
s_logger.error("Caught problem with " + _content, e);
throw e;

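The JsonParseException handler above substitutes a BadCommand placeholder so that a payload naming a Command class this side cannot resolve no longer aborts decoding of the whole request (see testBadCommand below). A minimal, self-contained sketch of the same catch-and-substitute idea, using plain Gson and a type mismatch to stand in for the unresolvable class:

import com.google.gson.Gson;
import com.google.gson.JsonParseException;

// Sketch only: the real Request uses its own Gson instance and substitutes
// new BadCommand() when Command[] deserialization throws JsonParseException.
final class BadCommandFallbackSketch {
    static Object parseOrPlaceholder(String json) {
        Gson gson = new Gson();
        try {
            // A type mismatch raises the same JsonParseException family
            // (JsonSyntaxException) that an unresolvable command class does.
            return gson.fromJson(json, Integer.class);
        } catch (JsonParseException e) {
            return "placeholder-for-BadCommand";
        }
    }

    public static void main(String[] args) {
        System.out.println(parseOrPlaceholder("42"));                // 42
        System.out.println(parseOrPlaceholder("\"not-a-number\""));  // placeholder-for-BadCommand
    }
}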
View File

@ -20,7 +20,6 @@
package com.cloud.agent.transport;
import java.nio.ByteBuffer;
import junit.framework.TestCase;
import org.apache.log4j.Level;
@ -32,13 +31,16 @@ import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.BadCommand;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.GetHostStatsCommand;
import com.cloud.agent.api.GetVolumeStatsCommand;
import com.cloud.agent.api.SecStorageFirewallCfgCommand;
import com.cloud.agent.api.UpdateHostPasswordCommand;
import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.ListTemplateCommand;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.transport.Request.Version;
import com.cloud.exception.UnsupportedVersionException;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.serializer.GsonHelper;
@ -250,4 +252,26 @@ public class RequestTest extends TestCase {
}
}
public void testGoodCommand() {
s_logger.info("Testing good Command");
String content = "[{\"com.cloud.agent.api.GetVolumeStatsCommand\":{\"volumeUuids\":[\"dcc860ac-4a20-498f-9cb3-bab4d57aa676\"],"
+ "\"poolType\":\"NetworkFilesystem\",\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]";
Request sreq = new Request(Version.v2, 1L, 2L, 3L, 1L, (short)1, content);
sreq.setSequence(1);
Command cmds[] = sreq.getCommands();
s_logger.debug("Command class = " + cmds[0].getClass().getSimpleName());
assert cmds[0].getClass().equals(GetVolumeStatsCommand.class);
}
public void testBadCommand() {
s_logger.info("Testing Bad Command");
String content = "[{\"com.cloud.agent.api.SomeJunkCommand\":{\"volumeUuids\":[\"dcc860ac-4a20-498f-9cb3-bab4d57aa676\"],"
+ "\"poolType\":\"NetworkFilesystem\",\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]";
Request sreq = new Request(Version.v2, 1L, 2L, 3L, 1L, (short)1, content);
sreq.setSequence(1);
Command cmds[] = sreq.getCommands();
s_logger.debug("Command class = " + cmds[0].getClass().getSimpleName());
assert cmds[0].getClass().equals(BadCommand.class);
}
}

View File

@ -0,0 +1,66 @@
//
//Licensed to the Apache Software Foundation (ASF) under one
//or more contributor license agreements. See the NOTICE file
//distributed with this work for additional information
//regarding copyright ownership. The ASF licenses this file
//to you under the Apache License, Version 2.0 (the
//"License"); you may not use this file except in compliance
//with the License. You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing,
//software distributed under the License is distributed on an
//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
//KIND, either express or implied. See the License for the
//specific language governing permissions and limitations
//under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import java.util.HashMap;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.LibvirtException;
import com.cloud.agent.api.Answer;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.agent.api.GetVolumeStatsAnswer;
import com.cloud.agent.api.GetVolumeStatsCommand;
import com.cloud.agent.api.VolumeStatsEntry;
@ResourceWrapper(handles = GetVolumeStatsCommand.class)
public final class LibvirtGetVolumeStatsCommandWrapper extends CommandWrapper<GetVolumeStatsCommand, Answer, LibvirtComputingResource> {
private static final Logger s_logger = Logger.getLogger(LibvirtGetVolumeStatsCommandWrapper.class);
@Override
public Answer execute(final GetVolumeStatsCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
try {
Connect conn = LibvirtConnection.getConnection();
String storeUuid = cmd.getPoolUuid();
StoragePoolType poolType = cmd.getPoolType();
HashMap<String, VolumeStatsEntry> statEntry = new HashMap<String, VolumeStatsEntry>();
for (String volumeUuid : cmd.getVolumeUuids()) {
statEntry.put(volumeUuid, getVolumeStat(libvirtComputingResource, conn, volumeUuid, storeUuid, poolType));
}
return new GetVolumeStatsAnswer(cmd, "", statEntry);
} catch (LibvirtException e) {
return new GetVolumeStatsAnswer(cmd, "Can't get volume stats: " + e.getMessage(), null);
}
}
private VolumeStatsEntry getVolumeStat(final LibvirtComputingResource libvirtComputingResource, final Connect conn, final String volumeUuid, final String storeUuid, final StoragePoolType poolType) throws LibvirtException {
KVMStoragePool sourceKVMPool = libvirtComputingResource.getStoragePoolMgr().getStoragePool(poolType, storeUuid);
KVMPhysicalDisk sourceKVMVolume = sourceKVMPool.getPhysicalDisk(volumeUuid);
return new VolumeStatsEntry(volumeUuid, sourceKVMVolume.getSize(), sourceKVMVolume.getVirtualSize());
}
}

View File

@ -56,6 +56,11 @@ public class KVMPhysicalDisk {
this.pool = pool;
}
@Override
public String toString() {
return "KVMPhysicalDisk [path=" + path + ", name=" + name + ", pool=" + pool + ", format=" + format + ", size=" + size + ", virtualSize=" + virtualSize + "]";
}
public void setFormat(PhysicalDiskFormat format) {
this.format = format;
}

View File

@ -140,7 +140,7 @@ public class LibvirtStoragePool implements KVMStoragePool {
if (disk != null) {
return disk;
}
s_logger.debug("find volume bypass libvirt");
s_logger.debug("find volume bypass libvirt volumeUuid " + volumeUuid);
//For network file system or file system, try to use java file to find the volume, instead of through libvirt. BUG:CLOUDSTACK-4459
String localPoolPath = this.getLocalPath();
File f = new File(localPoolPath + File.separator + volumeUuid);
@ -152,6 +152,7 @@ public class LibvirtStoragePool implements KVMStoragePool {
disk.setFormat(PhysicalDiskFormat.QCOW2);
disk.setSize(f.length());
disk.setVirtualSize(f.length());
s_logger.debug("find volume bypass libvirt disk " + disk.toString());
return disk;
}

View File

@ -146,6 +146,8 @@ import com.cloud.agent.api.GetVmStatsAnswer;
import com.cloud.agent.api.GetVmStatsCommand;
import com.cloud.agent.api.GetVncPortAnswer;
import com.cloud.agent.api.GetVncPortCommand;
import com.cloud.agent.api.GetVolumeStatsAnswer;
import com.cloud.agent.api.GetVolumeStatsCommand;
import com.cloud.agent.api.HostStatsEntry;
import com.cloud.agent.api.HostVmStateReportEntry;
import com.cloud.agent.api.MaintainAnswer;
@ -199,6 +201,7 @@ import com.cloud.agent.api.UpgradeSnapshotCommand;
import com.cloud.agent.api.ValidateSnapshotAnswer;
import com.cloud.agent.api.ValidateSnapshotCommand;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.VolumeStatsEntry;
import com.cloud.agent.api.check.CheckSshAnswer;
import com.cloud.agent.api.check.CheckSshCommand;
import com.cloud.agent.api.routing.IpAssocCommand;
@ -414,6 +417,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
answer = execute((GetVmNetworkStatsCommand) cmd);
} else if (clz == GetVmDiskStatsCommand.class) {
answer = execute((GetVmDiskStatsCommand)cmd);
} else if (cmd instanceof GetVolumeStatsCommand) {
return execute((GetVolumeStatsCommand)cmd);
} else if (clz == CheckHealthCommand.class) {
answer = execute((CheckHealthCommand)cmd);
} else if (clz == StopCommand.class) {
@ -3275,6 +3280,44 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
return new GetVmNetworkStatsAnswer(cmd, null, null, null);
}
protected GetVolumeStatsAnswer execute(GetVolumeStatsCommand cmd) {
try {
VmwareHypervisorHost srcHyperHost = getHyperHost(getServiceContext());
ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, cmd.getPoolUuid());
assert (morDs != null);
DatastoreMO primaryStorageDatastoreMo = new DatastoreMO(getServiceContext(), morDs);
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dcMo = new DatacenterMO(getServiceContext(), dcMor);
HashMap<String, VolumeStatsEntry> statEntry = new HashMap<String, VolumeStatsEntry>();
for (String chainInfo : cmd.getVolumeUuids()){
if (chainInfo != null) {
VirtualMachineDiskInfo infoInChain = _gson.fromJson(chainInfo, VirtualMachineDiskInfo.class);
if (infoInChain != null) {
String[] disks = infoInChain.getDiskChain();
if (disks.length > 0) {
for (String diskPath : disks) {
DatastoreFile file = new DatastoreFile(diskPath);
VirtualMachineMO vmMo = dcMo.findVm(file.getDir());
Pair<VirtualDisk, String> vds = vmMo.getDiskDevice(file.getFileName(), true);
long virtualsize = vds.first().getCapacityInKB() * 1024;
long physicalsize = primaryStorageDatastoreMo.fileDiskSize(file.getPath());
VolumeStatsEntry vse = new VolumeStatsEntry(chainInfo, physicalsize, virtualsize);
statEntry.put(chainInfo, vse);
}
}
}
}
}
return new GetVolumeStatsAnswer(cmd, "", statEntry);
} catch (Exception e) {
s_logger.info("VOLSTAT GetVolumeStatsCommand failed " + e.getMessage());
}
return new GetVolumeStatsAnswer(cmd, "", null);
}
protected Answer execute(CheckHealthCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource CheckHealthCommand: " + _gson.toJson(cmd));

View File

@ -0,0 +1,62 @@
//
//Licensed to the Apache Software Foundation (ASF) under one
//or more contributor license agreements. See the NOTICE file
//distributed with this work for additional information
//regarding copyright ownership. The ASF licenses this file
//to you under the Apache License, Version 2.0 (the
//"License"); you may not use this file except in compliance
//with the License. You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing,
//software distributed under the License is distributed on an
//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
//KIND, either express or implied. See the License for the
//specific language governing permissions and limitations
//under the License.
//
package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase;
import java.util.HashMap;
import org.apache.log4j.Logger;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.GetVolumeStatsAnswer;
import com.cloud.agent.api.GetVolumeStatsCommand;
import com.cloud.agent.api.VolumeStatsEntry;
import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.VDI;
@ResourceWrapper(handles = GetVolumeStatsCommand.class)
public final class CitrixGetVolumeStatsCommandWrapper extends CommandWrapper<GetVolumeStatsCommand, Answer, CitrixResourceBase> {
private static final Logger s_logger = Logger.getLogger(CitrixGetVolumeStatsCommandWrapper.class);
@Override
public Answer execute(final GetVolumeStatsCommand cmd, final CitrixResourceBase citrixResourceBase) {
Connection conn = citrixResourceBase.getConnection();
HashMap<String, VolumeStatsEntry> statEntry = new HashMap<String, VolumeStatsEntry>();
for (String volumeUuid : cmd.getVolumeUuids()) {
VDI vdi = citrixResourceBase.getVDIbyUuid(conn, volumeUuid, false);
if (vdi != null) {
try {
VolumeStatsEntry vse = new VolumeStatsEntry(volumeUuid, vdi.getPhysicalUtilisation(conn), vdi.getVirtualSize(conn));
statEntry.put(volumeUuid, vse);
} catch (Exception e) {
s_logger.warn("Unable to get volume stats", e);
statEntry.put(volumeUuid, new VolumeStatsEntry(volumeUuid, -1, -1));
}
} else {
s_logger.warn("VDI not found for path " + volumeUuid);
statEntry.put(volumeUuid, new VolumeStatsEntry(volumeUuid, -1L, -1L));
}
}
return new GetVolumeStatsAnswer(cmd, "", statEntry);
}
}

View File

@ -260,6 +260,7 @@ import com.cloud.storage.UploadVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.VolumeStats;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
@ -923,6 +924,10 @@ public class ApiDBUtils {
return s_statsCollector.getVmStats(hostId);
}
public static VolumeStats getVolumeStatistics(String volumeUuid) {
return s_statsCollector.getVolumeStats(volumeUuid);
}
public static StorageStats getSecondaryStorageStatistics(long id) {
return s_statsCollector.getStorageStats(id);
}

View File

@ -1736,6 +1736,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
String type = cmd.getType();
Map<String, String> tags = cmd.getTags();
Long storageId = cmd.getStorageId();
Long clusterId = cmd.getClusterId();
Long diskOffId = cmd.getDiskOfferingId();
Boolean display = cmd.getDisplay();
@ -1845,6 +1846,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
sc.setParameters("storageId", storageId);
}
if (clusterId != null) {
sc.setParameters("clusterId", clusterId);
}
// Don't return DomR and ConsoleProxy volumes
sc.setParameters("type", VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.DomainRouter);

View File

@ -16,6 +16,7 @@
// under the License.
package com.cloud.api.query;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.Hashtable;
@ -79,6 +80,8 @@ import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.api.query.vo.VolumeJoinVO;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.VolumeStats;
import com.cloud.user.Account;
/**
@ -263,6 +266,7 @@ public class ViewResponseHelper {
public static List<VolumeResponse> createVolumeResponse(ResponseView view, VolumeJoinVO... volumes) {
Hashtable<Long, VolumeResponse> vrDataList = new Hashtable<Long, VolumeResponse>();
DecimalFormat df = new DecimalFormat("0.00");
for (VolumeJoinVO vr : volumes) {
VolumeResponse vrData = vrDataList.get(vr.getId());
if (vrData == null) {
@ -274,6 +278,28 @@ public class ViewResponseHelper {
vrData = ApiDBUtils.fillVolumeDetails(view, vrData, vr);
}
vrDataList.put(vr.getId(), vrData);
if (view == ResponseView.Full) {
VolumeStats vs = null;
if (vr.getFormat() == ImageFormat.QCOW2) {
vs = ApiDBUtils.getVolumeStatistics(vrData.getId());
}
else if (vr.getFormat() == ImageFormat.VHD){
vs = ApiDBUtils.getVolumeStatistics(vrData.getPath());
}
else if (vr.getFormat() == ImageFormat.OVA){
vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo());
}
if (vs != null){
long vsz = vs.getVirtualSize();
long psz = vs.getPhysicalSize() ;
double util = (double)psz/vsz;
vrData.setVirtualsize(vsz);
vrData.setPhysicalsize(psz);
vrData.setUtilization(df.format(util));
}
}
}
return new ArrayList<VolumeResponse>(vrDataList.values());
}

View File

@ -78,6 +78,12 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation<VolumeJo
volResponse.setZoneId(volume.getDataCenterUuid());
volResponse.setZoneName(volume.getDataCenterName());
if (view == ResponseView.Full) {
volResponse.setClusterId(volume.getClusterUuid());
volResponse.setClusterName(volume.getClusterName());
volResponse.setPodId(volume.getPodUuid());
volResponse.setPodName(volume.getPodName());
}
if (volume.getVolumeType() != null) {
volResponse.setVolumeType(volume.getVolumeType().toString());

View File

@ -113,6 +113,21 @@ public class VolumeJoinVO extends BaseViewWithTagInformationVO implements Contro
@Column(name = "pod_id")
private long podId;
@Column(name = "pod_name")
private String podName;
@Column(name = "pod_uuid")
private String podUuid;
@Column(name = "cluster_id")
private long clusterId;
@Column(name = "cluster_name")
private String clusterName;
@Column(name = "cluster_uuid")
private String clusterUuid;
@Column(name = "data_center_id")
private long dataCenterId;
@ -469,6 +484,38 @@ public class VolumeJoinVO extends BaseViewWithTagInformationVO implements Contro
return poolName;
}
public String getPodName() {
return podName;
}
public void setPodName(String podName) {
this.podName = podName;
}
public String getPodUuid() {
return podUuid;
}
public void setPodUuid(String podUuid) {
this.podUuid = podUuid;
}
public void setPodId(long podId) {
this.podId = podId;
}
public long getClusterId() {
return clusterId;
}
public String getClusterName() {
return clusterName;
}
public String getClusterUuid() {
return clusterUuid;
}
public long getTemplateId() {
return templateId;
}

View File

@ -853,6 +853,8 @@ public enum Config {
"60000",
"The interval (in milliseconds) when vm stats are retrieved from agents.",
null),
VmDiskStatsInterval("Advanced", ManagementServer.class, Integer.class, "vm.disk.stats.interval", "0", "Interval (in seconds) to report vm disk statistics.", null),
VolumeStatsInterval("Advanced", ManagementServer.class, Integer.class, "volume.stats.interval", "60000", "Interval (in milliseconds) to report volume statistics.", null),
VmTransitionWaitInterval(
"Advanced",
ManagementServer.class,

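The new volume.stats.interval key is consumed in the StatsCollector changes further down: the value is read in milliseconds and, when positive, schedules the VolumeStatsTask at a fixed rate with a 15 s initial delay. A minimal sketch of that scheduling pattern; the executor and task body here are illustrative stand-ins.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of the StatsCollector scheduling: a positive volume.stats.interval
// (in milliseconds) starts the periodic volume stats collection task.
final class VolumeStatsSchedulingSketch {
    public static void main(String[] args) {
        long volumeStatsInterval = 600000L; // ms; non-positive disables the task
        ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
        if (volumeStatsInterval > 0) {
            executor.scheduleAtFixedRate(
                    () -> System.out.println("collect volume stats for each primary storage pool"),
                    15000L, volumeStatsInterval, TimeUnit.MILLISECONDS);
        }
    }
}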
View File

@ -282,12 +282,22 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
s_logger.debug("The specified host is in avoid set");
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " +
host.getClusterId());
s_logger.debug(
"Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
}
Pod pod = _podDao.findById(host.getPodId());
// check if the cluster or the pod is disabled
if (pod.getAllocationState() != Grouping.AllocationState.Enabled) {
s_logger.warn("The Pod containing this host is in disabled state, PodId= " + pod.getId());
return null;
}
Cluster cluster = _clusterDao.findById(host.getClusterId());
if (cluster.getAllocationState() != Grouping.AllocationState.Enabled) {
s_logger.warn("The Cluster containing this host is in disabled state, ClusterId= " + cluster.getId());
return null;
}
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
@ -1041,6 +1051,8 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
DataCenterDeployment potentialPlan =
new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
Pod pod = _podDao.findById(clusterVO.getPodId());
if (pod.getAllocationState() == Grouping.AllocationState.Enabled ) {
// find suitable hosts under this cluster, need as many hosts as we
// get.
List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
@ -1048,24 +1060,20 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
// pools for each volume of the VM
if (suitableHosts != null && !suitableHosts.isEmpty()) {
if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
Pod pod = _podDao.findById(clusterVO.getPodId());
DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
return dest;
}
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result =
findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
List<Volume> readyAndReusedVolumes = result.second();
// choose the potential host and pool for the VM
if (!suitableVolumeStoragePools.isEmpty()) {
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired,
readyAndReusedVolumes);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid,
resourceUsageRequired, readyAndReusedVolumes);
if (potentialResources != null) {
Pod pod = _podDao.findById(clusterVO.getPodId());
Host host = _hostDao.findById(potentialResources.first().getId());
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// remove the reused vol<->pool from destination, since
@ -1083,6 +1091,10 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
} else {
s_logger.debug("No suitable hosts found under this Cluster: " + clusterId);
}
}
else {
s_logger.debug("The cluster is in a disabled pod : " + pod.getId());
}
if (canAvoidCluster(clusterVO, avoid, plannerAvoidOutput, vmProfile)) {
avoid.addCluster(clusterVO.getId());

View File

@ -41,7 +41,6 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.utils.graphite.GraphiteClient;
@ -60,6 +59,7 @@ import com.cloud.agent.api.VgpuTypesInfo;
import com.cloud.agent.api.VmDiskStatsEntry;
import com.cloud.agent.api.VmNetworkStatsEntry;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.VolumeStatsEntry;
import com.cloud.cluster.ManagementServerHostVO;
import com.cloud.cluster.dao.ManagementServerHostDao;
import com.cloud.dc.Vlan.VlanType;
@ -101,9 +101,9 @@ import com.cloud.storage.StorageManager;
import com.cloud.storage.StorageStats;
import com.cloud.storage.VolumeStats;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.UserStatisticsVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.user.VmDiskStatisticsVO;
import com.cloud.user.dao.UserStatisticsDao;
import com.cloud.user.dao.VmDiskStatisticsDao;
@ -160,6 +160,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
"Interval (in seconds) to report vm network statistics (for Shared networks). Vm network statistics will be disabled if this is set to 0 or less than 0.", false);
static final ConfigKey<Integer> vmNetworkStatsIntervalMin = new ConfigKey<Integer>("Advanced", Integer.class, "vm.network.stats.interval.min", "300",
"Minimal Interval (in seconds) to report vm network statistics (for Shared networks). If vm.network.stats.interval is smaller than this, use this to report vm network statistics.", false);
static final ConfigKey<Integer> StatsTimeout = new ConfigKey<Integer>("Advanced", Integer.class, "stats.timeout", "60000",
"The timeout for stats calls in milliseconds.", true, ConfigKey.Scope.Cluster);
private static StatsCollector s_instance = null;
@ -177,12 +179,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
@Inject
private PrimaryDataStoreDao _storagePoolDao;
@Inject
private ImageStoreDao _imageStoreDao;
@Inject
private StorageManager _storageManager;
@Inject
private StoragePoolHostDao _storagePoolHostDao;
@Inject
private DataStoreManager _dataStoreMgr;
@Inject
private ResourceManager _resourceMgr;
@ -229,7 +227,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
private ConcurrentHashMap<Long, HostStats> _hostStats = new ConcurrentHashMap<Long, HostStats>();
private final ConcurrentHashMap<Long, VmStats> _VmStats = new ConcurrentHashMap<Long, VmStats>();
private final ConcurrentHashMap<Long, VolumeStats> _volumeStats = new ConcurrentHashMap<Long, VolumeStats>();
private final Map<String, VolumeStats> _volumeStats = new ConcurrentHashMap<String, VolumeStats>();
private ConcurrentHashMap<Long, StorageStats> _storageStats = new ConcurrentHashMap<Long, StorageStats>();
private ConcurrentHashMap<Long, StorageStats> _storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();
@ -282,7 +280,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
hostStatsInterval = NumbersUtil.parseLong(configs.get("host.stats.interval"), 60000L);
hostAndVmStatsInterval = NumbersUtil.parseLong(configs.get("vm.stats.interval"), 60000L);
storageStatsInterval = NumbersUtil.parseLong(configs.get("storage.stats.interval"), 60000L);
volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), -1L);
volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), 600000L);
autoScaleStatsInterval = NumbersUtil.parseLong(configs.get("autoscale.stats.interval"), 60000L);
/* URI to send statistics to. Currently only Graphite is supported */
@ -359,6 +357,10 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
s_logger.debug("vm.network.stats.interval - " + vmNetworkStatsInterval.value() + " is 0 or less than 0, so not scheduling the vm network stats thread");
}
if (volumeStatsInterval > 0) {
_executor.scheduleAtFixedRate(new VolumeStatsTask(), 15000L, volumeStatsInterval, TimeUnit.MILLISECONDS);
}
//Schedule disk stats update task
_diskStatsUpdateExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("DiskStatsUpdater"));
@ -644,6 +646,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
return;
}
// collect the vm disk statistics(total) from hypervisor. added by weizhou, 2013.03.
s_logger.trace("Running VM disk stats ...");
try {
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
@ -887,6 +890,51 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
}
}
class VolumeStatsTask extends ManagedContextRunnable {
@Override
protected void runInContext() {
try {
List<StoragePoolVO> pools = _storagePoolDao.listAll();
for (StoragePoolVO pool : pools) {
List<VolumeVO> volumes = _volsDao.findByPoolId(pool.getId(), null);
List<String> volumeLocators = new ArrayList<String>();
for (VolumeVO volume: volumes){
if (volume.getFormat() == ImageFormat.QCOW2) {
volumeLocators.add(volume.getUuid());
}
else if (volume.getFormat() == ImageFormat.VHD){
volumeLocators.add(volume.getPath());
}
else if (volume.getFormat() == ImageFormat.OVA){
volumeLocators.add(volume.getChainInfo());
}
else {
s_logger.warn("Volume stats not implemented for this format type " + volume.getFormat() );
break;
}
}
try {
HashMap<String, VolumeStatsEntry> volumeStatsByUuid = _userVmMgr.getVolumeStatistics(pool.getClusterId(), pool.getUuid(), pool.getPoolType(), volumeLocators, StatsTimeout.value());
if (volumeStatsByUuid != null){
_volumeStats.putAll(volumeStatsByUuid);
}
} catch (Exception e) {
s_logger.warn("Failed to get volume stats for cluster with ID: " + pool.getClusterId(), e);
continue;
}
}
} catch (Throwable t) {
s_logger.error("Error trying to retrieve volume stats", t);
}
}
}
public VolumeStats getVolumeStats(String volumeLocator) {
return _volumeStats.get(volumeLocator);
}
class StorageCollector extends ManagedContextRunnable {
@Override
protected void runInContext() {
@ -1257,11 +1305,11 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
@Override
public String getConfigComponentName() {
return this.getClass().getSimpleName();
return StatsCollector.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] { vmDiskStatsInterval, vmDiskStatsIntervalMin, vmNetworkStatsInterval, vmNetworkStatsIntervalMin };
return new ConfigKey<?>[] { vmDiskStatsInterval, vmDiskStatsIntervalMin, vmNetworkStatsInterval, vmNetworkStatsIntervalMin, StatsTimeout };
}
}

View File

@ -315,7 +315,7 @@ public class DatabaseConfig {
s_defaultConfigurationValues.put("host.stats.interval", "60000");
s_defaultConfigurationValues.put("storage.stats.interval", "60000");
//s_defaultConfigurationValues.put("volume.stats.interval", "-1");
s_defaultConfigurationValues.put("volume.stats.interval", "60000");
s_defaultConfigurationValues.put("port", "8250");
s_defaultConfigurationValues.put("integration.api.port", "8096");
s_defaultConfigurationValues.put("usage.stats.job.exec.time", "00:15"); // run at 12:15am

View File

@ -26,6 +26,7 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.agent.api.VmDiskStatsEntry;
import com.cloud.agent.api.VmNetworkStatsEntry;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.VolumeStatsEntry;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ManagementServerException;
@ -33,6 +34,7 @@ import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.VirtualMachineMigrationException;
import com.cloud.offering.ServiceOffering;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.user.Account;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
@ -82,6 +84,8 @@ public interface UserVmManager extends UserVmService {
HashMap<Long, List<VmDiskStatsEntry>> getVmDiskStatistics(long hostId, String hostName, List<Long> vmIds);
HashMap<String, VolumeStatsEntry> getVolumeStatistics(long clusterId, String poolUuid, StoragePoolType poolType, List<String> volumeLocator, int timeout);
boolean deleteVmGroup(long groupId);
boolean addInstanceToGroup(long userVmId, String group);

View File

@ -103,6 +103,8 @@ import com.cloud.agent.api.GetVmNetworkStatsAnswer;
import com.cloud.agent.api.GetVmNetworkStatsCommand;
import com.cloud.agent.api.GetVmStatsAnswer;
import com.cloud.agent.api.GetVmStatsCommand;
import com.cloud.agent.api.GetVolumeStatsAnswer;
import com.cloud.agent.api.GetVolumeStatsCommand;
import com.cloud.agent.api.PvlanSetupCommand;
import com.cloud.agent.api.RestoreVMSnapshotAnswer;
import com.cloud.agent.api.RestoreVMSnapshotCommand;
@ -110,6 +112,7 @@ import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.VmDiskStatsEntry;
import com.cloud.agent.api.VmNetworkStatsEntry;
import com.cloud.agent.api.VmStatsEntry;
import com.cloud.agent.api.VolumeStatsEntry;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
@ -167,6 +170,7 @@ import com.cloud.gpu.GPU;
import com.cloud.ha.HighAvailabilityManager;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
@ -226,6 +230,7 @@ import com.cloud.storage.GuestOSVO;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.Snapshot;
import com.cloud.storage.StorageManager;
@ -1868,6 +1873,23 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return vmStatsById;
}
@Override
public HashMap<String, VolumeStatsEntry> getVolumeStatistics(long clusterId, String poolUuid, StoragePoolType poolType, List<String> volumeLocator, int timeout) {
List<HostVO> neighbors = _resourceMgr.listHostsInClusterByStatus(clusterId, Status.Up);
for (HostVO neighbor : neighbors) {
GetVolumeStatsCommand cmd = new GetVolumeStatsCommand(poolType, poolUuid, volumeLocator);
if (timeout > 0) {
cmd.setWait(timeout/1000);
}
Answer answer = _agentMgr.easySend(neighbor.getId(), cmd);
if (answer instanceof GetVolumeStatsAnswer){
GetVolumeStatsAnswer volstats = (GetVolumeStatsAnswer)answer;
return volstats.getVolumeStats();
}
}
return null;
}
@Override
@DB
public UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationException, CloudRuntimeException {

View File

@ -296,3 +296,140 @@ INSERT IGNORE INTO `cloud`.`configuration` (`category`, `instance`, `component`,
-- CLOUDSTACK-9859: Retirement of midonet plugin (final removal)
delete from `cloud`.`configuration` where name in ('midonet.apiserver.address', 'midonet.providerrouter.id');
-- CLOUDSTACK-9972: Enhance listVolumes API
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Premium', 'DEFAULT', 'management-server', 'volume.stats.interval', '600000', 'Interval (in milliseconds) to report volume statistics', '600000', now(), NULL, NULL);
DROP VIEW IF EXISTS `cloud`.`volume_view`;
CREATE VIEW `cloud`.`volume_view` AS
select
volumes.id,
volumes.uuid,
volumes.name,
volumes.device_id,
volumes.volume_type,
volumes.provisioning_type,
volumes.size,
volumes.min_iops,
volumes.max_iops,
volumes.created,
volumes.state,
volumes.attached,
volumes.removed,
volumes.display_volume,
volumes.format,
volumes.path,
volumes.chain_info,
account.id account_id,
account.uuid account_uuid,
account.account_name account_name,
account.type account_type,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,
domain.path domain_path,
projects.id project_id,
projects.uuid project_uuid,
projects.name project_name,
data_center.id data_center_id,
data_center.uuid data_center_uuid,
data_center.name data_center_name,
data_center.networktype data_center_type,
vm_instance.id vm_id,
vm_instance.uuid vm_uuid,
vm_instance.name vm_name,
vm_instance.state vm_state,
vm_instance.vm_type,
user_vm.display_name vm_display_name,
volume_store_ref.size volume_store_size,
volume_store_ref.download_pct,
volume_store_ref.download_state,
volume_store_ref.error_str,
volume_store_ref.created created_on_store,
disk_offering.id disk_offering_id,
disk_offering.uuid disk_offering_uuid,
disk_offering.name disk_offering_name,
disk_offering.display_text disk_offering_display_text,
disk_offering.use_local_storage,
disk_offering.system_use,
disk_offering.bytes_read_rate,
disk_offering.bytes_write_rate,
disk_offering.iops_read_rate,
disk_offering.iops_write_rate,
disk_offering.cache_mode,
storage_pool.id pool_id,
storage_pool.uuid pool_uuid,
storage_pool.name pool_name,
cluster.id cluster_id,
cluster.name cluster_name,
cluster.uuid cluster_uuid,
cluster.hypervisor_type,
vm_template.id template_id,
vm_template.uuid template_uuid,
vm_template.extractable,
vm_template.type template_type,
vm_template.name template_name,
vm_template.display_text template_display_text,
iso.id iso_id,
iso.uuid iso_uuid,
iso.name iso_name,
iso.display_text iso_display_text,
resource_tags.id tag_id,
resource_tags.uuid tag_uuid,
resource_tags.key tag_key,
resource_tags.value tag_value,
resource_tags.domain_id tag_domain_id,
resource_tags.account_id tag_account_id,
resource_tags.resource_id tag_resource_id,
resource_tags.resource_uuid tag_resource_uuid,
resource_tags.resource_type tag_resource_type,
resource_tags.customer tag_customer,
async_job.id job_id,
async_job.uuid job_uuid,
async_job.job_status job_status,
async_job.account_id job_account_id,
host_pod_ref.id pod_id,
host_pod_ref.uuid pod_uuid,
host_pod_ref.name pod_name,
resource_tag_account.account_name tag_account_name,
resource_tag_domain.uuid tag_domain_uuid,
resource_tag_domain.name tag_domain_name
from
`cloud`.`volumes`
inner join
`cloud`.`account` ON volumes.account_id = account.id
inner join
`cloud`.`domain` ON volumes.domain_id = domain.id
left join
`cloud`.`projects` ON projects.project_account_id = account.id
left join
`cloud`.`data_center` ON volumes.data_center_id = data_center.id
left join
`cloud`.`vm_instance` ON volumes.instance_id = vm_instance.id
left join
`cloud`.`user_vm` ON user_vm.id = vm_instance.id
left join
`cloud`.`volume_store_ref` ON volumes.id = volume_store_ref.volume_id
left join
`cloud`.`disk_offering` ON volumes.disk_offering_id = disk_offering.id
left join
`cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id
left join
`cloud`.`host_pod_ref` ON storage_pool.pod_id = host_pod_ref.id
left join
`cloud`.`cluster` ON storage_pool.cluster_id = cluster.id
left join
`cloud`.`vm_template` ON volumes.template_id = vm_template.id
left join
`cloud`.`vm_template` iso ON iso.id = volumes.iso_id
left join
`cloud`.`resource_tags` ON resource_tags.resource_id = volumes.id
and resource_tags.resource_type = 'Volume'
left join
`cloud`.`async_job` ON async_job.instance_id = volumes.id
and async_job.instance_type = 'Volume'
and async_job.job_status = 0
left join
`cloud`.`account` resource_tag_account ON resource_tag_account.id = resource_tags.account_id
left join
`cloud`.`domain` resource_tag_domain ON resource_tag_domain.id = resource_tags.domain_id;

View File

@ -35,7 +35,8 @@ from marvin.lib.base import (ServiceOffering,
from marvin.lib.common import (get_domain,
get_zone,
get_template,
find_storage_pool_type)
find_storage_pool_type,
get_pod)
from marvin.lib.utils import checkVolumeSize
from marvin.codes import SUCCESS, FAILED, XEN_SERVER
from nose.plugins.attrib import attr
@ -797,3 +798,74 @@ class TestVolumes(cloudstackTestCase):
"Check if volume exists in ListVolumes"
)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_10_list_volumes(self):
# Validate the following:
#
# 1. List the root volume and wait until it has the newly introduced attributes.
#
# 2. Verify the returned attributes have values other than None while the instance is running.
#
list_vm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)[0]
host = Host.list(
self.apiclient,
type='Routing',
virtualmachineid=list_vm.id
)[0]
list_pods = get_pod(self.apiclient, self.zone.id, host.podid)
root_volume = self.wait_for_attributes_and_return_root_vol()
self.assertTrue(hasattr(root_volume, "utilization"))
self.assertTrue(root_volume.utilization is not None)
self.assertTrue(hasattr(root_volume, "virtualsize"))
self.assertTrue(root_volume.virtualsize is not None)
self.assertTrue(hasattr(root_volume, "physicalsize"))
self.assertTrue(root_volume.physicalsize is not None)
self.assertTrue(hasattr(root_volume, "vmname"))
self.assertEqual(root_volume.vmname, list_vm.name)
self.assertTrue(hasattr(root_volume, "clustername"))
self.assertTrue(root_volume.clustername is not None)
self.assertTrue(hasattr(root_volume, "clusterid"))
self.assertTrue(root_volume.clusterid is not None)
self.assertTrue(hasattr(root_volume, "storageid"))
self.assertTrue(root_volume.storageid is not None)
self.assertTrue(hasattr(root_volume, "storage"))
self.assertTrue(root_volume.storage is not None)
self.assertTrue(hasattr(root_volume, "zoneid"))
self.assertEqual(root_volume.zoneid, self.zone.id)
self.assertTrue(hasattr(root_volume, "zonename"))
self.assertEqual(root_volume.zonename, self.zone.name)
self.assertTrue(hasattr(root_volume, "podid"))
self.assertEqual(root_volume.podid, list_pods.id)
self.assertTrue(hasattr(root_volume, "podname"))
self.assertEqual(root_volume.podname, list_pods.name)
def wait_for_attributes_and_return_root_vol(self):
for i in range(60):
list_volume_response = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
if list_volume_response[0].virtualsize is not None:
return list_volume_response[0]
time.sleep(1)

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Disk Offering",
"label.disk.offering.details": "Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)",
"label.disk.read.io": "Disk Read (IO)",
"label.disk.size": "Disk Size",
"label.disk.size.gb": "Disk Size (in GB)",
"label.disk.total": "Disk Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Disk Volume",
"label.disk.write.bytes": "Disk Write (Bytes)",
"label.disk.write.io": "Disk Write (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Disk Offering",
"label.disk.offering.details": "Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)",
"label.disk.read.io": "Disk Read (IO)",
"label.disk.size": "Disk Size",
"label.disk.size.gb": "Disk Size (in GB)",
"label.disk.total": "Disk Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Disk Volume",
"label.disk.write.bytes": "Disk Write (Bytes)",
"label.disk.write.io": "Disk Write (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Festplatten-Schreibrate (IOPS)",
"label.disk.offering": "Festplattenangebot",
"label.disk.offering.details": "Festplattenangebotdetails",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisionierungstyp",
"label.disk.read.bytes": "Festplatte Lesen (Bytes)",
"label.disk.read.io": "Festplatte Lesen (EA)",
"label.disk.size": "Festplattengröße",
"label.disk.size.gb": "Festplattengröße (in GB)",
"label.disk.total": "Gesamtzahl der Festplatten",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Festplattenvolumen",
"label.disk.write.bytes": "Festplatte Schreiben (Bytes)",
"label.disk.write.io": "Festplatte Schreiben (EA)",

View File

@ -667,12 +667,15 @@ var dictionary = {"ICMP.code":"ICMP Code",
"label.disk.iops.write.rate":"Disk Write Rate (IOPS)",
"label.disk.offering":"Disk Offering",
"label.disk.offering.details":"Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype":"Provisioning Type",
"label.disk.read.bytes":"Disk Read (Bytes)",
"label.disk.read.io":"Disk Read (IO)",
"label.disk.size":"Disk Size",
"label.disk.size.gb":"Disk Size (in GB)",
"label.disk.total":"Disk Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume":"Disk Volume",
"label.disk.write.bytes":"Disk Write (Bytes)",
"label.disk.write.io":"Disk Write (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Tasa Escritura de Disco (IOPS)",
"label.disk.offering": "Oferta de Disco",
"label.disk.offering.details": "Detalles de Oferta de Disco",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Tipo de Aprovisionamiento",
"label.disk.read.bytes": "Lectura Disco (Bytes)",
"label.disk.read.io": "Lectura Disco (IO)",
"label.disk.size": "tamaño de disco",
"label.disk.size.gb": "tamaño de disco (en GB)",
"label.disk.total": "disco Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "volumen de disco",
"label.disk.write.bytes": "Escritura Disco (Bytes)",
"label.disk.write.io": "Escritura Disco (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Débit écriture disque (IOPS)",
"label.disk.offering": "Offre de Disque",
"label.disk.offering.details": "Détails offre de disque",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Type de provisionnement",
"label.disk.read.bytes": "Lecture Disque (Octets)",
"label.disk.read.io": "Lecture Disque (IO)",
"label.disk.size": "Capacité disque",
"label.disk.size.gb": "Capacité disque (Go)",
"label.disk.total": "Espace disque total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Volume disque",
"label.disk.write.bytes": "Écriture Disque (Octets)",
"label.disk.write.io": "Écriture Disque (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Írási ráta (IOPS)",
"label.disk.offering": "Merevlemez ajánlat",
"label.disk.offering.details": "Merevlemez ajánlat részletei",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Létrehozás típusa",
"label.disk.read.bytes": "Merevlemez olvasás (Byte)",
"label.disk.read.io": "Merevlemez írás (IO)",
"label.disk.size": "Merevlemez méret",
"label.disk.size.gb": "Merevlemez méret (GB)",
"label.disk.total": "Merevlemez összes",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Merevlemez kötet",
"label.disk.write.bytes": "Merevlemez írás (byte)",
"label.disk.write.io": "Merevlemez írás (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Offerta Disco",
"label.disk.offering.details": "Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Tipo di Provisioning",
"label.disk.read.bytes": "Disk Read (Bytes)",
"label.disk.read.io": "Disk Read (IO)",
"label.disk.size": "Disk Size",
"label.disk.size.gb": "Disk Size (in GB)",
"label.disk.total": "Disk Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Disk Volume",
"label.disk.write.bytes": "Disk Write (Bytes)",
"label.disk.write.io": "Disk Write (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "ディスク書き込み速度 (IOPS)",
"label.disk.offering": "ディスク オファリング",
"label.disk.offering.details": "ディスクオファリングの詳細",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "プロビジョニングの種類",
"label.disk.read.bytes": "ディスク読み取り (バイト)",
"label.disk.read.io": "ディスク読み取り (IO)",
"label.disk.size": "ディスク サイズ",
"label.disk.size.gb": "ディスク サイズ (GB)",
"label.disk.total": "ディスク合計",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "ディスク ボリューム",
"label.disk.write.bytes": "ディスク書き込み (バイト)",
"label.disk.write.io": "ディスク書き込み (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "디스크 제공",
"label.disk.offering.details": "Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)",
"label.disk.read.io": "Disk Read (IO)",
"label.disk.size": "디스크 크기",
"label.disk.size.gb": "디스크 크기(GB 단위)",
"label.disk.total": "디스크 합계",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "디스크 볼륨",
"label.disk.write.bytes": "Disk Write (Bytes)",
"label.disk.write.io": "Disk Write (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Diskskrivehastighet (IOPS)",
"label.disk.offering": "Disktilbud",
"label.disk.offering.details": "Disktilbud detaljer",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisjoneringstype",
"label.disk.read.bytes": "Disk lese (Bytes)",
"label.disk.read.io": "Disk lese (IO)",
"label.disk.size": "Diskstørrelse",
"label.disk.size.gb": "Diskstørrelse (i GB)",
"label.disk.total": "Disk Totalt",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Disk Volum",
"label.disk.write.bytes": "Disk skrive (Bytes)",
"label.disk.write.io": "Disk skrive (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Schrijf snelheid Schijf (IOPS)",
"label.disk.offering": "Schijf Aanbieding",
"label.disk.offering.details": "schijfe offerte gegevens",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning type",
"label.disk.read.bytes": "Schijf lezen (Bytes)",
"label.disk.read.io": "Schijf Lezen (IO)",
"label.disk.size": "Schijf Grootte",
"label.disk.size.gb": "Schijf Grootte (in GB)",
"label.disk.total": "Schijf Totaal",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Schijf Volume",
"label.disk.write.bytes": "Schijf Schrijven (Bytes)",
"label.disk.write.io": "Schijf Schrijven (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Disk Offering",
"label.disk.offering.details": "Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)",
"label.disk.read.io": "Disk Read (IO)",
"label.disk.size": "Wielkość dysku",
"label.disk.size.gb": "Wielkość dysku (w GB)",
"label.disk.total": "Disk Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Disk Volume",
"label.disk.write.bytes": "Disk Write (Bytes)",
"label.disk.write.io": "Disk Write (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Taxa de Escrita no Disco (IOPS)",
"label.disk.offering": "Oferta de Disco",
"label.disk.offering.details": "Detalhes da oferta de disco",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Tipo de Provisionamento",
"label.disk.read.bytes": "Leitura do Disco (Bytes)",
"label.disk.read.io": "Leitura do Disk (I/O)",
"label.disk.size": "Tamanho do Disco",
"label.disk.size.gb": "Tamanho (em GB)",
"label.disk.total": "Disco Total",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Disco",
"label.disk.write.bytes": "Escrita no Disco (Bytes)",
"label.disk.write.io": "Escrita no Disco (I/O)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "Скорость записи диска (IOPS)",
"label.disk.offering": "Услуга дискового пространства",
"label.disk.offering.details": "Disk offering details",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Прочитано с диска (Байт)",
"label.disk.read.io": "Прочитано с диска (IO)",
"label.disk.size": "Размер диска",
"label.disk.size.gb": "Размер диска (в ГБ)",
"label.disk.total": "Всего в дисках",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "Объем диска",
"label.disk.write.bytes": "Записано на диск (Байт)",
"label.disk.write.io": "Записано на диск (IO)",

View File

@ -657,12 +657,15 @@ var dictionary = {
"label.disk.iops.write.rate": "磁盘写入速度(IOPS)",
"label.disk.offering": "磁盘方案",
"label.disk.offering.details": "磁盘方案详情",
"label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "置备类型",
"label.disk.read.bytes": "磁盘读取(字节)",
"label.disk.read.io": "磁盘读取(IO)",
"label.disk.size": "磁盘大小",
"label.disk.size.gb": "磁盘大小(GB)",
"label.disk.total": "磁盘总量",
"label.disk.utilisation":"Utilisation",
"label.disk.virtualsize":"Virtual Size",
"label.disk.volume": "磁盘卷",
"label.disk.write.bytes": "磁盘写入(字节)",
"label.disk.write.io": "磁盘写入(IO)",

View File

@ -577,6 +577,18 @@
            sizegb: {
                label: 'label.metrics.disk.size'
            },
            physicalsize: {
                label: 'label.disk.physicalsize',
                converter: function(args) {
                    if (args == null || args == 0)
                        return "";
                    else
                        return cloudStack.converters.convertBytes(args);
                }
            },
            utilization: {
                label: 'label.disk.utilisation'
            },
            storagetype: {
                label: 'label.metrics.disk.storagetype'
            },
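The new physicalsize column above formats raw byte counts through the existing cloudStack.converters.convertBytes helper and shows nothing when the value is null or zero. A rough Java equivalent of that kind of formatting, purely as an illustrative stand-in for the UI helper (not its actual implementation):

// Illustrative stand-in for the UI's convertBytes helper: formats a raw byte
// count as a human-readable string, returning "" for null/zero just as the
// converter above does.
public final class BytesFormatExample {
    static String convertBytes(Long bytes) {
        if (bytes == null || bytes == 0) {
            return "";
        }
        final String[] units = {"bytes", "KB", "MB", "GB", "TB"};
        double value = bytes;
        int unit = 0;
        while (value >= 1024 && unit < units.length - 1) {
            value /= 1024;
            unit++;
        }
        return String.format("%.2f %s", value, units[unit]);
    }

    public static void main(String[] args) {
        System.out.println(convertBytes(536870912L)); // 512.00 MB
        System.out.println(convertBytes(0L));         // prints an empty line
    }
}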

View File

@ -1752,7 +1752,7 @@
                if (isAdmin()) {
                    hiddenFields = [];
                } else {
                    hiddenFields = ['storage', 'hypervisor'];
                    hiddenFields = ['storage', 'hypervisor', 'virtualsize', 'physicalsize', 'utilization', 'clusterid', 'clustername'];
                }
                return hiddenFields;
            },
@ -1817,6 +1817,33 @@
                        return cloudStack.converters.convertBytes(args);
                    }
                },
                clusterid: {
                    label: 'label.cluster'
                },
                clustername: {
                    label: 'label.cluster.name'
                },
                physicalsize: {
                    label: 'label.disk.physicalsize',
                    converter: function(args) {
                        if (args == null || args == 0)
                            return "";
                        else
                            return cloudStack.converters.convertBytes(args);
                    }
                },
                utilization: {
                    label: 'label.disk.utilisation'
                },
                virtualsize: {
                    label: 'label.disk.virtualsize',
                    converter: function(args) {
                        if (args == null || args == 0)
                            return "";
                        else
                            return cloudStack.converters.convertBytes(args);
                    }
                },
                miniops: {
                    label: 'label.disk.iops.min',
                    converter: function(args) {

View File

@ -24,7 +24,9 @@ import org.apache.log4j.Logger;
import com.vmware.vim25.DatastoreHostMount;
import com.vmware.vim25.DatastoreSummary;
import com.vmware.vim25.FileInfo;
import com.vmware.vim25.FileQueryFlags;
import com.vmware.vim25.HostDatastoreBrowserSearchResults;
import com.vmware.vim25.HostDatastoreBrowserSearchSpec;
import com.vmware.vim25.HostMountInfo;
import com.vmware.vim25.ManagedObjectReference;
import com.vmware.vim25.ObjectContent;
@ -339,6 +341,36 @@ public class DatastoreMO extends BaseMO {
        return false;
    }

    // Returns the on-disk size (in bytes) of the given file, or 0 if it is not found on the datastore.
    public long fileDiskSize(String fileFullPath) throws Exception {
        long size = 0;
        DatastoreFile file = new DatastoreFile(fileFullPath);
        DatastoreFile dirFile = new DatastoreFile(file.getDatastoreName(), file.getDir());
        HostDatastoreBrowserMO browserMo = getHostDatastoreBrowserMO();
        HostDatastoreBrowserSearchSpec searchSpec = new HostDatastoreBrowserSearchSpec();
        FileQueryFlags fqf = new FileQueryFlags();
        fqf.setFileSize(true);
        fqf.setFileOwner(true);
        fqf.setModification(true);
        searchSpec.setDetails(fqf);
        searchSpec.setSearchCaseInsensitive(false);
        searchSpec.getMatchPattern().add(file.getFileName());
        s_logger.debug("Search file " + file.getFileName() + " on " + dirFile.getPath()); //ROOT-2.vmdk, [3ecf7a579d3b3793b86d9d019a97ae27] s-2-VM
        HostDatastoreBrowserSearchResults result = browserMo.searchDatastore(dirFile.getPath(), searchSpec);
        if (result != null) {
            List<FileInfo> info = result.getFile();
            for (FileInfo fi : info) {
                if (file.getFileName().equals(fi.getPath())) {
                    s_logger.debug("File found = " + fi.getPath() + ", size=" + fi.getFileSize());
                    return fi.getFileSize();
                }
            }
        }
        s_logger.debug("File " + fileFullPath + " does not exist on datastore");
        return size;
    }

    public boolean folderExists(String folderParentDatastorePath, String folderName) throws Exception {
        HostDatastoreBrowserMO browserMo = getHostDatastoreBrowserMO();
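fileDiskSize() takes a full datastore path of the form shown in the debug comment above ("[datastore-name] directory/ROOT-2.vmdk") and searches the containing directory for the file name. A self-contained sketch of how such a path breaks down into the pieces the search uses; this is a simplified, illustrative stand-in for DatastoreFile's parsing, not its actual implementation:

// Illustrative only: split a full datastore path into datastore name,
// directory, and file name (the match pattern used in the search).
public final class DatastorePathExample {
    public static void main(String[] args) {
        String fullPath = "[3ecf7a579d3b3793b86d9d019a97ae27] s-2-VM/ROOT-2.vmdk";
        String datastoreName = fullPath.substring(1, fullPath.indexOf(']'));
        String relative = fullPath.substring(fullPath.indexOf(']') + 2);
        int slash = relative.lastIndexOf('/');
        String dir = slash >= 0 ? relative.substring(0, slash) : "";
        String fileName = relative.substring(slash + 1);
        System.out.println(datastoreName); // 3ecf7a579d3b3793b86d9d019a97ae27
        System.out.println(dir);           // s-2-VM
        System.out.println(fileName);      // ROOT-2.vmdk
    }
}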

View File

@ -2376,6 +2376,59 @@ public class VirtualMachineMO extends BaseMO {
        return null;
    }

    // Returns a pair of the VirtualDisk and its device bus name (ide0:0, etc.)
    public Pair<VirtualDisk, String> getDiskDevice(String vmdkDatastorePath, boolean matchExactly) throws Exception {
        List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device");
        DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath);
        String srcBaseName = dsSrcFile.getFileBaseName();
        String trimmedSrcBaseName = VmwareHelper.trimSnapshotDeltaPostfix(srcBaseName);
        if (matchExactly) {
            s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath + " with base name: " + srcBaseName);
        } else {
            s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath + " with trimmed base name: " + trimmedSrcBaseName);
        }
        if (devices != null && devices.size() > 0) {
            for (VirtualDevice device : devices) {
                if (device instanceof VirtualDisk) {
                    s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber());
                    VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking();
                    if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) {
                        VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo;
                        do {
                            s_logger.info("Test against disk backing : " + diskBackingInfo.getFileName());
                            DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName());
                            String backingBaseName = dsBackingFile.getFileBaseName();
                            if (matchExactly) {
                                if (backingBaseName.equalsIgnoreCase(srcBaseName)) {
                                    String deviceNumbering = getDeviceBusName(devices, device);
                                    s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);
                                    return new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering);
                                }
                            } else {
                                if (backingBaseName.contains(trimmedSrcBaseName)) {
                                    String deviceNumbering = getDeviceBusName(devices, device);
                                    s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);
                                    return new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering);
                                }
                            }
                            diskBackingInfo = diskBackingInfo.getParent();
                        } while (diskBackingInfo != null);
                    }
                }
            }
        }
        return null;
    }
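getDiskDevice() distinguishes an exact base-name match from a loose match that tolerates the snapshot-delta postfix vSphere appends to delta disk backing files. A self-contained sketch of those two matching modes; the trim helper below is a simplified stand-in for VmwareHelper.trimSnapshotDeltaPostfix, not its actual implementation:

// Illustrative only: exact match compares the full base name, loose match
// trims an assumed "-NNNNNN" snapshot-delta postfix before comparing.
public final class DiskMatchExample {
    static String trimSnapshotDeltaPostfix(String baseName) {
        // e.g. "ROOT-2-000001" -> "ROOT-2" (simplified stand-in)
        return baseName.replaceAll("-\\d{6}$", "");
    }

    static boolean matches(String backingBaseName, String srcBaseName, boolean matchExactly) {
        return matchExactly
                ? backingBaseName.equalsIgnoreCase(srcBaseName)
                : backingBaseName.contains(trimSnapshotDeltaPostfix(srcBaseName));
    }

    public static void main(String[] args) {
        System.out.println(matches("ROOT-2-000001", "ROOT-2", false)); // true
        System.out.println(matches("ROOT-2-000001", "ROOT-2", true));  // false
    }
}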
    public String getDiskCurrentTopBackingFileInChain(String deviceBusName) throws Exception {
        List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device");
        if (devices != null && devices.size() > 0) {