Human readable sizes in logs (#4207)

This PR adds human-readable byte sizes to the management server logs, agent logs, and usage records. A non-dynamic global setting (display.human.readable.sizes) is added to switch this feature on and off. The setting is sent to the agent on connection and is only read from the database when the management server starts up. It is kept in memory via a static field on the NumbersUtil class and is available throughout the codebase.

Instead of seeing things like:
2020-07-23 15:31:58,593 DEBUG [c.c.a.t.Request] (AgentManager-Handler-12:null) (logid:) Seq 8-1863645820801253428: Processing: { Ans: , MgmtId: 52238089807, via: 8, Ver: v1, Flags: 10, [{"com.cloud.agent.api.NetworkUsageAnswer":{"routerName":"r-224-VM","bytesSent":"106496","bytesReceived":"0","result":"true","details":"","wait":"0",}}] }

The KB, MB, and GB values will be printed out:

2020-07-23 15:31:58,593 DEBUG [c.c.a.t.Request] (AgentManager-Handler-12:null) (logid:) Seq 8-1863645820801253428: Processing: { Ans: , MgmtId: 52238089807, via: 8, Ver: v1, Flags: 10, [{"com.cloud.agent.api.NetworkUsageAnswer":{"routerName":"r-224-VM","bytesSent":"(104.00 KB) 106496","bytesReceived":"(0 bytes) 0","result":"true","details":"","wait":"0",}}] }

FS: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Human+Readable+Byte+sizes
This commit is contained in:
Spaceman1984 2020-08-13 12:25:16 +02:00 committed by GitHub
parent 55a5470da6
commit b586eb22f1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
59 changed files with 626 additions and 193 deletions

View File

@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.naming.ConfigurationException;
import com.cloud.utils.NumbersUtil;
import org.apache.cloudstack.agent.lb.SetupMSListAnswer;
import org.apache.cloudstack.agent.lb.SetupMSListCommand;
import org.apache.cloudstack.ca.PostCertificateRenewalCommand;
@ -809,6 +810,8 @@ public class Agent implements HandlerFactory, IAgentControl {
public void processReadyCommand(final Command cmd) {
final ReadyCommand ready = (ReadyCommand)cmd;
// Set human readable sizes;
NumbersUtil.enableHumanReadableSizes = ready.getEnableHumanReadableSizes();
s_logger.info("Processing agent ready command, agent id = " + ready.getHostId());
if (ready.getHostId() != null) {

View File

@ -164,14 +164,23 @@ public class ResizeVolumeCmd extends BaseAsyncCmd implements UserCmd {
@Override
public String getEventDescription() {
    // Describes the resize event for the event log. The size suffix is only
    // appended when an explicit target size was requested; a null size means
    // the resize is driven by a disk offering instead.
    final String volumeDescription = "Volume Id: " + this._uuidMgr.getUuid(Volume.class, getEntityId());
    if (getSize() != null) {
        return volumeDescription + " to size " + getSize() + " GB";
    }
    return volumeDescription;
}
@Override
public void execute() throws ResourceAllocationException {
Volume volume = null;
try {
CallContext.current().setEventDetails("Volume Id: " + this._uuidMgr.getUuid(Volume.class, getEntityId()) + " to size " + getSize() + "G");
if (size != null) {
CallContext.current().setEventDetails("Volume Id: " + this._uuidMgr.getUuid(Volume.class, getEntityId()) + " to size " + getSize() + " GB");
} else {
CallContext.current().setEventDetails("Volume Id: " + this._uuidMgr.getUuid(Volume.class, getEntityId()));
}
volume = _volumeService.resizeVolume(this);
} catch (InvalidParameterValueException ex) {
s_logger.info(ex.getMessage());

View File

@ -33,15 +33,17 @@ public class ReadyCommand extends Command {
private List<String> msHostList;
private String lbAlgorithm;
private Long lbCheckInterval;
private Boolean enableHumanReadableSizes;
public ReadyCommand(Long dcId) {
super();
this.dcId = dcId;
}
/**
 * Creates a ready command for the given zone and host with human-readable
 * size formatting disabled. Kept for backward compatibility with callers
 * that predate the display.human.readable.sizes setting.
 */
public ReadyCommand(final Long dcId, final Long hostId) {
    this(dcId, hostId, false);
}

/**
 * Creates a ready command for the given zone and host.
 *
 * @param dcId data center (zone) id
 * @param hostId id of the host this command targets
 * @param enableHumanReadableSizes whether the agent should render byte
 *        sizes in human readable form in its logs
 */
public ReadyCommand(final Long dcId, final Long hostId, final boolean enableHumanReadableSizes) {
    this(dcId);
    this.hostId = hostId;
    this.enableHumanReadableSizes = enableHumanReadableSizes;
}
public void setDetails(String details) {
@ -88,4 +90,8 @@ public class ReadyCommand extends Command {
public void setLbCheckInterval(Long lbCheckInterval) {
this.lbCheckInterval = lbCheckInterval;
}
/**
 * Returns whether the agent should log byte sizes in human readable form.
 * May be null for commands built through constructors that do not set it.
 */
public Boolean getEnableHumanReadableSizes() {
return enableHumanReadableSizes;
}
}

View File

@ -32,6 +32,7 @@ import java.util.List;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import com.cloud.utils.HumanReadableJson;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@ -417,6 +418,7 @@ public class Request {
assert false : "More gson errors on " + buff.toString();
return "";
}
content = new StringBuilder(HumanReadableJson.getHumanReadableBytesJson(content.toString()));
if (content.length() <= (1 + _cmds.length * 3)) {
return null;
}

View File

@ -52,6 +52,8 @@ import com.cloud.utils.Pair;
import com.cloud.utils.UriUtils;
import com.cloud.utils.net.Proxy;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
/**
* Download a template file using HTTP
*
@ -205,7 +207,7 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
) {
out.seek(localFileSize);
s_logger.info("Starting download from " + downloadUrl + " to " + toFile + " remoteSize=" + remoteSize + " , max size=" + maxTemplateSizeInBytes);
s_logger.info("Starting download from " + downloadUrl + " to " + toFile + " remoteSize=" + toHumanReadableSize(remoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes));
if (copyBytes(file, in, out)) return 0;
@ -268,14 +270,14 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
String downloaded = "(incomplete download)";
if (totalBytes >= remoteSize) {
status = Status.DOWNLOAD_FINISHED;
downloaded = "(download complete remote=" + remoteSize + "bytes)";
downloaded = "(download complete remote=" + toHumanReadableSize(remoteSize) + " bytes)";
}
errorString = "Downloaded " + totalBytes + " bytes " + downloaded;
errorString = "Downloaded " + toHumanReadableSize(totalBytes) + " bytes " + downloaded;
}
private boolean canHandleDownloadSize() {
if (remoteSize > maxTemplateSizeInBytes) {
s_logger.info("Remote size is too large: " + remoteSize + " , max=" + maxTemplateSizeInBytes);
s_logger.info("Remote size is too large: " + toHumanReadableSize(remoteSize) + " , max=" + toHumanReadableSize(maxTemplateSizeInBytes));
status = Status.UNRECOVERABLE_ERROR;
errorString = "Download file size is too large";
return false;
@ -344,7 +346,7 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
long localFileSize = 0;
if (file.exists() && resume) {
localFileSize = file.length();
s_logger.info("Resuming download to file (current size)=" + localFileSize);
s_logger.info("Resuming download to file (current size)=" + toHumanReadableSize(localFileSize));
}
return localFileSize;
}

View File

@ -45,6 +45,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import static com.cloud.utils.StringUtils.join;
import static java.util.Arrays.asList;
@ -168,7 +169,7 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
return 0;
}
LOGGER.info("Starting download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " and size " + remoteSize + " bytes");
LOGGER.info("Starting download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " and size " + toHumanReadableSize(remoteSize) + " bytes");
// Time the upload starts.
final Date start = new Date();
@ -197,7 +198,7 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
// Record the amount of bytes transferred.
totalBytes += progressEvent.getBytesTransferred();
LOGGER.trace("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + ((new Date().getTime() - start.getTime()) / 1000) + " seconds");
LOGGER.trace("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + ((new Date().getTime() - start.getTime()) / 1000) + " seconds");
if (progressEvent.getEventType() == ProgressEventType.TRANSFER_STARTED_EVENT) {
status = Status.IN_PROGRESS;
@ -222,9 +223,9 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
downloadTime = new Date().getTime() - start.getTime();
if (status == Status.DOWNLOAD_FINISHED) {
LOGGER.info("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed successfully!");
LOGGER.info("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + (downloadTime / 1000) + " seconds, completed successfully!");
} else {
LOGGER.warn("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString());
LOGGER.warn("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString());
}
// Close input stream

View File

@ -37,6 +37,8 @@ import com.cloud.storage.StorageLayer;
import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.utils.NumbersUtil;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class TemplateLocation {
private static final Logger s_logger = Logger.getLogger(TemplateLocation.class);
public final static String Filename = "template.properties";
@ -199,7 +201,7 @@ public class TemplateLocation {
if (!checkFormatValidity(newInfo)) {
s_logger.warn("Format is invalid");
s_logger.debug("Format: " + newInfo.format + " size: " + newInfo.size + " virtualsize: " + newInfo.virtualSize + " filename: " + newInfo.filename);
s_logger.debug("Format: " + newInfo.format + " size: " + toHumanReadableSize(newInfo.size) + " virtualsize: " + toHumanReadableSize(newInfo.virtualSize) + " filename: " + newInfo.filename);
s_logger.debug("format, filename cannot be null and size, virtual size should be > 0 ");
return false;
}

View File

@ -37,6 +37,8 @@ import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
import com.cloud.utils.component.AdapterBase;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class VmdkProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(VmdkProcessor.class);
@ -114,7 +116,7 @@ public class VmdkProcessor extends AdapterBase implements Processor {
throw new InternalErrorException(msg);
}
s_logger.debug("vmdk file had size="+virtualSize);
s_logger.debug("vmdk file had size=" + toHumanReadableSize(virtualSize));
return virtualSize;
}

View File

@ -38,6 +38,7 @@ import java.util.concurrent.locks.ReentrantLock;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.utils.NumbersUtil;
import org.apache.cloudstack.agent.lb.IndirectAgentLB;
import org.apache.cloudstack.ca.CAManager;
import org.apache.cloudstack.framework.config.ConfigKey;
@ -585,7 +586,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
}
final Long dcId = host.getDataCenterId();
final ReadyCommand ready = new ReadyCommand(dcId, host.getId());
final ReadyCommand ready = new ReadyCommand(dcId, host.getId(), NumbersUtil.enableHumanReadableSizes);
final Answer answer = easySend(hostId, ready);
if (answer == null || !answer.getResult()) {
// this is tricky part for secondary storage
@ -1090,7 +1091,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
final HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup);
if (host != null) {
ready = new ReadyCommand(host.getDataCenterId(), host.getId());
ready = new ReadyCommand(host.getDataCenterId(), host.getId(), NumbersUtil.enableHumanReadableSizes);
if (!indirectAgentLB.compareManagementServerList(host.getId(), host.getDataCenterId(), agentMSHostList)) {
final List<String> newMSList = indirectAgentLB.getManagementServerList(host.getId(), host.getDataCenterId(), null);

View File

@ -139,6 +139,7 @@ import com.cloud.vm.VmWorkSerializer;
import com.cloud.vm.VmWorkTakeVolumeSnapshot;
import com.cloud.vm.dao.UserVmCloneSettingDao;
import com.cloud.vm.dao.UserVmDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
@ -714,10 +715,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (rootDisksize != null) {
rootDisksize = rootDisksize * 1024 * 1024 * 1024;
if (rootDisksize > size) {
s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
s_logger.debug("Using root disk size of " + toHumanReadableSize(rootDisksize) + " Bytes for volume " + name);
size = rootDisksize;
} else {
s_logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template");
s_logger.debug("Using root disk size of " + toHumanReadableSize(size) + " Bytes for volume " + name + "since specified root disk size of " + toHumanReadableSize(rootDisksize) + " Bytes is smaller than template");
}
}

View File

@ -95,6 +95,8 @@ import com.cloud.utils.mgmt.JmxUtil;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.storage.dao.VolumeDao;
import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson;
public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, ClusterManagerListener, Configurable {
// Advanced
public static final ConfigKey<Long> JobExpireMinutes = new ConfigKey<Long>("Advanced", Long.class, "job.expire.minutes", "1440",
@ -257,6 +259,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
public void completeAsyncJob(final long jobId, final Status jobStatus, final int resultCode, final String resultObject) {
if (s_logger.isDebugEnabled()) {
String resultObj = obfuscatePassword(resultObject, HidePassword.value());
resultObj = convertHumanReadableJson(resultObj);
s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObj);
}
@ -343,6 +346,15 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
_messageBus.publish(null, AsyncJob.Topics.JOB_STATE, PublishScope.GLOBAL, jobId);
}
/**
 * Rewrites the JSON portion of an async-job result so that byte-size fields
 * also carry a human readable representation. The leading non-JSON prefix
 * (everything before the first '{') is preserved unchanged.
 */
private String convertHumanReadableJson(String resultObj) {
    // Only results that contain both a '/' and a JSON object are rewritten;
    // anything else (including null) is returned untouched.
    if (resultObj == null || !resultObj.contains("/") || !resultObj.contains("{")) {
        return resultObj;
    }
    final int jsonStart = resultObj.indexOf("{");
    final String prefix = resultObj.substring(0, jsonStart);
    return prefix + getHumanReadableBytesJson(resultObj.substring(jsonStart));
}
@Override
@DB
public void updateAsyncJobStatus(final long jobId, final int processStatus, final String resultObject) {

View File

@ -22,6 +22,7 @@ import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.utils.NumbersUtil;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.log4j.Logger;
@ -145,7 +146,7 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner {
}
}
s_logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, ram_requested));
s_logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, NumbersUtil.toHumanReadableSize(ram_requested)));
return null;
}

View File

@ -51,6 +51,8 @@ import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@ResourceWrapper(handles = BackupSnapshotCommand.class)
public final class LibvirtBackupSnapshotCommandWrapper extends CommandWrapper<BackupSnapshotCommand, Answer, LibvirtComputingResource> {
@ -121,7 +123,7 @@ public final class LibvirtBackupSnapshotCommandWrapper extends CommandWrapper<Ba
bos.write(buf, 0, bytes);
offset += bytes;
}
s_logger.debug("Completed backing up RBD snapshot " + snapshotName + " to " + snapshotDestPath + ". Bytes written: " + offset);
s_logger.debug("Completed backing up RBD snapshot " + snapshotName + " to " + snapshotDestPath + ". Bytes written: " + toHumanReadableSize(offset));
}catch(final IOException ex)
{
s_logger.error("BackupSnapshotAnswer:Exception:"+ ex.getMessage());

View File

@ -39,6 +39,8 @@ import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
/*
* Uses a local script now, eventually support for virStorageVolResize() will maybe work on qcow2 and lvm and we can do this in libvirt calls
*/
@ -59,7 +61,7 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
if ( currentSize == newSize) {
// nothing to do
s_logger.info("No need to resize volume: current size " + currentSize + " is same as new size " + newSize);
s_logger.info("No need to resize volume: current size " + toHumanReadableSize(currentSize) + " is same as new size " + toHumanReadableSize(newSize));
return new ResizeVolumeAnswer(command, true, "success", currentSize);
}
@ -80,7 +82,7 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
s_logger.debug("Volume " + path + " is on a RBD storage pool. No need to query for additional information.");
}
s_logger.debug("Resizing volume: " + path + "," + currentSize + "," + newSize + "," + type + "," + vmInstanceName + "," + shrinkOk);
s_logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk);
/* libvirt doesn't support resizing (C)LVM devices, and corrupts QCOW2 in some scenarios, so we have to do these via Bash script */
if (pool.getType() != StoragePoolType.CLVM && vol.getFormat() != PhysicalDiskFormat.QCOW2) {
@ -127,7 +129,7 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid());
pool.refresh();
final long finalSize = pool.getPhysicalDisk(volid).getVirtualSize();
s_logger.debug("after resize, size reports as " + finalSize + ", requested " + newSize);
s_logger.debug("after resize, size reports as: " + toHumanReadableSize(finalSize) + ", requested: " + toHumanReadableSize(newSize));
return new ResizeVolumeAnswer(command, true, "success", finalSize);
} catch (final CloudRuntimeException e) {
final String error = "Failed to resize volume: " + e.getMessage();

View File

@ -18,6 +18,7 @@
*/
package com.cloud.hypervisor.kvm.storage;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import static com.cloud.utils.storage.S3.S3Utils.putFile;
import java.io.File;
@ -239,11 +240,11 @@ public class KVMStorageProcessor implements StorageProcessor {
final VolumeObjectTO volume = (VolumeObjectTO)destData;
// pass along volume's target size if it's bigger than template's size, for storage types that copy template rather than cloning on deploy
if (volume.getSize() != null && volume.getSize() > tmplVol.getVirtualSize()) {
s_logger.debug("Using configured size of " + volume.getSize());
s_logger.debug("Using configured size of " + toHumanReadableSize(volume.getSize()));
tmplVol.setSize(volume.getSize());
tmplVol.setVirtualSize(volume.getSize());
} else {
s_logger.debug("Using template's size of " + tmplVol.getVirtualSize());
s_logger.debug("Using template's size of " + toHumanReadableSize(tmplVol.getVirtualSize()));
}
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
} else if (destData instanceof TemplateObjectTO) {
@ -340,11 +341,11 @@ public class KVMStorageProcessor implements StorageProcessor {
/* Copy volume to primary storage */
if (size > templateVol.getSize()) {
s_logger.debug("Overriding provided template's size with new size " + size);
s_logger.debug("Overriding provided template's size with new size " + toHumanReadableSize(size));
templateVol.setSize(size);
templateVol.setVirtualSize(size);
} else {
s_logger.debug("Using templates disk size of " + templateVol.getVirtualSize() + "since size passed was " + size);
s_logger.debug("Using templates disk size of " + toHumanReadableSize(templateVol.getVirtualSize()) + "since size passed was " + toHumanReadableSize(size));
}
final KVMPhysicalDisk primaryVol = storagePoolMgr.copyPhysicalDisk(templateVol, volUuid, primaryPool, timeout);
@ -942,7 +943,7 @@ public class KVMStorageProcessor implements StorageProcessor {
size = snapFile.length();
}
s_logger.debug("Finished backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile + " Snapshot size: " + size);
s_logger.debug("Finished backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile + " Snapshot size: " + toHumanReadableSize(size));
} catch (final FileNotFoundException e) {
s_logger.error("Failed to open " + snapshotDestPath + ". The error was: " + e.getMessage());
return new CopyCmdAnswer(e.toString());
@ -1398,7 +1399,7 @@ public class KVMStorageProcessor implements StorageProcessor {
* Create full clone volume from VM snapshot
*/
protected KVMPhysicalDisk createFullCloneVolume(MigrationOptions migrationOptions, VolumeObjectTO volume, KVMStoragePool primaryPool, PhysicalDiskFormat format) {
s_logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + volume.getSize() + " and format: " + format);
s_logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + toHumanReadableSize(volume.getSize()) + " and format: " + format);
return primaryPool.createPhysicalDisk(volume.getUuid(), format, volume.getProvisioningType(), volume.getSize());
}

View File

@ -65,6 +65,8 @@ import com.cloud.storage.StorageLayer;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class LibvirtStorageAdaptor implements StorageAdaptor {
private static final Logger s_logger = Logger.getLogger(LibvirtStorageAdaptor.class);
private StorageLayer _storageLayer;
@ -493,9 +495,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
pool.setAvailable(storage.getInfo().available);
s_logger.debug("Succesfully refreshed pool " + uuid +
" Capacity: " + storage.getInfo().capacity +
" Used: " + storage.getInfo().allocation +
" Available: " + storage.getInfo().available);
" Capacity: " + toHumanReadableSize(storage.getInfo().capacity) +
" Used: " + toHumanReadableSize(storage.getInfo().allocation) +
" Available: " + toHumanReadableSize(storage.getInfo().available));
return pool;
} catch (LibvirtException e) {
@ -730,7 +732,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
s_logger.info("Attempting to create volume " + name + " (" + pool.getType().toString() + ") in pool "
+ pool.getUuid() + " with size " + size);
+ pool.getUuid() + " with size " + toHumanReadableSize(size));
switch (pool.getType()) {
case RBD:
@ -962,7 +964,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) {
s_logger.info("Creating volume " + name + " from template " + template.getName() + " in pool " + destPool.getUuid() +
" (" + destPool.getType().toString() + ") with size " + size);
" (" + destPool.getType().toString() + ") with size " + toHumanReadableSize(size));
KVMPhysicalDisk disk = null;
@ -1099,7 +1101,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
if (srcImage.isOldFormat()) {
/* The source image is RBD format 1, we have to do a regular copy */
s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() +
" is RBD format 1. We have to perform a regular copy (" + disk.getVirtualSize() + " bytes)");
" is RBD format 1. We have to perform a regular copy (" + toHumanReadableSize(disk.getVirtualSize()) + " bytes)");
rbd.create(disk.getName(), disk.getVirtualSize(), rbdFeatures, rbdOrder);
RbdImage destImage = rbd.open(disk.getName());
@ -1145,7 +1147,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
RbdImage diskImage = rbd.open(disk.getName());
diskImage.resize(disk.getVirtualSize());
rbd.close(diskImage);
s_logger.debug("Resized " + disk.getName() + " to " + disk.getVirtualSize());
s_logger.debug("Resized " + disk.getName() + " to " + toHumanReadableSize(disk.getVirtualSize()));
}
}
@ -1251,7 +1253,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
String sourcePath = disk.getPath();
KVMPhysicalDisk newDisk;
s_logger.debug("copyPhysicalDisk: disk size:" + disk.getSize() + ", virtualsize:" + disk.getVirtualSize()+" format:"+disk.getFormat());
s_logger.debug("copyPhysicalDisk: disk size:" + toHumanReadableSize(disk.getSize()) + ", virtualsize:" + toHumanReadableSize(disk.getVirtualSize())+" format:"+disk.getFormat());
if (destPool.getType() != StoragePoolType.RBD) {
if (disk.getFormat() == PhysicalDiskFormat.TAR) {
newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, Storage.ProvisioningType.THIN, disk.getVirtualSize());
@ -1345,7 +1347,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
RbdImageInfo rbdInfo = image.stat();
newDisk.setSize(rbdInfo.size);
newDisk.setVirtualSize(rbdInfo.size);
s_logger.debug("After copy the resulting RBD image " + rbdDestPath + " is " + rbdInfo.size + " bytes long");
s_logger.debug("After copy the resulting RBD image " + rbdDestPath + " is " + toHumanReadableSize(rbdInfo.size) + " bytes long");
rbd.close(image);
r.ioCtxDestroy(io);

View File

@ -149,6 +149,8 @@ import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo;
import com.vmware.vim25.VirtualMachineConfigSummary;
import com.vmware.vim25.VirtualMachineRuntimeInfo;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Configurable {
private static final Logger s_logger = Logger.getLogger(VMwareGuru.class);
@ -914,7 +916,7 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
VolumeVO vol = _volumeDao.findByUuidIncludingRemoved(volId);
usedVols.put(backedUpVol.getUuid(), true);
map.put(disk, vol);
s_logger.debug("VM restore mapping for disk " + disk.getBacking() + " (capacity: " + disk.getCapacityInBytes() + ") with volume ID" + vol.getId());
s_logger.debug("VM restore mapping for disk " + disk.getBacking() + " (capacity: " + toHumanReadableSize(disk.getCapacityInBytes()) + ") with volume ID" + vol.getId());
}
}
}
@ -1011,7 +1013,7 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
VirtualDisk restoredDisk = findRestoredVolume(volumeInfo, vmRestored);
String diskPath = vmRestored.getVmdkFileBaseName(restoredDisk);
s_logger.debug("Restored disk size=" + restoredDisk.getCapacityInKB() + " path=" + diskPath);
s_logger.debug("Restored disk size=" + toHumanReadableSize(restoredDisk.getCapacityInKB()) + " path=" + diskPath);
// Detach restored VM disks
vmRestored.detachAllDisks();

View File

@ -341,6 +341,9 @@ import com.vmware.vim25.VmfsDatastoreInfo;
import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec;
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec;
import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer {
private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
public static final String VMDK_EXTENSION = ".vmdk";
@ -1710,7 +1713,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected StartAnswer execute(StartCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource StartCommand: " + _gson.toJson(cmd));
s_logger.info("Executing resource StartCommand: " + getHumanReadableBytesJson(_gson.toJson(cmd)));
}
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
@ -2624,7 +2627,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
*/
protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) {
if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize) {
s_logger.info("Video card memory was set " + videoCard.getVideoRamSizeInKB().longValue() + "kb instead of " + svgaVmramSize + "kb");
s_logger.info("Video card memory was set " + toHumanReadableSize(videoCard.getVideoRamSizeInKB().longValue()) + " instead of " + toHumanReadableSize(svgaVmramSize));
configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec);
}
}
@ -3924,7 +3927,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected Answer execute(RebootCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource RebootCommand: " + _gson.toJson(cmd));
s_logger.info("Executing resource RebootCommand: " + getHumanReadableBytesJson(_gson.toJson(cmd)));
}
boolean toolsInstallerMounted = false;
@ -4047,7 +4050,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected Answer execute(PrepareForMigrationCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource PrepareForMigrationCommand: " + _gson.toJson(cmd));
s_logger.info("Executing resource PrepareForMigrationCommand: " + getHumanReadableBytesJson(_gson.toJson(cmd)));
}
VirtualMachineTO vm = cmd.getVirtualMachine();
@ -4274,7 +4277,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected Answer execute(MigrateCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource MigrateCommand: " + _gson.toJson(cmd));
s_logger.info("Executing resource MigrateCommand: " + getHumanReadableBytesJson(_gson.toJson(cmd)));
}
final String vmName = cmd.getVmName();
@ -4317,7 +4320,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected Answer execute(MigrateWithStorageCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource MigrateWithStorageCommand: " + _gson.toJson(cmd));
s_logger.info("Executing resource MigrateWithStorageCommand: " + getHumanReadableBytesJson(_gson.toJson(cmd)));
}
VirtualMachineTO vmTo = cmd.getVirtualMachine();
@ -5308,7 +5311,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if (s_logger.isDebugEnabled()) {
s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype()
+ ", capacity: " + capacity + ", free: " + free + ", used: " + used);
+ ", capacity: " + toHumanReadableSize(capacity) + ", free: " + toHumanReadableSize(free) + ", used: " + toHumanReadableSize(used));
}
if (summary.getCapacity() <= 0) {
@ -6768,7 +6771,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
@Override
public Answer execute(DestroyCommand cmd) {
if (s_logger.isInfoEnabled()) {
s_logger.info("Executing resource DestroyCommand to evict template from storage pool: " + _gson.toJson(cmd));
s_logger.info("Executing resource DestroyCommand to evict template from storage pool: " + getHumanReadableBytesJson(_gson.toJson(cmd)));
}
try {

View File

@ -164,6 +164,8 @@ import com.xensource.xenapi.VLAN;
import com.xensource.xenapi.VM;
import com.xensource.xenapi.XenAPIObject;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
/**
* CitrixResourceBase encapsulates the calls to the XenServer Xapi process to
* perform the required functionalities for CloudStack.
@ -1799,7 +1801,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
cmd.setDom0MinMemory(dom0Ram);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Total Ram: " + ram + " dom0 Ram: " + dom0Ram);
s_logger.debug("Total Ram: " + toHumanReadableSize(ram) + " dom0 Ram: " + toHumanReadableSize(dom0Ram));
}
PIF pif = PIF.getByUuid(conn, _host.getPrivatePif());
@ -3088,7 +3090,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
// stability
if (dynamicMaxRam > staticMax) { // XS contraint that dynamic max <=
// static max
s_logger.warn("dynamixMax " + dynamicMaxRam + " cant be greater than static max " + staticMax + ", can lead to stability issues. Setting static max as much as dynamic max ");
s_logger.warn("dynamic max " + toHumanReadableSize(dynamicMaxRam) + " cant be greater than static max " + toHumanReadableSize(staticMax) + ", this can lead to stability issues. Setting static max as much as dynamic max ");
return dynamicMaxRam;
}
return staticMax;
@ -3102,7 +3104,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
if (dynamicMinRam < recommendedValue) { // XS contraint that dynamic min
// > static min
s_logger.warn("Vm is set to dynamixMin " + dynamicMinRam + " less than the recommended static min " + recommendedValue + ", could lead to stability issues");
s_logger.warn("Vm ram is set to dynamic min " + toHumanReadableSize(dynamicMinRam) + " and is less than the recommended static min " + toHumanReadableSize(recommendedValue) + ", this could lead to stability issues");
}
return dynamicMinRam;
}
@ -3329,13 +3331,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
VDI memoryVDI = vmsnap.getSuspendVDI(conn);
if (!isRefNull(memoryVDI)) {
size = size + memoryVDI.getPhysicalUtilisation(conn);
s_logger.debug("memoryVDI size :" + size);
s_logger.debug("memoryVDI size :" + toHumanReadableSize(size));
String parentUuid = memoryVDI.getSmConfig(conn).get("vhd-parent");
VDI pMemoryVDI = VDI.getByUuid(conn, parentUuid);
if (!isRefNull(pMemoryVDI)) {
size = size + pMemoryVDI.getPhysicalUtilisation(conn);
}
s_logger.debug("memoryVDI size+parent :" + size);
s_logger.debug("memoryVDI size+parent :" + toHumanReadableSize(size));
}
}
} catch (Exception e) {
@ -4215,7 +4217,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
final long vdiVirtualSize = vdi.getVirtualSize(conn);
if (vdiVirtualSize != volumeSize) {
s_logger.info("Resizing the data disk (VDI) from vdiVirtualSize: " + vdiVirtualSize + " to volumeSize: " + volumeSize);
s_logger.info("Resizing the data disk (VDI) from vdiVirtualSize: " + toHumanReadableSize(vdiVirtualSize) + " to volumeSize: " + toHumanReadableSize(volumeSize));
try {
vdi.resize(conn, volumeSize);

View File

@ -24,6 +24,8 @@ import com.xensource.xenapi.Host;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VM;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class XcpServerResource extends CitrixResourceBase {
private final static Logger s_logger = Logger.getLogger(XcpServerResource.class);
@ -87,8 +89,8 @@ public class XcpServerResource extends CitrixResourceBase {
protected void setMemory(final Connection conn, final VM vm, final long minMemsize, final long maxMemsize) throws XmlRpcException, XenAPIException {
//setMemoryLimits(staticMin, staticMax, dynamicMin, dynamicMax)
if (s_logger.isDebugEnabled()) {
s_logger.debug("Memory Limits for VM [" + vm.getNameLabel(conn) + "[staticMin:" + mem_32m + ", staticMax:" + maxMemsize + ", dynamicMin: " + minMemsize +
", dynamicMax:" + maxMemsize + "]]");
s_logger.debug("Memory Limits for VM [" + vm.getNameLabel(conn) + "[staticMin:" + toHumanReadableSize(mem_32m) + ", staticMax:" + toHumanReadableSize(maxMemsize) + ", dynamicMin: " + toHumanReadableSize(minMemsize) +
", dynamicMax:" + toHumanReadableSize(maxMemsize) + "]]");
}
vm.setMemoryLimits(conn, mem_32m, maxMemsize, minMemsize, maxMemsize);
}

View File

@ -18,6 +18,7 @@
*/
package com.cloud.hypervisor.xenserver.resource;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import static com.cloud.utils.ReflectUtil.flattenProperties;
import static com.google.common.collect.Lists.newArrayList;
@ -827,10 +828,10 @@ public class XenServerStorageProcessor implements StorageProcessor {
vdi = tmpltvdi.createClone(conn, new HashMap<String, String>());
Long virtualSize = vdi.getVirtualSize(conn);
if (volume.getSize() > virtualSize) {
s_logger.debug("Overriding provided template's size with new size " + volume.getSize() + " for volume: " + volume.getName());
s_logger.debug("Overriding provided template's size with new size " + toHumanReadableSize(volume.getSize()) + " for volume: " + volume.getName());
vdi.resize(conn, volume.getSize());
} else {
s_logger.debug("Using templates disk size of " + virtualSize + " for volume: " + volume.getName() + " since size passed was " + volume.getSize());
s_logger.debug("Using templates disk size of " + toHumanReadableSize(virtualSize) + " for volume: " + volume.getName() + " since size passed was " + toHumanReadableSize(volume.getSize()));
}
vdi.setNameLabel(conn, volume.getName());

View File

@ -62,6 +62,8 @@ import com.xensource.xenapi.Types.StorageOperations;
import com.xensource.xenapi.Types.XenAPIException;
import com.xensource.xenapi.VDI;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class);
@ -665,7 +667,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
newSnapshot.setParentSnapshotPath(prevBackupUuid);
}
s_logger.info("New snapshot details: " + newSnapshot.toString());
s_logger.info("New snapshot physical utilization: " + physicalSize);
s_logger.info("New snapshot physical utilization: " + toHumanReadableSize(physicalSize));
return new CopyCmdAnswer(newSnapshot);
} catch (final Exception e) {

View File

@ -36,6 +36,8 @@ import com.xensource.xenapi.VDI;
import java.util.HashSet;
import java.util.Set;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@ResourceWrapper(handles = ResizeVolumeCommand.class)
public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper<ResizeVolumeCommand, Answer, CitrixResourceBase> {
private static final Logger s_logger = Logger.getLogger(CitrixResizeVolumeCommandWrapper.class);
@ -50,7 +52,7 @@ public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper<Resiz
try {
if (command.getCurrentSize() >= newSize) {
s_logger.info("No need to resize volume: " + volId +", current size " + command.getCurrentSize() + " is same as new size " + newSize);
s_logger.info("No need to resize volume: " + volId +", current size " + toHumanReadableSize(command.getCurrentSize()) + " is same as new size " + toHumanReadableSize(newSize));
return new ResizeVolumeAnswer(command, true, "success", newSize);
}
if (command.isManaged()) {

View File

@ -151,6 +151,8 @@ import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import com.google.common.base.Strings;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class KubernetesClusterManagerImpl extends ManagerBase implements KubernetesClusterService {
private static final Logger LOGGER = Logger.getLogger(KubernetesClusterManagerImpl.class);
@ -535,7 +537,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, ram_requested * reserved));
LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)));
}
hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
suitable_host_found = true;
@ -557,7 +559,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return new DeployDestination(zone, null, planCluster, null);
}
String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%d) with offering ID: %s",
cpu_requested * nodesCount, ram_requested * nodesCount, offering.getUuid());
cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getUuid());
LOGGER.warn(msg);
throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
}

View File

@ -88,6 +88,8 @@ import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import com.google.common.base.Strings;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class KubernetesClusterResourceModifierActionWorker extends KubernetesClusterActionWorker {
@Inject
@ -229,7 +231,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
}
if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, ram_requested * reserved));
LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%d", h.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)));
}
hostEntry.setValue(new Pair<HostVO, Integer>(h, reserved));
suitable_host_found = true;
@ -250,7 +252,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
return new DeployDestination(zone, null, null, null);
}
String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%d) with offering ID: %s and hypervisor: %s",
cpu_requested * nodesCount, ram_requested * nodesCount, offering.getUuid(), clusterTemplate.getHypervisorType().toString());
cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getUuid(), clusterTemplate.getHypervisorType().toString());
LOGGER.warn(msg);
throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
}

View File

@ -79,6 +79,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreDriver.class);
private static final int s_lockTimeInSeconds = 300;
@ -616,7 +618,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
usedSpaceBytes += templatePoolRef.getTemplateSize();
}
}
s_logger.debug("usedSpaceBytes: " + String.valueOf(usedSpaceBytes));
s_logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpaceBytes));
return usedSpaceBytes;
}
@ -657,7 +659,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve);
volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f);
}
s_logger.debug("Volume size:" + String.valueOf(volumeSize));
s_logger.debug("Volume size: " + toHumanReadableSize(volumeSize));
break;
case TEMPLATE:
@ -670,7 +672,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
} else {
volumeSize = (long) (templateSize + templateSize * (s_lowestHypervisorSnapshotReserve / 100f));
}
s_logger.debug("Template volume size:" + String.valueOf(volumeSize));
s_logger.debug("Template volume size:" + toHumanReadableSize(volumeSize));
break;
}
@ -1091,7 +1093,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
long templateSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo,
storagePoolDao.findById(storagePoolId));
s_logger.debug("cached VM template sizeBytes: " + String.valueOf(templateSizeBytes));
s_logger.debug("cached VM template sizeBytes: " + toHumanReadableSize(templateSizeBytes));
int templateSizeGib = DateraUtil.bytesToGib(templateSizeBytes);

View File

@ -78,6 +78,8 @@ import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.TemplateManager;
import com.cloud.vm.dao.VMInstanceDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
@Override
public Map<String, String> getCapabilities() {
@ -366,7 +368,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, resizeParameter.hosts, resizeCmd);
if (answer != null && answer.getResult()) {
long finalSize = answer.getNewSize();
s_logger.debug("Resize: volume started at size " + vol.getSize() + " and ended at size " + finalSize);
s_logger.debug("Resize: volume started at size: " + toHumanReadableSize(vol.getSize()) + " and ended at size: " + toHumanReadableSize(finalSize));
vol.setSize(finalSize);
vol.update();

View File

@ -299,7 +299,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize());
s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize() + " MB");
}
long serviceOfferingId = offering.getId();

View File

@ -57,6 +57,8 @@ import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class UserConcentratedAllocator extends AdapterBase implements PodAllocator {
private final static Logger s_logger = Logger.getLogger(UserConcentratedAllocator.class);
@ -262,7 +264,7 @@ public class UserConcentratedAllocator extends AdapterBase implements PodAllocat
if (s_logger.isDebugEnabled()) {
s_logger.debug("Counting memory capacity used by vm: " + vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " +
usedCapacity + " Bytes");
toHumanReadableSize(usedCapacity) + " Bytes");
}
} else if (capacityType == Capacity.CAPACITY_TYPE_CPU) {
usedCapacity += so.getCpu() * so.getSpeed();

View File

@ -60,7 +60,7 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter {
private void onClusterNodeJoined(Object sender, ClusterNodeJoinEventArgs args) {
if (s_logger.isDebugEnabled()) {
for (ManagementServerHostVO mshost : args.getJoinedNodes()) {
s_logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msidL: " + mshost.getMsid());
s_logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
}
}

View File

@ -350,6 +350,8 @@ import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class ApiResponseHelper implements ResponseGenerator {
private static final Logger s_logger = Logger.getLogger(ApiResponseHelper.class);
@ -3481,7 +3483,13 @@ public class ApiResponseHelper implements ResponseGenerator {
builder.append("Bytes received by network ");
}
if (network != null) {
builder.append(network.getName()).append(" (").append(network.getUuid()).append(") ");
if (network.getName() != null) {
builder.append(network.getName());
}
if (network.getUuid() != null){
builder.append(" (").append(network.getUuid()).append(") ");
}
builder.append(" " + toHumanReadableSize(usageRecord.getRawUsage().longValue()) + " ");
}
if (vm != null) {
builder.append("using router ").append(vm.getInstanceName()).append(" (").append(vm.getUuid()).append(")");
@ -3518,6 +3526,9 @@ public class ApiResponseHelper implements ResponseGenerator {
if (volume != null) {
builder.append(" and volume ").append(volume.getName()).append(" (").append(volume.getUuid()).append(")");
}
if (usageRecord.getRawUsage()!= null){
builder.append(" " + toHumanReadableSize(usageRecord.getRawUsage().longValue()));
}
usageRecResponse.setDescription(builder.toString());
}
} else if (usageRecord.getUsageType() == UsageTypes.VOLUME) {
@ -3548,6 +3559,9 @@ public class ApiResponseHelper implements ResponseGenerator {
if (template != null) {
builder.append(" and template ").append(template.getName()).append(" (").append(template.getUuid()).append(")");
}
if (usageRecord.getSize() != null) {
builder.append(" and size " + toHumanReadableSize(usageRecord.getSize()));
}
usageRecResponse.setDescription(builder.toString());
}
} else if (usageRecord.getUsageType() == UsageTypes.TEMPLATE || usageRecord.getUsageType() == UsageTypes.ISO) {
@ -3575,7 +3589,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
if (tmpl != null) {
builder.append(" for ").append(tmpl.getName()).append(" (").append(tmpl.getUuid()).append(") ")
.append("with size ").append(usageRecord.getSize()).append(" and virtual size ").append(usageRecord.getVirtualSize());
.append("with size ").append(toHumanReadableSize(usageRecord.getSize())).append(" and virtual size ").append(toHumanReadableSize(usageRecord.getVirtualSize()));
}
usageRecResponse.setDescription(builder.toString());
}
@ -3594,7 +3608,7 @@ public class ApiResponseHelper implements ResponseGenerator {
builder.append("Snapshot usage ");
if (snap != null) {
builder.append("for ").append(snap.getName()).append(" (").append(snap.getUuid()).append(") ")
.append("with size ").append(usageRecord.getSize());
.append("with size ").append(toHumanReadableSize(usageRecord.getSize()));
}
usageRecResponse.setDescription(builder.toString());
}
@ -3741,6 +3755,9 @@ public class ApiResponseHelper implements ResponseGenerator {
if (diskOff != null) {
builder.append(" using disk offering ").append(diskOff.getName()).append(" (").append(diskOff.getUuid()).append(")");
}
if (usageRecord.getSize() != null){
builder.append(" and size " + toHumanReadableSize(usageRecord.getSize()));
}
usageRecResponse.setDescription(builder.toString());
}
} else if (usageRecord.getUsageType() == UsageTypes.VOLUME_SECONDARY) {
@ -3750,7 +3767,7 @@ public class ApiResponseHelper implements ResponseGenerator {
builder.append("Volume on secondary storage usage");
if (volume != null) {
builder.append(" for ").append(volume.getName()).append(" (").append(volume.getUuid()).append(") ")
.append("with size ").append(usageRecord.getSize());
.append("with size ").append(toHumanReadableSize(usageRecord.getSize()));
}
usageRecResponse.setDescription(builder.toString());
}
@ -3774,7 +3791,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
if (vmInstance != null) {
builder.append(" for VM ").append(vmInstance.getHostName()).append(" (").append(vmInstance.getUuid()).append(") ")
.append("with size ").append(usageRecord.getVirtualSize());
.append("with size ").append(toHumanReadableSize(usageRecord.getVirtualSize()));
}
usageRecResponse.setDescription(builder.toString());
}

View File

@ -95,6 +95,8 @@ import com.cloud.vm.dao.UserVmDetailsDao;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class CapacityManagerImpl extends ManagerBase implements CapacityManager, StateListener<State, VirtualMachine.Event, VirtualMachine>, Listener, ResourceListener,
Configurable {
private static final Logger s_logger = Logger.getLogger(CapacityManagerImpl.class);
@ -217,7 +219,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu);
s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem);
s_logger.debug("Hosts's actual total RAM: " + toHumanReadableSize(actualTotalMem) + " and RAM after applying overprovisioning: " + toHumanReadableSize(totalMem));
}
if (!moveFromReserved) {
@ -257,8 +259,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() +
"; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered);
s_logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem + "; new used: " +
capacityMemory.getUsedCapacity() + ",reserved:" + capacityMemory.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved +
s_logger.debug("release mem from host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ",reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; new used: " +
toHumanReadableSize(capacityMemory.getUsedCapacity()) + ",reserved:" + toHumanReadableSize(capacityMemory.getReservedCapacity()) + "; movedfromreserved: " + moveFromReserved +
",moveToReservered" + moveToReservered);
_capacityDao.update(capacityCpu.getId(), capacityCpu);
@ -332,7 +334,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
if (s_logger.isDebugEnabled()) {
s_logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId);
s_logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu);
s_logger.debug("Current Used RAM: " + usedMem + " , Free RAM:" + freeMem + " ,Requested RAM: " + ram);
s_logger.debug("Current Used RAM: " + toHumanReadableSize(usedMem) + " , Free RAM:" + toHumanReadableSize(freeMem) + " ,Requested RAM: " + toHumanReadableSize(ram));
}
capacityCpu.setUsedCapacity(usedCpu + cpu);
capacityMem.setUsedCapacity(usedMem + ram);
@ -343,7 +345,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
if (s_logger.isDebugEnabled()) {
s_logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required");
s_logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu);
s_logger.debug("Reserved RAM: " + reservedMem + " , Requested RAM: " + ram);
s_logger.debug("Reserved RAM: " + toHumanReadableSize(reservedMem) + " , Requested RAM: " + toHumanReadableSize(ram));
}
if (reservedCpu >= cpu && reservedMem >= ram) {
capacityCpu.setReservedCapacity(reservedCpu - cpu);
@ -364,8 +366,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" +
capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + ",alloc_from_last:" + fromLastHost);
s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " + reservedMem + ", total: " +
totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " + capacityMem.getReservedCapacity() + "; requested mem: " + ram +
s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ", old reserved: " + toHumanReadableSize(reservedMem) + ", total: " +
toHumanReadableSize(totalMem) + "; new used: " + toHumanReadableSize(capacityMem.getUsedCapacity()) + ", reserved: " + toHumanReadableSize(capacityMem.getReservedCapacity()) + "; requested mem: " + toHumanReadableSize(ram) +
",alloc_from_last:" + fromLastHost);
long cluster_id = host.getClusterId();
@ -432,7 +434,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
boolean hasCapacity = false;
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram +
s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + toHumanReadableSize(ram) +
" , cpuOverprovisioningFactor: " + cpuOvercommitRatio);
}
@ -474,7 +476,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
if (s_logger.isDebugEnabled()) {
s_logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity");
s_logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu);
s_logger.debug("Reserved RAM: " + freeMem + " , Requested RAM: " + ram);
s_logger.debug("Reserved RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram));
}
/* alloc from reserved */
if (reservedCpu >= cpu) {
@ -503,7 +505,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
if (s_logger.isDebugEnabled()) {
s_logger.debug("Free CPU: " + freeCpu + " , Requested CPU: " + cpu);
s_logger.debug("Free RAM: " + freeMem + " , Requested RAM: " + ram);
s_logger.debug("Free RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram));
}
/* alloc from free resource */
if ((reservedCpuValueToUse + usedCpu + cpu <= totalCpu)) {
@ -526,17 +528,17 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
", total with overprovisioning: " + totalCpu + "; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity +
" ,considerReservedCapacity?: " + considerReservedCapacity);
s_logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + usedMem + ", reserved: " + reservedMem + ", total: " + totalMem +
"; requested mem: " + ram + ",alloc_from_last_host?:" + checkFromReservedCapacity + " ,considerReservedCapacity?: " + considerReservedCapacity);
s_logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + toHumanReadableSize(usedMem) + ", reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) +
"; requested mem: " + toHumanReadableSize(ram) + ", alloc_from_last_host?: " + checkFromReservedCapacity + " , considerReservedCapacity?: " + considerReservedCapacity);
} else {
if (checkFromReservedCapacity) {
s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " +
reservedMem + ", requested mem: " + ram);
toHumanReadableSize(reservedMem) + ", requested mem: " + toHumanReadableSize(ram));
} else {
s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " +
cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + reservedMem + ", used Mem: " +
usedMem + ", requested mem: " + ram + ", total Mem:" + totalMem + " ,considerReservedCapacity?: " + considerReservedCapacity);
s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + ", reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " +
cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + toHumanReadableSize(reservedMem) + ", used Mem: " +
toHumanReadableSize(usedMem) + ", requested mem: " + toHumanReadableSize(ram) + ", total Mem:" + toHumanReadableSize(totalMem) + " ,considerReservedCapacity?: " + considerReservedCapacity);
}
if (s_logger.isDebugEnabled()) {
@ -822,8 +824,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
}
if (memCap.getTotalCapacity() != host.getTotalMemory()) {
s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + memCap.getTotalCapacity() + " new total memory:" +
host.getTotalMemory());
s_logger.debug("Calibrate total memory for host: " + host.getId() + " old total memory:" + toHumanReadableSize(memCap.getTotalCapacity()) + " new total memory:" +
toHumanReadableSize(host.getTotalMemory()));
memCap.setTotalCapacity(host.getTotalMemory());
}
@ -834,8 +836,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
}
if (memCap.getUsedCapacity() == usedMemory && memCap.getReservedCapacity() == reservedMemory) {
s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + memCap.getUsedCapacity() + " reservedMem: " +
memCap.getReservedCapacity());
s_logger.debug("No need to calibrate memory capacity, host:" + host.getId() + " usedMem: " + toHumanReadableSize(memCap.getUsedCapacity()) + " reservedMem: " +
toHumanReadableSize(memCap.getReservedCapacity()));
} else {
if (memCap.getReservedCapacity() != reservedMemory) {
s_logger.debug("Calibrate reserved memory for host: " + host.getId() + " old reservedMem:" + memCap.getReservedCapacity() + " new reservedMem:" +
@ -848,7 +850,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
* state(starting/migrating) that I don't know on which host
* they are allocated
*/
s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + memCap.getUsedCapacity() + " new usedMem: " + usedMemory);
s_logger.debug("Calibrate used memory for host: " + host.getId() + " old usedMem: " + toHumanReadableSize(memCap.getUsedCapacity()) + " new usedMem: " + toHumanReadableSize(usedMemory));
memCap.setUsedCapacity(usedMemory);
}
}
@ -1022,7 +1024,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
CapacityVOMem.setReservedCapacity(0);
CapacityVOMem.setTotalCapacity(newTotalMem);
} else {
s_logger.debug("What? new cpu is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() +
s_logger.debug("What? new mem is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() +
"," + CapacityVOMem.getTotalCapacity());
}
_capacityDao.update(CapacityVOMem.getId(), CapacityVOMem);

View File

@ -136,6 +136,8 @@ import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener,
StateListener<State, VirtualMachine.Event, VirtualMachine> {
@ -267,7 +269,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" +
plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + toHumanReadableSize(ram_requested));
s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No"));
}

View File

@ -486,7 +486,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla
//we need clusters having enough cpu AND RAM to host this particular VM and order them by aggregate cluster capacity
if (s_logger.isDebugEnabled()) {
s_logger.debug("Listing clusters in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this " +
s_logger.debug("Listing clusters in order of aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this " +
(isZone ? "Zone: " : "Pod: ") + id);
}
String capacityTypeToOrder = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());

View File

@ -90,6 +90,8 @@ import com.cloud.vm.NicVO;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.NicDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class ExternalDeviceUsageManagerImpl extends ManagerBase implements ExternalDeviceUsageManager {
@ -312,13 +314,13 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter
userStats.setCurrentBytesSent(newCurrentBytesSent);
if (oldCurrentBytesSent > newCurrentBytesSent) {
s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + ".");
s_logger.warn(warning + "Stored bytes sent: " + toHumanReadableSize(oldCurrentBytesSent) + ", new bytes sent: " + toHumanReadableSize(newCurrentBytesSent) + ".");
userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent);
}
userStats.setCurrentBytesReceived(newCurrentBytesReceived);
if (oldCurrentBytesReceived > newCurrentBytesReceived) {
s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + ".");
s_logger.warn(warning + "Stored bytes received: " + toHumanReadableSize(oldCurrentBytesReceived) + ", new bytes received: " + toHumanReadableSize(newCurrentBytesReceived) + ".");
userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived);
}
@ -531,13 +533,13 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter
userStats.setCurrentBytesSent(newCurrentBytesSent);
if (oldCurrentBytesSent > newCurrentBytesSent) {
s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + ".");
s_logger.warn(warning + "Stored bytes sent: " + toHumanReadableSize(oldCurrentBytesSent) + ", new bytes sent: " + toHumanReadableSize(newCurrentBytesSent) + ".");
userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent);
}
userStats.setCurrentBytesReceived(newCurrentBytesReceived);
if (oldCurrentBytesReceived > newCurrentBytesReceived) {
s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + ".");
s_logger.warn(warning + "Stored bytes received: " + toHumanReadableSize(oldCurrentBytesReceived) + ", new bytes received: " + toHumanReadableSize(newCurrentBytesReceived) + ".");
userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived);
}

View File

@ -270,6 +270,8 @@ import com.cloud.vm.dao.VMInstanceDao;
import com.google.gson.JsonSyntaxException;
import com.google.gson.reflect.TypeToken;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
/**
* VirtualNetworkApplianceManagerImpl manages the different types of virtual
* network appliances available in the Cloud Stack.
@ -793,7 +795,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received # of bytes that's less than the last one. "
+ "Assuming something went wrong and persisting it. Router: " + answerFinal.getRouterName() + " Reported: "
+ answerFinal.getBytesReceived() + " Stored: " + stats.getCurrentBytesReceived());
+ toHumanReadableSize(answerFinal.getBytesReceived()) + " Stored: " + toHumanReadableSize(stats.getCurrentBytesReceived()));
}
stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived());
}
@ -802,7 +804,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received # of bytes that's less than the last one. "
+ "Assuming something went wrong and persisting it. Router: " + answerFinal.getRouterName() + " Reported: "
+ answerFinal.getBytesSent() + " Stored: " + stats.getCurrentBytesSent());
+ toHumanReadableSize(answerFinal.getBytesSent()) + " Stored: " + toHumanReadableSize(stats.getCurrentBytesSent()));
}
stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent());
}
@ -817,8 +819,8 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
});
} catch (final Exception e) {
s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: "
+ answer.getBytesSent());
s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + toHumanReadableSize(answer.getBytesReceived()) + "; Tx: "
+ toHumanReadableSize(answer.getBytesSent()));
}
}
}
@ -3117,7 +3119,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
if (stats.getCurrentBytesReceived() > answerFinal.getBytesReceived()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: "
+ answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesReceived() + " Stored: " + stats.getCurrentBytesReceived());
+ answerFinal.getRouterName() + " Reported: " + toHumanReadableSize(answerFinal.getBytesReceived()) + " Stored: " + toHumanReadableSize(stats.getCurrentBytesReceived()));
}
stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived());
}
@ -3125,7 +3127,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
if (stats.getCurrentBytesSent() > answerFinal.getBytesSent()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: "
+ answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesSent() + " Stored: " + stats.getCurrentBytesSent());
+ answerFinal.getRouterName() + " Reported: " + toHumanReadableSize(answerFinal.getBytesSent()) + " Stored: " + toHumanReadableSize(stats.getCurrentBytesSent()));
}
stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent());
}
@ -3139,8 +3141,8 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
}
});
} catch (final Exception e) {
s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: "
+ answer.getBytesSent());
s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + toHumanReadableSize(answer.getBytesReceived()) + "; Tx: "
+ toHumanReadableSize(answer.getBytesSent()));
}
}
}

View File

@ -107,6 +107,8 @@ import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLimitService, Configurable {
public static final Logger s_logger = Logger.getLogger(ResourceLimitManagerImpl.class);
@ -427,8 +429,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
long domainResourceLimit = findCorrectResourceLimitForDomain(domain, type);
long currentDomainResourceCount = _resourceCountDao.getResourceCount(domainId, ResourceOwnerType.Domain, type);
long requestedDomainResourceCount = currentDomainResourceCount + numResources;
String messageSuffix = " domain resource limits of Type '" + type + "'" + " for Domain Id = " + domainId + " is exceeded: Domain Resource Limit = " + domainResourceLimit
+ ", Current Domain Resource Amount = " + currentDomainResourceCount + ", Requested Resource Amount = " + numResources + ".";
String messageSuffix = " domain resource limits of Type '" + type + "'" + " for Domain Id = " + domainId + " is exceeded: Domain Resource Limit = " + toHumanReadableSize(domainResourceLimit)
+ ", Current Domain Resource Amount = " + toHumanReadableSize(currentDomainResourceCount) + ", Requested Resource Amount = " + toHumanReadableSize(numResources) + ".";
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if" + messageSuffix);
@ -450,9 +452,20 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
long accountResourceLimit = findCorrectResourceLimitForAccount(account, type);
long currentResourceCount = _resourceCountDao.getResourceCount(account.getId(), ResourceOwnerType.Account, type);
long requestedResourceCount = currentResourceCount + numResources;
String convertedAccountResourceLimit = String.valueOf(accountResourceLimit);
String convertedCurrentResourceCount = String.valueOf(currentResourceCount);
String convertedNumResources = String.valueOf(numResources);
if (type == ResourceType.secondary_storage || type == ResourceType.primary_storage){
convertedAccountResourceLimit = toHumanReadableSize(accountResourceLimit);
convertedCurrentResourceCount = toHumanReadableSize(currentResourceCount);
convertedNumResources = toHumanReadableSize(numResources);
}
String messageSuffix = " amount of resources of Type = '" + type + "' for " + (project == null ? "Account Name = " + account.getAccountName() : "Project Name = " + project.getName())
+ " in Domain Id = " + account.getDomainId() + " is exceeded: Account Resource Limit = " + accountResourceLimit + ", Current Account Resource Amount = " + currentResourceCount
+ ", Requested Resource Amount = " + numResources + ".";
+ " in Domain Id = " + account.getDomainId() + " is exceeded: Account Resource Limit = " + convertedAccountResourceLimit + ", Current Account Resource Amount = " + convertedCurrentResourceCount
+ ", Requested Resource Amount = " + convertedNumResources + ".";
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking if" + messageSuffix);
@ -807,7 +820,11 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
@DB
protected boolean updateResourceCountForAccount(final long accountId, final ResourceType type, final boolean increment, final long delta) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Updating resource Type = " + type + " count for Account = " + accountId + " Operation = " + (increment ? "increasing" : "decreasing") + " Amount = " + delta);
String convertedDelta = String.valueOf(delta);
if (type == ResourceType.secondary_storage || type == ResourceType.primary_storage){
convertedDelta = toHumanReadableSize(delta);
}
s_logger.debug("Updating resource Type = " + type + " count for Account = " + accountId + " Operation = " + (increment ? "increasing" : "decreasing") + " Amount = " + convertedDelta);
}
try {
return Transaction.execute(new TransactionCallback<Boolean>() {

View File

@ -719,6 +719,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
static final ConfigKey<Integer> vmPasswordLength = new ConfigKey<Integer>("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false);
static final ConfigKey<Integer> sshKeyLength = new ConfigKey<Integer>("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global);
static final ConfigKey<Boolean> humanReadableSizes = new ConfigKey<Boolean>("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global);
@Inject
public AccountManager _accountMgr;
@Inject
@ -935,6 +937,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Override
public boolean start() {
s_logger.info("Startup CloudStack management server...");
// Set human readable sizes
NumbersUtil.enableHumanReadableSizes = _configDao.findByName("display.human.readable.sizes").getValue().equals("true");
if (_lockMasterListener == null) {
_lockMasterListener = new LockMasterListener(ManagementServerNode.getManagementServerId());
@ -3176,7 +3180,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {vmPasswordLength, sshKeyLength};
return new ConfigKey<?>[] {vmPasswordLength, sshKeyLength, humanReadableSizes};
}
protected class EventPurgeTask extends ManagedContextRunnable {

View File

@ -142,6 +142,8 @@ import com.cloud.vm.dao.NicDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
/**
* Provides real time stats for various agent resources up to x seconds
*
@ -739,15 +741,15 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
if (isCurrentVmDiskStatsDifferentFromPrevious(previousVmDiskStats, vmDiskStat_lock)) {
s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName()
+ " . VM: " + vmDiskStat.getVmName() + " Read(Bytes): " + vmDiskStat.getBytesRead() + " write(Bytes): " + vmDiskStat.getBytesWrite()
+ " Read(IO): " + vmDiskStat.getIORead() + " write(IO): " + vmDiskStat.getIOWrite());
+ " . VM: " + vmDiskStat.getVmName() + " Read(Bytes): " + toHumanReadableSize(vmDiskStat.getBytesRead()) + " write(Bytes): " + toHumanReadableSize(vmDiskStat.getBytesWrite())
+ " Read(IO): " + toHumanReadableSize(vmDiskStat.getIORead()) + " write(IO): " + toHumanReadableSize(vmDiskStat.getIOWrite()));
continue;
}
if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Read # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: "
+ host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesRead() + " Stored: "
+ host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesRead()) + " Stored: "
+ vmDiskStat_lock.getCurrentBytesRead());
}
vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead());
@ -756,8 +758,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Write # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: "
+ host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: "
+ vmDiskStat_lock.getCurrentBytesWrite());
+ host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesWrite()) + " Stored: "
+ toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite()));
}
vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite());
}
@ -885,8 +887,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
if (vmNetworkStat_lock.getCurrentBytesSent() > vmNetworkStat.getBytesSent()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Sent # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: "
+ host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + vmNetworkStat.getBytesSent() + " Stored: "
+ vmNetworkStat_lock.getCurrentBytesSent());
+ host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: "
+ toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent()));
}
vmNetworkStat_lock.setNetBytesSent(vmNetworkStat_lock.getNetBytesSent() + vmNetworkStat_lock.getCurrentBytesSent());
}
@ -895,8 +897,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
if (vmNetworkStat_lock.getCurrentBytesReceived() > vmNetworkStat.getBytesReceived()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: "
+ host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + vmNetworkStat.getBytesReceived() + " Stored: "
+ vmNetworkStat_lock.getCurrentBytesReceived());
+ host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: "
+ toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived()));
}
vmNetworkStat_lock.setNetBytesReceived(vmNetworkStat_lock.getNetBytesReceived() + vmNetworkStat_lock.getCurrentBytesReceived());
}
@ -1005,7 +1007,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
Answer answer = ssAhost.sendMessage(command);
if (answer != null && answer.getResult()) {
storageStats.put(storeId, (StorageStats)answer);
s_logger.trace("HostId: " + storeId + " Used: " + ((StorageStats)answer).getByteUsed() + " Total Available: " + ((StorageStats)answer).getCapacityBytes());
s_logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes()));
}
}
_storageStats = storageStats;

View File

@ -199,6 +199,8 @@ import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.VMInstanceDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class StorageManagerImpl extends ManagerBase implements StorageManager, ClusterManagerListener, Configurable {
private static final Logger s_logger = Logger.getLogger(StorageManagerImpl.class);
@ -979,13 +981,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId());
totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue();
s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString());
s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + storagePool.getCapacityBytes());
s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(storagePool.getCapacityBytes()));
} else {
s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString());
totalOverProvCapacity = storagePool.getCapacityBytes();
}
s_logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + totalOverProvCapacity);
s_logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity));
CapacityState capacityState = CapacityState.Enabled;
if (storagePool.getScope() == ScopeType.ZONE) {
DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId());
@ -1027,7 +1029,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
_capacityDao.update(capacity.getId(), capacity);
}
}
s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - "
s_logger.debug("Successfully set Capacity - " + toHumanReadableSize(totalOverProvCapacity) + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - "
+ storagePool.getId() + ", PodId " + storagePool.getPodId());
}
@ -1739,7 +1741,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (stats != null) {
double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage
s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + toHumanReadableSize(pool.getCapacityBytes()) + ", usedBytes: " + toHumanReadableSize(stats.getByteUsed()) + ", usedPct: " + usedPercentage
+ ", disable threshold: " + storageUsedThreshold);
}
if (usedPercentage >= storageUsedThreshold) {
@ -1882,20 +1884,20 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();
s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with over-provisioning factor " + overProvFactor.toString());
s_logger.debug("Total over-provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes());
s_logger.debug("Total over-provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(pool.getCapacityBytes()));
} else {
totalOverProvCapacity = pool.getCapacityBytes();
s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString());
}
s_logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + totalOverProvCapacity);
s_logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity));
double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());
if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking pool: " + pool.getId() + " for storage allocation , maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithTemplate
+ ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + storageAllocatedThreshold);
s_logger.debug("Checking pool: " + pool.getId() + " for storage allocation , maxSize : " + toHumanReadableSize(totalOverProvCapacity) + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate)
+ ", askingSize : " + toHumanReadableSize(totalAskingSize) + ", allocated disable threshold: " + storageAllocatedThreshold);
}
double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity);
@ -1911,8 +1913,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation, not enough storage, maxSize : " + totalOverProvCapacity
+ ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize);
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation, not enough storage, maxSize : " + toHumanReadableSize(totalOverProvCapacity)
+ ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) + ", askingSize : " + toHumanReadableSize(totalAskingSize));
}
return false;

View File

@ -328,6 +328,8 @@ import com.cloud.vm.snapshot.VMSnapshotManager;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, UserVmService, Configurable {
private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class);
@ -2835,7 +2837,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
Boolean enterSetup = cmd.getBootIntoSetup();
if (enterSetup != null && !HypervisorType.VMware.equals(vmInstance.getHypervisorType())) {
if (enterSetup != null && enterSetup && !HypervisorType.VMware.equals(vmInstance.getHypervisorType())) {
throw new InvalidParameterValueException("Booting into a hardware setup menu is not implemented on " + vmInstance.getHypervisorType());
}
UserVm userVm = rebootVirtualMachine(CallContext.current().getCallingUserId(), vmId, enterSetup == null ? false : cmd.getBootIntoSetup());
@ -4027,8 +4029,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
{
// rootdisksize must be larger than template.
if ((rootDiskSize << 30) < templateVO.getSize()) {
Long templateVOSizeGB = templateVO.getSize() / GiB_TO_BYTES;
String error = "Unsupported: rootdisksize override is smaller than template size " + templateVO.getSize() + "B (" + templateVOSizeGB + "GB)";
String error = "Unsupported: rootdisksize override is smaller than template size " + toHumanReadableSize(templateVO.getSize());
s_logger.error(error);
throw new InvalidParameterValueException(error);
} else if ((rootDiskSize << 30) > templateVO.getSize()) {
@ -4039,10 +4040,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
s_logger.error(error);
throw new InvalidParameterValueException(error);
} else {
s_logger.debug("Rootdisksize override validation successful. Template root disk size " + (templateVO.getSize() / GiB_TO_BYTES) + "GB Root disk size specified " + rootDiskSize + "GB");
s_logger.debug("Rootdisksize override validation successful. Template root disk size " + toHumanReadableSize(templateVO.getSize()) + " Root disk size specified " + rootDiskSize + " GB");
}
} else {
s_logger.debug("Root disk size specified is " + (rootDiskSize << 30) + "B and Template root disk size is " + templateVO.getSize() + "B. Both are equal so no need to override");
s_logger.debug("Root disk size specified is " + toHumanReadableSize(rootDiskSize << 30) + " and Template root disk size is " + toHumanReadableSize(templateVO.getSize()) + ". Both are equal so no need to override");
customParameters.remove(VmDetailConstants.ROOT_DISK_SIZE);
}
}
@ -4170,15 +4171,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|| (previousvmNetworkStats.getCurrentBytesReceived() != vmNetworkStat_lock.getCurrentBytesReceived()))) {
s_logger.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " +
"Ignoring current answer. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() +
" Sent(Bytes): " + vmNetworkStat.getBytesSent() + " Received(Bytes): " + vmNetworkStat.getBytesReceived());
" Sent(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Received(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesReceived()));
continue;
}
if (vmNetworkStat_lock.getCurrentBytesSent() > vmNetworkStat.getBytesSent()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Sent # of bytes that's less than the last one. " +
s_logger.debug("Sent # of bytes that's less than the last one. " +
"Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() +
" Reported: " + vmNetworkStat.getBytesSent() + " Stored: " + vmNetworkStat_lock.getCurrentBytesSent());
" Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent()));
}
vmNetworkStat_lock.setNetBytesSent(vmNetworkStat_lock.getNetBytesSent() + vmNetworkStat_lock.getCurrentBytesSent());
}
@ -4188,7 +4189,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (s_logger.isDebugEnabled()) {
s_logger.debug("Received # of bytes that's less than the last one. " +
"Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() +
" Reported: " + vmNetworkStat.getBytesReceived() + " Stored: " + vmNetworkStat_lock.getCurrentBytesReceived());
" Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived()));
}
vmNetworkStat_lock.setNetBytesReceived(vmNetworkStat_lock.getNetBytesReceived() + vmNetworkStat_lock.getCurrentBytesReceived());
}
@ -4982,7 +4983,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName()
+ " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead());
+ " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesRead()) + " Stored: " + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesRead()));
}
vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead());
}
@ -4990,8 +4991,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Write # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName()
+ " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: "
+ vmDiskStat_lock.getCurrentBytesWrite());
+ " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesWrite()) + " Stored: "
+ toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite()));
}
vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite());
}

View File

@ -3163,7 +3163,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
if (defaultMaxAccountSecondaryStorageInGB != Resource.RESOURCE_UNLIMITED && (accountDirSizeInGB + contentLengthInGB) > defaultMaxAccountSecondaryStorageInGB) {
s_logger.error("accountDirSizeInGb: " + accountDirSizeInGB + " defaultMaxAccountSecondaryStorageInGB: " + defaultMaxAccountSecondaryStorageInGB + " contentLengthInGB:"
+ contentLengthInGB);
+ contentLengthInGB);
String errorMessage = "Maximum number of resources of type secondary_storage for account has exceeded";
updateStateMapWithError(cmd.getEntityUUID(), errorMessage);
throw new InvalidParameterValueException(errorMessage);

View File

@ -87,6 +87,8 @@ import com.cloud.utils.storage.QCOW2Utils;
import org.apache.cloudstack.utils.security.ChecksumValue;
import org.apache.cloudstack.utils.security.DigestHelper;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class DownloadManagerImpl extends ManagerBase implements DownloadManager {
private String _name;
StorageLayer _storage;
@ -268,7 +270,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
}
TemplateDownloader td = dj.getTemplateDownloader();
LOGGER.info("Download Completion for jobId: " + jobId + ", status=" + status);
LOGGER.info("local: " + td.getDownloadLocalPath() + ", bytes=" + td.getDownloadedBytes() + ", error=" + td.getDownloadError() + ", pct=" +
LOGGER.info("local: " + td.getDownloadLocalPath() + ", bytes=" + toHumanReadableSize(td.getDownloadedBytes()) + ", error=" + td.getDownloadError() + ", pct=" +
td.getDownloadPercent());
switch (status) {

View File

@ -53,6 +53,8 @@ import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class UploadManagerImpl extends ManagerBase implements UploadManager {
public class Completion implements UploadCompleteCallback {
@ -439,7 +441,7 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager {
}
TemplateUploader tu = uj.getTemplateUploader();
s_logger.warn("Upload Completion for jobId: " + jobId + ", status=" + status);
s_logger.warn("UploadedBytes=" + tu.getUploadedBytes() + ", error=" + tu.getUploadError() + ", pct=" + tu.getUploadPercent());
s_logger.warn("UploadedBytes=" + toHumanReadableSize(tu.getUploadedBytes()) + ", error=" + tu.getUploadError() + ", pct=" + tu.getUploadPercent());
switch (status) {
case ABORTED:

View File

@ -16,11 +16,10 @@
# under the License.
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.lib.common import list_hosts
from cryptography import x509
from cryptography.hazmat.backends import default_backend

View File

@ -0,0 +1,95 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from marvin.cloudstackAPI import updateConfiguration
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
class TestHumanReadableLogs(cloudstackTestCase):
    """
    Test correct output when logging byte size values.
    """

    def setUp(self):
        # API client and management-server connection details come from the
        # marvin test configuration.
        self.apiClient = self.testClient.getApiClient()
        self.mgtSvrDetails = self.config.__dict__["mgtSvr"][0].__dict__

    @attr(tags=["devcloud", "basic", "advanced"], required_hardware="false")
    def test_01_disableHumanReadableLogs(self):
        """
        Test log file output after disabling human readable sizes feature
        """
        # create ssh client
        sshClient = getSSHClient(self)

        # Disable the feature, then restart the service: the setting is
        # non-dynamic and is only read when the management server starts.
        updateConfig(self, "false")
        command = "systemctl restart cloudstack-management"
        sshClient.execute(command)

        # CapacityChecker runs as soon as management server is up.
        # With the feature disabled, "usedMem: (" must NOT appear within
        # 60 seconds, so the timed-out grep reports FAILED.
        command = "timeout 60 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('"
        sshClient.timeout = 60
        result = sshClient.runCommand(command)
        self.assertTrue(result['status'] == "FAILED")

    @attr(tags=["devcloud", "basic", "advanced"], required_hardware="false")
    def test_02_enableHumanReadableLogs(self):
        """
        Test log file output after enabling human readable sizes feature
        """
        # create ssh client
        sshClient = getSSHClient(self)

        # Enable the feature, then restart the service so the non-dynamic
        # setting is re-read on startup.
        updateConfig(self, "true")
        command = "systemctl restart cloudstack-management"
        sshClient.execute(command)

        # CapacityChecker runs as soon as management server is up.
        # With the feature enabled, the human readable form "usedMem: ("
        # should be logged while the server restarts, so grep succeeds.
        command = "timeout 60 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('"
        sshClient.timeout = 60
        result = sshClient.runCommand(command)
        self.assertTrue(result['status'] == "SUCCESS")
def updateConfig(self, enableFeature):
    # Flip the global setting that controls human readable byte sizes
    # in the management-server and agent logs.
    cmd = updateConfiguration.updateConfigurationCmd()
    cmd.name = "display.human.readable.sizes"
    cmd.value = enableFeature
    response = self.apiClient.updateConfiguration(cmd)
    self.debug("updated the parameter %s with value %s" % (
        response.name, response.value))
def getSSHClient(self):
    # Open an SSH session to the management server host on the standard port.
    details = self.mgtSvrDetails
    return SshClient(
        details["mgtSvrIp"],
        22,
        details["user"],
        details["passwd"]
    )

View File

@ -1504,11 +1504,11 @@ public class TestClientWithAPI {
int bytesReceived = Integer.parseInt(requestKeyValues.get("receivedbytes"));
int bytesSent = Integer.parseInt(requestKeyValues.get("sentbytes"));
if ((bytesReceived > 100000000) && (bytesSent > 0)) {
s_logger.info("Network stat is correct for account" + s_account.get() + "; bytest received is " + bytesReceived + " and bytes sent is " + bytesSent);
s_logger.info("Network stat is correct for account" + s_account.get() + "; bytest received is " + toHumanReadableSize(bytesReceived) + " and bytes sent is " + toHumanReadableSize(bytesSent));
return true;
} else {
s_logger.error("Incorrect value for bytes received/sent for the account " + s_account.get() + ". We got " + bytesReceived + " bytes received; " +
" and " + bytesSent + " bytes sent");
s_logger.error("Incorrect value for bytes received/sent for the account " + s_account.get() + ". We got " + toHumanReadableSize(bytesReceived) + " bytes received; " +
" and " + toHumanReadableSize(bytesSent) + " bytes sent");
return false;
}

View File

@ -96,6 +96,8 @@ import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class UsageManagerImpl extends ManagerBase implements UsageManager, Runnable {
public static final Logger s_logger = Logger.getLogger(UsageManagerImpl.class.getName());
@ -1298,7 +1300,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
if (usageNetworkStats != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("getting current accounted bytes for... accountId: " + usageNetworkStats.getAccountId() + " in zone: " + userStat.getDataCenterId() +
"; abr: " + usageNetworkStats.getAggBytesReceived() + "; abs: " + usageNetworkStats.getAggBytesSent());
"; abr: " + toHumanReadableSize(usageNetworkStats.getAggBytesReceived()) + "; abs: " + toHumanReadableSize(usageNetworkStats.getAggBytesSent()));
}
currentAccountedBytesSent = usageNetworkStats.getAggBytesSent();
currentAccountedBytesReceived = usageNetworkStats.getAggBytesReceived();
@ -1307,13 +1309,13 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
long bytesReceived = userStat.getAggBytesReceived() - currentAccountedBytesReceived;
if (bytesSent < 0) {
s_logger.warn("Calculated negative value for bytes sent: " + bytesSent + ", user stats say: " + userStat.getAggBytesSent() +
", previous network usage was: " + currentAccountedBytesSent);
s_logger.warn("Calculated negative value for bytes sent: " + toHumanReadableSize(bytesSent) + ", user stats say: " + toHumanReadableSize(userStat.getAggBytesSent()) +
", previous network usage was: " + toHumanReadableSize(currentAccountedBytesSent));
bytesSent = 0;
}
if (bytesReceived < 0) {
s_logger.warn("Calculated negative value for bytes received: " + bytesReceived + ", user stats say: " + userStat.getAggBytesReceived() +
", previous network usage was: " + currentAccountedBytesReceived);
s_logger.warn("Calculated negative value for bytes received: " + toHumanReadableSize(bytesReceived) + ", user stats say: " + toHumanReadableSize(userStat.getAggBytesReceived()) +
", previous network usage was: " + toHumanReadableSize(currentAccountedBytesReceived));
bytesReceived = 0;
}
@ -1342,8 +1344,8 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
if (usageVmDiskStat != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("getting current accounted bytes for... accountId: " + usageVmDiskStat.getAccountId() + " in zone: " + vmDiskStat.getDataCenterId() +
"; aiw: " + vmDiskStat.getAggIOWrite() + "; air: " + usageVmDiskStat.getAggIORead() + "; abw: " + vmDiskStat.getAggBytesWrite() + "; abr: " +
usageVmDiskStat.getAggBytesRead());
"; aiw: " + toHumanReadableSize(vmDiskStat.getAggIOWrite()) + "; air: " + toHumanReadableSize(usageVmDiskStat.getAggIORead()) + "; abw: " + toHumanReadableSize(vmDiskStat.getAggBytesWrite()) + "; abr: " +
toHumanReadableSize(usageVmDiskStat.getAggBytesRead()));
}
currentAccountedIORead = usageVmDiskStat.getAggIORead();
currentAccountedIOWrite = usageVmDiskStat.getAggIOWrite();
@ -1356,23 +1358,23 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
long bytesWrite = vmDiskStat.getAggBytesWrite() - currentAccountedBytesWrite;
if (ioRead < 0) {
s_logger.warn("Calculated negative value for io read: " + ioRead + ", vm disk stats say: " + vmDiskStat.getAggIORead() + ", previous vm disk usage was: " +
currentAccountedIORead);
s_logger.warn("Calculated negative value for io read: " + toHumanReadableSize(ioRead) + ", vm disk stats say: " + toHumanReadableSize(vmDiskStat.getAggIORead()) + ", previous vm disk usage was: " +
toHumanReadableSize(currentAccountedIORead));
ioRead = 0;
}
if (ioWrite < 0) {
s_logger.warn("Calculated negative value for io write: " + ioWrite + ", vm disk stats say: " + vmDiskStat.getAggIOWrite() + ", previous vm disk usage was: " +
currentAccountedIOWrite);
s_logger.warn("Calculated negative value for io write: " + toHumanReadableSize(ioWrite) + ", vm disk stats say: " + toHumanReadableSize(vmDiskStat.getAggIOWrite()) + ", previous vm disk usage was: " +
toHumanReadableSize(currentAccountedIOWrite));
ioWrite = 0;
}
if (bytesRead < 0) {
s_logger.warn("Calculated negative value for bytes read: " + bytesRead + ", vm disk stats say: " + vmDiskStat.getAggBytesRead() +
", previous vm disk usage was: " + currentAccountedBytesRead);
s_logger.warn("Calculated negative value for bytes read: " + toHumanReadableSize(bytesRead) + ", vm disk stats say: " + toHumanReadableSize(vmDiskStat.getAggBytesRead()) +
", previous vm disk usage was: " + toHumanReadableSize(currentAccountedBytesRead));
bytesRead = 0;
}
if (bytesWrite < 0) {
s_logger.warn("Calculated negative value for bytes write: " + bytesWrite + ", vm disk stats say: " + vmDiskStat.getAggBytesWrite() +
", previous vm disk usage was: " + currentAccountedBytesWrite);
s_logger.warn("Calculated negative value for bytes write: " + toHumanReadableSize(bytesWrite) + ", vm disk stats say: " + toHumanReadableSize(vmDiskStat.getAggBytesWrite()) +
", previous vm disk usage was: " + toHumanReadableSize(currentAccountedBytesWrite));
bytesWrite = 0;
}
@ -1387,9 +1389,9 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
vmDiskStat.getAggIOWrite(), bytesRead, bytesWrite, vmDiskStat.getAggBytesRead(), vmDiskStat.getAggBytesWrite(), timestamp);
if (s_logger.isDebugEnabled()) {
s_logger.debug("creating vmDiskHelperEntry... accountId: " + vmDiskStat.getAccountId() + " in zone: " + vmDiskStat.getDataCenterId() + "; aiw: " +
vmDiskStat.getAggIOWrite() + "; air: " + vmDiskStat.getAggIORead() + "; curAIR: " + currentAccountedIORead + "; curAIW: " + currentAccountedIOWrite +
"; uir: " + ioRead + "; uiw: " + ioWrite + "; abw: " + vmDiskStat.getAggBytesWrite() + "; abr: " + vmDiskStat.getAggBytesRead() + "; curABR: " +
currentAccountedBytesRead + "; curABW: " + currentAccountedBytesWrite + "; ubr: " + bytesRead + "; ubw: " + bytesWrite);
toHumanReadableSize(vmDiskStat.getAggIOWrite()) + "; air: " + toHumanReadableSize(vmDiskStat.getAggIORead()) + "; curAIR: " + toHumanReadableSize(currentAccountedIORead) + "; curAIW: " + toHumanReadableSize(currentAccountedIOWrite) +
"; uir: " + toHumanReadableSize(ioRead) + "; uiw: " + toHumanReadableSize(ioWrite) + "; abw: " + toHumanReadableSize(vmDiskStat.getAggBytesWrite()) + "; abr: " + toHumanReadableSize(vmDiskStat.getAggBytesRead()) + "; curABR: " +
toHumanReadableSize(currentAccountedBytesRead) + "; curABW: " + toHumanReadableSize(currentAccountedBytesWrite) + "; ubr: " + toHumanReadableSize(bytesRead) + "; ubw: " + toHumanReadableSize(bytesWrite));
}
usageVmDisks.add(usageVmDiskVO);
}

View File

@ -37,6 +37,8 @@ import com.cloud.usage.dao.UsageNetworkDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.db.SearchCriteria;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class NetworkUsageParser {
public static final Logger s_logger = Logger.getLogger(NetworkUsageParser.class.getName());
@ -101,7 +103,7 @@ public class NetworkUsageParser {
if ((totalBytesSent > 0L) || (totalBytesReceived > 0L)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating usage record, total bytes sent:" + totalBytesSent + ", total bytes received: " + totalBytesReceived + " for account: " +
s_logger.debug("Creating usage record, total bytes sent: " + toHumanReadableSize(totalBytesSent) + ", total bytes received: " + toHumanReadableSize(totalBytesReceived) + " for account: " +
account.getId() + " in availability zone " + networkInfo.getZoneId() + ", start: " + startDate + ", end: " + endDate);
}

View File

@ -38,6 +38,8 @@ import com.cloud.usage.dao.UsageStorageDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.Pair;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class StorageUsageParser {
public static final Logger s_logger = Logger.getLogger(StorageUsageParser.class.getName());
@ -186,9 +188,9 @@ public class StorageUsageParser {
break;
}
//Create the usage record
usageDesc += "Id:" + storageId + " Size:" + size;
usageDesc += "Id:" + storageId + " Size:" + toHumanReadableSize(size);
if (type != StorageTypes.SNAPSHOT) {
usageDesc += " VirtualSize:" + virtualSize;
usageDesc += " VirtualSize: " + toHumanReadableSize(virtualSize);
}
//ToDo: get zone id

View File

@ -36,6 +36,8 @@ import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageVMSnapshotOnPrimaryDao;
import com.cloud.user.AccountVO;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class VMSanpshotOnPrimaryParser {
public static final Logger s_logger = Logger.getLogger(VMSanpshotOnPrimaryParser.class.getName());
@ -119,7 +121,7 @@ public class VMSanpshotOnPrimaryParser {
// Create the usage record
String usageDesc = "VMSnapshot Id: " + vmSnapshotId + " On Primary Usage: VM Id: " + vmId;
usageDesc += " Size: " + virtualSize;
usageDesc += " Size: " + toHumanReadableSize(virtualSize);
UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", usageType, new Double(usage), vmId, name, null, null,
vmSnapshotId, physicalSize, virtualSize, startDate, endDate);

View File

@ -36,6 +36,8 @@ import com.cloud.usage.dao.UsageDao;
import com.cloud.usage.dao.UsageVMSnapshotDao;
import com.cloud.user.AccountVO;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class VMSnapshotUsageParser {
public static final Logger s_logger = Logger.getLogger(VMSnapshotUsageParser.class.getName());
@ -143,7 +145,7 @@ public class VMSnapshotUsageParser {
usageDesc += " DiskOffering: " + doId;
}
usageDesc += " Size: " + size;
usageDesc += " Size: " + toHumanReadableSize(size);
UsageVO usageRecord =
new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, new Double(usage), vmId, null, doId, null, vmSnapshotId, size,

View File

@ -37,6 +37,8 @@ import com.cloud.usage.dao.UsageVmDiskDao;
import com.cloud.user.AccountVO;
import com.cloud.utils.db.SearchCriteria;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
@Component
public class VmDiskUsageParser {
public static final Logger s_logger = Logger.getLogger(VmDiskUsageParser.class.getName());
@ -106,8 +108,8 @@ public class VmDiskUsageParser {
if ((ioRead > 0L) || (ioWrite > 0L) || (bytesRead > 0L) || (bytesWrite > 0L)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Creating vm disk usage record, io read:" + ioRead + ", io write: " + ioWrite + "bytes read:" + bytesRead + ", bytes write: " +
bytesWrite + "for account: " + account.getId() + " in availability zone " + vmDiskInfo.getZoneId() + ", start: " + startDate + ", end: " +
s_logger.debug("Creating vm disk usage record, io read:" + toHumanReadableSize(ioRead) + ", io write: " + toHumanReadableSize(ioWrite) + ", bytes read:" + toHumanReadableSize(bytesRead) + ", bytes write: " +
toHumanReadableSize(bytesWrite) + " for account: " + account.getId() + " in availability zone " + vmDiskInfo.getZoneId() + ", start: " + startDate + ", end: " +
endDate);
}

View File

@ -0,0 +1,109 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import java.util.Iterator;
import java.util.Map.Entry;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class HumanReadableJson {
    // True while the value currently being emitted belongs to a matched key,
    // so the primitive handler knows to rewrite it.
    private boolean changeValue;
    // The rebuilt JSON document.
    private StringBuilder output = new StringBuilder();
    // Whether the next member of the current object is its first (controls commas).
    private boolean firstElement = true;
    // Keys whose values are byte sizes and should be made human readable.
    private final String[] elementsToMatch = {
            "bytesSent","bytesReceived","BytesWrite","BytesRead","bytesReadRate","bytesWriteRate","iopsReadRate",
            "iopsWriteRate","ioRead","ioWrite","bytesWrite","bytesRead","networkkbsread","networkkbswrite",
            "diskkbsread","diskkbswrite","minRam","maxRam","volumeSize", "size","newSize","memorykbs",
            "memoryintfreekbs","memorytargetkbs","diskioread","diskiowrite","totalSize","capacityBytes",
            "availableBytes","maxDownloadSizeInBytes","templateSize","templatePhySicalSize"
    };

    /**
     * Rewrites the given JSON so that values of well-known byte-size keys are
     * prefixed with a human readable size, e.g. {"size":"106496"} becomes
     * {"size":"(104.00 KB) 106496"}. All other content is passed through.
     *
     * @param json the JSON document (object or array) to transform
     * @return the transformed JSON as a string
     */
    public static String getHumanReadableBytesJson(String json){
        HumanReadableJson humanReadableJson = new HumanReadableJson();
        humanReadableJson.addElement(json);
        return humanReadableJson.output.toString();
    }

    /**
     * Emits one JSON element (array, object or primitive), recursing into
     * containers. A primitive is rewritten when {@code changeValue} is set by
     * {@link #addObject(String)} for a matched key.
     */
    private void addElement(String content) {
        JsonParser parser = new JsonParser();
        JsonElement jsonElement = parser.parse(content);
        if (jsonElement.isJsonArray()) {
            output.append("[");
            addArray(jsonElement.toString());
            output.append("]");
            firstElement = false;
        }
        if (jsonElement.isJsonObject()) {
            output.append("{");
            firstElement = true;
            addObject(jsonElement.getAsJsonObject().toString());
            output.append("}");
            firstElement = false;
        }
        if (jsonElement.isJsonPrimitive()) {
            if (changeValue) {
                // assumes matched keys always carry numeric values - TODO confirm
                output.append("\"" + toHumanReadableSize(jsonElement.getAsLong()) + "\"");
            } else {
                output.append("\"" + jsonElement.getAsString() + "\"");
            }
            firstElement = false;
        }
    }

    /** Emits every member of a JSON object, comma-separating members after the first. */
    private void addObject(String content) {
        JsonParser parser = new JsonParser();
        JsonElement el1 = parser.parse(content);
        Iterator<Entry<String, JsonElement>> it = el1.getAsJsonObject().entrySet().iterator();
        while (it.hasNext()) {
            Entry<String, JsonElement> value = it.next();
            String key = value.getKey();
            if (!firstElement) {
                output.append(",");
            }
            output.append("\"" + key + "\":");
            // Mark byte-size keys so the primitive value below gets rewritten.
            for (String candidate : elementsToMatch) {
                if (key.equals(candidate)) {
                    changeValue = true;
                    break;
                }
            }
            addElement(value.getValue().toString());
            changeValue = false;
        }
    }

    /** Emits every element of a JSON array, comma-separating elements after the first. */
    private void addArray(String content) {
        JsonParser parser = new JsonParser();
        JsonArray ar1 = parser.parse(content).getAsJsonArray();
        for (int count = 0; count < ar1.size(); count++) {
            if (count > 0) {
                output.append(",");
            }
            addElement(ar1.get(count).toString());
        }
    }
}

View File

@ -43,6 +43,8 @@ public class NumbersUtil {
return NumberUtils.toFloat(s, defaultValue);
}
public static Boolean enableHumanReadableSizes = true;
/**
* Converts bytes to long on input.
*/
@ -74,7 +76,12 @@ public class NumbersUtil {
protected static final long GB = 1024 * MB;
protected static final long TB = 1024 * GB;
public static String toReadableSize(long bytes) {
public static String toReadableSize(Long bytes) {
if (bytes == null){
return "null";
}
if (bytes < KB && bytes >= 0) {
return Long.toString(bytes) + " bytes";
}
@ -93,6 +100,13 @@ public class NumbersUtil {
return builder.toString();
}
public static String toHumanReadableSize(long size) {
    // When the display.human.readable.sizes feature is enabled, prefix the raw
    // byte count with its readable form, e.g. "(104.00 KB) 106496"; otherwise
    // print the raw count only. Long.toString avoids the boxing of ((Long)size).
    if (enableHumanReadableSizes) {
        return "(" + toReadableSize(size) + ") " + Long.toString(size);
    }
    return Long.toString(size);
}
/**
* Converts a string of the format 'yy-MM-dd'T'HH:mm:ss.SSS" into ms.
*

View File

@ -0,0 +1,66 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils;
import org.junit.Test;
import java.util.Locale;
import static org.junit.Assert.assertEquals;
import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson;
public class HumanReadableJsonTest {

    /** Asserts that transforming {@code input} produces exactly {@code expected}. */
    private void check(String expected, String input) {
        assertEquals(expected, getHumanReadableBytesJson(input));
    }

    @Test
    public void parseJsonObjectTest() {
        check("{}", "{}");
    }

    @Test
    public void parseJsonArrayTest() {
        check("[]", "[]");
        check("[[],[]]", "[[],[]]");
        check("[{},{}]", "[{},{}]");
    }

    @Test
    public void parseSimpleJsonTest() {
        check("[{\"object\":{}}]", "[{\"object\":{}}]");
    }

    @Test
    public void parseComplexJsonTest() {
        check("[{\"object\":[],\"object2\":[]},{}]", "[{\"object\":[],\"object2\":[]},{}]");
        check("[{\"object\":{},\"object2\":{}}]", "[{\"object\":{},\"object2\":{}}]");
        check("[{\"object\":[{},{}]}]", "[{\"object\":[{},{}]}]");
        check("[{\"object\":[]},{\"object\":[]}]", "[{\"object\":[]},{\"object\":[]}]");
        check("[{\"object\":[{\"object\":[]}]},{\"object\":[]}]", "[{\"object\":[{\"object\":[]}]},{\"object\":[]}]");
    }

    @Test
    public void parseMatchJsonTest() {
        // Values of matched byte-size keys gain a "(<readable>) " prefix.
        check("[{\"size\":\"(0 bytes) 0\"}]", "[{\"size\": \"0\"}]");
        check("[{\"size\":\"(0 bytes) 0\",\"bytesSent\":\"(0 bytes) 0\"}]", "[{\"size\": \"0\", \"bytesSent\": \"0\"}]");
    }

    @Test
    public void localeTest() {
        // The readable prefix follows the default locale's decimal separator.
        Locale.setDefault(Locale.UK); // UK test
        check("[{\"size\":\"(100.05 KB) 102456\"}]", "[{\"size\": \"102456\"}]");
        Locale.setDefault(Locale.US); // US test
        check("[{\"size\":\"(100.05 KB) 102456\"}]", "[{\"size\": \"102456\"}]");
        Locale.setDefault(Locale.forLanguageTag("en-ZA")); // Other region test
        check("[{\"size\":\"(100,05 KB) 102456\"}]", "[{\"size\": \"102456\"}]");
    }
}

View File

@ -31,10 +31,10 @@ public class NumbersUtilTest {
public void toReadableSize() {
Locale.setDefault(Locale.US); // Fixed locale for the test
assertEquals("1.0000 TB", NumbersUtil.toReadableSize((1024l * 1024l * 1024l * 1024l)));
assertEquals("1.00 GB", NumbersUtil.toReadableSize(1024 * 1024 * 1024));
assertEquals("1.00 MB", NumbersUtil.toReadableSize(1024 * 1024));
assertEquals("1.00 KB", NumbersUtil.toReadableSize((1024)));
assertEquals("1023 bytes", NumbersUtil.toReadableSize((1023)));
assertEquals("1.00 GB", NumbersUtil.toReadableSize(1024L * 1024 * 1024));
assertEquals("1.00 MB", NumbersUtil.toReadableSize(1024L * 1024));
assertEquals("1.00 KB", NumbersUtil.toReadableSize((1024L)));
assertEquals("1023 bytes", NumbersUtil.toReadableSize((1023L)));
}
@Test
@ -44,4 +44,9 @@ public class NumbersUtilTest {
assertEquals(257, NumbersUtil.bytesToLong(new byte[] {0, 0, 0, 0, 0, 0, 1, 1}));
}
@Test
// toReadableSize(Long) must tolerate a null argument and return the string "null"
// rather than throwing a NullPointerException.
public void nullToLong() {
assertEquals("null", NumbersUtil.toReadableSize(null));
}
}

View File

@ -40,6 +40,8 @@ import com.cloud.exception.CloudException;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.utils.Pair;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class DatastoreMO extends BaseMO {
private static final Logger s_logger = Logger.getLogger(DatastoreMO.class);
@ -367,7 +369,7 @@ public class DatastoreMO extends BaseMO {
List<FileInfo> info = result.getFile();
for (FileInfo fi : info) {
if (file.getFileName().equals(fi.getPath())) {
s_logger.debug("File found = " + fi.getPath() + ", size=" + fi.getFileSize());
s_logger.debug("File found = " + fi.getPath() + ", size=" + toHumanReadableSize(fi.getFileSize()));
return fi.getFileSize();
}
}

View File

@ -114,6 +114,8 @@ import com.cloud.utils.Ternary;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.script.Script;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
public class VirtualMachineMO extends BaseMO {
private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class);
private static final ExecutorService MonitorServiceExecutor = Executors.newCachedThreadPool(new NamedThreadFactory("VM-Question-Monitor"));
@ -1744,7 +1746,7 @@ public class VirtualMachineMO extends BaseMO {
@Override
public void action(Long param) {
if (s_logger.isTraceEnabled()) {
s_logger.trace("Download progress " + param + "/" + totalBytes);
s_logger.trace("Download progress " + param + "/" + toHumanReadableSize(totalBytes));
}
progressReporter.reportProgress((int)(param * 100 / totalBytes));
}