CLOUDSTACK-4816: Provide a configurable option to choose single-part vs.
multipart upload to S3 object storage, based on object size.
This commit is contained in:
Min Chen 2013-10-17 10:28:59 -07:00
parent 85f83a4e83
commit 25acfbad78
7 changed files with 47 additions and 15 deletions

View File

@ -40,6 +40,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
private Date created;
private boolean enableRRS;
private boolean multipartEnabled;
private long maxSingleUploadSizeInBytes;
public S3TO() {
@ -51,7 +52,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
final String secretKey, final String endPoint,
final String bucketName, final Boolean httpsFlag,
final Integer connectionTimeout, final Integer maxErrorRetry,
final Integer socketTimeout, final Date created, final boolean enableRRS, final boolean multipart) {
final Integer socketTimeout, final Date created, final boolean enableRRS, final long maxUploadSize) {
super();
@ -67,7 +68,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
this.socketTimeout = socketTimeout;
this.created = created;
this.enableRRS = enableRRS;
this.multipartEnabled = multipart;
this.maxSingleUploadSizeInBytes = maxUploadSize;
}
@ -278,14 +279,28 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
this.enableRRS = enableRRS;
}
public boolean isMultipartEnabled() {
return multipartEnabled;
public long getMaxSingleUploadSizeInBytes() {
return maxSingleUploadSizeInBytes;
}
public void setMultipartEnabled(boolean multipartEnabled) {
this.multipartEnabled = multipartEnabled;
public void setMaxSingleUploadSizeInBytes(long maxSingleUploadSizeInBytes) {
this.maxSingleUploadSizeInBytes = maxSingleUploadSizeInBytes;
}
/**
 * Decides whether an object of the given size should be sent to S3 with the
 * single-part PUT API instead of multipart upload, based on the configured
 * threshold {@code maxSingleUploadSizeInBytes}.
 *
 * Threshold semantics (see s3.singleupload.max.size):
 *   negative -> always single-part upload;
 *   zero     -> always multipart upload;
 *   positive -> single-part only for objects strictly smaller than the threshold.
 *
 * @param objSize size of the object to upload, in bytes
 * @return {@code true} if single-part upload should be used, {@code false} for multipart
 */
public boolean getSingleUpload(long objSize) {
    if (maxSingleUploadSizeInBytes < 0) {
        // always use single-part upload
        return true;
    }
    if (maxSingleUploadSizeInBytes == 0) {
        // always use multipart upload
        return false;
    }
    // positive threshold: compare object size against the configured limit
    return objSize < maxSingleUploadSizeInBytes;
}
}

View File

@ -258,7 +258,8 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
});
if ( s3.isMultipartEnabled()){
if ( !s3.getSingleUpload(remoteSize) ){
// use TransferManager to do multipart upload
S3Utils.mputObject(s3, putObjectRequest);
} else{

View File

@ -67,13 +67,21 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
.get(ApiConstants.S3_SOCKET_TIMEOUT)), imgStore.getCreated(),
_configDao.getValue(Config.S3EnableRRS.toString()) == null ? false : Boolean.parseBoolean(_configDao
.getValue(Config.S3EnableRRS.toString())),
_configDao.getValue(Config.S3EnableMultiPartUpload.toString()) == null ? true : Boolean.parseBoolean(_configDao
.getValue(Config.S3EnableMultiPartUpload.toString()))
getMaxSingleUploadSizeInBytes()
);
}
/**
 * Reads the configured S3 single-part upload size limit (expressed in GB by
 * {@code Config.S3MaxSingleUploadSize}) and converts it to bytes.
 *
 * Falls back to the 5 GB default when the configuration value is absent or
 * not a parsable long ({@link Long#parseLong} throws NumberFormatException
 * for {@code null} as well as for malformed input).
 *
 * @return the single-part upload size limit in bytes
 */
private long getMaxSingleUploadSizeInBytes() {
    final long bytesPerGb = 1024L * 1024L * 1024L;
    final String configuredGb = _configDao.getValue(Config.S3MaxSingleUploadSize.toString());
    try {
        return Long.parseLong(configuredGb) * bytesPerGb;
    } catch (NumberFormatException e) {
        // value missing or malformed -> default to 5 GB (S3 single-part PUT hard limit)
        return 5L * bytesPerGb;
    }
}
@Override
public String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject) {
// for S3, no need to do anything, just return template url for

View File

@ -377,7 +377,8 @@ public enum Config {
// object store
S3EnableRRS("Advanced", ManagementServer.class, Boolean.class, "s3.rrs.enabled", "false", "enable s3 reduced redundancy storage", null),
S3EnableMultiPartUpload("Advanced", ManagementServer.class, Boolean.class, "s3.multipart.enabled", "true", "enable s3 multipart upload", null),
S3MaxSingleUploadSize("Advanced", ManagementServer.class, Integer.class, "s3.singleupload.max.size", "5", "The maximum size limit for S3 single part upload API(in GB). If it is set to 0, then it means always use multi-part upload to upload object to S3. " +
"If it is set to -1, then it means always use single-part upload to upload object to S3. ", null),
// Ldap
LdapBasedn("Advanced", ManagementServer.class, String.class, "ldap.basedn", null, "Sets the basedn for LDAP", null),

View File

@ -857,10 +857,12 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
}
}
}
long srcSize = srcFile.length();
ImageFormat format = getTemplateFormat(srcFile.getName());
String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName();
if (s3.isMultipartEnabled()){
mputFile(s3, srcFile, bucket, key);
if (!s3.getSingleUpload(srcSize)){
mputFile(s3, srcFile, bucket, key);
} else{
putFile(s3, srcFile, bucket, key);
}

View File

@ -20,7 +20,8 @@
--;
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.multipart.enabled', 'true', 'enable s3 multipart upload');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.singleupload.max.size', '5',
'The maximum size limit for S3 single part upload API(in GB). If it is set to 0, then it means always use multi-part upload to upload object to S3. If it is set to -1, then it means always use single-part upload to upload object to S3.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Storage", 'DEFAULT', 'management-server', "enable.ha.storage.migration", "true", "Enable/disable storage migration across primary storage during HA");

View File

@ -171,6 +171,10 @@ public final class S3Utils {
assert clientOptions != null;
assert req != null;
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(format("Sending stream as S3 object using PutObjectRequest"));
}
acquireClient(clientOptions).putObject(req);
}