CLOUDSTACK-9062: Improve S3 implementation.

The S3 implementation is far from finished, this commit focuses on the basics.

 - Upgrade AWS SDK to latest version.
 - Rewrite S3 Template downloader.
 - Rewrite S3Utils utility class.
 - Improve addImageStoreS3 API command.
 - Split various classes for convenience.
 - Various minor improvements and code optimizations.

A side effect of the new AWS SDK is that it, by default, uses the V4 signature. Therefore I added an option to specify the Signer, so it stays compatible with previous versions.
This commit is contained in:
Boris Schrijver 2015-11-13 02:19:24 +01:00
parent fe2917e91b
commit 5c0366c99e
48 changed files with 1055 additions and 1290 deletions

View File

@ -77,7 +77,17 @@ under the License.
</category> </category>
<category name="net"> <category name="net">
<priority value="INFO"/> <priority value="INFO"/>
</category>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category> </category>
<!-- ======================= --> <!-- ======================= -->

View File

@ -21,9 +21,9 @@ import java.util.Date;
import com.cloud.agent.api.LogLevel; import com.cloud.agent.api.LogLevel;
import com.cloud.agent.api.LogLevel.Log4jLevel; import com.cloud.agent.api.LogLevel.Log4jLevel;
import com.cloud.storage.DataStoreRole; import com.cloud.storage.DataStoreRole;
import com.cloud.utils.S3Utils; import com.cloud.utils.storage.S3.ClientOptions;
public final class S3TO implements S3Utils.ClientOptions, DataStoreTO { public final class S3TO implements ClientOptions, DataStoreTO {
private Long id; private Long id;
private String uuid; private String uuid;
@ -33,6 +33,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
private String secretKey; private String secretKey;
private String endPoint; private String endPoint;
private String bucketName; private String bucketName;
private String signer;
private Boolean httpsFlag; private Boolean httpsFlag;
private Boolean useTCPKeepAlive; private Boolean useTCPKeepAlive;
private Integer connectionTimeout; private Integer connectionTimeout;
@ -44,17 +45,9 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
private long maxSingleUploadSizeInBytes; private long maxSingleUploadSizeInBytes;
private static final String pathSeparator = "/"; private static final String pathSeparator = "/";
public S3TO() {
super();
}
public S3TO(final Long id, final String uuid, final String accessKey, final String secretKey, final String endPoint, final String bucketName, public S3TO(final Long id, final String uuid, final String accessKey, final String secretKey, final String endPoint, final String bucketName,
final Boolean httpsFlag, final Integer connectionTimeout, final Integer maxErrorRetry, final Integer socketTimeout, final Date created, final String signer, final Boolean httpsFlag, final Integer connectionTimeout, final Integer maxErrorRetry, final Integer socketTimeout,
final boolean enableRRS, final long maxUploadSize, final Integer connectionTtl, final Boolean useTCPKeepAlive) { final Date created, final boolean enableRRS, final long maxUploadSize, final Integer connectionTtl, final Boolean useTCPKeepAlive) {
super();
this.id = id; this.id = id;
this.uuid = uuid; this.uuid = uuid;
@ -62,6 +55,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
this.secretKey = secretKey; this.secretKey = secretKey;
this.endPoint = endPoint; this.endPoint = endPoint;
this.bucketName = bucketName; this.bucketName = bucketName;
this.signer = signer;
this.httpsFlag = httpsFlag; this.httpsFlag = httpsFlag;
this.connectionTimeout = connectionTimeout; this.connectionTimeout = connectionTimeout;
this.maxErrorRetry = maxErrorRetry; this.maxErrorRetry = maxErrorRetry;
@ -74,98 +68,6 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
} }
@Override
public boolean equals(final Object thatObject) {
if (this == thatObject) {
return true;
}
if (thatObject == null || getClass() != thatObject.getClass()) {
return false;
}
final S3TO thatS3TO = (S3TO)thatObject;
if (httpsFlag != null ? !httpsFlag.equals(thatS3TO.httpsFlag) : thatS3TO.httpsFlag != null) {
return false;
}
if (accessKey != null ? !accessKey.equals(thatS3TO.accessKey) : thatS3TO.accessKey != null) {
return false;
}
if (connectionTimeout != null ? !connectionTimeout.equals(thatS3TO.connectionTimeout) : thatS3TO.connectionTimeout != null) {
return false;
}
if (endPoint != null ? !endPoint.equals(thatS3TO.endPoint) : thatS3TO.endPoint != null) {
return false;
}
if (id != null ? !id.equals(thatS3TO.id) : thatS3TO.id != null) {
return false;
}
if (uuid != null ? !uuid.equals(thatS3TO.uuid) : thatS3TO.uuid != null) {
return false;
}
if (maxErrorRetry != null ? !maxErrorRetry.equals(thatS3TO.maxErrorRetry) : thatS3TO.maxErrorRetry != null) {
return false;
}
if (secretKey != null ? !secretKey.equals(thatS3TO.secretKey) : thatS3TO.secretKey != null) {
return false;
}
if (socketTimeout != null ? !socketTimeout.equals(thatS3TO.socketTimeout) : thatS3TO.socketTimeout != null) {
return false;
}
if (connectionTtl != null ? !connectionTtl.equals(thatS3TO.connectionTtl) : thatS3TO.connectionTtl != null) {
return false;
}
if (useTCPKeepAlive != null ? !useTCPKeepAlive.equals(thatS3TO.useTCPKeepAlive) : thatS3TO.useTCPKeepAlive != null) {
return false;
}
if (bucketName != null ? !bucketName.equals(thatS3TO.bucketName) : thatS3TO.bucketName != null) {
return false;
}
if (created != null ? !created.equals(thatS3TO.created) : thatS3TO.created != null) {
return false;
}
if (enableRRS != thatS3TO.enableRRS) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = id != null ? id.hashCode() : 0;
result = 31 * result + (accessKey != null ? accessKey.hashCode() : 0);
result = 31 * result + (secretKey != null ? secretKey.hashCode() : 0);
result = 31 * result + (endPoint != null ? endPoint.hashCode() : 0);
result = 31 * result + (bucketName != null ? bucketName.hashCode() : 0);
result = 31 * result + (httpsFlag ? 1 : 0);
result = 31 * result + (connectionTimeout != null ? connectionTimeout.hashCode() : 0);
result = 31 * result + (maxErrorRetry != null ? maxErrorRetry.hashCode() : 0);
result = 31 * result + (socketTimeout != null ? socketTimeout.hashCode() : 0);
result = 31 * result + (connectionTtl != null ? connectionTtl.hashCode() : 0);
result = 31 * result + (useTCPKeepAlive ? 1 : 0);
return result;
}
public Long getId() { public Long getId() {
return this.id; return this.id;
} }
@ -223,6 +125,15 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
this.bucketName = bucketName; this.bucketName = bucketName;
} }
@Override
public String getSigner() {
return this.signer;
}
public void setSigner(final String signer) {
this.signer = signer;
}
@Override @Override
public Boolean isHttps() { public Boolean isHttps() {
return this.httpsFlag; return this.httpsFlag;
@ -327,4 +238,100 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO {
public String getPathSeparator() { public String getPathSeparator() {
return pathSeparator; return pathSeparator;
} }
@Override
public boolean equals(final Object thatObject) {
if (this == thatObject) {
return true;
}
if (thatObject == null || getClass() != thatObject.getClass()) {
return false;
}
final S3TO thatS3TO = (S3TO)thatObject;
if (httpsFlag != null ? !httpsFlag.equals(thatS3TO.httpsFlag) : thatS3TO.httpsFlag != null) {
return false;
}
if (accessKey != null ? !accessKey.equals(thatS3TO.accessKey) : thatS3TO.accessKey != null) {
return false;
}
if (connectionTimeout != null ? !connectionTimeout.equals(thatS3TO.connectionTimeout) : thatS3TO.connectionTimeout != null) {
return false;
}
if (endPoint != null ? !endPoint.equals(thatS3TO.endPoint) : thatS3TO.endPoint != null) {
return false;
}
if (id != null ? !id.equals(thatS3TO.id) : thatS3TO.id != null) {
return false;
}
if (uuid != null ? !uuid.equals(thatS3TO.uuid) : thatS3TO.uuid != null) {
return false;
}
if (maxErrorRetry != null ? !maxErrorRetry.equals(thatS3TO.maxErrorRetry) : thatS3TO.maxErrorRetry != null) {
return false;
}
if (secretKey != null ? !secretKey.equals(thatS3TO.secretKey) : thatS3TO.secretKey != null) {
return false;
}
if (socketTimeout != null ? !socketTimeout.equals(thatS3TO.socketTimeout) : thatS3TO.socketTimeout != null) {
return false;
}
if (connectionTtl != null ? !connectionTtl.equals(thatS3TO.connectionTtl) : thatS3TO.connectionTtl != null) {
return false;
}
if (useTCPKeepAlive != null ? !useTCPKeepAlive.equals(thatS3TO.useTCPKeepAlive) : thatS3TO.useTCPKeepAlive != null) {
return false;
}
if (bucketName != null ? !bucketName.equals(thatS3TO.bucketName) : thatS3TO.bucketName != null) {
return false;
}
if (signer != null ? !signer.equals(thatS3TO.signer) : thatS3TO.signer != null) {
return false;
}
if (created != null ? !created.equals(thatS3TO.created) : thatS3TO.created != null) {
return false;
}
if (enableRRS != thatS3TO.enableRRS) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = id != null ? id.hashCode() : 0;
result = 31 * result + (accessKey != null ? accessKey.hashCode() : 0);
result = 31 * result + (secretKey != null ? secretKey.hashCode() : 0);
result = 31 * result + (endPoint != null ? endPoint.hashCode() : 0);
result = 31 * result + (bucketName != null ? bucketName.hashCode() : 0);
result = 31 * result + (signer != null ? signer.hashCode() : 0);
result = 31 * result + (httpsFlag ? 1 : 0);
result = 31 * result + (connectionTimeout != null ? connectionTimeout.hashCode() : 0);
result = 31 * result + (maxErrorRetry != null ? maxErrorRetry.hashCode() : 0);
result = 31 * result + (socketTimeout != null ? socketTimeout.hashCode() : 0);
result = 31 * result + (connectionTtl != null ? connectionTtl.hashCode() : 0);
result = 31 * result + (useTCPKeepAlive ? 1 : 0);
return result;
}
} }

View File

@ -14,6 +14,7 @@
// KIND, either express or implied. See the License for the // KIND, either express or implied. See the License for the
// specific language governing permissions and limitations // specific language governing permissions and limitations
// under the License. // under the License.
package com.cloud.storage; package com.cloud.storage;
import java.net.UnknownHostException; import java.net.UnknownHostException;
@ -38,14 +39,14 @@ public interface StorageService {
* Create StoragePool based on uri * Create StoragePool based on uri
* *
* @param cmd * @param cmd
* the command object that specifies the zone, cluster/pod, URI, details, etc. to use to create the * The command object that specifies the zone, cluster/pod, URI, details, etc. to use to create the
* storage pool. * storage pool.
* @return * @return
* The StoragePool created.
* @throws ResourceInUseException * @throws ResourceInUseException
* @throws IllegalArgumentException * @throws IllegalArgumentException
* @throws UnknownHostException * @throws UnknownHostException
* @throws ResourceUnavailableException * @throws ResourceUnavailableException
* TODO
*/ */
StoragePool createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException; StoragePool createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException;
@ -63,15 +64,13 @@ public interface StorageService {
/** /**
* Enable maintenance for primary storage * Enable maintenance for primary storage
* *
* @param cmd * @param primaryStorageId
* - the command specifying primaryStorageId * - the primaryStorageId
* @return the primary storage pool * @return the primary storage pool
* @throws ResourceUnavailableException * @throws ResourceUnavailableException
* TODO
* @throws InsufficientCapacityException * @throws InsufficientCapacityException
* TODO
*/ */
public StoragePool preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException; StoragePool preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException;
/** /**
* Complete maintenance for primary storage * Complete maintenance for primary storage
@ -80,19 +79,18 @@ public interface StorageService {
* - the command specifying primaryStorageId * - the command specifying primaryStorageId
* @return the primary storage pool * @return the primary storage pool
* @throws ResourceUnavailableException * @throws ResourceUnavailableException
* TODO
*/ */
public StoragePool cancelPrimaryStorageForMaintenance(CancelPrimaryStorageMaintenanceCmd cmd) throws ResourceUnavailableException; StoragePool cancelPrimaryStorageForMaintenance(CancelPrimaryStorageMaintenanceCmd cmd) throws ResourceUnavailableException;
public StoragePool updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException; StoragePool updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException;
public StoragePool getStoragePool(long id); StoragePool getStoragePool(long id);
boolean deleteImageStore(DeleteImageStoreCmd cmd); boolean deleteImageStore(DeleteImageStoreCmd cmd);
boolean deleteSecondaryStagingStore(DeleteSecondaryStagingStoreCmd cmd); boolean deleteSecondaryStagingStore(DeleteSecondaryStagingStoreCmd cmd);
public ImageStore discoverImageStore(String name, String url, String providerName, Long dcId, Map details) throws IllegalArgumentException, DiscoveryException, ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException,
InvalidParameterValueException; InvalidParameterValueException;
@ -107,7 +105,7 @@ public interface StorageService {
* @throws DiscoveryException * @throws DiscoveryException
* @throws InvalidParameterValueException * @throws InvalidParameterValueException
*/ */
public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException,
InvalidParameterValueException; InvalidParameterValueException;

View File

@ -501,6 +501,9 @@ public class ApiConstants {
public static final String S3_SECRET_KEY = "secretkey"; public static final String S3_SECRET_KEY = "secretkey";
public static final String S3_END_POINT = "endpoint"; public static final String S3_END_POINT = "endpoint";
public static final String S3_BUCKET_NAME = "bucket"; public static final String S3_BUCKET_NAME = "bucket";
public static final String S3_SIGNER = "s3signer";
public static final String S3_V3_SIGNER = "S3SignerType";
public static final String S3_V4_SIGNER = "AWSS3V4SignerType";
public static final String S3_HTTPS_FLAG = "usehttps"; public static final String S3_HTTPS_FLAG = "usehttps";
public static final String S3_CONNECTION_TIMEOUT = "connectiontimeout"; public static final String S3_CONNECTION_TIMEOUT = "connectiontimeout";
public static final String S3_CONNECTION_TTL = "connectionttl"; public static final String S3_CONNECTION_TTL = "connectionttl";

View File

@ -26,6 +26,7 @@ import static org.apache.cloudstack.api.ApiConstants.S3_CONNECTION_TTL;
import static org.apache.cloudstack.api.ApiConstants.S3_END_POINT; import static org.apache.cloudstack.api.ApiConstants.S3_END_POINT;
import static org.apache.cloudstack.api.ApiConstants.S3_HTTPS_FLAG; import static org.apache.cloudstack.api.ApiConstants.S3_HTTPS_FLAG;
import static org.apache.cloudstack.api.ApiConstants.S3_MAX_ERROR_RETRY; import static org.apache.cloudstack.api.ApiConstants.S3_MAX_ERROR_RETRY;
import static org.apache.cloudstack.api.ApiConstants.S3_SIGNER;
import static org.apache.cloudstack.api.ApiConstants.S3_SECRET_KEY; import static org.apache.cloudstack.api.ApiConstants.S3_SECRET_KEY;
import static org.apache.cloudstack.api.ApiConstants.S3_SOCKET_TIMEOUT; import static org.apache.cloudstack.api.ApiConstants.S3_SOCKET_TIMEOUT;
import static org.apache.cloudstack.api.ApiConstants.S3_USE_TCP_KEEPALIVE; import static org.apache.cloudstack.api.ApiConstants.S3_USE_TCP_KEEPALIVE;
@ -36,6 +37,7 @@ import static org.apache.cloudstack.api.BaseCmd.CommandType.STRING;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import com.cloud.utils.storage.S3.ClientOptions;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.APICommand;
@ -54,12 +56,12 @@ import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.ResourceUnavailableException;
import com.cloud.storage.ImageStore; import com.cloud.storage.ImageStore;
@APICommand(name = "addS3", description = "Adds S3", responseObject = ImageStoreResponse.class, since = "4.0.0", @APICommand(name = "addImageStoreS3", description = "Adds S3 Image Store", responseObject = ImageStoreResponse.class, since = "4.7.0",
requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) requestHasSensitiveInfo = true, responseHasSensitiveInfo = false)
public final class AddS3Cmd extends BaseCmd { public final class AddImageStoreS3CMD extends BaseCmd implements ClientOptions {
public static final Logger s_logger = Logger.getLogger(AddS3Cmd.class.getName()); public static final Logger s_logger = Logger.getLogger(AddImageStoreS3CMD.class.getName());
private static final String s_name = "adds3response"; private static final String s_name = "addImageStoreS3Response";
@Parameter(name = S3_ACCESS_KEY, type = STRING, required = true, description = "S3 access key") @Parameter(name = S3_ACCESS_KEY, type = STRING, required = true, description = "S3 access key")
private String accessKey; private String accessKey;
@ -67,41 +69,49 @@ public final class AddS3Cmd extends BaseCmd {
@Parameter(name = S3_SECRET_KEY, type = STRING, required = true, description = "S3 secret key") @Parameter(name = S3_SECRET_KEY, type = STRING, required = true, description = "S3 secret key")
private String secretKey; private String secretKey;
@Parameter(name = S3_END_POINT, type = STRING, required = false, description = "S3 host name") @Parameter(name = S3_END_POINT, type = STRING, required = true, description = "S3 endpoint")
private String endPoint; private String endPoint;
@Parameter(name = S3_BUCKET_NAME, type = STRING, required = true, description = "name of the template storage bucket") @Parameter(name = S3_BUCKET_NAME, type = STRING, required = true, description = "Name of the storage bucket")
private String bucketName; private String bucketName;
@Parameter(name = S3_HTTPS_FLAG, type = BOOLEAN, required = false, description = "connect to the S3 endpoint via HTTPS?") @Parameter(name = S3_SIGNER, type = STRING, required = false, description = "Signer Algorithm to use, either S3SignerType or AWSS3V4SignerType")
private String signer;
@Parameter(name = S3_HTTPS_FLAG, type = BOOLEAN, required = false, description = "Use HTTPS instead of HTTP")
private Boolean httpsFlag; private Boolean httpsFlag;
@Parameter(name = S3_CONNECTION_TIMEOUT, type = INTEGER, required = false, description = "connection timeout (milliseconds)") @Parameter(name = S3_CONNECTION_TIMEOUT, type = INTEGER, required = false, description = "Connection timeout (milliseconds)")
private Integer connectionTimeout; private Integer connectionTimeout;
@Parameter(name = S3_MAX_ERROR_RETRY, type = INTEGER, required = false, description = "maximum number of times to retry on error") @Parameter(name = S3_MAX_ERROR_RETRY, type = INTEGER, required = false, description = "Maximum number of times to retry on error")
private Integer maxErrorRetry; private Integer maxErrorRetry;
@Parameter(name = S3_SOCKET_TIMEOUT, type = INTEGER, required = false, description = "socket timeout (milliseconds)") @Parameter(name = S3_SOCKET_TIMEOUT, type = INTEGER, required = false, description = "Socket timeout (milliseconds)")
private Integer socketTimeout; private Integer socketTimeout;
@Parameter(name = S3_CONNECTION_TTL, type = INTEGER, required = false, description = "connection ttl (milliseconds)") @Parameter(name = S3_CONNECTION_TTL, type = INTEGER, required = false, description = "Connection TTL (milliseconds)")
private Integer connectionTtl; private Integer connectionTtl;
@Parameter(name = S3_USE_TCP_KEEPALIVE, type = BOOLEAN, required = false, description = "whether tcp keepalive is used") @Parameter(name = S3_USE_TCP_KEEPALIVE, type = BOOLEAN, required = false, description = "Whether TCP keep-alive is used")
private Boolean useTCPKeepAlive; private Boolean useTCPKeepAlive;
@Override @Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException, NetworkRuleConflictException { ResourceAllocationException, NetworkRuleConflictException {
Map<String, String> dm = new HashMap<String, String>(); Map<String, String> dm = new HashMap();
dm.put(ApiConstants.S3_ACCESS_KEY, getAccessKey()); dm.put(ApiConstants.S3_ACCESS_KEY, getAccessKey());
dm.put(ApiConstants.S3_SECRET_KEY, getSecretKey()); dm.put(ApiConstants.S3_SECRET_KEY, getSecretKey());
dm.put(ApiConstants.S3_END_POINT, getEndPoint()); dm.put(ApiConstants.S3_END_POINT, getEndPoint());
dm.put(ApiConstants.S3_BUCKET_NAME, getBucketName()); dm.put(ApiConstants.S3_BUCKET_NAME, getBucketName());
if (getHttpsFlag() != null) {
dm.put(ApiConstants.S3_HTTPS_FLAG, getHttpsFlag().toString()); if (getSigner() != null && (getSigner().equals(ApiConstants.S3_V3_SIGNER) || getSigner().equals(ApiConstants.S3_V4_SIGNER))) {
dm.put(ApiConstants.S3_SIGNER, getSigner());
}
if (isHttps() != null) {
dm.put(ApiConstants.S3_HTTPS_FLAG, isHttps().toString());
} }
if (getConnectionTimeout() != null) { if (getConnectionTimeout() != null) {
dm.put(ApiConstants.S3_CONNECTION_TIMEOUT, getConnectionTimeout().toString()); dm.put(ApiConstants.S3_CONNECTION_TIMEOUT, getConnectionTimeout().toString());
@ -119,17 +129,16 @@ public final class AddS3Cmd extends BaseCmd {
dm.put(ApiConstants.S3_USE_TCP_KEEPALIVE, getUseTCPKeepAlive().toString()); dm.put(ApiConstants.S3_USE_TCP_KEEPALIVE, getUseTCPKeepAlive().toString());
} }
try{ try{
ImageStore result = _storageService.discoverImageStore(null, null, "S3", null, dm); ImageStore result = _storageService.discoverImageStore(null, null, "S3", null, dm);
ImageStoreResponse storeResponse = null; ImageStoreResponse storeResponse;
if (result != null) { if (result != null) {
storeResponse = _responseGenerator.createImageStoreResponse(result); storeResponse = _responseGenerator.createImageStoreResponse(result);
storeResponse.setResponseName(getCommandName()); storeResponse.setResponseName(getCommandName());
storeResponse.setObjectName("secondarystorage"); storeResponse.setObjectName("imagestore");
setResponseObject(storeResponse); setResponseObject(storeResponse);
} else { } else {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add S3 secondary storage"); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add S3 Image Store.");
} }
} catch (DiscoveryException ex) { } catch (DiscoveryException ex) {
s_logger.warn("Exception: ", ex); s_logger.warn("Exception: ", ex);
@ -137,6 +146,60 @@ public final class AddS3Cmd extends BaseCmd {
} }
} }
@Override
public String getCommandName() {
return s_name;
}
@Override
public long getEntityOwnerId() {
return ACCOUNT_ID_SYSTEM;
}
public String getAccessKey() {
return accessKey;
}
public String getSecretKey() {
return secretKey;
}
public String getEndPoint() {
return endPoint;
}
public String getBucketName() {
return bucketName;
}
public String getSigner() {
return signer;
}
public Boolean isHttps() {
return httpsFlag;
}
public Integer getConnectionTimeout() {
return connectionTimeout;
}
public Integer getMaxErrorRetry() {
return maxErrorRetry;
}
public Integer getSocketTimeout() {
return socketTimeout;
}
public Integer getConnectionTtl() {
return connectionTtl;
}
public Boolean getUseTCPKeepAlive() {
return useTCPKeepAlive;
}
@Override @Override
public boolean equals(final Object thatObject) { public boolean equals(final Object thatObject) {
@ -148,7 +211,7 @@ public final class AddS3Cmd extends BaseCmd {
return false; return false;
} }
final AddS3Cmd thatAddS3Cmd = (AddS3Cmd)thatObject; final AddImageStoreS3CMD thatAddS3Cmd = (AddImageStoreS3CMD)thatObject;
if (httpsFlag != null ? !httpsFlag.equals(thatAddS3Cmd.httpsFlag) : thatAddS3Cmd.httpsFlag != null) { if (httpsFlag != null ? !httpsFlag.equals(thatAddS3Cmd.httpsFlag) : thatAddS3Cmd.httpsFlag != null) {
return false; return false;
@ -191,7 +254,6 @@ public final class AddS3Cmd extends BaseCmd {
} }
return true; return true;
} }
@Override @Override
@ -201,64 +263,14 @@ public final class AddS3Cmd extends BaseCmd {
result = 31 * result + (secretKey != null ? secretKey.hashCode() : 0); result = 31 * result + (secretKey != null ? secretKey.hashCode() : 0);
result = 31 * result + (endPoint != null ? endPoint.hashCode() : 0); result = 31 * result + (endPoint != null ? endPoint.hashCode() : 0);
result = 31 * result + (bucketName != null ? bucketName.hashCode() : 0); result = 31 * result + (bucketName != null ? bucketName.hashCode() : 0);
result = 31 * result + (httpsFlag != null && httpsFlag == true ? 1 : 0); result = 31 * result + (signer != null ? signer.hashCode() : 0);
result = 31 * result + (httpsFlag != null && httpsFlag ? 1 : 0);
result = 31 * result + (connectionTimeout != null ? connectionTimeout.hashCode() : 0); result = 31 * result + (connectionTimeout != null ? connectionTimeout.hashCode() : 0);
result = 31 * result + (maxErrorRetry != null ? maxErrorRetry.hashCode() : 0); result = 31 * result + (maxErrorRetry != null ? maxErrorRetry.hashCode() : 0);
result = 31 * result + (socketTimeout != null ? socketTimeout.hashCode() : 0); result = 31 * result + (socketTimeout != null ? socketTimeout.hashCode() : 0);
result = 31 * result + (connectionTtl != null ? connectionTtl.hashCode() : 0); result = 31 * result + (connectionTtl != null ? connectionTtl.hashCode() : 0);
result = 31 * result + (useTCPKeepAlive != null && useTCPKeepAlive == true ? 1 : 0); result = 31 * result + (useTCPKeepAlive != null && useTCPKeepAlive ? 1 : 0);
return result; return result;
}
@Override
public String getCommandName() {
return s_name;
}
@Override
public long getEntityOwnerId() {
return ACCOUNT_ID_SYSTEM;
}
public String getAccessKey() {
return accessKey;
}
public String getSecretKey() {
return secretKey;
}
public String getEndPoint() {
return endPoint;
}
public String getBucketName() {
return bucketName;
}
public Boolean getHttpsFlag() {
return httpsFlag;
}
public Integer getConnectionTimeout() {
return connectionTimeout;
}
public Integer getMaxErrorRetry() {
return maxErrorRetry;
}
public Integer getSocketTimeout() {
return socketTimeout;
}
public Integer getConnectionTtl() {
return connectionTtl;
}
public Boolean getUseTCPKeepAlive() {
return useTCPKeepAlive;
} }
} }

View File

@ -1,55 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.api.command.admin.storage;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ImageStoreResponse;
import org.apache.cloudstack.api.response.ListResponse;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
@APICommand(name = "listS3s", description = "Lists S3s", responseObject = ImageStoreResponse.class, since = "4.0.0",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ListS3sCmd extends BaseListCmd {
private static final String COMMAND_NAME = "lists3sresponse";
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException, NetworkRuleConflictException {
ListImageStoresCmd cmd = new ListImageStoresCmd();
cmd.setProvider("S3");
ListResponse<ImageStoreResponse> response = _queryService.searchForImageStores(cmd);
response.setResponseName(getCommandName());
this.setResponseObject(response);
}
@Override
public String getCommandName() {
return COMMAND_NAME;
}
}

View File

@ -32,7 +32,7 @@ import org.apache.cloudstack.api.response.TemplateResponse;
import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceAllocationException;
import com.cloud.template.VirtualMachineTemplate; import com.cloud.template.VirtualMachineTemplate;
@APICommand(name = "registerTemplate", description = "Registers an existing template into the CloudStack cloud. ", responseObject = TemplateResponse.class, responseView = ResponseView.Full, @APICommand(name = "registerTemplate", description = "Registers an existing template into the CloudStack cloud.", responseObject = TemplateResponse.class, responseView = ResponseView.Full,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class RegisterTemplateCmdByAdmin extends RegisterTemplateCmd { public class RegisterTemplateCmdByAdmin extends RegisterTemplateCmd {
public static final Logger s_logger = Logger.getLogger(RegisterTemplateCmdByAdmin.class.getName()); public static final Logger s_logger = Logger.getLogger(RegisterTemplateCmdByAdmin.class.getName());

View File

@ -282,12 +282,9 @@ listCapacity=3
addSwift=1 addSwift=1
listSwifts=1 listSwifts=1
#### s3 commands
addS3=1
listS3s=1
#### image store commands #### image store commands
addImageStore=1 addImageStore=1
addImageStoreS3=1
listImageStores=1 listImageStores=1
deleteImageStore=1 deleteImageStore=1
createSecondaryStagingStore=1 createSecondaryStagingStore=1

View File

@ -162,6 +162,16 @@ under the License.
<appender-ref ref="APISERVER"/> <appender-ref ref="APISERVER"/>
</logger> </logger>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category>
<!-- ============================== --> <!-- ============================== -->
<!-- Add or remove these logger for SNMP, this logger is for SNMP alerts plugin --> <!-- Add or remove these logger for SNMP, this logger is for SNMP alerts plugin -->
<!-- ============================== --> <!-- ============================== -->

View File

@ -28,7 +28,6 @@ import com.cloud.host.Host;
import com.cloud.utils.component.Manager; import com.cloud.utils.component.Manager;
/** /**
*
* ServerResource is a generic container to execute commands sent * ServerResource is a generic container to execute commands sent
*/ */
public interface ServerResource extends Manager { public interface ServerResource extends Manager {
@ -70,4 +69,5 @@ public interface ServerResource extends Manager {
IAgentControl getAgentControl(); IAgentControl getAgentControl();
void setAgentControl(IAgentControl agentControl); void setAgentControl(IAgentControl agentControl);
} }

View File

@ -47,10 +47,10 @@ import org.apache.log4j.Logger;
import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
import com.cloud.agent.api.storage.Proxy;
import com.cloud.storage.StorageLayer; import com.cloud.storage.StorageLayer;
import com.cloud.utils.Pair; import com.cloud.utils.Pair;
import com.cloud.utils.UriUtils; import com.cloud.utils.UriUtils;
import com.cloud.utils.net.Proxy;
/** /**
* Download a template file using HTTP * Download a template file using HTTP

View File

@ -19,303 +19,235 @@
package com.cloud.storage.template; package com.cloud.storage.template;
import static com.cloud.utils.StringUtils.join; import com.amazonaws.event.ProgressEvent;
import static java.util.Arrays.asList; import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;
import com.amazonaws.services.s3.transfer.Upload;
import com.cloud.agent.api.to.S3TO;
import com.cloud.utils.net.HTTPUtils;
import com.cloud.utils.net.Proxy;
import com.cloud.utils.storage.S3.S3Utils;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.URIException;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import java.io.BufferedInputStream; import java.io.BufferedInputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.util.Date; import java.util.Date;
import org.apache.cloudstack.managed.context.ManagedContextRunnable; import static com.cloud.utils.StringUtils.join;
import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import static java.util.Arrays.asList;
import org.apache.commons.httpclient.ChunkedInputStream;
import org.apache.commons.httpclient.Credentials;
import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpException;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.HttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.NoHttpResponseException;
import org.apache.commons.httpclient.UsernamePasswordCredentials;
import org.apache.commons.httpclient.auth.AuthScope;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.ProgressEvent;
import com.amazonaws.services.s3.model.ProgressListener;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;
import com.cloud.agent.api.storage.Proxy;
import com.cloud.agent.api.to.S3TO;
import com.cloud.utils.Pair;
import com.cloud.utils.S3Utils;
import com.cloud.utils.UriUtils;
/** /**
* Download a template file using HTTP(S) * Download a template file using HTTP(S)
*
* This class, once instantiated, has the purpose to download a single Template to an S3 Image Store.
*
* Execution of the instance is started when runInContext() is called.
*/ */
public class S3TemplateDownloader extends ManagedContextRunnable implements TemplateDownloader { public class S3TemplateDownloader extends ManagedContextRunnable implements TemplateDownloader {
private static final Logger s_logger = Logger.getLogger(S3TemplateDownloader.class.getName()); private static final Logger LOGGER = Logger.getLogger(S3TemplateDownloader.class.getName());
private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager();
private String downloadUrl;
private String installPath;
private String s3Key;
private String fileName;
private String fileExtension;
private String errorString = " ";
private final String downloadUrl;
private final String s3Key;
private final String fileExtension;
private final HttpClient httpClient;
private final GetMethod getMethod;
private final DownloadCompleteCallback downloadCompleteCallback;
private final S3TO s3TO;
private String errorString = "";
private TemplateDownloader.Status status = TemplateDownloader.Status.NOT_STARTED; private TemplateDownloader.Status status = TemplateDownloader.Status.NOT_STARTED;
private ResourceType resourceType = ResourceType.TEMPLATE; private ResourceType resourceType = ResourceType.TEMPLATE;
private final HttpClient client; private long remoteSize;
private final HttpMethodRetryHandler myretryhandler; private long downloadTime;
private GetMethod request;
private DownloadCompleteCallback completionCallback;
private S3TO s3to;
private long remoteSize = 0;
private long downloadTime = 0;
private long totalBytes; private long totalBytes;
private long maxTemplateSizeInByte; private long maxTemplateSizeInByte;
private boolean resume = false; private boolean resume = false;
private boolean inited = true;
public S3TemplateDownloader(S3TO s3to, String downloadUrl, String installPath, DownloadCompleteCallback callback, public S3TemplateDownloader(S3TO s3TO, String downloadUrl, String installPath, DownloadCompleteCallback downloadCompleteCallback,
long maxTemplateSizeInBytes, String user, String password, Proxy proxy, ResourceType resourceType) { long maxTemplateSizeInBytes, String username, String password, Proxy proxy, ResourceType resourceType) {
this.s3to = s3to;
this.downloadUrl = downloadUrl; this.downloadUrl = downloadUrl;
this.installPath = installPath; this.s3TO = s3TO;
this.status = TemplateDownloader.Status.NOT_STARTED;
this.resourceType = resourceType; this.resourceType = resourceType;
this.maxTemplateSizeInByte = maxTemplateSizeInBytes; this.maxTemplateSizeInByte = maxTemplateSizeInBytes;
this.httpClient = HTTPUtils.getHTTPClient();
this.downloadCompleteCallback = downloadCompleteCallback;
this.totalBytes = 0; // Create a GET method for the download url.
this.client = new HttpClient(s_httpClientManager); this.getMethod = new GetMethod(downloadUrl);
this.myretryhandler = new HttpMethodRetryHandler() { // Set the retry handler, default to retry 5 times.
@Override this.getMethod.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, HTTPUtils.getHttpMethodRetryHandler(5));
public boolean retryMethod(final HttpMethod method, final IOException exception, int executionCount) {
if (executionCount >= 2) {
// Do not retry if over max retry count
return false;
}
if (exception instanceof NoHttpResponseException) {
// Retry if the server dropped connection on us
return true;
}
if (!method.isRequestSent()) {
// Retry if the request has not been sent fully or
// if it's OK to retry methods that have been sent
return true;
}
// otherwise do not retry
return false;
}
};
try { // Follow redirects
request = new GetMethod(downloadUrl); this.getMethod.setFollowRedirects(true);
request.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, myretryhandler);
completionCallback = callback;
Pair<String, Integer> hostAndPort = UriUtils.validateUrl(downloadUrl); // Set file extension.
fileName = StringUtils.substringAfterLast(downloadUrl, "/"); this.fileExtension = StringUtils.substringAfterLast(StringUtils.substringAfterLast(downloadUrl, "/"), ".");
fileExtension = StringUtils.substringAfterLast(fileName, ".");
if (proxy != null) { // Calculate and set S3 Key.
client.getHostConfiguration().setProxy(proxy.getHost(), proxy.getPort()); this.s3Key = join(asList(installPath, StringUtils.substringAfterLast(downloadUrl, "/")), S3Utils.SEPARATOR);
if (proxy.getUserName() != null) {
Credentials proxyCreds = new UsernamePasswordCredentials(proxy.getUserName(), proxy.getPassword()); // Set proxy if available.
client.getState().setProxyCredentials(AuthScope.ANY, proxyCreds); HTTPUtils.setProxy(proxy, this.httpClient);
}
} // Set credentials if available.
if ((user != null) && (password != null)) { HTTPUtils.setCredentials(username, password, this.httpClient);
client.getParams().setAuthenticationPreemptive(true);
Credentials defaultcreds = new UsernamePasswordCredentials(user, password);
client.getState().setCredentials(
new AuthScope(hostAndPort.first(), hostAndPort.second(), AuthScope.ANY_REALM), defaultcreds);
s_logger.info("Added username=" + user + ", password=" + password + "for host " + hostAndPort.first()
+ ":" + hostAndPort.second());
} else {
s_logger.info("No credentials configured for host=" + hostAndPort.first() + ":" + hostAndPort.second());
}
} catch (IllegalArgumentException iae) {
errorString = iae.getMessage();
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
inited = false;
} catch (Exception ex) {
errorString = "Unable to start download -- check url? ";
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
s_logger.warn("Exception in constructor -- " + ex.toString());
} catch (Throwable th) {
s_logger.warn("throwable caught ", th);
}
} }
@Override @Override
public long download(boolean resume, DownloadCompleteCallback callback) { public long download(boolean resume, DownloadCompleteCallback callback) {
switch (status) { if (!status.equals(Status.NOT_STARTED)) {
case ABORTED: // Only start downloading if we haven't started yet.
case UNRECOVERABLE_ERROR: LOGGER.debug("Template download is already started, not starting again. Template: " + downloadUrl);
case DOWNLOAD_FINISHED:
return 0;
default:
return 0;
} }
int responseCode;
if ((responseCode = HTTPUtils.executeMethod(httpClient, getMethod)) == -1) {
errorString = "Exception while executing HttpMethod " + getMethod.getName() + " on URL " + downloadUrl;
LOGGER.warn(errorString);
status = Status.UNRECOVERABLE_ERROR;
return 0;
}
if (!HTTPUtils.verifyResponseCode(responseCode)) {
errorString = "Response code for GetMethod of " + downloadUrl + " is incorrect, responseCode: " + responseCode;
LOGGER.warn(errorString);
status = Status.UNRECOVERABLE_ERROR;
return 0;
}
// Headers
Header contentLengthHeader = getMethod.getResponseHeader("Content-Length");
Header contentTypeHeader = getMethod.getResponseHeader("Content-Type");
// Check the contentLengthHeader and transferEncodingHeader.
if (contentLengthHeader == null) {
errorString = "The ContentLengthHeader of " + downloadUrl + " isn't supplied";
LOGGER.warn(errorString);
status = Status.UNRECOVERABLE_ERROR;
return 0;
} else {
// The ContentLengthHeader is supplied, parse it's value.
remoteSize = Long.parseLong(contentLengthHeader.getValue());
}
if (remoteSize > maxTemplateSizeInByte) {
errorString = "Remote size is too large for template " + downloadUrl + " remote size is " + remoteSize + " max allowed is " + maxTemplateSizeInByte;
LOGGER.warn(errorString);
status = Status.UNRECOVERABLE_ERROR;
return 0;
}
InputStream inputStream;
try { try {
// execute get method inputStream = new BufferedInputStream(getMethod.getResponseBodyAsStream());
int responseCode = HttpStatus.SC_OK; } catch (IOException e) {
if ((responseCode = client.executeMethod(request)) != HttpStatus.SC_OK) { errorString = "Exception occurred while opening InputStream for template " + downloadUrl;
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; LOGGER.warn(errorString);
errorString = " HTTP Server returned " + responseCode + " (expected 200 OK) ";
return 0; // FIXME: retry?
}
// get the total size of file
Header contentLengthHeader = request.getResponseHeader("Content-Length");
boolean chunked = false;
long remoteSize2 = 0;
if (contentLengthHeader == null) {
Header chunkedHeader = request.getResponseHeader("Transfer-Encoding");
if (chunkedHeader == null || !"chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
errorString = " Failed to receive length of download ";
return 0; // FIXME: what status do we put here? Do we retry?
} else if ("chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
chunked = true;
}
} else {
remoteSize2 = Long.parseLong(contentLengthHeader.getValue());
}
if (remoteSize == 0) { status = Status.UNRECOVERABLE_ERROR;
remoteSize = remoteSize2; return 0;
}
if (remoteSize > maxTemplateSizeInByte) {
s_logger.info("Remote size is too large: " + remoteSize + " , max=" + maxTemplateSizeInByte);
status = Status.UNRECOVERABLE_ERROR;
errorString = "Download file size is too large";
return 0;
}
if (remoteSize == 0) {
remoteSize = maxTemplateSizeInByte;
}
// get content type
String contentType = null;
Header contentTypeHeader = request.getResponseHeader("Content-Type");
if (contentTypeHeader != null) {
contentType = contentTypeHeader.getValue();
}
InputStream in = !chunked ? new BufferedInputStream(request.getResponseBodyAsStream())
: new ChunkedInputStream(request.getResponseBodyAsStream());
s_logger.info("Starting download from " + getDownloadUrl() + " to s3 bucket " + s3to.getBucketName()
+ " remoteSize=" + remoteSize + " , max size=" + maxTemplateSizeInByte);
Date start = new Date();
// compute s3 key
s3Key = join(asList(installPath, fileName), S3Utils.SEPARATOR);
// download using S3 API
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(remoteSize);
if (contentType != null) {
metadata.setContentType(contentType);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(s3to.getBucketName(), s3Key, in, metadata);
// check if RRS is enabled
if (s3to.getEnableRRS()) {
putObjectRequest = putObjectRequest.withStorageClass(StorageClass.ReducedRedundancy);
}
// register progress listenser
putObjectRequest.setProgressListener(new ProgressListener() {
@Override
public void progressChanged(ProgressEvent progressEvent) {
// s_logger.debug(progressEvent.getBytesTransfered()
// + " number of byte transferd "
// + new Date());
totalBytes += progressEvent.getBytesTransfered();
if (progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE) {
s_logger.info("download completed");
status = TemplateDownloader.Status.DOWNLOAD_FINISHED;
} else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
} else if (progressEvent.getEventCode() == ProgressEvent.CANCELED_EVENT_CODE) {
status = TemplateDownloader.Status.ABORTED;
} else {
status = TemplateDownloader.Status.IN_PROGRESS;
}
}
});
if (!s3to.getSingleUpload(remoteSize)) {
// use TransferManager to do multipart upload
S3Utils.mputObject(s3to, putObjectRequest);
} else {
// single part upload, with 5GB limit in Amazon
S3Utils.putObject(s3to, putObjectRequest);
while (status != TemplateDownloader.Status.DOWNLOAD_FINISHED
&& status != TemplateDownloader.Status.UNRECOVERABLE_ERROR
&& status != TemplateDownloader.Status.ABORTED) {
// wait for completion
}
}
// finished or aborted
Date finish = new Date();
String downloaded = "(incomplete download)";
if (totalBytes >= remoteSize) {
status = TemplateDownloader.Status.DOWNLOAD_FINISHED;
downloaded = "(download complete remote=" + remoteSize + "bytes)";
} else {
errorString = "Downloaded " + totalBytes + " bytes " + downloaded;
}
downloadTime += finish.getTime() - start.getTime();
return totalBytes;
} catch (HttpException hte) {
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
errorString = hte.getMessage();
} catch (IOException ioe) {
// probably a file write error
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
errorString = ioe.getMessage();
} catch (AmazonClientException ex) {
// S3 api exception
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
errorString = ex.getMessage();
} catch (InterruptedException e) {
// S3 upload api exception
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
errorString = e.getMessage();
} finally {
// close input stream
request.releaseConnection();
if (callback != null) {
callback.downloadComplete(status);
}
} }
return 0;
LOGGER.info("Starting download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " and size " + remoteSize + " bytes");
// Time the upload starts.
final Date start = new Date();
ObjectMetadata objectMetadata = new ObjectMetadata();
objectMetadata.setContentLength(remoteSize);
if (contentTypeHeader.getValue() != null) {
objectMetadata.setContentType(contentTypeHeader.getValue());
}
// Create the PutObjectRequest.
PutObjectRequest putObjectRequest = new PutObjectRequest(s3TO.getBucketName(), s3Key, inputStream, objectMetadata);
// If reduced redundancy is enabled, set it.
if (s3TO.getEnableRRS()) {
putObjectRequest.withStorageClass(StorageClass.ReducedRedundancy);
}
Upload upload = S3Utils.putObject(s3TO, putObjectRequest);
upload.addProgressListener(new ProgressListener() {
@Override
public void progressChanged(ProgressEvent progressEvent) {
// Record the amount of bytes transferred.
totalBytes += progressEvent.getBytesTransferred();
LOGGER.trace("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + ((new Date().getTime() - start.getTime()) / 1000) + " seconds");
if (progressEvent.getEventType() == ProgressEventType.TRANSFER_STARTED_EVENT) {
status = Status.IN_PROGRESS;
} else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
status = Status.DOWNLOAD_FINISHED;
} else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_CANCELED_EVENT) {
status = Status.ABORTED;
} else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_FAILED_EVENT) {
status = Status.UNRECOVERABLE_ERROR;
}
}
});
try {
// Wait for the upload to complete.
upload.waitForCompletion();
} catch (InterruptedException e) {
// Interruption while waiting for the upload to complete.
LOGGER.warn("Interruption occurred while waiting for upload of " + downloadUrl + " to complete");
}
downloadTime = new Date().getTime() - start.getTime();
if (status == Status.DOWNLOAD_FINISHED) {
LOGGER.info("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed successfully!");
} else {
LOGGER.warn("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + totalBytes + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString());
}
// Close input stream
getMethod.releaseConnection();
// Call the callback!
if (callback != null) {
callback.downloadComplete(status);
}
return totalBytes;
} }
public String getDownloadUrl() { public String getDownloadUrl() {
return downloadUrl; try {
return getMethod.getURI().toString();
} catch (URIException e) {
return null;
}
} }
@Override @Override
public TemplateDownloader.Status getStatus() { public Status getStatus() {
return status; return status;
} }
@ -342,47 +274,38 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
return null; return null;
} }
return S3Utils.getObjectStream(s3to, s3to.getBucketName(), s3Key); return S3Utils.getObjectStream(s3TO, s3TO.getBucketName(), s3Key);
} }
public void cleanupAfterError() { public void cleanupAfterError() {
if (status != Status.UNRECOVERABLE_ERROR) { LOGGER.warn("Cleanup after error, trying to remove object: " + s3Key);
s_logger.debug("S3Template downloader does not have state UNRECOVERABLE_ERROR, no cleanup neccesarry.");
return;
}
s_logger.info("Cleanup after UNRECOVERABLE_ERROR, trying to remove object: " + s3Key); S3Utils.deleteObject(s3TO, s3TO.getBucketName(), s3Key);
S3Utils.deleteObject(s3to, s3to.getBucketName(), s3Key);
} }
@Override @Override
@SuppressWarnings("fallthrough")
public boolean stopDownload() { public boolean stopDownload() {
switch (getStatus()) { switch (status) {
case IN_PROGRESS: case IN_PROGRESS:
if (request != null) { if (getMethod != null) {
request.abort(); getMethod.abort();
} }
status = TemplateDownloader.Status.ABORTED; break;
return true; case UNKNOWN:
case UNKNOWN: case NOT_STARTED:
case NOT_STARTED: case RECOVERABLE_ERROR:
case RECOVERABLE_ERROR: case UNRECOVERABLE_ERROR:
case UNRECOVERABLE_ERROR: case ABORTED:
case ABORTED: case DOWNLOAD_FINISHED:
status = TemplateDownloader.Status.ABORTED; // Remove the object if it already has been uploaded.
case DOWNLOAD_FINISHED: S3Utils.deleteObject(s3TO, s3TO.getBucketName(), s3Key);
try { break;
S3Utils.deleteObject(s3to, s3to.getBucketName(), s3Key); default:
} catch (Exception ex) { break;
// ignore delete exception if it is not there
}
return true;
default:
return true;
} }
status = TemplateDownloader.Status.ABORTED;
return true;
} }
@Override @Override
@ -396,18 +319,12 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
@Override @Override
protected void runInContext() { protected void runInContext() {
try { // Start the download!
download(resume, completionCallback); download(resume, downloadCompleteCallback);
} catch (Throwable t) {
s_logger.warn("Caught exception during download " + t.getMessage(), t);
errorString = "Failed to install: " + t.getMessage();
status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
}
} }
@Override @Override
public void setStatus(TemplateDownloader.Status status) { public void setStatus(Status status) {
this.status = status; this.status = status;
} }
@ -442,7 +359,7 @@ public class S3TemplateDownloader extends ManagedContextRunnable implements Temp
@Override @Override
public boolean isInited() { public boolean isInited() {
return inited; return true;
} }
public ResourceType getResourceType() { public ResourceType getResourceType() {

View File

@ -25,16 +25,16 @@ public interface TemplateDownloader extends Runnable {
* Callback used to notify completion of download * Callback used to notify completion of download
* *
*/ */
public interface DownloadCompleteCallback { interface DownloadCompleteCallback {
void downloadComplete(Status status); void downloadComplete(Status status);
} }
public static enum Status { enum Status {
UNKNOWN, NOT_STARTED, IN_PROGRESS, ABORTED, UNRECOVERABLE_ERROR, RECOVERABLE_ERROR, DOWNLOAD_FINISHED, POST_DOWNLOAD_FINISHED UNKNOWN, NOT_STARTED, IN_PROGRESS, ABORTED, UNRECOVERABLE_ERROR, RECOVERABLE_ERROR, DOWNLOAD_FINISHED, POST_DOWNLOAD_FINISHED
} }
public static long DEFAULT_MAX_TEMPLATE_SIZE_IN_BYTES = 50L * 1024L * 1024L * 1024L; long DEFAULT_MAX_TEMPLATE_SIZE_IN_BYTES = 50L * 1024L * 1024L * 1024L;
/** /**
* Initiate download, resuming a previous one if required * Initiate download, resuming a previous one if required
@ -42,55 +42,54 @@ public interface TemplateDownloader extends Runnable {
* @param callback completion callback to be called after download is complete * @param callback completion callback to be called after download is complete
* @return bytes downloaded * @return bytes downloaded
*/ */
public long download(boolean resume, DownloadCompleteCallback callback); long download(boolean resume, DownloadCompleteCallback callback);
/** /**
* @return * @return
*/ */
public boolean stopDownload(); boolean stopDownload();
/** /**
* @return percent of file downloaded * @return percent of file downloaded
*/ */
public int getDownloadPercent(); int getDownloadPercent();
/** /**
* Get the status of the download * Get the status of the download
* @return status of download * @return status of download
*/ */
public TemplateDownloader.Status getStatus(); TemplateDownloader.Status getStatus();
/** /**
* Get time taken to download so far * Get time taken to download so far
* @return time in seconds taken to download * @return time in seconds taken to download
*/ */
public long getDownloadTime(); long getDownloadTime();
/** /**
* Get bytes downloaded * Get bytes downloaded
* @return bytes downloaded so far * @return bytes downloaded so far
*/ */
public long getDownloadedBytes(); long getDownloadedBytes();
/** /**
* Get the error if any * Get the error if any
* @return error string if any * @return error string if any
*/ */
public String getDownloadError(); String getDownloadError();
/** Get local path of the downloaded file /** Get local path of the downloaded file
* @return local path of the file downloaded * @return local path of the file downloaded
*/ */
public String getDownloadLocalPath(); String getDownloadLocalPath();
public void setStatus(TemplateDownloader.Status status); void setStatus(TemplateDownloader.Status status);
public void setDownloadError(String string); void setDownloadError(String string);
public void setResume(boolean resume); void setResume(boolean resume);
public boolean isInited(); boolean isInited();
public long getMaxTemplateSizeInBytes();
long getMaxTemplateSizeInBytes();
} }

View File

@ -25,7 +25,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.agent.api.storage.AbstractDownloadCommand; import com.cloud.agent.api.storage.AbstractDownloadCommand;
import com.cloud.agent.api.storage.PasswordAuth; import com.cloud.agent.api.storage.PasswordAuth;
import com.cloud.agent.api.storage.Proxy; import com.cloud.utils.net.Proxy;
import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.NfsTO;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;

View File

@ -23,14 +23,14 @@ import java.util.Set;
public interface DataStoreProvider { public interface DataStoreProvider {
// constants for provider names // constants for provider names
static final String NFS_IMAGE = "NFS"; String NFS_IMAGE = "NFS";
static final String S3_IMAGE = "S3"; String S3_IMAGE = "S3";
static final String SWIFT_IMAGE = "Swift"; String SWIFT_IMAGE = "Swift";
static final String SAMPLE_IMAGE = "Sample"; String SAMPLE_IMAGE = "Sample";
static final String SMB = "NFS"; String SMB = "NFS";
static final String DEFAULT_PRIMARY = "DefaultPrimary"; String DEFAULT_PRIMARY = "DefaultPrimary";
static enum DataStoreProviderType { enum DataStoreProviderType {
PRIMARY, IMAGE, ImageCache PRIMARY, IMAGE, ImageCache
} }

View File

@ -196,7 +196,7 @@ public class TemplateServiceImpl implements TemplateService {
@Override @Override
public void downloadBootstrapSysTemplate(DataStore store) { public void downloadBootstrapSysTemplate(DataStore store) {
Set<VMTemplateVO> toBeDownloaded = new HashSet<VMTemplateVO>(); Set<VMTemplateVO> toBeDownloaded = new HashSet();
List<VMTemplateVO> rtngTmplts = _templateDao.listAllSystemVMTemplates(); List<VMTemplateVO> rtngTmplts = _templateDao.listAllSystemVMTemplates();

View File

@ -27,15 +27,15 @@ import com.cloud.storage.DataStoreRole;
import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.NoTransitionException;
public interface ObjectInDataStoreManager { public interface ObjectInDataStoreManager {
public DataObject create(DataObject dataObj, DataStore dataStore); DataObject create(DataObject dataObj, DataStore dataStore);
public boolean delete(DataObject dataObj); boolean delete(DataObject dataObj);
public boolean deleteIfNotReady(DataObject dataObj); boolean deleteIfNotReady(DataObject dataObj);
public DataObject get(DataObject dataObj, DataStore store); DataObject get(DataObject dataObj, DataStore store);
public boolean update(DataObject vo, Event event) throws NoTransitionException, ConcurrentOperationException; boolean update(DataObject vo, Event event) throws NoTransitionException, ConcurrentOperationException;
DataObjectInStore findObject(long objId, DataObjectType type, long dataStoreId, DataStoreRole role); DataObjectInStore findObject(long objId, DataObjectType type, long dataStoreId, DataStoreRole role);

View File

@ -47,7 +47,6 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import com.cloud.agent.api.Answer; import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.Proxy;
import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DataTO;
import com.cloud.alert.AlertManager; import com.cloud.alert.AlertManager;
@ -58,6 +57,7 @@ import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.download.DownloadMonitor;
import com.cloud.utils.net.Proxy;
public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class); private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class);

View File

@ -33,27 +33,27 @@
<dependency> <dependency>
<groupId>com.fasterxml.jackson.module</groupId> <groupId>com.fasterxml.jackson.module</groupId>
<artifactId>jackson-module-jaxb-annotations</artifactId> <artifactId>jackson-module-jaxb-annotations</artifactId>
<version>2.4.4</version> <version>2.6.3</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.fasterxml.jackson.core</groupId> <groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId> <artifactId>jackson-annotations</artifactId>
<version>2.4.4</version> <version>2.6.3</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.fasterxml.jackson.core</groupId> <groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId> <artifactId>jackson-core</artifactId>
<version>2.4.4</version> <version>2.6.3</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.fasterxml.jackson.core</groupId> <groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId> <artifactId>jackson-databind</artifactId>
<version>2.4.4</version> <version>2.6.3</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId> <groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-json-provider</artifactId> <artifactId>jackson-jaxrs-json-provider</artifactId>
<version>2.4.4</version> <version>2.6.3</version>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.apache.cxf</groupId> <groupId>org.apache.cxf</groupId>

View File

@ -162,6 +162,16 @@ under the License.
<appender-ref ref="APISERVER"/> <appender-ref ref="APISERVER"/>
</logger> </logger>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category>
<!-- ============================== --> <!-- ============================== -->
<!-- Add or remove these logger for SNMP, this logger is for SNMP alerts plugin --> <!-- Add or remove these logger for SNMP, this logger is for SNMP alerts plugin -->
<!-- ============================== --> <!-- ============================== -->

View File

@ -18,8 +18,7 @@
*/ */
package com.cloud.hypervisor.kvm.storage; package com.cloud.hypervisor.kvm.storage;
import static com.cloud.utils.S3Utils.mputFile; import static com.cloud.utils.storage.S3.S3Utils.putFile;
import static com.cloud.utils.S3Utils.putFile;
import java.io.File; import java.io.File;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
@ -95,7 +94,7 @@ import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.QCOW2Processor; import com.cloud.storage.template.QCOW2Processor;
import com.cloud.storage.template.TemplateLocation; import com.cloud.storage.template.TemplateLocation;
import com.cloud.utils.NumbersUtil; import com.cloud.utils.NumbersUtil;
import com.cloud.utils.S3Utils; import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script; import com.cloud.utils.script.Script;
@ -594,15 +593,10 @@ public class KVMStorageProcessor implements StorageProcessor {
} }
protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException { protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException {
final String bucket = destStore.getBucketName();
final long srcSize = srcFile.length();
final String key = destPath + S3Utils.SEPARATOR + srcFile.getName(); final String key = destPath + S3Utils.SEPARATOR + srcFile.getName();
if (!destStore.getSingleUpload(srcSize)) {
mputFile(destStore, srcFile, bucket, key); putFile(destStore, srcFile, destStore.getBucketName(), key).waitForCompletion();
} else {
putFile(destStore, srcFile, bucket, key);
}
return key; return key;
} }
@ -668,7 +662,7 @@ public class KVMStorageProcessor implements StorageProcessor {
final SnapshotObjectTO snapshotOnCacheStore = (SnapshotObjectTO)answer.getNewData(); final SnapshotObjectTO snapshotOnCacheStore = (SnapshotObjectTO)answer.getNewData();
snapshotOnCacheStore.setDataStore(cacheStore); snapshotOnCacheStore.setDataStore(cacheStore);
((SnapshotObjectTO)destData).setDataStore(imageStore); ((SnapshotObjectTO)destData).setDataStore(imageStore);
final CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence()); final CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence());
return copyToObjectStore(newCpyCmd); return copyToObjectStore(newCpyCmd);
} }

View File

@ -70,11 +70,11 @@ import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage; import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.resource.StorageProcessor; import com.cloud.storage.resource.StorageProcessor;
import com.cloud.utils.S3Utils;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.storage.encoding.DecodedDataObject; import com.cloud.utils.storage.encoding.DecodedDataObject;
import com.cloud.utils.storage.encoding.DecodedDataStore; import com.cloud.utils.storage.encoding.DecodedDataStore;
import com.cloud.utils.storage.encoding.Decoder; import com.cloud.utils.storage.encoding.Decoder;
import com.cloud.utils.storage.S3.ClientOptions;
import com.xensource.xenapi.Connection; import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Host; import com.xensource.xenapi.Host;
import com.xensource.xenapi.PBD; import com.xensource.xenapi.PBD;
@ -1061,7 +1061,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
try { try {
final List<String> parameters = newArrayList(flattenProperties(s3, S3Utils.ClientOptions.class)); final List<String> parameters = newArrayList(flattenProperties(s3, ClientOptions.class));
// https workaround for Introspector bug that does not // https workaround for Introspector bug that does not
// recognize Boolean accessor methods ... // recognize Boolean accessor methods ...

View File

@ -19,7 +19,7 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-image-s3</artifactId> <artifactId>cloud-plugin-storage-image-s3</artifactId>
<name>Apache CloudStack Plugin - Storage Image S3</name> <name>Apache CloudStack Plugin - Storage Image S3 provider</name>
<parent> <parent>
<groupId>org.apache.cloudstack</groupId> <groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId> <artifactId>cloudstack-plugins</artifactId>

View File

@ -39,7 +39,7 @@ import com.cloud.agent.api.to.S3TO;
import com.cloud.configuration.Config; import com.cloud.configuration.Config;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;
import com.cloud.utils.NumbersUtil; import com.cloud.utils.NumbersUtil;
import com.cloud.utils.S3Utils; import com.cloud.utils.storage.S3.S3Utils;
public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl { public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
private static final Logger s_logger = Logger.getLogger(S3ImageStoreDriverImpl.class); private static final Logger s_logger = Logger.getLogger(S3ImageStoreDriverImpl.class);
@ -58,7 +58,9 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
imgStore.getUuid(), imgStore.getUuid(),
details.get(ApiConstants.S3_ACCESS_KEY), details.get(ApiConstants.S3_ACCESS_KEY),
details.get(ApiConstants.S3_SECRET_KEY), details.get(ApiConstants.S3_SECRET_KEY),
details.get(ApiConstants.S3_END_POINT), details.get(ApiConstants.S3_BUCKET_NAME), details.get(ApiConstants.S3_END_POINT),
details.get(ApiConstants.S3_BUCKET_NAME),
details.get(ApiConstants.S3_SIGNER),
details.get(ApiConstants.S3_HTTPS_FLAG) == null ? false : Boolean.parseBoolean(details.get(ApiConstants.S3_HTTPS_FLAG)), details.get(ApiConstants.S3_HTTPS_FLAG) == null ? false : Boolean.parseBoolean(details.get(ApiConstants.S3_HTTPS_FLAG)),
details.get(ApiConstants.S3_CONNECTION_TIMEOUT) == null ? null : Integer.valueOf(details.get(ApiConstants.S3_CONNECTION_TIMEOUT)), details.get(ApiConstants.S3_CONNECTION_TIMEOUT) == null ? null : Integer.valueOf(details.get(ApiConstants.S3_CONNECTION_TIMEOUT)),
details.get(ApiConstants.S3_MAX_ERROR_RETRY) == null ? null : Integer.valueOf(details.get(ApiConstants.S3_MAX_ERROR_RETRY)), details.get(ApiConstants.S3_MAX_ERROR_RETRY) == null ? null : Integer.valueOf(details.get(ApiConstants.S3_MAX_ERROR_RETRY)),
@ -74,27 +76,29 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
try { try {
return Long.parseLong(_configDao.getValue(Config.S3MaxSingleUploadSize.toString())) * 1024L * 1024L * 1024L; return Long.parseLong(_configDao.getValue(Config.S3MaxSingleUploadSize.toString())) * 1024L * 1024L * 1024L;
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
// use default 5GB // use default 1TB
return 5L * 1024L * 1024L * 1024L; return 1024L * 1024L * 1024L * 1024L;
} }
} }
@Override @Override
public String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject) { public String createEntityExtractUrl(DataStore store, String key, ImageFormat format, DataObject dataObject) {
// for S3, no need to do anything, just return template url for /**
// extract template. but we need to set object acl as public_read to * Generate a pre-signed URL for the given object.
// make the url accessible */
S3TO s3 = (S3TO)getStoreTO(store); S3TO s3 = (S3TO)getStoreTO(store);
String key = installPath;
s_logger.info("Generating pre-signed s3 entity extraction URL."); if(s_logger.isDebugEnabled()) {
s_logger.debug("Generating pre-signed s3 entity extraction URL for object: " + key);
}
Date expiration = new Date(); Date expiration = new Date();
long milliSeconds = expiration.getTime(); long milliSeconds = expiration.getTime();
// get extract url expiration interval set in global configuration (in seconds) // Get extract url expiration interval set in global configuration (in seconds)
String urlExpirationInterval = _configDao.getValue(Config.ExtractURLExpirationInterval.toString()); String urlExpirationInterval = _configDao.getValue(Config.ExtractURLExpirationInterval.toString());
int expirationInterval = NumbersUtil.parseInt(urlExpirationInterval, 14400);
milliSeconds += 1000 * expirationInterval; // expired after configured interval (in milliseconds) // Expired after configured interval (in milliseconds), default 14400 seconds
milliSeconds += 1000 * NumbersUtil.parseInt(urlExpirationInterval, 14400);
expiration.setTime(milliSeconds); expiration.setTime(milliSeconds);
URL s3url = S3Utils.generatePresignedUrl(s3, s3.getBucketName(), key, expiration); URL s3url = S3Utils.generatePresignedUrl(s3, s3.getBucketName(), key, expiration);
@ -103,5 +107,4 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl {
return s3url.toString(); return s3url.toString();
} }
} }

View File

@ -71,7 +71,6 @@ public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
@Override @Override
public DataStore initialize(Map<String, Object> dsInfos) { public DataStore initialize(Map<String, Object> dsInfos) {
Long dcId = (Long)dsInfos.get("zoneId");
String url = (String)dsInfos.get("url"); String url = (String)dsInfos.get("url");
String name = (String)dsInfos.get("name"); String name = (String)dsInfos.get("name");
String providerName = (String)dsInfos.get("providerName"); String providerName = (String)dsInfos.get("providerName");
@ -79,11 +78,10 @@ public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
DataStoreRole role = (DataStoreRole)dsInfos.get("role"); DataStoreRole role = (DataStoreRole)dsInfos.get("role");
Map<String, String> details = (Map<String, String>)dsInfos.get("details"); Map<String, String> details = (Map<String, String>)dsInfos.get("details");
s_logger.info("Trying to add a S3 store in data center " + dcId); s_logger.info("Trying to add a S3 store with endpoint: " + details.get(ApiConstants.S3_END_POINT));
Map<String, Object> imageStoreParameters = new HashMap<String, Object>(); Map<String, Object> imageStoreParameters = new HashMap();
imageStoreParameters.put("name", name); imageStoreParameters.put("name", name);
imageStoreParameters.put("zoneId", dcId);
imageStoreParameters.put("url", url); imageStoreParameters.put("url", url);
String protocol = "http"; String protocol = "http";
String useHttps = details.get(ApiConstants.S3_HTTPS_FLAG); String useHttps = details.get(ApiConstants.S3_HTTPS_FLAG);

View File

@ -44,14 +44,15 @@ import com.cloud.utils.component.ComponentContext;
@Component @Component
public class S3ImageStoreProviderImpl implements ImageStoreProvider { public class S3ImageStoreProviderImpl implements ImageStoreProvider {
private final String providerName = DataStoreProvider.S3_IMAGE;
protected ImageStoreLifeCycle lifeCycle;
protected ImageStoreDriver driver;
@Inject @Inject
ImageStoreProviderManager storeMgr; ImageStoreProviderManager storeMgr;
@Inject @Inject
ImageStoreHelper helper; ImageStoreHelper helper;
private final String providerName = DataStoreProvider.S3_IMAGE;
protected ImageStoreLifeCycle lifeCycle;
protected ImageStoreDriver driver;
@Override @Override
public DataStoreLifeCycle getDataStoreLifeCycle() { public DataStoreLifeCycle getDataStoreLifeCycle() {
return lifeCycle; return lifeCycle;

View File

@ -96,7 +96,8 @@
<org.springframework.version>3.2.12.RELEASE</org.springframework.version> <org.springframework.version>3.2.12.RELEASE</org.springframework.version>
<cs.mockito.version>1.9.5</cs.mockito.version> <cs.mockito.version>1.9.5</cs.mockito.version>
<cs.powermock.version>1.5.3</cs.powermock.version> <cs.powermock.version>1.5.3</cs.powermock.version>
<cs.aws.sdk.version>1.3.22</cs.aws.sdk.version> <cs.aws.sdk.version>1.10.34</cs.aws.sdk.version>
<cs.jackson.version>2.6.3</cs.jackson.version>
<cs.lang.version>2.6</cs.lang.version> <cs.lang.version>2.6</cs.lang.version>
<cs.lang3.version>3.4</cs.lang3.version> <cs.lang3.version>3.4</cs.lang3.version>
<cs.commons-io.version>2.4</cs.commons-io.version> <cs.commons-io.version>2.4</cs.commons-io.version>

View File

@ -377,7 +377,7 @@ class S3Client(object):
def parseArguments(args): def parseArguments(args):
# The keys in the args map will correspond to the properties defined on # The keys in the args map will correspond to the properties defined on
# the com.cloud.utils.S3Utils#ClientOptions interface # the com.cloud.utils.storage.S3.S3Utils#ClientOptions interface
client = S3Client( client = S3Client(
args['accessKey'], args['secretKey'], args['endPoint'], args['accessKey'], args['secretKey'], args['endPoint'],
args['https'], args['connectionTimeout'], args['socketTimeout']) args['https'], args['connectionTimeout'], args['socketTimeout'])

View File

@ -109,6 +109,16 @@ under the License.
<appender-ref ref="APISERVER"/> <appender-ref ref="APISERVER"/>
</logger> </logger>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category>
<!-- ======================= --> <!-- ======================= -->
<!-- Setup the Root category --> <!-- Setup the Root category -->
<!-- ======================= --> <!-- ======================= -->

View File

@ -162,7 +162,7 @@ import org.apache.cloudstack.api.command.admin.router.StopRouterCmd;
import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd; import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd;
import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd; import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd;
import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; import org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD;
import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd;
@ -171,7 +171,6 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
import org.apache.cloudstack.api.command.admin.storage.FindStoragePoolsForMigrationCmd; import org.apache.cloudstack.api.command.admin.storage.FindStoragePoolsForMigrationCmd;
import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd; import org.apache.cloudstack.api.command.admin.storage.ListImageStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd;
import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd; import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStoresCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd;
@ -2640,12 +2639,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(StartRouterCmd.class); cmdList.add(StartRouterCmd.class);
cmdList.add(StopRouterCmd.class); cmdList.add(StopRouterCmd.class);
cmdList.add(UpgradeRouterCmd.class); cmdList.add(UpgradeRouterCmd.class);
cmdList.add(AddS3Cmd.class);
cmdList.add(AddSwiftCmd.class); cmdList.add(AddSwiftCmd.class);
cmdList.add(CancelPrimaryStorageMaintenanceCmd.class); cmdList.add(CancelPrimaryStorageMaintenanceCmd.class);
cmdList.add(CreateStoragePoolCmd.class); cmdList.add(CreateStoragePoolCmd.class);
cmdList.add(DeletePoolCmd.class); cmdList.add(DeletePoolCmd.class);
cmdList.add(ListS3sCmd.class);
cmdList.add(ListSwiftsCmd.class); cmdList.add(ListSwiftsCmd.class);
cmdList.add(ListStoragePoolsCmd.class); cmdList.add(ListStoragePoolsCmd.class);
cmdList.add(ListStorageTagsCmd.class); cmdList.add(ListStorageTagsCmd.class);
@ -2911,6 +2908,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(RemoveFromGlobalLoadBalancerRuleCmd.class); cmdList.add(RemoveFromGlobalLoadBalancerRuleCmd.class);
cmdList.add(ListStorageProvidersCmd.class); cmdList.add(ListStorageProvidersCmd.class);
cmdList.add(AddImageStoreCmd.class); cmdList.add(AddImageStoreCmd.class);
cmdList.add(AddImageStoreS3CMD.class);
cmdList.add(ListImageStoresCmd.class); cmdList.add(ListImageStoresCmd.class);
cmdList.add(DeleteImageStoreCmd.class); cmdList.add(DeleteImageStoreCmd.class);
cmdList.add(CreateSecondaryStagingStoreCmd.class); cmdList.add(CreateSecondaryStagingStoreCmd.class);

View File

@ -1844,7 +1844,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} }
@Override @Override
public ImageStore discoverImageStore(String name, String url, String providerName, Long dcId, Map details) throws IllegalArgumentException, DiscoveryException, public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException,
InvalidParameterValueException { InvalidParameterValueException {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
@ -1857,13 +1857,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} }
ScopeType scopeType = ScopeType.ZONE; ScopeType scopeType = ScopeType.ZONE;
if (dcId == null) { if (zoneId == null) {
scopeType = ScopeType.REGION; scopeType = ScopeType.REGION;
} }
if (name == null) { if (name == null) {
name = url; name = url;
} }
ImageStoreVO imageStore = _imageStoreDao.findByName(name); ImageStoreVO imageStore = _imageStoreDao.findByName(name);
if (imageStore != null) { if (imageStore != null) {
throw new InvalidParameterValueException("The image store with name " + name + " already exists, try creating with another name"); throw new InvalidParameterValueException("The image store with name " + name + " already exists, try creating with another name");
@ -1884,11 +1885,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} }
} }
if (dcId != null) { if (zoneId != null) {
// Check if the zone exists in the system // Check if the zone exists in the system
DataCenterVO zone = _dcDao.findById(dcId); DataCenterVO zone = _dcDao.findById(zoneId);
if (zone == null) { if (zone == null) {
throw new InvalidParameterValueException("Can't find zone by id " + dcId); throw new InvalidParameterValueException("Can't find zone by id " + zoneId);
} }
Account account = CallContext.current().getCallingAccount(); Account account = CallContext.current().getCallingAccount();
@ -1901,8 +1902,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
} }
} }
Map<String, Object> params = new HashMap<String, Object>(); Map<String, Object> params = new HashMap();
params.put("zoneId", dcId); params.put("zoneId", zoneId);
params.put("url", url); params.put("url", url);
params.put("name", name); params.put("name", name);
params.put("details", details); params.put("details", details);
@ -1911,11 +1912,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
params.put("role", DataStoreRole.Image); params.put("role", DataStoreRole.Image);
DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
DataStore store; DataStore store;
try { try {
store = lifeCycle.initialize(params); store = lifeCycle.initialize(params);
} catch (Exception e) { } catch (Exception e) {
s_logger.debug("Failed to add data store: " + e.getMessage(), e); if(s_logger.isDebugEnabled()) {
s_logger.debug("Failed to add data store: " + e.getMessage(), e);
}
throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e);
} }
@ -1927,9 +1931,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
_imageSrv.addSystemVMTemplatesToSecondary(store); _imageSrv.addSystemVMTemplatesToSecondary(store);
} }
// associate builtin template with zones associated with this image // associate builtin template with zones associated with this image store
// store associateCrosszoneTemplatesToZone(zoneId);
associateCrosszoneTemplatesToZone(dcId);
// duplicate cache store records to region wide storage // duplicate cache store records to region wide storage
if (scopeType == ScopeType.REGION) { if (scopeType == ScopeType.REGION) {

View File

@ -49,7 +49,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.agent.AgentManager; import com.cloud.agent.AgentManager;
import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.Proxy; import com.cloud.utils.net.Proxy;
import com.cloud.configuration.Config; import com.cloud.configuration.Config;
import com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.RegisterVolumePayload;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;

View File

@ -16,8 +16,7 @@
// under the License. // under the License.
package org.apache.cloudstack.storage.resource; package org.apache.cloudstack.storage.resource;
import static com.cloud.utils.S3Utils.mputFile; import static com.cloud.utils.storage.S3.S3Utils.putFile;
import static com.cloud.utils.S3Utils.putFile;
import static com.cloud.utils.StringUtils.join; import static com.cloud.utils.StringUtils.join;
import static java.lang.String.format; import static java.lang.String.format;
import static java.util.Arrays.asList; import static java.util.Arrays.asList;
@ -155,8 +154,7 @@ import com.cloud.storage.template.TemplateProp;
import com.cloud.storage.template.VhdProcessor; import com.cloud.storage.template.VhdProcessor;
import com.cloud.storage.template.VmdkProcessor; import com.cloud.storage.template.VmdkProcessor;
import com.cloud.utils.NumbersUtil; import com.cloud.utils.NumbersUtil;
import com.cloud.utils.S3Utils; import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.utils.S3Utils.FileNamingStrategy;
import com.cloud.utils.SwiftUtil; import com.cloud.utils.SwiftUtil;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.NetUtils;
@ -386,12 +384,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
} }
} }
File destFile = S3Utils.getFile(s3, s3.getBucketName(), srcData.getPath(), downloadDirectory, new FileNamingStrategy() { File destFile = new File(downloadDirectory, substringAfterLast(srcData.getPath(), S3Utils.SEPARATOR));
@Override
public String determineFileName(final String key) { S3Utils.getFile(s3, s3.getBucketName(), srcData.getPath(), destFile).waitForCompletion();
return substringAfterLast(key, S3Utils.SEPARATOR);
}
});
if (destFile == null) { if (destFile == null) {
return new CopyCmdAnswer("Can't find template"); return new CopyCmdAnswer("Can't find template");
@ -400,7 +396,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
return postProcessing(destFile, downloadPath, destPath, srcData, destData); return postProcessing(destFile, downloadPath, destPath, srcData, destData);
} catch (Exception e) { } catch (Exception e) {
final String errMsg = format("Failed to download" + "due to $2%s", e.getMessage()); final String errMsg = format("Failed to download" + "due to $1%s", e.getMessage());
s_logger.error(errMsg, e); s_logger.error(errMsg, e);
return new CopyCmdAnswer(errMsg); return new CopyCmdAnswer(errMsg);
} }
@ -907,14 +903,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
} }
} }
long srcSize = srcFile.length();
ImageFormat format = getTemplateFormat(srcFile.getName()); ImageFormat format = getTemplateFormat(srcFile.getName());
String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName(); String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName();
if (!s3.getSingleUpload(srcSize)) {
mputFile(s3, srcFile, bucket, key); putFile(s3, srcFile, bucket, key).waitForCompletion();
} else {
putFile(s3, srcFile, bucket, key);
}
DataTO retObj = null; DataTO retObj = null;
if (destData.getObjectType() == DataObjectType.TEMPLATE) { if (destData.getObjectType() == DataObjectType.TEMPLATE) {
@ -1509,7 +1501,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
Map<String, TemplateProp> s3ListTemplate(S3TO s3) { Map<String, TemplateProp> s3ListTemplate(S3TO s3) {
String bucket = s3.getBucketName(); String bucket = s3.getBucketName();
// List the objects in the source directory on S3 // List the objects in the source directory on S3
final List<S3ObjectSummary> objectSummaries = S3Utils.getDirectory(s3, bucket, TEMPLATE_ROOT_DIR); final List<S3ObjectSummary> objectSummaries = S3Utils.listDirectory(s3, bucket, TEMPLATE_ROOT_DIR);
if (objectSummaries == null) { if (objectSummaries == null) {
return null; return null;
} }
@ -1530,7 +1522,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
Map<Long, TemplateProp> s3ListVolume(S3TO s3) { Map<Long, TemplateProp> s3ListVolume(S3TO s3) {
String bucket = s3.getBucketName(); String bucket = s3.getBucketName();
// List the objects in the source directory on S3 // List the objects in the source directory on S3
final List<S3ObjectSummary> objectSummaries = S3Utils.getDirectory(s3, bucket, VOLUME_ROOT_DIR); final List<S3ObjectSummary> objectSummaries = S3Utils.listDirectory(s3, bucket, VOLUME_ROOT_DIR);
if (objectSummaries == null) { if (objectSummaries == null) {
return null; return null;
} }

View File

@ -19,11 +19,10 @@ package org.apache.cloudstack.storage.resource;
import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResource;
/** /**
*
* SecondaryStorageServerResource is a generic container to execute commands sent * SecondaryStorageServerResource is a generic container to execute commands sent
*/ */
public interface SecondaryStorageResource extends ServerResource { public interface SecondaryStorageResource extends ServerResource {
public String getRootDir(String cmd); String getRootDir(String cmd);
} }

View File

@ -20,5 +20,7 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command; import com.cloud.agent.api.Command;
public interface SecondaryStorageResourceHandler { public interface SecondaryStorageResourceHandler {
Answer executeRequest(Command cmd); Answer executeRequest(Command cmd);
} }

View File

@ -24,7 +24,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
import org.apache.cloudstack.storage.resource.SecondaryStorageResource; import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.Proxy; import com.cloud.utils.net.Proxy;
import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.S3TO;
import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateHostVO;

View File

@ -49,7 +49,7 @@ import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.Proxy; import com.cloud.utils.net.Proxy;
import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.S3TO;

View File

@ -88,6 +88,16 @@ under the License.
<priority value="INFO"/> <priority value="INFO"/>
</category> </category>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category>
<!-- ======================= --> <!-- ======================= -->
<!-- Setup the Root category --> <!-- Setup the Root category -->
<!-- ======================= --> <!-- ======================= -->

View File

@ -89,6 +89,16 @@ under the License.
<priority value="INFO"/> <priority value="INFO"/>
</category> </category>
<!-- Limit the com.amazonaws category to INFO as its DEBUG is verbose -->
<category name="com.amazonaws">
<priority value="INFO"/>
</category>
<!-- Limit the httpclient.wire category to INFO as its DEBUG is verbose -->
<category name="httpclient.wire">
<priority value="INFO"/>
</category>
<!-- ======================= --> <!-- ======================= -->
<!-- Setup the Root category --> <!-- Setup the Root category -->
<!-- ======================= --> <!-- ======================= -->

View File

@ -168,6 +168,11 @@
<artifactId>guava-testlib</artifactId> <artifactId>guava-testlib</artifactId>
<version>${cs.guava-testlib.version}</version> <version>${cs.guava-testlib.version}</version>
</dependency> </dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${cs.jackson.version}</version>
</dependency>
</dependencies> </dependencies>
<build> <build>
<sourceDirectory>src/main/java</sourceDirectory> <sourceDirectory>src/main/java</sourceDirectory>

View File

@ -1,619 +0,0 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils;
import static com.amazonaws.Protocol.HTTP;
import static com.amazonaws.Protocol.HTTPS;
import static com.cloud.utils.StringUtils.join;
import static java.io.File.createTempFile;
import static java.lang.String.format;
import static java.lang.System.currentTimeMillis;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static java.util.Collections.unmodifiableList;
import static org.apache.commons.lang.ArrayUtils.isEmpty;
import static org.apache.commons.lang.StringUtils.isBlank;
import static org.apache.commons.lang.StringUtils.isNotBlank;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import com.amazonaws.AmazonClientException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.HttpMethod;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.Bucket;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * Legacy static helper methods for interacting with Amazon S3 via the (old) AWS SDK:
 * client construction, single and multi-part uploads, downloads, directory-style
 * listing/removal, pre-signed URLs, and option/bucket-name validation.
 * All network operations create a fresh client per call via {@link #acquireClient}.
 */
public final class S3Utils {

    private static final Logger LOGGER = Logger.getLogger(S3Utils.class);

    // Key separator used to build "directory" prefixes inside S3 object keys.
    public static final String SEPARATOR = "/";

    // S3 bucket-name length limits enforced by checkBucketName().
    private static final int MIN_BUCKET_NAME_LENGTH = 3;
    private static final int MAX_BUCKET_NAME_LENGTH = 63;

    // Utility class: not instantiable.
    private S3Utils() {
        super();
    }

    /**
     * Builds a new AmazonS3 client from the supplied options.
     * Optional settings (protocol, timeouts, retries, endpoint) are applied only when non-null.
     */
    public static AmazonS3 acquireClient(final ClientOptions clientOptions) {
        final AWSCredentials credentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
        final ClientConfiguration configuration = new ClientConfiguration();
        if (clientOptions.isHttps() != null) {
            configuration.setProtocol(clientOptions.isHttps() == true ? HTTPS : HTTP);
        }
        if (clientOptions.getConnectionTimeout() != null) {
            configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
        }
        if (clientOptions.getMaxErrorRetry() != null) {
            configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
        }
        if (clientOptions.getSocketTimeout() != null) {
            configuration.setSocketTimeout(clientOptions.getSocketTimeout());
        }
        if (clientOptions.getUseTCPKeepAlive() != null) {
            // Option acknowledged but ignored: not available in the SDK version in use.
            //configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
            LOGGER.debug("useTCPKeepAlive not supported by old AWS SDK");
        }
        if (clientOptions.getConnectionTtl() != null) {
            // Option acknowledged but ignored: not available in the SDK version in use.
            //configuration.setConnectionTTL(clientOptions.getConnectionTtl());
            LOGGER.debug("connectionTtl not supported by old AWS SDK");
        }
        if (LOGGER.isDebugEnabled()) {
            // -1 placeholders stand in for the unsupported keep-alive/TTL settings above.
            LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, connectionTimeOut: " + "%2$s, maxErrorRetry: %3$s, socketTimeout: %4$s, useTCPKeepAlive: %5$s, connectionTtl: %6$s]",
                configuration.getProtocol(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(), configuration.getSocketTimeout(),
                -1, -1));
        }
        final AmazonS3Client client = new AmazonS3Client(credentials, configuration);
        if (isNotBlank(clientOptions.getEndPoint())) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(format("Setting the end point for S3 client %1$s to %2$s.", client, clientOptions.getEndPoint()));
            }
            client.setEndpoint(clientOptions.getEndPoint());
        }
        return client;
    }

    /** Uploads a local file to S3 as a single-part put (blocking). */
    public static void putFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) {
        assert clientOptions != null;
        assert sourceFile != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Sending file %1$s as S3 object %2$s in " + "bucket %3$s", sourceFile.getName(), key, bucketName));
        }
        acquireClient(clientOptions).putObject(bucketName, key, sourceFile);
    }

    /** Uploads a stream to S3 as a single-part put; no object metadata is supplied. */
    public static void putObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key) {
        assert clientOptions != null;
        assert sourceStream != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Sending stream as S3 object %1$s in " + "bucket %2$s", key, bucketName));
        }
        acquireClient(clientOptions).putObject(bucketName, key, sourceStream, null);
    }

    /** Uploads using a caller-prepared PutObjectRequest (single-part). */
    public static void putObject(final ClientOptions clientOptions, final PutObjectRequest req) {
        assert clientOptions != null;
        assert req != null;
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Sending stream as S3 object using PutObjectRequest"));
        }
        acquireClient(clientOptions).putObject(req);
    }

    // multi-part upload file
    /** Uploads a local file via TransferManager (multi-part) and blocks until complete. */
    public static void mputFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) throws InterruptedException {
        assert clientOptions != null;
        assert sourceFile != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Multipart sending file %1$s as S3 object %2$s in " + "bucket %3$s", sourceFile.getName(), key, bucketName));
        }
        TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions));
        Upload upload = tm.upload(bucketName, key, sourceFile);
        upload.waitForCompletion();
    }

    // multi-part upload object
    /** Uploads a stream via TransferManager (multi-part) and blocks until complete. */
    public static void mputObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key)
        throws InterruptedException {
        assert clientOptions != null;
        assert sourceStream != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Multipart sending stream as S3 object %1$s in " + "bucket %2$s", key, bucketName));
        }
        TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions));
        Upload upload = tm.upload(bucketName, key, sourceStream, null);
        upload.waitForCompletion();
    }

    // multi-part upload object
    /** Uploads a caller-prepared PutObjectRequest via TransferManager and blocks until complete. */
    public static void mputObject(final ClientOptions clientOptions, final PutObjectRequest req) throws InterruptedException {
        assert clientOptions != null;
        assert req != null;
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Multipart sending object to S3 using PutObjectRequest");
        }
        TransferManager tm = new TransferManager(S3Utils.acquireClient(clientOptions));
        Upload upload = tm.upload(req);
        upload.waitForCompletion();
    }

    /** Applies a canned ACL to an existing S3 object. */
    public static void setObjectAcl(final ClientOptions clientOptions, final String bucketName, final String key, final CannedAccessControlList acl) {
        assert clientOptions != null;
        assert acl != null;
        acquireClient(clientOptions).setObjectAcl(bucketName, key, acl);
    }

    /** Generates a pre-signed HTTP GET URL for the object, valid until {@code expiration}. */
    public static URL generatePresignedUrl(final ClientOptions clientOptions, final String bucketName, final String key, final Date expiration) {
        assert clientOptions != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        return acquireClient(clientOptions).generatePresignedUrl(bucketName, key, expiration, HttpMethod.GET);
    }

    // Note that whenever S3Object is returned, client code needs to close the internal stream to avoid resource leak.
    public static S3Object getObject(final ClientOptions clientOptions, final String bucketName, final String key) {
        assert clientOptions != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Get S3 object %1$s in " + "bucket %2$s", key, bucketName));
        }
        return acquireClient(clientOptions).getObject(bucketName, key);
    }

    // Note that whenever S3Object is returned, client code needs to close the internal stream to avoid resource leak.
    public static S3ObjectInputStream getObjectStream(final ClientOptions clientOptions, final String bucketName, final String key) {
        assert clientOptions != null;
        assert !isBlank(bucketName);
        assert !isBlank(key);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Get S3 object %1$s in " + "bucket %2$s", key, bucketName));
        }
        return acquireClient(clientOptions).getObject(bucketName, key).getObjectContent();
    }

    /**
     * Downloads an object into a temp file inside {@code targetDirectory}, then renames it
     * to the name chosen by {@code namingStrategy}. Returns the final file.
     */
    @SuppressWarnings("unchecked")
    public static File getFile(final ClientOptions clientOptions, final String bucketName, final String key, final File targetDirectory,
        final FileNamingStrategy namingStrategy) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        assert isNotBlank(key);
        assert targetDirectory != null && targetDirectory.isDirectory();
        assert namingStrategy != null;
        final AmazonS3 connection = acquireClient(clientOptions);
        File tempFile = null;
        try {
            tempFile = createTempFile(join("-", targetDirectory.getName(), currentTimeMillis(), "part"), "tmp", targetDirectory);
            tempFile.deleteOnExit();
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(format("Downloading object %1$s from bucket %2$s to temp file %3$s", key, bucketName, tempFile.getName()));
            }
            try {
                connection.getObject(new GetObjectRequest(bucketName, key), tempFile);
            } catch (AmazonClientException ex) {
                // hack to handle different ETAG format generated from RiakCS for multi-part uploaded object
                String msg = ex.getMessage();
                if (!msg.contains("verify integrity")) {
                    throw ex;
                }
            }
            final File targetFile = new File(targetDirectory, namingStrategy.determineFileName(key));
            tempFile.renameTo(targetFile);
            return targetFile;
        } catch (FileNotFoundException e) {
            // NOTE(review): message placeholders and arguments appear misaligned here
            // (%2$s receives the bucket, %3$s the key) — confirm intended wording.
            throw new CloudRuntimeException(format("Failed open file %1$s in order to get object %2$s from bucket %3$s.", targetDirectory.getAbsoluteFile(), bucketName,
                key), e);
        } catch (IOException e) {
            throw new CloudRuntimeException(format("Unable to allocate temporary file in directory %1$s to download %2$s:%3$s from S3",
                targetDirectory.getAbsolutePath(), bucketName, key), e);
        } finally {
            // After a successful rename the temp path no longer exists; delete() is then a harmless no-op.
            if (tempFile != null) {
                tempFile.delete();
            }
        }
    }

    /** Downloads every object under {@code sourcePath} into {@code targetDirectory}; returns the files. */
    public static List<File> getDirectory(final ClientOptions clientOptions, final String bucketName, final String sourcePath, final File targetDirectory,
        final FileNamingStrategy namingStrategy) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        assert isNotBlank(sourcePath);
        assert targetDirectory != null;
        final AmazonS3 connection = acquireClient(clientOptions);
        // List the objects in the source directory on S3
        final List<S3ObjectSummary> objectSummaries = listDirectory(bucketName, sourcePath, connection);
        final List<File> files = new ArrayList<File>();
        for (final S3ObjectSummary objectSummary : objectSummaries) {
            files.add(getFile(clientOptions, bucketName, objectSummary.getKey(), targetDirectory, namingStrategy));
        }
        return unmodifiableList(files);
    }

    /** Lists the object summaries under {@code sourcePath} without downloading anything. */
    public static List<S3ObjectSummary> getDirectory(final ClientOptions clientOptions, final String bucketName, final String sourcePath) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        assert isNotBlank(sourcePath);
        final AmazonS3 connection = acquireClient(clientOptions);
        // List the objects in the source directory on S3
        return listDirectory(bucketName, sourcePath, connection);
    }

    /**
     * Lists all objects whose key starts with {@code directory + "/"}, following
     * pagination markers when the listing is truncated. Returns an unmodifiable list.
     */
    private static List<S3ObjectSummary> listDirectory(final String bucketName, final String directory, final AmazonS3 client) {
        List<S3ObjectSummary> objects = new ArrayList<S3ObjectSummary>();
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(directory + SEPARATOR);
        ObjectListing ol = client.listObjects(listObjectsRequest);
        if(ol.isTruncated()) {
            do {
                objects.addAll(ol.getObjectSummaries());
                listObjectsRequest.setMarker(ol.getNextMarker());
                ol = client.listObjects(listObjectsRequest);
            } while (ol.isTruncated());
        }
        else {
            // NOTE(review): when the loop above exits, the final (non-truncated) page from the
            // loop is also collected only via this branch's sibling addAll — verify the last
            // page of a truncated listing is not dropped.
            objects.addAll(ol.getObjectSummaries());
        }
        if (objects.isEmpty()) {
            return emptyList();
        }
        return unmodifiableList(objects);
    }

    /**
     * Uploads every file in {@code directory} accepted by {@code fileNameFilter};
     * object keys are chosen by {@code namingStrategy}.
     */
    public static void putDirectory(final ClientOptions clientOptions, final String bucketName, final File directory, final FilenameFilter fileNameFilter,
        final ObjectNamingStrategy namingStrategy) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        assert directory != null && directory.isDirectory();
        assert fileNameFilter != null;
        assert namingStrategy != null;
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(format("Putting directory %1$s in S3 bucket %2$s.", directory.getAbsolutePath(), bucketName));
        }
        // Determine the list of files to be sent using the passed filter ...
        final File[] files = directory.listFiles(fileNameFilter);
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace(format("Putting files (%1$s) in S3 bucket %2$s.", ArrayUtils.toString(files, "no files found"), bucketName));
        }
        // Skip spinning up an S3 connection when no files will be sent ...
        if (isEmpty(files)) {
            return;
        }
        final AmazonS3 client = acquireClient(clientOptions);
        // Send the files to S3 using the passed ObjectNaming strategy to
        // determine the key ...
        for (final File file : files) {
            final String key = namingStrategy.determineKey(file);
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(format("Putting file %1$s into bucket %2$s with key %3$s.", file.getAbsolutePath(), bucketName, key));
            }
            client.putObject(bucketName, key, file);
        }
    }

    /** Deletes a single object. */
    public static void deleteObject(final ClientOptions clientOptions, final String bucketName, final String key) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        assert isNotBlank(key);
        final AmazonS3 client = acquireClient(clientOptions);
        client.deleteObject(bucketName, key);
    }

    /** Deletes every object under the directory prefix, then the directory key itself. */
    public static void deleteDirectory(final ClientOptions clientOptions, final String bucketName, final String directoryName) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        assert isNotBlank(directoryName);
        final AmazonS3 client = acquireClient(clientOptions);
        final List<S3ObjectSummary> objects = listDirectory(bucketName, directoryName, client);
        for (final S3ObjectSummary object : objects) {
            client.deleteObject(bucketName, object.getKey());
        }
        client.deleteObject(bucketName, directoryName);
    }

    /**
     * Returns true when a client can be constructed from the options.
     * NOTE(review): acquireClient only builds the client object; no request is sent,
     * so this validates configuration rather than actual connectivity.
     */
    public static boolean canConnect(final ClientOptions clientOptions) {
        try {
            acquireClient(clientOptions);
            return true;
        } catch (AmazonClientException e) {
            LOGGER.warn("Ignored Exception while checking connection options", e);
            return false;
        }
    }

    /** Returns true when a bucket with the exact name exists among the account's buckets. */
    public static boolean doesBucketExist(final ClientOptions clientOptions, final String bucketName) {
        assert clientOptions != null;
        assert !isBlank(bucketName);
        try {
            final List<Bucket> buckets = acquireClient(clientOptions).listBuckets();
            for (Bucket bucket : buckets) {
                if (bucket.getName().equals(bucketName)) {
                    return true;
                }
            }
            return false;
        } catch (AmazonClientException e) {
            LOGGER.warn("Ignored Exception while checking bucket existence", e);
            return false;
        }
    }

    /** Probes write access by putting and deleting a small throw-away object in the bucket. */
    public static boolean canReadWriteBucket(final ClientOptions clientOptions, final String bucketName) {
        assert clientOptions != null;
        assert isNotBlank(bucketName);
        try {
            final AmazonS3 client = acquireClient(clientOptions);
            final String fileContent = "testing put and delete";
            final InputStream inputStream = new ByteArrayInputStream(fileContent.getBytes());
            final String key = UUID.randomUUID().toString() + ".txt";
            final ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(fileContent.length());
            client.putObject(bucketName, key, inputStream, metadata);
            client.deleteObject(bucketName, key);
            return true;
        } catch (AmazonClientException e) {
            return false;
        }
    }

    /** Validates the client options, returning an unmodifiable list of error messages (empty when valid). */
    public static List<String> checkClientOptions(ClientOptions clientOptions) {
        assert clientOptions != null;
        List<String> errorMessages = new ArrayList<String>();
        errorMessages.addAll(checkRequiredField("access key", clientOptions.getAccessKey()));
        errorMessages.addAll(checkRequiredField("secret key", clientOptions.getSecretKey()));
        errorMessages.addAll(checkOptionalField("connection timeout", clientOptions.getConnectionTimeout()));
        errorMessages.addAll(checkOptionalField("socket timeout", clientOptions.getSocketTimeout()));
        errorMessages.addAll(checkOptionalField("max error retries", clientOptions.getMaxErrorRetry()));
        errorMessages.addAll(checkOptionalField("connection ttl", clientOptions.getConnectionTtl()));
        return unmodifiableList(errorMessages);
    }

    /** Validates the bucket name length against the S3 3–63 character rule. */
    public static List<String> checkBucketName(final String bucketLabel, final String bucket) {
        assert isNotBlank(bucketLabel);
        assert isNotBlank(bucket);
        final List<String> errorMessages = new ArrayList<String>();
        if (bucket.length() < MIN_BUCKET_NAME_LENGTH) {
            errorMessages.add(format("The length of %1$s " + "for the %2$s must have a length of at least %3$s " + "characters", bucket, bucketLabel,
                MIN_BUCKET_NAME_LENGTH));
        }
        if (bucket.length() > MAX_BUCKET_NAME_LENGTH) {
            errorMessages.add(format("The length of %1$s " + "for the %2$s must not have a length of at greater" + " than %3$s characters", bucket, bucketLabel,
                MAX_BUCKET_NAME_LENGTH));
        }
        return unmodifiableList(errorMessages);
    }

    // A null optional field is acceptable; only negative values are rejected.
    private static List<String> checkOptionalField(final String fieldName, final Integer fieldValue) {
        if (fieldValue != null && fieldValue < 0) {
            return singletonList(format("The value of %1$s must " + "be greater than zero.", fieldName));
        }
        return emptyList();
    }

    // A required field must be non-blank.
    private static List<String> checkRequiredField(String fieldName, String fieldValue) {
        if (isBlank(fieldValue)) {
            return singletonList(format("A %1$s must be specified.", fieldName));
        }
        return emptyList();
    }

    /** Connection options consumed by {@link #acquireClient}; null values mean "use SDK default". */
    public interface ClientOptions {
        String getAccessKey();

        String getSecretKey();

        String getEndPoint();

        Boolean isHttps();

        Integer getConnectionTimeout();

        Integer getMaxErrorRetry();

        Integer getSocketTimeout();

        Boolean getUseTCPKeepAlive();

        Integer getConnectionTtl();
    }

    /** Maps a local file to the S3 object key it should be stored under. */
    public interface ObjectNamingStrategy {
        String determineKey(File file);
    }

    /** Maps an S3 object key to the local file name it should be saved as. */
    public interface FileNamingStrategy {
        String determineFileName(String key);
    }
}

View File

@ -0,0 +1,143 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils.net;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.HttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.NoHttpResponseException;
import org.apache.commons.httpclient.UsernamePasswordCredentials;
import org.apache.commons.httpclient.auth.AuthScope;
import org.apache.log4j.Logger;
import java.io.IOException;
/**
 * Static helpers for building and driving Apache Commons HttpClient (3.x) instances:
 * pooled client creation, retry handling, proxy and credential configuration,
 * request execution, and response-code validation.
 */
public final class HTTPUtils {

    private static final Logger LOGGER = Logger.getLogger(HTTPUtils.class);

    // The connection manager. Shared so all clients reuse the same connection pool.
    private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager();

    // Utility class: not instantiable.
    private HTTPUtils() {}

    /**
     * @return a new HttpClient backed by the shared multi-threaded connection manager.
     */
    public static HttpClient getHTTPClient() {
        return new HttpClient(s_httpClientManager);
    }

    /**
     * @param retryCount maximum number of execution attempts before giving up.
     * @return A HttpMethodRetryHandler with given number of retries.
     */
    public static HttpMethodRetryHandler getHttpMethodRetryHandler(final int retryCount) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Initializing new HttpMethodRetryHandler with retry count " + retryCount);
        }
        return new HttpMethodRetryHandler() {
            @Override
            public boolean retryMethod(final HttpMethod method, final IOException exception, int executionCount) {
                if (executionCount >= retryCount) {
                    // Do not retry if over max retry count
                    return false;
                }
                if (exception instanceof NoHttpResponseException) {
                    // Retry if the server dropped connection on us
                    return true;
                }
                if (!method.isRequestSent()) {
                    // Retry if the request has not been sent fully or
                    // if it's OK to retry methods that have been sent
                    return true;
                }
                // otherwise do not retry
                return false;
            }
        };
    }

    /**
     * Configures the client to route requests through the given proxy, including
     * proxy credentials when both user name and password are present.
     * No-op when either argument is null.
     *
     * @param proxy      proxy settings to apply (may be null)
     * @param httpClient client to configure (may be null)
     */
    public static void setProxy(Proxy proxy, HttpClient httpClient) {
        if (proxy != null && httpClient != null) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Setting proxy with host " + proxy.getHost() + " and port " + proxy.getPort() + " for host " + httpClient.getHostConfiguration().getHost() + ":" + httpClient.getHostConfiguration().getPort());
            }
            httpClient.getHostConfiguration().setProxy(proxy.getHost(), proxy.getPort());
            if (proxy.getUserName() != null && proxy.getPassword() != null) {
                httpClient.getState().setProxyCredentials(AuthScope.ANY, new UsernamePasswordCredentials(proxy.getUserName(), proxy.getPassword()));
            }
        }
    }

    /**
     * Installs preemptive basic-auth credentials on the client for its configured host/port.
     * No-op when any argument is null.
     *
     * @param username   user name for authentication
     * @param password   password for authentication
     * @param httpClient client to configure
     */
    public static void setCredentials(String username, String password, HttpClient httpClient) {
        if (username != null && password != null && httpClient != null) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Setting credentials with username " + username + " for host " + httpClient.getHostConfiguration().getHost() + ":" + httpClient.getHostConfiguration().getPort());
            }
            httpClient.getParams().setAuthenticationPreemptive(true);
            httpClient.getState().setCredentials(
                new AuthScope(httpClient.getHostConfiguration().getHost(), httpClient.getHostConfiguration().getPort(), AuthScope.ANY_REALM), new UsernamePasswordCredentials(username, password));
        }
    }

    /**
     * @param httpClient client to execute with
     * @param httpMethod request to execute
     * @return
     * Returns the HTTP Status Code or -1 if an exception occurred.
     */
    public static int executeMethod(HttpClient httpClient, HttpMethod httpMethod) {
        // Execute GetMethod
        try {
            return httpClient.executeMethod(httpMethod);
        } catch (IOException e) {
            // Pass the exception to the logger so the root cause is preserved in the logs.
            LOGGER.warn("Exception while executing HttpMethod " + httpMethod.getName() + " on URL " + httpMethod.getPath(), e);
            return -1;
        }
    }

    /**
     * @param responseCode HTTP status code to classify
     * @return true for 200 (OK), 301 (Moved Permanently) and 302 (Moved Temporarily); false otherwise.
     */
    public static boolean verifyResponseCode(int responseCode) {
        switch (responseCode) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_MOVED_PERMANENTLY:
            case HttpStatus.SC_MOVED_TEMPORARILY:
                return true;
            default:
                return false;
        }
    }
}

View File

@ -14,7 +14,8 @@
// KIND, either express or implied. See the License for the // KIND, either express or implied. See the License for the
// specific language governing permissions and limitations // specific language governing permissions and limitations
// under the License. // under the License.
package com.cloud.agent.api.storage;
package com.cloud.utils.net;
import java.net.URI; import java.net.URI;
@ -27,10 +28,6 @@ public class Proxy {
private String _userName; private String _userName;
private String _password; private String _password;
public Proxy() {
}
public Proxy(String host, int port, String userName, String password) { public Proxy(String host, int port, String userName, String password) {
this._host = host; this._host = host;
this._port = port; this._port = port;

View File

@ -0,0 +1,42 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils.storage.S3;
/**
 * Connection settings for building an S3 client.
 * Null return values mean "not set" and leave the corresponding SDK default in place.
 */
public interface ClientOptions {

    /** @return the S3 access key (required). */
    String getAccessKey();

    /** @return the S3 secret key (required). */
    String getSecretKey();

    /** @return the service endpoint to use, or null for the SDK default. */
    String getEndPoint();

    /** @return the signature algorithm override (e.g. an older signer for V2-only endpoints), or null. */
    String getSigner();

    /** @return true for HTTPS, false for HTTP, null for the SDK default protocol. */
    Boolean isHttps();

    /** @return the connection timeout in milliseconds, or null for the SDK default. */
    Integer getConnectionTimeout();

    /** @return the maximum number of error retries, or null for the SDK default. */
    Integer getMaxErrorRetry();

    /** @return the socket timeout in milliseconds, or null for the SDK default. */
    Integer getSocketTimeout();

    /** @return whether to use TCP keep-alive, or null for the SDK default. */
    Boolean getUseTCPKeepAlive();

    /** @return the connection time-to-live, or null for the SDK default. */
    Integer getConnectionTtl();
}

View File

@ -0,0 +1,25 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils.storage.S3;
/**
 * Strategy for mapping an S3 object key to the local file name it should be saved as.
 */
public interface FileNamingStrategy {
    /**
     * @param key the S3 object key being downloaded
     * @return the local file name to store the object under
     */
    String determineFileName(String key);
}

View File

@ -0,0 +1,27 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils.storage.S3;
import java.io.File;
/**
 * Strategy for mapping a local file to the S3 object key it should be uploaded under.
 */
public interface ObjectNamingStrategy {
    /**
     * @param file the local file being uploaded
     * @return the S3 object key to store the file under
     */
    String determineKey(File file);
}

View File

@ -0,0 +1,216 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.utils.storage.S3;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.HttpMethod;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.transfer.Download;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.Upload;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.InputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.amazonaws.Protocol.HTTP;
import static com.amazonaws.Protocol.HTTPS;
import static java.lang.String.format;
import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList;
import static org.apache.commons.lang.StringUtils.isNotBlank;
public final class S3Utils {
private static final Logger LOGGER = Logger.getLogger(S3Utils.class);
public static final String SEPARATOR = "/";
private static final Map<String, TransferManager> TRANSFERMANAGER_ACCESSKEY_MAP = new HashMap<>();
private S3Utils() {}
/**
 * Returns a cached TransferManager for the given options, creating and caching a new
 * one (keyed by access key) on first use.
 *
 * NOTE(review): the cache key is the access key only — if two stores share an access
 * key but differ in endpoint/secret/signer, the first-created client wins; confirm
 * this is acceptable for all callers.
 * NOTE(review): the backing map is a plain HashMap mutated here; confirm this method
 * is never invoked concurrently, or guard the map.
 */
public static TransferManager getTransferManager(final ClientOptions clientOptions) {
    // Fast path: reuse a previously built TransferManager for this access key.
    if(TRANSFERMANAGER_ACCESSKEY_MAP.containsKey(clientOptions.getAccessKey())) {
        return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
    }
    final AWSCredentials basicAWSCredentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
    final ClientConfiguration configuration = new ClientConfiguration();
    // Each optional setting is applied only when explicitly provided (non-null),
    // otherwise the AWS SDK default is kept.
    if (clientOptions.isHttps() != null) {
        configuration.setProtocol(clientOptions.isHttps() ? HTTPS : HTTP);
    }
    if (clientOptions.getConnectionTimeout() != null) {
        configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
    }
    if (clientOptions.getMaxErrorRetry() != null) {
        configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
    }
    if (clientOptions.getSocketTimeout() != null) {
        configuration.setSocketTimeout(clientOptions.getSocketTimeout());
    }
    if (clientOptions.getUseTCPKeepAlive() != null) {
        configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
    }
    if (clientOptions.getConnectionTtl() != null) {
        configuration.setConnectionTTL(clientOptions.getConnectionTtl());
    }
    // Allows forcing an older signature version (e.g. V2) for non-AWS endpoints.
    if (clientOptions.getSigner() != null) {
        configuration.setSignerOverride(clientOptions.getSigner());
    }
    LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, signer: %2$s, connectionTimeOut: %3$s, maxErrorRetry: %4$s, socketTimeout: %5$s, useTCPKeepAlive: %6$s, connectionTtl: %7$s]",
        configuration.getProtocol(), configuration.getSignerOverride(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(), configuration.getSocketTimeout(),
        clientOptions.getUseTCPKeepAlive(), clientOptions.getConnectionTtl()));
    final AmazonS3Client client = new AmazonS3Client(basicAWSCredentials, configuration);
    // A custom endpoint (e.g. RiakCS or another S3-compatible store) overrides the AWS default.
    if (isNotBlank(clientOptions.getEndPoint())) {
        LOGGER.debug(format("Setting the end point for S3 client with access key %1$s to %2$s.", clientOptions.getAccessKey(), clientOptions.getEndPoint()));
        client.setEndpoint(clientOptions.getEndPoint());
    }
    TRANSFERMANAGER_ACCESSKEY_MAP.put(clientOptions.getAccessKey(), new TransferManager(client));
    return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
}
/**
 * Convenience accessor for the low-level S3 client that backs the (cached)
 * TransferManager for the given options.
 */
public static AmazonS3 getAmazonS3Client(final ClientOptions clientOptions) {
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.getAmazonS3Client();
}
/**
 * Starts an asynchronous upload of a local file to S3.
 *
 * @return the in-progress Upload; call waitForCompletion() to block until done.
 */
public static Upload putFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) {
    final String message = format("Sending file %1$s as S3 object %2$s in bucket %3$s", sourceFile.getName(), key, bucketName);
    LOGGER.debug(message);
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.upload(bucketName, key, sourceFile);
}
/**
 * Starts an asynchronous upload of a stream to S3; no object metadata is supplied.
 *
 * @return the in-progress Upload; call waitForCompletion() to block until done.
 */
public static Upload putObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key) {
    final String message = format("Sending stream as S3 object %1$s in bucket %2$s", key, bucketName);
    LOGGER.debug(message);
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.upload(bucketName, key, sourceStream, null);
}
/**
 * Starts an asynchronous upload described by a fully-populated {@link PutObjectRequest},
 * allowing callers to control metadata, ACLs, storage class, etc.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param req           the complete put-object request
 * @return an {@link Upload} handle for tracking or waiting on the transfer
 */
public static Upload putObject(final ClientOptions clientOptions, final PutObjectRequest req) {
    LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s using PutObjectRequest", req.getKey(), req.getBucketName()));
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.upload(req);
}
/**
 * Starts an asynchronous download of an S3 object to a local file.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param bucketName    source bucket
 * @param key           source object key
 * @param file          local destination file
 * @return a {@link Download} handle for tracking or waiting on the transfer
 */
public static Download getFile(final ClientOptions clientOptions, final String bucketName, final String key, final File file) {
    LOGGER.debug(format("Receiving object %1$s as file %2$s from bucket %3$s", key, file.getAbsolutePath(), bucketName));
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.download(bucketName, key, file);
}
/**
 * Starts an asynchronous download described by a fully-populated {@link GetObjectRequest}
 * (supports ranges, version ids, etc.) to a local file.
 *
 * @param clientOptions    connection settings for the S3 endpoint
 * @param getObjectRequest the complete get-object request
 * @param file             local destination file
 * @return a {@link Download} handle for tracking or waiting on the transfer
 */
public static Download getFile(final ClientOptions clientOptions, final GetObjectRequest getObjectRequest, final File file) {
    LOGGER.debug(format("Receiving object %1$s as file %2$s from bucket %3$s using GetObjectRequest", getObjectRequest.getKey(), file.getAbsolutePath(), getObjectRequest.getBucketName()));
    final TransferManager transferManager = getTransferManager(clientOptions);
    return transferManager.download(getObjectRequest, file);
}
/**
 * Generates a pre-signed HTTP GET URL for the given object, valid until the
 * supplied expiration date.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param bucketName    bucket containing the object
 * @param key           object key
 * @param expiration    moment at which the URL stops being valid
 * @return a pre-signed URL granting temporary read access to the object
 */
public static URL generatePresignedUrl(final ClientOptions clientOptions, final String bucketName, final String key, final Date expiration) {
    LOGGER.debug(format("Generating presigned url for key %1s in bucket %2s with expiration date %3s", key, bucketName, expiration.toString()));
    return getAmazonS3Client(clientOptions).generatePresignedUrl(bucketName, key, expiration, HttpMethod.GET);
}
/**
 * Opens a streaming read of the given S3 object.
 * <p>
 * Note that whenever an {@link S3ObjectInputStream} is returned, the caller MUST
 * close it to release the underlying HTTP connection and avoid a resource leak.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param bucketName    bucket containing the object
 * @param key           object key
 * @return the object's content stream (caller is responsible for closing it)
 */
public static S3ObjectInputStream getObjectStream(final ClientOptions clientOptions, final String bucketName, final String key) {
    LOGGER.debug(format("Get S3ObjectInputStream from S3 Object %1$s in bucket %2$s", key, bucketName));
    return getAmazonS3Client(clientOptions).getObject(bucketName, key).getObjectContent();
}
/**
 * Lists every object in the bucket whose key starts with the given directory prefix,
 * following pagination until the listing is exhausted.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param bucketName    bucket to list
 * @param directory     key prefix ("directory") to filter on
 * @return an unmodifiable list of matching object summaries; empty list if none match
 */
public static List<S3ObjectSummary> listDirectory(final ClientOptions clientOptions, final String bucketName, final String directory) {
    LOGGER.debug(format("Listing S3 directory %1$s in bucket %2$s", directory, bucketName));
    final List<S3ObjectSummary> objects = new ArrayList<>();
    final ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
    listObjectsRequest.withBucketName(bucketName);
    listObjectsRequest.withPrefix(directory);
    ObjectListing ol = getAmazonS3Client(clientOptions).listObjects(listObjectsRequest);
    objects.addAll(ol.getObjectSummaries());
    // Use listNextBatchOfObjects() for pagination: it derives the continuation marker
    // itself (NextMarker, or the last returned key when no delimiter is set). The previous
    // setMarker(ol.getNextMarker()) approach could loop on the first page forever, because
    // S3 only populates NextMarker when a delimiter is specified — and this request sets none.
    while (ol.isTruncated()) {
        ol = getAmazonS3Client(clientOptions).listNextBatchOfObjects(ol);
        objects.addAll(ol.getObjectSummaries());
    }
    if (objects.isEmpty()) {
        return emptyList();
    }
    return unmodifiableList(objects);
}
/**
 * Deletes a single object from the given bucket. Per S3 semantics, deleting a
 * non-existent key does not raise an error.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param bucketName    bucket containing the object
 * @param key           key of the object to delete
 */
public static void deleteObject(final ClientOptions clientOptions, final String bucketName, final String key) {
    LOGGER.debug(format("Deleting S3 Object %1$s in bucket %2$s", key, bucketName));
    final AmazonS3 client = getAmazonS3Client(clientOptions);
    client.deleteObject(bucketName, key);
}
/**
 * Deletes a "directory" from the bucket: every object whose key starts with the
 * given prefix, followed by the directory marker object itself.
 *
 * @param clientOptions connection settings for the S3 endpoint
 * @param bucketName    bucket containing the directory
 * @param directoryName key prefix (directory) to delete
 */
public static void deleteDirectory(final ClientOptions clientOptions, final String bucketName, final String directoryName) {
    LOGGER.debug(format("Deleting S3 Directory %1$s in bucket %2$s", directoryName, bucketName));
    // Remove each object under the prefix one by one.
    for (final S3ObjectSummary summary : listDirectory(clientOptions, bucketName, directoryName)) {
        deleteObject(clientOptions, bucketName, summary.getKey());
    }
    // Finally remove the directory marker object itself (a no-op if it does not exist).
    deleteObject(clientOptions, bucketName, directoryName);
}
}

View File

@ -32,6 +32,8 @@ import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.CollectionType;
import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.http.HttpEntity; import org.apache.http.HttpEntity;
@ -43,8 +45,6 @@ import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.entity.StringEntity; import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.message.BasicStatusLine; import org.apache.http.message.BasicStatusLine;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.type.CollectionType;
import org.junit.Test; import org.junit.Test;
import com.google.gson.FieldNamingPolicy; import com.google.gson.FieldNamingPolicy;