Removed trailing spaces

Alex Huang 2013-11-21 04:08:01 -08:00
parent e4b22d0fca
commit 224f479974
613 changed files with 4405 additions and 4405 deletions

View File

@ -19,7 +19,7 @@ package com.cloud.agent.dao;
import com.cloud.utils.component.Manager;
/**
*
*
*/
public interface StorageComponent extends Manager {
String get(String key);

View File

@ -33,7 +33,7 @@ import com.cloud.utils.PropertiesUtil;
/**
* Uses Properties to implement storage.
*
*
* @config {@table || Param Name | Description | Values | Default || || path |
* path to the properties _file | String | db/db.properties || * }
**/
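A minimal, hypothetical sketch of the properties-backed storage this javadoc describes, mirroring the StorageComponent.get(String) method shown in the previous file; this is not the real CloudStack class, and the class name and the db.cloud.host key in main are assumptions (only the db/db.properties default comes from the @config table):

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class PropertiesStorageSketch {
    private final Properties props = new Properties();

    public PropertiesStorageSketch(String path) throws IOException {
        try (FileInputStream in = new FileInputStream(path)) {
            props.load(in);              // read all key=value pairs from the file
        }
    }

    public String get(String key) {      // mirrors StorageComponent.get(String)
        return props.getProperty(key);
    }

    public static void main(String[] args) throws IOException {
        PropertiesStorageSketch storage = new PropertiesStorageSketch("db/db.properties");
        System.out.println(storage.get("db.cloud.host"));
    }
}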

View File

@ -66,7 +66,7 @@ import com.cloud.utils.net.NetUtils;
import com.cloud.utils.script.Script;
/**
*
*
* I don't want to introduce extra cross-cutting concerns into console proxy
* process, as it involves configurations like zone/pod, agent auto self-upgrade
* etc. I also don't want to introduce more module dependency issues into our
@ -74,12 +74,12 @@ import com.cloud.utils.script.Script;
* will be done through reflection. As a result, I came up with the following
* solution to the problem of building a communication channel between the
* console proxy and the management server.
*
*
* We will deploy an agent shell inside console proxy VM, and this agent shell
* will launch the console proxy from within this special server resource,
* through which the console proxy can build a communication channel with the management
* server.
*
*
*/
public class ConsoleProxyResource extends ServerResourceBase implements ServerResource {
static final Logger s_logger = Logger.getLogger(ConsoleProxyResource.class);

View File

@ -24,7 +24,7 @@ import com.cloud.utils.component.Manager;
/**
* Maintains vm data (user data, meta-data, password) that can be fetched via
* HTTP by user vms
*
*
*/
public interface VmDataServer extends Manager {

View File

@ -23,7 +23,7 @@ import com.cloud.vm.VirtualMachine.PowerState;
// TODO vmsync
// We should also have a HostVmStateReport class instead of using raw Map<> data structure,
// for now, we store host-specific info at each VM entry and host fields are fixed
//
//
// This needs to be refactor-ed afterwards
//
public class HostVmStateReportEntry {

View File

@ -52,11 +52,11 @@ public interface HostAllocator extends Adapter {
/**
* Determines which physical hosts are suitable to allocate the guest
* virtual machines on
*
*
* Allocators must set any other hosts not considered for allocation in the
* ExcludeList avoid. Thus the avoid set and the list of suitable hosts
* together must cover the entire host set in the cluster.
*
*
* @param VirtualMachineProfile
* vmProfile
* @param DeploymentPlan
@ -81,8 +81,8 @@ public interface HostAllocator extends Adapter {
* Allocators must set any other hosts not considered for allocation in the
* ExcludeList avoid. Thus the avoid set and the list of suitable hosts
* together must cover the entire host set in the cluster.
*
*
*
*
* @param VirtualMachineProfile
* vmProfile
* @param DeploymentPlan
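As a rough illustration of the contract above (not CloudStack's allocator code), this self-contained sketch walks the cluster's hosts and puts every host either into the returned suitable list or into the avoid set; the free-RAM check is a made-up placeholder for whatever suitability test a real allocator applies:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class AllocatorContractSketch {
    static List<String> allocateTo(List<String> clusterHosts, Set<String> avoid,
                                   long requiredRamMb, Map<String, Long> freeRamMb) {
        List<String> suitable = new ArrayList<>();
        for (String host : clusterHosts) {
            if (avoid.contains(host)) {
                continue;                        // already excluded by an earlier allocator
            }
            if (freeRamMb.getOrDefault(host, 0L) >= requiredRamMb) {
                suitable.add(host);              // a candidate for deployment
            } else {
                avoid.add(host);                 // contract: anything not suitable goes into avoid
            }
        }
        return suitable;                         // avoid set + suitable list now cover the whole cluster
    }
}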

View File

@ -23,7 +23,7 @@ import com.cloud.vm.VirtualMachine;
public interface FenceBuilder extends Adapter {
/**
* Fence off the vm.
*
*
* @param vm vm
* @param host host where the vm was running on.
*/

View File

@ -24,7 +24,7 @@ import com.cloud.vm.VirtualMachine;
public interface Investigator extends Adapter {
/**
* Returns if the vm is still alive.
*
*
* @param vm to work on.
*/
public Boolean isVmAlive(VirtualMachine vm, Host host);

View File

@ -68,7 +68,7 @@ public class Hypervisor {
/**
* This method really needs to be part of the properties of the hypervisor type itself.
*
*
* @param hyperType
* @return
*/

View File

@ -72,7 +72,7 @@ public interface HypervisorGuru extends Adapter {
/**
* Give the hypervisor guru the opportunity to decide if additional cleanup is
* required for nics before expunging the VM
*
*
*/
List<Command> finalizeExpungeNics(VirtualMachine vm, List<NicProfile> nics);
}

View File

@ -23,13 +23,13 @@ import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
* NetworkGuru and NetworkElements that implement this interface
* NetworkGuru and NetworkElements that implement this interface
* will be called during Virtual Machine migration.
*/
public interface NetworkMigrationResponder {
/**
* Prepare for migration.
*
*
* This method will be called per nic before the vm migration.
* @param nic
* @param network
@ -42,9 +42,9 @@ public interface NetworkMigrationResponder {
/**
* Cancel for migration preparation.
*
* This method will be called per nic when the entire vm migration
* process failed and needs to release the resources that were
*
* This method will be called per nic when the entire vm migration
* process failed and needs to release the resources that were
* allocated at the migration preparation.
* @param nic destination nic
* @param network destination network
@ -56,9 +56,9 @@ public interface NetworkMigrationResponder {
/**
* Commit the migration resource.
*
* This method will be called per nic when the entire vm migration
* process was successful. This is useful to release the resource of
*
* This method will be called per nic when the entire vm migration
* process was successful. This is useful to release the resource of
* the source deployment that the vm has left.
* @param nic source nic
* @param network source network
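Read together, prepareMigration, cancelMigration and commitMigration form a per-nic lifecycle. The sketch below restates that calling convention with a simplified stand-in interface; the real CloudStack methods take NicProfile, Network, VirtualMachineProfile and reservation-context arguments rather than a plain nic name:

import java.util.List;

public class MigrationLifecycleSketch {
    // Simplified stand-in for the NetworkMigrationResponder contract described above.
    interface Responder {
        boolean prepareMigration(String nic);
        void commitMigration(String nic);
        void cancelMigration(String nic);
    }

    static void migrate(List<String> nics, Responder responder, boolean hypervisorMoveSucceeded) {
        boolean prepared = true;
        for (String nic : nics) {
            if (!responder.prepareMigration(nic)) {   // called per nic before the vm moves
                prepared = false;
                break;
            }
        }
        if (prepared && hypervisorMoveSucceeded) {
            nics.forEach(responder::commitMigration); // release resources on the source side
        } else {
            nics.forEach(responder::cancelMigration); // release what preparation allocated
        }
    }
}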

View File

@ -42,14 +42,14 @@ import com.cloud.vm.VirtualMachine;
/**
* The NetworkModel presents a read-only view into the Network data such as L2 networks,
* Nics, PublicIps, NetworkOfferings, traffic labels, physical networks and the like
* The idea is that only the orchestration core should be able to modify the data, while other
* The idea is that only the orchestration core should be able to modify the data, while other
* participants in the orchestration can use this interface to query the data.
*/
public interface NetworkModel {
/**
* Lists IP addresses that belong to VirtualNetwork VLANs
*
*
* @param accountId
* - account that the IP address should belong to
* @param associatedNetworkId

View File

@ -23,7 +23,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
/**
* Network includes all of the enums used within networking.
*
*
*/
public class Networks {

View File

@ -30,7 +30,7 @@ public interface IpDeployer extends Adapter {
/**
* Modify ip addresses on this network
* Depending on the State of the ip addresses the element should take
* appropriate action.
* appropriate action.
* If state is Releasing the ip address should be de-allocated
* If state is Allocating or Allocated the ip address should be provisioned
* @param network
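A small sketch of the dispatch this comment describes; the State enum here is a local stand-in for the state carried by CloudStack's ip address objects, and the println calls stand in for real device operations:

public class IpDeployerSketch {
    enum State { Allocating, Allocated, Releasing }   // local stand-in, not the real enum

    static void applyIp(State state) {
        switch (state) {
            case Releasing:
                System.out.println("de-allocate the address on the device");
                break;
            case Allocating:
            case Allocated:
                System.out.println("provision the address on the device");
                break;
        }
    }
}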

View File

@ -44,13 +44,13 @@ public interface NetworkElement extends Adapter {
Map<Service, Map<Capability, String>> getCapabilities();
/**
* NOTE:
* NOTE:
* NetworkElement -> Network.Provider is a one-to-one mapping. While adding a new NetworkElement, one must add a new Provider name to Network.Provider.
*/
Provider getProvider();
/**
* Implement the network configuration as specified.
* Implement the network configuration as specified.
* @param config fully specified network configuration.
* @param offering network offering that originated the network configuration.
* @return true if network configuration is now usable; false if not; null if not handled by this element.
@ -124,7 +124,7 @@ public interface NetworkElement extends Adapter {
boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException;
/**
* This should return true if out of multiple services provided by this element, only some can be enabled. If all the services MUST be provided, this should return false.
* This should return true if out of multiple services provided by this element, only some can be enabled. If all the services MUST be provided, this should return false.
* @return true/false
*/
boolean canEnableIndividualServices();

View File

@ -27,7 +27,7 @@ public interface Vpc extends ControlledEntity, Identity, InternalIdentity {
}
/**
*
*
* @return VPC name
*/
String getName();
@ -43,31 +43,31 @@ public interface Vpc extends ControlledEntity, Identity, InternalIdentity {
String getCidr();
/**
*
*
* @return VPC state
*/
State getState();
/**
*
*
* @return VPC offering id - the offering that VPC is created from
*/
long getVpcOfferingId();
/**
*
*
* @return VPC display text
*/
String getDisplayText();
/**
*
*
* @return VPC network domain. All networks participating in the VPC become part of the same network domain
*/
String getNetworkDomain();
/**
*
*
* @return true if restart is required for the VPC; false otherwise
*/
boolean isRestartRequired();

View File

@ -28,7 +28,7 @@ public interface VpcOffering extends InternalIdentity, Identity {
public static final String defaultVPCNSOfferingName = "Default VPC offering with Netscaler";
/**
*
*
* @return VPC offering name
*/
String getName();
@ -39,13 +39,13 @@ public interface VpcOffering extends InternalIdentity, Identity {
String getDisplayText();
/**
*
*
* @return VPC offering state
*/
State getState();
/**
*
*
* @return true if offering is default - came with the cloudStack fresh install; false otherwise
*/
boolean isDefault();

View File

@ -35,7 +35,7 @@ public interface VpcService {
/**
* Persists VPC record in the database
*
*
* @param zoneId
* @param vpcOffId
* @param vpcOwnerId
@ -50,7 +50,7 @@ public interface VpcService {
/**
* Deletes a VPC
*
*
* @param vpcId
* @return
* @throws InsufficientCapacityException
@ -61,7 +61,7 @@ public interface VpcService {
/**
* Updates VPC with new name/displayText
*
*
* @param vpcId
* @param vpcName
* @param displayText
@ -71,7 +71,7 @@ public interface VpcService {
/**
* Lists VPC(s) based on the parameters passed to the method call
*
*
* @param id
* @param vpcName
* @param displayText
@ -98,7 +98,7 @@ public interface VpcService {
/**
* Starts VPC which includes starting VPC provider and applying all the networking rules on the backend
*
*
* @param vpcId
* @param destroyOnFailure TODO
* @return
@ -110,7 +110,7 @@ public interface VpcService {
/**
* Shuts down the VPC which includes shutting down all VPC provider and rules cleanup on the backend
*
*
* @param vpcId
* @return
* @throws ConcurrentOperationException
@ -120,7 +120,7 @@ public interface VpcService {
/**
* Restarts the VPC. VPC gets shutdown and started as a part of it
*
*
* @param id
* @return
* @throws InsufficientCapacityException
@ -129,7 +129,7 @@ public interface VpcService {
/**
* Returns a Private gateway found in the VPC by id
*
*
* @param id
* @return
*/
@ -137,7 +137,7 @@ public interface VpcService {
/**
* Persists VPC private gateway in the Database.
*
*
*
* @param vpcId TODO
* @param physicalNetworkId
@ -159,7 +159,7 @@ public interface VpcService {
/**
* Applies VPC private gateway on the backend, so it becomes functional
*
*
* @param gatewayId
* @param destroyOnFailure TODO
* @return
@ -170,7 +170,7 @@ public interface VpcService {
/**
* Deletes VPC private gateway
*
*
* @param id
* @return
* @throws ResourceUnavailableException
@ -180,7 +180,7 @@ public interface VpcService {
/**
* Returns the list of Private gateways existing in the VPC
*
*
* @param listPrivateGatewaysCmd
* @return
*/
@ -188,7 +188,7 @@ public interface VpcService {
/**
* Returns Static Route found by Id
*
*
* @param routeId
* @return
*/
@ -196,7 +196,7 @@ public interface VpcService {
/**
* Applies existing Static Routes to the VPC elements
*
*
* @param vpcId
* @return
* @throws ResourceUnavailableException
@ -205,7 +205,7 @@ public interface VpcService {
/**
* Deletes static route from the backend and the database
*
*
* @param routeId
* @return TODO
* @throws ResourceUnavailableException
@ -214,7 +214,7 @@ public interface VpcService {
/**
* Persists static route entry in the Database
*
*
* @param gatewayId
* @param cidr
* @return
@ -223,7 +223,7 @@ public interface VpcService {
/**
* Lists static routes based on parameters passed to the call
*
*
* @param listStaticRoutesCmd
* @return
*/
@ -231,7 +231,7 @@ public interface VpcService {
/**
* Associates IP address from the Public network, to the VPC
*
*
* @param ipId
* @param vpcId
* @return
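Taken together, the methods above describe a create / start / restart / shutdown / delete lifecycle plus gateway, static-route and IP operations. The sketch below is a hypothetical, heavily simplified mirror of just the lifecycle part; the method names and parameters are assumptions, not the real VpcService signatures:

public class VpcLifecycleSketch {
    interface Vpcs {
        long create(long zoneId, long offeringId, String cidr);
        boolean start(long vpcId);        // starts the VPC provider and applies network rules
        boolean restart(long vpcId);      // shutdown followed by start
        boolean shutdown(long vpcId);     // stops the provider and cleans up rules on the backend
        boolean delete(long vpcId);
    }

    static void exercise(Vpcs vpcs) {
        long vpcId = vpcs.create(1L, 1L, "10.1.0.0/16");
        vpcs.start(vpcId);
        vpcs.restart(vpcId);
        vpcs.shutdown(vpcId);
        vpcs.delete(vpcId);
    }
}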

View File

@ -49,7 +49,7 @@ public interface DomainService {
/**
* find the domain by its path
*
*
* @param domainPath
* the path to use to lookup a domain
* @return domainVO the domain with the matching path, or null if no domain with the given path exists

View File

@ -17,7 +17,7 @@
package com.cloud.vm;
/**
* ConsoleProxy is a system VM instance that is used
* ConsoleProxy is a system VM instance that is used
* to proxy VNC traffic
*/
public interface ConsoleProxy extends SystemVm {

View File

@ -17,7 +17,7 @@
package com.cloud.vm;
/**
* Secondary Storage VM is a system VM instance that is used
* Secondary Storage VM is a system VM instance that is used
* to interface the management server to secondary storage
*/
public interface SecondaryStorageVm extends SystemVm {

View File

@ -290,7 +290,7 @@ public interface UserVmService {
/**
* Creates a User VM in Advanced Zone (Security Group feature is disabled)
* in the database and returns the VM to the caller.
*
*
*
* @param zone
* - availability zone for the virtual machine
@ -397,7 +397,7 @@ public interface UserVmService {
* @param cmd
* - the command specifying vmId and new serviceOfferingId
* @return the vm
* @throws ResourceAllocationException
* @throws ResourceAllocationException
*/
UserVm upgradeVirtualMachine(UpgradeVMCmd cmd) throws ResourceAllocationException;

View File

@ -29,7 +29,7 @@ import com.cloud.utils.fsm.StateObject;
/**
* VirtualMachine describes the properties held by a virtual machine
*
*
*/
public interface VirtualMachine extends RunningOn, ControlledEntity, Identity, InternalIdentity, StateObject<VirtualMachine.State> {
@ -258,7 +258,7 @@ public interface VirtualMachine extends RunningOn, ControlledEntity, Identity, I
/**
* returns the guest OS ID
*
*
* @return guestOSId
*/
long getGuestOSId();

View File

@ -21,7 +21,7 @@ import java.util.Formatter;
import com.cloud.dc.Vlan;
/**
* VM Name.
* VM Name.
*/
public class VirtualMachineName {
public static final String SEPARATOR = "-";

View File

@ -78,7 +78,7 @@ public interface AffinityGroupProcessor extends Adapter {
* subDomainAccess() should return true if the affinity/anti-affinity group
* can be created for a domain and used by the sub-domains. If true, all
* accounts under the sub-domains can see this group and use it.
*
*
* @return boolean true/false
*/
boolean subDomainAccess();

View File

@ -11,7 +11,7 @@
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

View File

@ -23,7 +23,7 @@ import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
/**
*
*
* Load Balancer instance is the User Vm instance participating in the Load Balancer
*
*/

View File

@ -118,7 +118,7 @@ public class CallContext {
/**
* This method should only be called if you can propagate the context id
* from another CallContext.
*
*
* @param callingUser calling user
* @param callingAccount calling account
* @param contextId context id propagated from another call context
@ -131,7 +131,7 @@ public class CallContext {
protected static CallContext register(User callingUser, Account callingAccount, Long userId, Long accountId, String contextId) {
/*
Unit tests will have multiple times of setup/tear-down call to this, remove assertions to all unit test to run
assert s_currentContext.get() == null : "There's a context already so what does this new register context mean? " + s_currentContext.get().toString();
if (s_currentContext.get() != null) { // FIXME: This should be removed soon. I added this check only to surface all the places that have this problem.
throw new CloudRuntimeException("There's a context already so what does this new register context mean? " + s_currentContext.get().toString());
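A hedged usage sketch for the contextId-propagating register(...) documented earlier in this file; CallContext.current(), getCallingUser(), getCallingAccount(), getContextId() and unregister() are assumed accessors here, and the exact signatures may differ from the real class:

static void runAsDelegate(CallContext parent, Runnable work) {
    // keep the parent's context id so logs and events from both contexts can be correlated
    CallContext.register(parent.getCallingUser(), parent.getCallingAccount(), parent.getContextId());
    try {
        work.run();
    } finally {
        CallContext.unregister();
    }
}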

View File

@ -5,7 +5,7 @@
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,

View File

@ -5,7 +5,7 @@
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,

View File

@ -79,7 +79,7 @@ public interface RegionService {
/**
* Updates an account
* isPopagate flag is set to true if sent from a peer Region
* isPopagate flag is set to true if sent from a peer Region
*
* @param cmd
* - the parameter containing accountId or account name and domainId
@ -91,8 +91,8 @@ public interface RegionService {
* Disables an account by accountName and domainId or accountId
* @param cmd
* @return
* @throws ResourceUnavailableException
* @throws ConcurrentOperationException
* @throws ResourceUnavailableException
* @throws ConcurrentOperationException
*/
Account disableAccount(DisableAccountCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException;
@ -112,7 +112,7 @@ public interface RegionService {
/**
* update an existing domain
*
*
* @param cmd
* - the command containing domainId and new domainName
* @return Domain object if the command succeeded

View File

@ -29,7 +29,7 @@ public interface UsageService {
/**
* Generate Billing Records from the last time it was generated to the
* time specified.
*
*
* @param cmd the command wrapping the generate parameters
* - userId unique id of the user, pass in -1 to generate billing records
* for all users
@ -41,7 +41,7 @@ public interface UsageService {
/**
* Retrieves all Usage Records generated between the start and end date specified
*
*
* @param userId unique id of the user, pass in -1 to retrieve billing records
* for all users
* @param startDate inclusive.

View File

@ -29,7 +29,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
/**
* @author dhoogland
*
*
*/
public class NetworksTest {

View File

@ -69,22 +69,22 @@ public class AddAccountToProjectCmdTest extends TestCase {
/****
* Condition not handled in the code
*
*
*****/
/*
* @Test public void testGetEntityOwnerIdForNullProject() {
*
*
* ProjectService projectService = Mockito.mock(ProjectService.class);
* Mockito
* .when(projectService.getProject(Mockito.anyLong())).thenReturn(null);
* addAccountToProjectCmd._projectService = projectService;
*
*
* try { addAccountToProjectCmd.getEntityOwnerId(); }
* catch(InvalidParameterValueException exception) {
* Assert.assertEquals("Unable to find project by id 2",
* exception.getLocalizedMessage()); }
*
*
* }
*/
@ -110,32 +110,32 @@ public class AddAccountToProjectCmdTest extends TestCase {
/**
* To run the test uncomment the return statement for getAccountName() in
* setup() and return null
*
*
* **/
/*
* @Test public void testGetEventDescriptionForNullAccount() {
*
*
* String result = addAccountToProjectCmd.getEventDescription(); String
* expected = "Sending invitation to email null to join project: 2";
* Assert.assertEquals(expected, result);
*
*
* }
*/
/***
*
*
*
*
*
*
* ***/
/*
* @Test public void testGetEventDescriptionForAccount() {
*
*
* String result = addAccountToProjectCmd.getEventDescription(); String
* expected = "Adding account accountName to project: 2";
* Assert.assertEquals(expected, result);
*
*
* }
*/
@ -152,20 +152,20 @@ public class AddAccountToProjectCmdTest extends TestCase {
/*
* @Test public void testExecuteForAccountNameEmail() {
*
*
* try {
*
*
* ComponentLocator c = Mockito.mock(ComponentLocator.class); UserContext
* userContext = Mockito.mock(UserContext.class);
*
*
* // Mockito.when(userContext.current()).thenReturn(userContext);
*
*
*
*
* addAccountToProjectCmd.execute(); } catch(InvalidParameterValueException
* exception) {
* Assert.assertEquals("Either accountName or email is required",
* exception.getLocalizedMessage()); }
*
*
* }
*/

View File

@ -97,7 +97,7 @@ public class AddHostCmdTest extends TestCase {
/*
* @Test public void testExecuteForResult() throws Exception {
*
*
* addHostCmd._resourceService = resourceService;
* addHostCmd._responseGenerator = responseGenerator; MockHost mockInstance
* = new MockHost(); MockHost[] mockArray = new MockHost[]{mockInstance};

View File

@ -69,27 +69,27 @@ public class AddVpnUserCmdTest extends TestCase {
/*
* @Test public void testExecuteVpnUserNotFound() {
*
*
* EntityManager entityManager = Mockito.mock(EntityManager.class);
*
*
* Mockito.when(entityManager.findById(VpnUser.class,
* Mockito.anyLong())).thenReturn(null);
*
*
* addVpnUserCmd._entityMgr = entityManager; try { addVpnUserCmd.execute();
* } catch (Exception e) { }
*
*
* }
*
*
*
*
* @Test public void testExecuteVpnUserFound() {
*
*
* EntityManager entityManager = Mockito.mock(EntityManager.class);
* addVpnUserCmd._entityMgr = entityManager;
*
*
* VpnUser vpnUser = Mockito.mock(VpnUser.class);
* Mockito.when(entityManager.findById(VpnUser.class,
* Mockito.anyLong())).thenReturn(vpnUser); addVpnUserCmd.execute();
*
*
* }
*/

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -30,7 +30,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getBucketLoggingStatus0
*/
@ -41,7 +41,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param copyObject2
*/
@ -52,7 +52,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getBucketAccessControlPolicy4
*/
@ -63,7 +63,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param listBucket6
*/
@ -74,7 +74,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param putObject8
*/
@ -85,7 +85,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param createBucket10
*/
@ -96,7 +96,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param listAllMyBuckets12
*/
@ -107,7 +107,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getObject14
*/
@ -118,7 +118,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param deleteBucket16
*/
@ -129,7 +129,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param setBucketLoggingStatus18
*/
@ -140,7 +140,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getObjectAccessControlPolicy20
*/
@ -151,7 +151,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param deleteObject22
*/
@ -162,7 +162,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param setBucketAccessControlPolicy24
*/
@ -173,7 +173,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param setObjectAccessControlPolicy26
*/
@ -184,7 +184,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param putObjectInline28
*/
@ -195,7 +195,7 @@ public class AmazonS3Skeleton implements AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getObjectExtended30
*/

View File

@ -32,7 +32,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getBucketLoggingStatus
*/
@ -40,16 +40,16 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param copyObject
* @throws AxisFault
* @throws AxisFault
*/
public com.amazon.s3.CopyObjectResponse copyObject(com.amazon.s3.CopyObject copyObject) throws AxisFault;
/**
* Auto generated method signature
*
*
* @param getBucketAccessControlPolicy
*/
@ -57,7 +57,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param listBucket
*/
@ -65,7 +65,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param putObject
*/
@ -73,7 +73,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param createBucket
*/
@ -81,7 +81,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param listAllMyBuckets
*/
@ -89,7 +89,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getObject
*/
@ -97,7 +97,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param deleteBucket
*/
@ -105,7 +105,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param setBucketLoggingStatus
*/
@ -113,7 +113,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getObjectAccessControlPolicy
*/
@ -121,7 +121,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param deleteObject
*/
@ -129,7 +129,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param setBucketAccessControlPolicy
*/
@ -137,7 +137,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param setObjectAccessControlPolicy
*/
@ -145,7 +145,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param putObjectInline
*/
@ -153,7 +153,7 @@ public interface AmazonS3SkeletonInterface {
/**
* Auto generated method signature
*
*
* @param getObjectExtended
*/

View File

@ -227,10 +227,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#getBucketLoggingStatus
* @param getBucketLoggingStatus0
*/
public com.amazon.s3.client.AmazonS3Stub.GetBucketLoggingStatusResponse getBucketLoggingStatus(
@ -326,10 +326,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startgetBucketLoggingStatus
* @param getBucketLoggingStatus0
*/
public void startgetBucketLoggingStatus(
@ -455,10 +455,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#copyObject
* @param copyObject2
*/
public com.amazon.s3.client.AmazonS3Stub.CopyObjectResponse copyObject(
@ -553,10 +553,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startcopyObject
* @param copyObject2
*/
public void startcopyObject(
@ -682,10 +682,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#getBucketAccessControlPolicy
* @param getBucketAccessControlPolicy4
*/
public com.amazon.s3.client.AmazonS3Stub.GetBucketAccessControlPolicyResponse getBucketAccessControlPolicy(
@ -781,10 +781,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startgetBucketAccessControlPolicy
* @param getBucketAccessControlPolicy4
*/
public void startgetBucketAccessControlPolicy(
@ -910,10 +910,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#listBucket
* @param listBucket6
*/
public com.amazon.s3.client.AmazonS3Stub.ListBucketResponse listBucket(
@ -1008,10 +1008,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startlistBucket
* @param listBucket6
*/
public void startlistBucket(
@ -1137,10 +1137,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#putObject
* @param putObject8
*/
public com.amazon.s3.client.AmazonS3Stub.PutObjectResponse putObject(
@ -1235,10 +1235,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startputObject
* @param putObject8
*/
public void startputObject(
@ -1364,10 +1364,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#createBucket
* @param createBucket10
*/
public com.amazon.s3.client.AmazonS3Stub.CreateBucketResponse createBucket(
@ -1463,10 +1463,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startcreateBucket
* @param createBucket10
*/
public void startcreateBucket(
@ -1592,10 +1592,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#listAllMyBuckets
* @param listAllMyBuckets12
*/
public com.amazon.s3.client.AmazonS3Stub.ListAllMyBucketsResponse listAllMyBuckets(
@ -1691,10 +1691,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startlistAllMyBuckets
* @param listAllMyBuckets12
*/
public void startlistAllMyBuckets(
@ -1820,10 +1820,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#getObject
* @param getObject14
*/
public com.amazon.s3.client.AmazonS3Stub.GetObjectResponse getObject(
@ -1918,10 +1918,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startgetObject
* @param getObject14
*/
public void startgetObject(
@ -2047,10 +2047,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#deleteBucket
* @param deleteBucket16
*/
public com.amazon.s3.client.AmazonS3Stub.DeleteBucketResponse deleteBucket(
@ -2146,10 +2146,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startdeleteBucket
* @param deleteBucket16
*/
public void startdeleteBucket(
@ -2275,10 +2275,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#setBucketLoggingStatus
* @param setBucketLoggingStatus18
*/
public com.amazon.s3.client.AmazonS3Stub.SetBucketLoggingStatusResponse setBucketLoggingStatus(
@ -2374,10 +2374,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startsetBucketLoggingStatus
* @param setBucketLoggingStatus18
*/
public void startsetBucketLoggingStatus(
@ -2503,10 +2503,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#getObjectAccessControlPolicy
* @param getObjectAccessControlPolicy20
*/
public com.amazon.s3.client.AmazonS3Stub.GetObjectAccessControlPolicyResponse getObjectAccessControlPolicy(
@ -2602,10 +2602,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startgetObjectAccessControlPolicy
* @param getObjectAccessControlPolicy20
*/
public void startgetObjectAccessControlPolicy(
@ -2731,10 +2731,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#deleteObject
* @param deleteObject22
*/
public com.amazon.s3.client.AmazonS3Stub.DeleteObjectResponse deleteObject(
@ -2830,10 +2830,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startdeleteObject
* @param deleteObject22
*/
public void startdeleteObject(
@ -2959,10 +2959,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#setBucketAccessControlPolicy
* @param setBucketAccessControlPolicy24
*/
public com.amazon.s3.client.AmazonS3Stub.SetBucketAccessControlPolicyResponse setBucketAccessControlPolicy(
@ -3058,10 +3058,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startsetBucketAccessControlPolicy
* @param setBucketAccessControlPolicy24
*/
public void startsetBucketAccessControlPolicy(
@ -3187,10 +3187,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#setObjectAccessControlPolicy
* @param setObjectAccessControlPolicy26
*/
public com.amazon.s3.client.AmazonS3Stub.SetObjectAccessControlPolicyResponse setObjectAccessControlPolicy(
@ -3286,10 +3286,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startsetObjectAccessControlPolicy
* @param setObjectAccessControlPolicy26
*/
public void startsetObjectAccessControlPolicy(
@ -3415,10 +3415,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#putObjectInline
* @param putObjectInline28
*/
public com.amazon.s3.client.AmazonS3Stub.PutObjectInlineResponse putObjectInline(
@ -3514,10 +3514,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startputObjectInline
* @param putObjectInline28
*/
public void startputObjectInline(
@ -3643,10 +3643,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature
*
*
* @see com.amazon.s3.client.AmazonS3#getObjectExtended
* @param getObjectExtended30
*/
public com.amazon.s3.client.AmazonS3Stub.GetObjectExtendedResponse getObjectExtended(
@ -3742,10 +3742,10 @@ public class AmazonS3Stub extends org.apache.axis2.client.Stub {
/**
* Auto generated method signature for Asynchronous Invocations
*
*
* @see com.amazon.s3.client.AmazonS3#startgetObjectExtended
* @param getObjectExtended30
*/
public void startgetObjectExtended(

View File

@ -80,7 +80,7 @@ public class AuthenticationHandler implements Handler {
/**
* For EC2 SOAP calls this function's goal is to extract the X509 certificate that is
* part of the WS-Security wrapped SOAP request. We need the cert in order to
* part of the WS-Security wrapped SOAP request. We need the cert in order to
* map it to the user's Cloud API key and Cloud Secret Key.
*/
@Override
@ -105,11 +105,11 @@ public class AuthenticationHandler implements Handler {
ByteArrayInputStream bs = new ByteArrayInputStream(certBytes);
while (bs.available() > 0)
userCert = cf.generateCertificate(bs);
//System.out.println( "cert: " + userCert.toString());
//System.out.println( "cert: " + userCert.toString());
String uniqueId = AuthenticationUtils.X509CertUniqueId(userCert);
logger.debug("X509 cert's uniqueId: " + uniqueId);
// -> find the Cloud API key and the secret key from the cert's uniqueId
// -> find the Cloud API key and the secret key from the cert's uniqueId
UserCredentialsDao ucDao = new UserCredentialsDaoImpl();
UserCredentialsVO cloudKeys = ucDao.getByCertUniqueId(uniqueId);
if (null == cloudKeys) {

View File

@ -73,13 +73,13 @@ public class AuthenticationHandler implements Handler {
}
/**
* Verify the request's authentication signature by extracting all the
* Verify the request's authentication signature by extracting all the
* necessary parts of the request, obtaining the requestor's secret key, and
* recalculating the signature.
*
* On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3
*
* On Signature mismatch raise an AxisFault (i.e., a SoapFault) with what Amazon S3
* defines as a "Client.SignatureMismatch" error.
*
*
* Special case: need to deal with anonymous requests where no AWSAccessKeyId is
* given. In this case just pass the request on.
*/
@ -92,7 +92,7 @@ public class AuthenticationHandler implements Handler {
String secretKey = null;
String temp = null;
// [A] Obtain the HttpServletRequest object
// [A] Obtain the HttpServletRequest object
HttpServletRequest httpObj = (HttpServletRequest)msgContext.getProperty("transport.http.servletRequest");
if (null != httpObj)
System.out.println("S3 SOAP auth test header access - acceptable Encoding type: " + httpObj.getHeader("Accept-Encoding"));
@ -175,7 +175,7 @@ public class AuthenticationHandler implements Handler {
/**
* Given the user's access key, then obtain his secret key in the user database.
*
*
* @param accessKey - a unique string allocated for each registered user
* @return the secret key or null of no matching user found
*/
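The recalculation step mentioned above boils down to computing an HMAC over the request's signing string with the user's secret key and comparing it to the signature the client sent. A self-contained sketch, assuming the AWS S3 SOAP convention of signing "AmazonS3" + operation + timestamp with HMAC-SHA1 (that string-to-sign is an assumption, not something taken from this file):

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class S3SoapSignatureSketch {
    // Recompute the request signature so it can be compared with the one the client sent.
    static String sign(String operation, String timestamp, String secretKey) throws Exception {
        String stringToSign = "AmazonS3" + operation + timestamp;   // assumed S3 SOAP convention
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(secretKey.getBytes(StandardCharsets.UTF_8), "HmacSHA1"));
        byte[] raw = mac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8));
        return Base64.getEncoder().encodeToString(raw);             // compare against the request's Signature
    }
}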

View File

@ -54,15 +54,15 @@ import org.apache.log4j.Logger;
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
</pre>
* This implementation of input stream does not support marking operations.
*
*
* Incoming data is DIME encoded when its MIME type is "application/dime".
* Then use this class to pull out 2 streams:
* (1) The first stream is the SOAP request,
* (1) The first stream is the SOAP request,
* (2) The second stream is a chunked attachment (e.g., a file to store)
*
* The DIME format is defined at this reference:
* http://msdn.microsoft.com/en-us/library/aa480488.aspx
*
*
* @author Rick Rineholt
*/
public class DimeDelimitedInputStream extends FilterInputStream {
@ -94,7 +94,7 @@ public class DimeDelimitedInputStream extends FilterInputStream {
/**
* There can be multiple streams in a DIME encoding. For example, the first
* stream can be a SOAP message, and the second stream a binary attachment (e.g.,
* stream can be a SOAP message, and the second stream a binary attachment (e.g.,
* a file). During reading after an EOF is returned, this function should be
* called to see if there is another stream following the last.
*
@ -231,7 +231,7 @@ public class DimeDelimitedInputStream extends FilterInputStream {
if (0 == len)
return 0; //quick.
// odd case no data to read -- give back 0 next time -1;
// odd case no data to read -- give back 0 next time -1;
if (recordLength == 0 && bytesRead == 0 && !moreChunks) {
++bytesRead;
if (ME)
@ -302,7 +302,7 @@ public class DimeDelimitedInputStream extends FilterInputStream {
/**
* The DIME header is read into local class data fields and are not
* passed as part of the stream data.
*
*
* @param isChunk
* @throws IOException
*/
@ -430,7 +430,7 @@ public class DimeDelimitedInputStream extends FilterInputStream {
/**
* Read from the delimited stream.
*
*
* @param b is the array to read into. Read as much as possible
* into the size of this array.
* @return the number of bytes read. -1 if end of stream
@ -487,7 +487,7 @@ public class DimeDelimitedInputStream extends FilterInputStream {
/**
* Skip n bytes of data in the DIME stream, while reading and processing
* any headers in the current stream.
*
*
* @param n - number of data bytes to skip
* @return number of bytes actually skipped
* @throws IOException

View File

@ -15,7 +15,7 @@
// specific language governing permissions and limitations
// under the License.
/**
*
*
*/
package com.cloud.bridge.io;
@ -35,11 +35,11 @@ import org.apache.axis2.databinding.ADBBean;
import org.apache.axis2.databinding.ADBException;
/**
* Provide an MTOM aware serializable output stream writer to be consumed by implementors of the
* Provide an MTOM aware serializable output stream writer to be consumed by implementors of the
* com.amazon.s3 Response ADB bean classes.
* This writer enables participation in StAX-based builders and AXIOM OM XML stream processing
* An instance of a MTOMAwareResultStreamWriter is a convenient argument to a com.amazon.s3 Response bean, as generated
* from the Amazon S3 WSDL using
* from the Amazon S3 WSDL using
* wsdl2java.sh -ss -sd -ssi -g -p com.amazon.s3 -ns2p "http://s3.amazonaws.com/doc/2006-03-01/"=com.amazon.s3 -uri cloud-AmazonS3.wsdl
* Such a bean implements a serialize method of the form
* public void serialize(qualifiedName,omfactory, xmlWriter)
@ -55,8 +55,8 @@ import org.apache.axis2.databinding.ADBException;
* Additionally, as a side effect, ensure that the org.apache.axis2.databinding classes which serialize the
* output of each fields have been initialized to be aware of any custom classes which override the default
* output xsd converter methods of Axis2's databinding. Such a custom class is notified to the ADB framework
* (via its org.apache.axis2.databinding.utils.ConverterUtil class) by setting a System property,
* SYSTEM_PROPERTY_ADB_CONVERTERUTIL to name the custom class.
* (via its org.apache.axis2.databinding.utils.ConverterUtil class) by setting a System property,
* SYSTEM_PROPERTY_ADB_CONVERTERUTIL to name the custom class.
*/
public class MTOMAwareResultStreamWriter {
@ -88,7 +88,7 @@ public class MTOMAwareResultStreamWriter {
System.setProperty(org.apache.axis2.databinding.utils.ConverterUtil.SYSTEM_PROPERTY_ADB_CONVERTERUTIL, "com.cloud.bridge.util.DatabindingConverterUtil");
}
/*
/*
* @params
* @param nameOfResult Used as the tag description of the result written out when the requester serializes
* @param outputStream The stream capable of sinking bytes written at the time the requester is ready to serialize,
@ -107,7 +107,7 @@ public class MTOMAwareResultStreamWriter {
qualifiedName = new QName(S3XMLNS, nameOfResult, DEFAULT_NS_PREFIX);
}
/*
/*
* @params
* @param nameOfResult Used as the tag description of the result written out when the requester serializes
* @param outputStream The stream capable of sinking bytes written at the time the requester is ready to serialize,

View File

@ -21,10 +21,10 @@ import org.apache.log4j.Logger;
import java.io.InputStream;
import java.io.IOException;
/**
/**
* A DIME stream is actually composed of multiple encoded streams.
* This class is a wrapper around the DimeDelimitedInputStream in order
* to provide a simple iterator-like interface for all the streams in a
* This class is a wrapper around the DimeDelimitedInputStream in order
* to provide a simple iterator-like interface for all the streams in a
* DIME encoded message.
*/
public class MultiPartDimeInputStream {
@ -42,7 +42,7 @@ public class MultiPartDimeInputStream {
/**
* The SOAP stream must be first, call nextInputStream to get
* access to the first stream and all streams after that.
*
*
* @param is the true input stream holding the incoming request.
*/
public MultiPartDimeInputStream(InputStream is) throws IOException {
@ -52,7 +52,7 @@ public class MultiPartDimeInputStream {
/**
* These three methods are DIME specific but provide potentially
* useful information about the current stream's data.
*
*
* @return URL or MIME type
*/
public String getStreamType() {
@ -87,7 +87,7 @@ public class MultiPartDimeInputStream {
* Move on to the next stream encoded in the DIME stream.
* If the current stream has not been all read, then we skip the remaining bytes of
* that stream.
*
*
* @return false if no next input stream, true if next input stream ready
* @throws IOException
*/
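A hedged usage outline based only on what this javadoc documents (the constructor taking the raw request stream, nextInputStream() and getStreamType()); rawRequestStream is an assumed InputStream, and how the current stream's bytes are then consumed is left out here:

MultiPartDimeInputStream dime = new MultiPartDimeInputStream(rawRequestStream);
while (dime.nextInputStream()) {
    // first iteration: the SOAP envelope; later iterations: attachments such as object data
    System.out.println("stream of type: " + dime.getStreamType());
}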

View File

@ -95,7 +95,7 @@ public class S3CAStorBucketAdapter implements S3BucketAdapter {
// The "domain" to store streams can be specified. If not specified, streams will be written
// without a "domain" query arg, so they will go into the castor default domain.
// The port is optional and must be at the end of the config string, defaults to 80.
// Examples: "castor 172.16.78.130 172.16.78.131 80", "castor 172.16.78.130 domain=mycluster.example.com",
// Examples: "castor 172.16.78.130 172.16.78.131 80", "castor 172.16.78.130 domain=mycluster.example.com",
// "castor zeroconf=mycluster.example.com domain=mycluster.example.com 80"
String[] cfg = mountedRoot.split(" ");
int numIPs = cfg.length - 1;
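To make the comment's config-string format concrete, here is a small stand-alone parse of one of the example strings; this is an illustration of the format only, not the adapter's actual parsing code:

public class CAStorConfigSketch {
    public static void main(String[] args) {
        String mountedRoot = "castor 172.16.78.130 domain=mycluster.example.com 80";
        String[] cfg = mountedRoot.split(" ");
        int port = 80;                                   // default when no trailing port token
        String domain = null;
        java.util.List<String> nodes = new java.util.ArrayList<>();
        for (int i = 1; i < cfg.length; i++) {           // cfg[0] is the "castor" marker
            if (cfg[i].startsWith("domain=")) {
                domain = cfg[i].substring("domain=".length());
            } else if (i == cfg.length - 1 && cfg[i].matches("\\d+")) {
                port = Integer.parseInt(cfg[i]);
            } else {
                nodes.add(cfg[i]);                       // node IP or zeroconf=... locator
            }
        }
        System.out.println(nodes + " domain=" + domain + " port=" + port);
    }
}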

View File

@ -124,7 +124,7 @@ public class S3FileSystemBucketAdapter implements S3BucketAdapter {
* object that can be accessed by normal S3 calls. This function could take a long time since a multipart is
* allowed to have up to 10,000 parts (each up to 5 GiB long). Amazon defines that while this operation is in progress
* whitespace is sent back to the client in order to keep the HTTP connection alive.
*
*
* @param mountedRoot - where both the source and dest buckets are located
* @param destBucket - resulting location of the concatenated objects
* @param fileName - resulting file name of the concatenated objects
@ -230,7 +230,7 @@ public class S3FileSystemBucketAdapter implements S3BucketAdapter {
}
private String getBucketFolderName(String bucket) {
// temporary
// temporary
String name = bucket.replace(' ', '_');
name = name.replace('\\', '-');
name = name.replace('/', '-');

View File

@ -26,7 +26,7 @@ import com.cloud.utils.db.Transaction;
/**
* ServiceEngineLifecycle is used to participate in Axis service life cycle management
* so that we can inject proper initialization and cleanup procedure into the
* so that we can inject proper initialization and cleanup procedure into the
* process
*/
public class ServiceEngineLifecycle implements ServiceLifeCycle {

View File

@ -26,13 +26,13 @@ import com.cloud.bridge.util.Triple;
/**
* A model of stored ACLs to remember the ACL permissions per canonicalUserID per grantee
* Hold the AWS S3 grantee and permission constants.
*
*
* This class implements two forms of getCannedAccessControls mappings, as static methods,
*
*
* (a) an OrderedPair which provides a maplet across
* < permission, grantee >
* when given an aclRequestString and a target (i.e. bucket or object),
*
*
* (b) a Triplet
* < permission1, permission2, symbol >
* when given an aclRequestString, a target (i.e. bucket or object) and the ID of the owner.
@ -51,13 +51,13 @@ public interface SAcl {
public static final int PERMISSION_WRITE_ACL = 8;
public static final int PERMISSION_FULL = (PERMISSION_READ | PERMISSION_WRITE | PERMISSION_READ_ACL | PERMISSION_WRITE_ACL);
/** Return an OrderedPair
/** Return an OrderedPair
* < permission, grantee >
* comprising
* a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
* SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
* a grantee - which is one of GRANTEE_ALLUSERS, GRANTEE_AUTHENTICATED, GRANTEE_USER
*
*
* Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets.
* The ACL request string is treated as a request for a known cannedAccessPolicy
* @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs
@ -66,27 +66,27 @@ public interface SAcl {
//public static OrderedPair <Integer,Integer> getCannedAccessControls ( String aclRequestString, String target );
/* {
if ( aclRequestString.equalsIgnoreCase( "public-read" ))
if ( aclRequestString.equalsIgnoreCase( "public-read" ))
// All users granted READ access.
return new OrderedPair <Integer,Integer> (PERMISSION_READ,GRANTEE_ALLUSERS);
else if (aclRequestString.equalsIgnoreCase( "public-read-write" ))
else if (aclRequestString.equalsIgnoreCase( "public-read-write" ))
// All users granted READ and WRITE access
return new OrderedPair <Integer,Integer> ((PERMISSION_READ | PERMISSION_WRITE),GRANTEE_ALLUSERS);
else if (aclRequestString.equalsIgnoreCase( "authenticated-read" ))
else if (aclRequestString.equalsIgnoreCase( "authenticated-read" ))
// Authenticated users have READ access
return new OrderedPair <Integer,Integer> (PERMISSION_READ,GRANTEE_AUTHENTICATED);
else if (aclRequestString.equalsIgnoreCase( "private" ))
else if (aclRequestString.equalsIgnoreCase( "private" ))
// Only Owner gets FULL_CONTROL
return new OrderedPair <Integer,Integer> (PERMISSION_FULL,GRANTEE_USER);
else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" ))
{
// Object Owner gets FULL_CONTROL, Bucket Owner gets READ
if ( target.equalsIgnoreCase( "SBucket" ))
if ( target.equalsIgnoreCase( "SBucket" ))
return new OrderedPair <Integer,Integer> (PERMISSION_READ, GRANTEE_USER);
else
return new OrderedPair <Integer,Integer> (PERMISSION_FULL, GRANTEE_USER);
return new OrderedPair <Integer,Integer> (PERMISSION_FULL, GRANTEE_USER);
}
else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" ))
else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" ))
{
// Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL
// This is equivalent to private when used with PUT Bucket
@ -95,7 +95,7 @@ public interface SAcl {
else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" );
}
*/
/** Return a Triple
/** Return a Triple
* < permission1, permission2, symbol >
* comprising
* two permissions - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
@ -103,7 +103,7 @@ public interface SAcl {
* permission1 applies to objects, permission2 applies to buckets.
* a symbol to indicate whether the principal is anonymous (i.e. string "A") or authenticated user (i.e.
* string "*") - otherwise null indicates a single ACL for all users.
*
*
* Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets.
* The ACL request string is treated as a request for a known cannedAccessPolicy
* @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs
@ -113,34 +113,34 @@ public interface SAcl {
//public static Triple <Integer,Integer,String> getCannedAccessControls ( String aclRequestString, String target, String ownerID );
/* throws UnsupportedException
{
if ( aclRequestString.equalsIgnoreCase( "public-read" ))
if ( aclRequestString.equalsIgnoreCase( "public-read" ))
// Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ access.
return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_READ,"A");
else if (aclRequestString.equalsIgnoreCase( "public-read-write" ))
else if (aclRequestString.equalsIgnoreCase( "public-read-write" ))
// Owner gets FULL_CONTROL and the anonymous principal (the 'A' symbol here) is granted READ and WRITE access
return new Triple <Integer, Integer, String> (PERMISSION_FULL, (PERMISSION_READ | PERMISSION_WRITE),"A");
else if (aclRequestString.equalsIgnoreCase( "authenticated-read" ))
else if (aclRequestString.equalsIgnoreCase( "authenticated-read" ))
// Owner gets FULL_CONTROL and ANY principal authenticated as a registered S3 user (the '*' symbol here) is granted READ access
return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_READ,"*");
else if (aclRequestString.equalsIgnoreCase( "private" ))
else if (aclRequestString.equalsIgnoreCase( "private" ))
// This is termed the "private" or default ACL, "Owner gets FULL_CONTROL"
return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_FULL,null);
else if (aclRequestString.equalsIgnoreCase( "bucket-owner-read" ))
{
// Object Owner gets FULL_CONTROL, Bucket Owner gets READ
// This is equivalent to private when used with PUT Bucket
if ( target.equalsIgnoreCase( "SBucket" ))
return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_FULL ,null);
else
if ( target.equalsIgnoreCase( "SBucket" ))
return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_FULL ,null);
else
return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_READ,ownerID);
}
else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" ))
else if (aclRequestString.equalsIgnoreCase( "bucket-owner-full-control" ))
{
// Object Owner gets FULL_CONTROL, Bucket Owner gets FULL_CONTROL
// This is equivalent to private when used with PUT Bucket
if ( target.equalsIgnoreCase( "SBucket" ))
return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_FULL, null);
else
if ( target.equalsIgnoreCase( "SBucket" ))
return new Triple <Integer, Integer, String> (PERMISSION_FULL, PERMISSION_FULL, null);
else
return new Triple <Integer, Integer, String> (PERMISSION_FULL,PERMISSION_FULL, ownerID);
}
else throw new UnsupportedException( "Unknown Canned Access Policy: " + aclRequestString + " is not supported" );
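For quick reference, the <permission, grantee> mapping sketched in the commented-out code above can be restated as a small stand-alone method; the enum names here are simplified stand-ins for SAcl's integer constants, and the two bucket-owner-* policies are omitted because they also depend on the target type:

public class CannedAclSketch {
    enum Permission { READ, WRITE, READ_WRITE, FULL }
    enum Grantee { USER, ALLUSERS, AUTHENTICATED }

    static Object[] cannedAccessControls(String acl) {
        switch (acl.toLowerCase()) {
            case "public-read":        return new Object[] {Permission.READ,       Grantee.ALLUSERS};
            case "public-read-write":  return new Object[] {Permission.READ_WRITE, Grantee.ALLUSERS};
            case "authenticated-read": return new Object[] {Permission.READ,       Grantee.AUTHENTICATED};
            case "private":            return new Object[] {Permission.FULL,       Grantee.USER};
            default: throw new UnsupportedOperationException("Unknown Canned Access Policy: " + acl);
        }
    }
}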

View File

@ -34,13 +34,13 @@ import com.cloud.bridge.util.Triple;
/**
* A model of stored ACLs to remember the ACL permissions per canonicalUserID per grantee
* Hold the AWS S3 grantee and permission constants.
*
*
* This class implements two forms of getCannedAccessControls mappings, as static methods,
*
*
* (a) an OrderedPair which provides a maplet across
* < permission, grantee >
* when given an aclRequestString and a target (i.e. bucket or object),
*
*
* (b) a Triplet
* < permission1, permission2, symbol >
* when given an aclRequestString, a target (i.e. bucket or object) and the ID of the owner.
@ -155,13 +155,13 @@ public class SAclVO implements SAcl {
this.lastModifiedTime = lastModifiedTime;
}
/** Return an OrderedPair
/** Return an OrderedPair
* < permission, grantee >
* comprising
* a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
* SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
* a grantee - which is one of GRANTEE_ALLUSERS, GRANTEE_AUTHENTICATED, GRANTEE_USER
*
*
* Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets.
* The ACL request string is treated as a request for a known cannedAccessPolicy
* @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs
@ -194,7 +194,7 @@ public class SAclVO implements SAcl {
throw new UnsupportedException("Unknown Canned Access Policy: " + aclRequestString + " is not supported");
}
/** Return a Triple
/** Return a Triple
* < permission1, permission2, symbol >
* comprising
* two permissions - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
@ -202,7 +202,7 @@ public class SAclVO implements SAcl {
* permission1 applies to objects, permission2 applies to buckets.
* a symbol to indicate whether the principal is anonymous (i.e. string "A") or authenticated user (i.e.
* string "*") - otherwise null indicates a single ACL for all users.
*
*
* Access controls that are specified via the "x-amz-acl:" headers in REST requests for buckets.
* The ACL request string is treated as a request for a known cannedAccessPolicy
* @param aclRequestString - The requested ACL from the set of AWS S3 canned ACLs

View File

@ -24,10 +24,10 @@ import java.util.Set;
/**
* Holds the relation
* Id,
* Name,
* Name,
* OwnerCanonicalId,
* SHost,
* CreateTime,
* SHost,
* CreateTime,
* VersioningStatus
* For ORM see "com/cloud/bridge/model/SHost.hbm.xml"
*/
@ -38,21 +38,21 @@ public interface SBucket {
public static final int VERSIONING_SUSPENDED = 2;
/* private Long id;
private String name;
private String ownerCanonicalId;
private SHost shost;
private Date createTime;
private int versioningStatus;
private Set<SObject> objectsInBucket = new HashSet<SObject>();
public SBucket() {
versioningStatus = VERSIONING_NULL;
}
public Long getId() {
return id;
}
@ -60,47 +60,47 @@ public interface SBucket {
private void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getOwnerCanonicalId() {
return ownerCanonicalId;
}
public void setOwnerCanonicalId(String ownerCanonicalId) {
this.ownerCanonicalId = ownerCanonicalId;
}
public SHost getShost() {
return shost;
}
public void setShost(SHost shost) {
this.shost = shost;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public int getVersioningStatus() {
return versioningStatus;
}
public void setVersioningStatus( int versioningStatus ) {
this.versioningStatus = versioningStatus;
}
public Set<SObject> getObjectsInBucket() {
return objectsInBucket;
}
@ -108,18 +108,18 @@ public interface SBucket {
public void setObjectsInBucket(Set<SObject> objectsInBucket) {
this.objectsInBucket = objectsInBucket;
}
@Override
public boolean equals(Object other) {
if(this == other)
return true;
if(!(other instanceof SBucket))
return false;
return getName().equals(((SBucket)other).getName());
}
@Override
public int hashCode() {
return getName().hashCode();

View File

@ -34,10 +34,10 @@ import javax.persistence.Transient;
/**
* Holds the relation
* Id,
* Name,
* OwnerCanonicalId,
* SHost,
* CreateTime,
* VersioningStatus
* For ORM see "com/cloud/bridge/model/SHost.hbm.xml"
*/

View File

@ -32,20 +32,20 @@ public interface SHost {
STORAGE_HOST_TYPE_CASTOR //2
}
/* private Long id;
private String host;
private int hostType;
private MHostVO mhost;
private String exportRoot;
private String userOnHost;
private String userPassword;
private Set<SBucket> buckets = new HashSet<SBucket>();
private Set<MHostMount> mounts = new HashSet<MHostMount>();
public SHost() {
}
public Long getId() {
return id;
}
@ -53,15 +53,15 @@ public interface SHost {
private void setId(Long id) {
this.id = id;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getHostType() {
return hostType;
}
@ -81,19 +81,19 @@ public interface SHost {
public String getUserOnHost() {
return userOnHost;
}
public void setUserOnHost(String userOnHost) {
this.userOnHost = userOnHost;
}
public String getUserPassword() {
return userPassword;
}
public void setUserPassword(String userPassword) {
this.userPassword = userPassword;
}
public MHostVO getMhost() {
return mhost;
}
@ -109,7 +109,7 @@ public interface SHost {
public void setBuckets(Set<SBucket> buckets) {
this.buckets = buckets;
}
public Set<MHostMount> getMounts() {
return mounts;
}

View File

@ -189,7 +189,7 @@ public class SObjectVO {
/**
* S3 versioning allows the client to request the return of a specific version,
* not just the last version.
*
*
* @param wantVersion
* @return
*/

View File

@ -52,7 +52,7 @@ public class MultipartLoadDao {
/**
* If a multipart upload exists with the uploadId value then return the non-null creators
* accessKey.
*
*
* @param uploadId
* @return creator of the multipart upload, and NameKey of upload
*/
@ -65,9 +65,9 @@ public class MultipartLoadDao {
* The multipart upload was either successfully completed or was aborted. In either case, we need
* to remove all of its state from the tables. Note that we have cascade deletes so all tables with
* uploadId as a foreign key are automatically cleaned.
*
*
* @param uploadId
*
*
*/
public void deleteUpload(int uploadId) {
mpartUploadDao.deleteUpload(uploadId);
@ -75,7 +75,7 @@ public class MultipartLoadDao {
/**
* The caller needs to know who initiated the multipart upload.
*
*
* @param uploadId
* @return the access key value defining the initiator
*/
@ -85,12 +85,12 @@ public class MultipartLoadDao {
/**
* Create a new "in-process" multipart upload entry to keep track of its state.
*
*
* @param accessKey
* @param bucketName
* @param key
* @param cannedAccess
*
*
* @return if positive, it is the uploadId to be returned to the client
*
*/
@ -126,9 +126,9 @@ public class MultipartLoadDao {
/**
* Remember all the individual parts that make up the entire multipart upload so that once
* the upload is complete all the parts can be glued together into a single object. Note,
* the caller can over write an existing part.
*
*
* @param uploadId
* @param partNumber
* @param md5
@ -172,7 +172,7 @@ public class MultipartLoadDao {
/**
* When the multipart are being composed into one object we need any meta data to be saved with
* the new re-constituted object.
*
*
* @param uploadId
* @return an array of S3MetaDataEntry (will be null if no meta values exist)
* @throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException
@ -202,10 +202,10 @@ public class MultipartLoadDao {
}
}
/**
* The result has to be ordered by key and if there is more than one identical key then all the
* identical keys are ordered by create time.
*
*
* @param bucketName
* @param maxParts
* @param prefix - can be null
@ -251,7 +251,7 @@ public class MultipartLoadDao {
* Return info on a range of upload parts that have already been stored in disk.
* Note that parts can be uploaded in any order yet we must return an ordered list
* of parts thus we use the "ORDERED BY" clause to sort the list.
*
*
* @param uploadId
* @param maxParts
* @param startAt
@ -290,7 +290,7 @@ public class MultipartLoadDao {
/**
* How many parts exist after the endMarker part number?
*
*
* @param uploadId
* @param endMarker - can be used to see if getUploadedParts was truncated
* @return number of parts with partNumber greater than endMarker
@ -303,10 +303,10 @@ public class MultipartLoadDao {
/**
* A multipart upload request can have zero to many meta data entries to be applied to the
* final object. We need to remember all of the object's meta data until the multipart is complete.
*
*
* @param uploadId - defines an in-process multipart upload
* @param meta - an array of meta data to be assocated with the uploadId value
*
*
*/
private void saveMultipartMeta(int uploadId, S3MetaDataEntry[] meta) {
if (null == meta)
@ -332,7 +332,7 @@ public class MultipartLoadDao {
/**
* Reallocates an array with a new size, and copies the contents
* of the old array to the new array.
*
*
* @param oldArray the old array, to be reallocated.
* @param newSize the new array size.
* @return A new array with the same contents.
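The reallocation this javadoc describes can be expressed with java.util.Arrays; a minimal sketch follows (the class and method names are illustrative, not the DAO's actual helper).

import java.util.Arrays;

final class ArrayReallocSketch {
    // Reallocate an array to newSize, copying the old contents; extra slots are left null.
    static <T> T[] resize(T[] oldArray, int newSize) {
        return Arrays.copyOf(oldArray, newSize);
    }
}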

View File

@ -398,7 +398,7 @@ public class EC2RestServlet extends HttpServlet {
/**
* Provide an easy way to determine the version of the implementation running.
*
*
* This is an unauthenticated REST call.
*/
private void cloudEC2Version(HttpServletRequest request, HttpServletResponse response) {
@ -409,20 +409,20 @@ public class EC2RestServlet extends HttpServlet {
/**
* This request registers the Cloud.com account holder to the EC2 service. The Cloud.com
* account holder saves his API access and secret keys with the EC2 service so that
* the EC2 service can make Cloud.com API calls on his behalf. The given API access
* and secret key are saved into the "usercredentials" database table.
*
* This is an unauthenticated REST call. The only required parameters are 'accesskey' and
* 'secretkey'.
*
* To verify that the given keys represent an existing account they are used to execute the
* Cloud.com's listAccounts API function. If the keys do not represent a valid account the
* listAccounts function will fail.
*
* A user can call this REST function any number of times; on each call the Cloud.com secret
* key simply overwrites any previously stored value.
*
* As with all REST calls HTTPS should be used to ensure their security.
*/
private void setUserKeys(HttpServletRequest request, HttpServletResponse response) {
@ -468,18 +468,18 @@ public class EC2RestServlet extends HttpServlet {
}
/**
* The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that
* the client have a public/private key pair and the public key defined by a X509 certificate.
* Thus in order for a Cloud.com account holder to use the EC2's SOAP API he must register
* his X509 certificate with the EC2 service. This function allows the Cloud.com account
* holder to "load" his X509 certificate into the service. Note, that the SetUserKeys REST
* function must be called before this call.
*
*
* This is an authenticated REST call and as such must contain all the required REST parameters
* including: Signature, Timestamp, Expires, etc. The signature is calculated using the
* Cloud.com account holder's API access and secret keys and the Amazon defined EC2 signature
* algorithm.
*
*
* A user can call this REST function any number of times, on each call the X509 certificate
* simply over writes any previously stored value.
*/
@ -544,11 +544,11 @@ public class EC2RestServlet extends HttpServlet {
}
/**
* The SOAP API for EC2 uses WS-Security to sign all client requests. This requires that
* the client have a public/private key pair and the public key defined by a X509 certificate.
* This REST call allows a Cloud.com account holder to remove a previously "loaded" X509
* certificate out of the EC2 service.
*
*
* This is an unauthenticated REST call and as such must contain all the required REST parameters
* including: Signature, Timestamp, Expires, etc. The signature is calculated using the
* Cloud.com account holder's API access and secret keys and the Amazon defined EC2 signature
@ -707,11 +707,11 @@ public class EC2RestServlet extends HttpServlet {
}
/**
* The approach taken here is to map these REST calls into the same objects used
* to implement the matching SOAP requests (e.g., AttachVolume). This is done by parsing
* out the URL parameters and loading them into the relevant EC2XXX object(s). Once
* the parameters are loaded the appropriate EC2Engine function is called to perform
* the requested action. The result of the EC2Engine function is a standard
* Amazon WSDL defined object (e.g., AttachVolumeResponse Java object). Finally the
* serialize method is called on the returned response object to obtain the expected
* response XML.
@ -885,7 +885,7 @@ public class EC2RestServlet extends HttpServlet {
} while (true);
// -> list: IpPermissions.n.Groups.m.UserId and IpPermissions.n.Groups.m.GroupName
mCount = 1;
do {
String[] user = request.getParameterValues("IpPermissions." + nCount + ".Groups." + mCount + ".UserId");
@ -1780,7 +1780,7 @@ public class EC2RestServlet extends HttpServlet {
* &Filter.1.Value.1=i-1a2b3c4d
* &Filter.2.Name=attachment.delete-on-termination
* &Filter.2.Value.1=true
*
*
* @param request
* @return List<EC2Filter>
*/
@ -2076,7 +2076,7 @@ public class EC2RestServlet extends HttpServlet {
restAuth.setHTTPRequestURI(requestUri);
String queryString = request.getQueryString();
// getQueryString returns null (does it ever NOT return null for these),
// we need to construct queryString to avoid changing the auth code...
if (queryString == null) {
// construct our idea of a queryString with parameters!
@ -2108,7 +2108,7 @@ public class EC2RestServlet extends HttpServlet {
/**
* We check this to reduce replay attacks.
*
* @param timeStamp
* @return true - if the request is no longer valid, false otherwise
* @throws ParseException
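A hedged sketch of the expiry test this javadoc describes; the ISO-style date format and the caller-supplied window are assumptions for illustration, not the servlet's actual values.

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

final class ReplayWindowSketch {
    // Returns true when the request timestamp is older than the allowed window.
    static boolean hasExpired(String timeStamp, long windowMillis) throws ParseException {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
        fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
        Date sent = fmt.parse(timeStamp);
        return System.currentTimeMillis() - sent.getTime() > windowMillis;
    }
}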
@ -2191,7 +2191,7 @@ public class EC2RestServlet extends HttpServlet {
}
/**
* Serialize Axis beans to XML output.
*/
private void serializeResponse(HttpServletResponse response, ADBBean EC2Response) throws ADBException, XMLStreamException, IOException {
OutputStream os = response.getOutputStream();

View File

@ -706,9 +706,9 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
/**
* Processes ec2-register
*
*
* @param
*
*
* @see <a href="http://docs.amazonwebservices.com/AWSEC2/2010-11-15/APIReference/index.html?ApiReference-query-RegisterImage.html">RegisterImage</a>
*/
public RegisterImageResponse registerImage(RegisterImage registerImage) {
@ -726,9 +726,9 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
/**
* Processes ec2-reset-image-attribute
*
*
* @param resetImageAttribute
*
*
* @see <a href="http://docs.amazonwebservices.com/AWSEC2/2010-11-15/APIReference/index.html?ApiReference-query-ResetInstanceAttribute.html">ResetInstanceAttribute</a>
*/
@ -749,9 +749,9 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
/**
* ec2-run-instances
*
*
* @param runInstances
*
*
* @see <a href="http://docs.amazonwebservices.com/AWSEC2/2010-11-15/APIReference/index.html?ApiReference-query-RunInstances.html">RunInstances</a>
*/
public RunInstancesResponse runInstances(RunInstances runInstances) {
@ -885,7 +885,7 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
/**
* @param modifyInstanceAttribute
* @return
*/
public static ModifyInstanceAttributeResponse toModifyInstanceAttributeResponse(Boolean status) {
ModifyInstanceAttributeResponse miat = new ModifyInstanceAttributeResponse();
@ -1196,7 +1196,7 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
return request;
}
// TODO make these filter set functions use generics
private EC2GroupFilterSet toGroupFilterSet(FilterSetType fst) {
EC2GroupFilterSet gfs = new EC2GroupFilterSet();
@ -1580,9 +1580,9 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
/**
* Map our cloud state values into what Amazon defines.
* Where are the values that can be returned by our cloud api defined?
*
*
* @param cloudState
* @return
*/
public static int toAmazonCode(String cloudState) {
if (null == cloudState)
@ -1890,7 +1890,7 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
DetachVolumeResponse response = new DetachVolumeResponse();
DetachVolumeResponseType param1 = new DetachVolumeResponseType();
Calendar cal = Calendar.getInstance();
cal.set(1970, 1, 1); // return one value, Unix Epoch, what else can we return?
param1.setVolumeId(engineResponse.getId().toString());
param1.setInstanceId((null == engineResponse.getInstanceId() ? "" : engineResponse.getInstanceId().toString()));
@ -1918,7 +1918,7 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface {
if (null != engineResponse.getState())
param1.setStatus(engineResponse.getState());
else
param1.setStatus(""); // TODO - throw a SOAP fault
// -> CloudStack seems to have issues with timestamp formats so just in case
Calendar cal = EC2RestAuth.parseDateString(engineResponse.getCreated());

View File

@ -212,7 +212,7 @@ public class S3RestServlet extends HttpServlet {
/**
* Provide an easy way to determine the version of the implementation running.
*
*
* This is an unauthenticated REST call.
*/
private void cloudS3Version(HttpServletRequest request, HttpServletResponse response) {
@ -223,20 +223,20 @@ public class S3RestServlet extends HttpServlet {
/**
* This request registers the user Cloud.com account holder to the S3 service. The Cloud.com
* account holder saves his API access and secret keys with the S3 service so that
* each REST call he makes can be verified as originating from him. The given API access
* and secret key are saved into the "usercredentials" database table.
*
* This is an unauthenticated REST call. The only required parameters are 'accesskey' and
* 'secretkey'.
*
* To verify that the given keys represent an existing account they are used to execute the
* Cloud.com's listAccounts API function. If the keys do not represent a valid account the
* listAccounts function will fail.
*
* A user can call this REST function any number of times; on each call the Cloud.com secret
* key simply overwrites any previously stored value.
*
* As with all REST calls HTTPS should be used to ensure their security.
*/
@DB
@ -274,7 +274,7 @@ public class S3RestServlet extends HttpServlet {
user = ucDao.persist(user);
txn.commit();
txn.close();
//credentialDao.setUserKeys( accessKey[0], secretKey[0] );
} catch (Exception e) {
logger.error("SetUserKeys " + e.getMessage(), e);
@ -392,7 +392,7 @@ public class S3RestServlet extends HttpServlet {
return new S3BucketAction(); // for ListAllMyBuckets
}
// Because there is a leading / at position 0 of pathInfo, now subtract this to process the remainder
pathInfo = pathInfo.substring(1);
if (ServiceProvider.getInstance().getUseSubDomain())
@ -476,7 +476,7 @@ public class S3RestServlet extends HttpServlet {
// The purpose of the plain POST operation is to add an object to a specified bucket using HTML forms.
private S3ObjectAction routePlainPostRequest(HttpServletRequest request) {
// TODO - Remove the unnecessary fields below
// Obtain the mandatory fields from the HTML form or otherwise fail with a logger message
String keyString = request.getParameter("key");
String metatagString = request.getParameter("x-amz-meta-tag");
@ -487,7 +487,7 @@ public class S3RestServlet extends HttpServlet {
String accessKeyString = request.getParameter("AWSAccessKeyId");
String signatureString = request.getParameter("Signature");
// Obtain the discretionary fields from the HTML form
String policyKeyString = request.getParameter("Policy");
String metauuidString = request.getParameter("x-amz-meta-uuid");
String redirectString = request.getParameter("redirect");
@ -519,7 +519,7 @@ public class S3RestServlet extends HttpServlet {
* A DIME request is really a SOAP request that we are dealing with, and so its
* authentication is the SOAP authentication approach. Since Axis2 does not handle
* DIME messages we deal with them here.
*
*
* @param request
* @param response
*/
@ -606,10 +606,10 @@ public class S3RestServlet extends HttpServlet {
* Convert the SOAP XML we extract from the DIME message into our local object.
* Here Axis2 is not parsing the SOAP for us. I tried to use the Amazon PutObject
* parser but it keep throwing exceptions.
*
*
* @param putObjectInline
* @return
* @throws Exception
*/
public static S3PutObjectRequest toEnginePutObjectRequest(InputStream is) throws Exception {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@ -770,7 +770,7 @@ public class S3RestServlet extends HttpServlet {
/**
* Looking for the value of a specific child of the given parent node.
*
*
* @param parent
* @param childName
* @return

View File

@ -296,10 +296,10 @@ public class S3BucketAction implements ServletAction {
return nValue.getNodeValue();
}
/**
* In order to support a policy on the "s3:CreateBucket" action we must be able to set and get
* policies before a bucket is actually created.
*
*
* @param request
* @param response
* @throws IOException
@ -321,7 +321,7 @@ public class S3BucketAction implements ServletAction {
}
}
// [B] "The bucket owner by default has permissions to attach bucket policies to their buckets using PUT Bucket policy."
// -> the bucket owner may want to restrict the IP address from where this can be executed
String client = UserContext.current().getCanonicalUserId();
S3PolicyContext context = new S3PolicyContext(PolicyActions.PutBucketPolicy, bucketName);
@ -563,7 +563,7 @@ public class S3BucketAction implements ServletAction {
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
response.setContentType("application/xml");
// The content-type literally should be "application/xml; charset=UTF-8"
// but any compliant JVM supplies utf-8 by default;
MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter("GetBucketAccessControlPolicyResult", outputStream);
@ -907,11 +907,11 @@ public class S3BucketAction implements ServletAction {
}
/**
* Multipart upload is a complex operation with all the options defined by Amazon. Part of the functionality is
* provided by the query done against the database. The CommonPrefixes functionality is done the same way
* as done in the listBucketContents function (i.e., by iterating through the list to decide in which output
* element each key is placed).
*
*
* @param request
* @param response
* @throws IOException

View File

@ -209,7 +209,7 @@ public class S3ObjectAction implements ServletAction {
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
response.setContentType("application/xml");
// The content-type literally should be "application/xml; charset=UTF-8"
// but any compliant JVM supplies utf-8 by default;
MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter("CopyObjectResult", outputStream);
@ -248,7 +248,7 @@ public class S3ObjectAction implements ServletAction {
OutputStream outputStream = response.getOutputStream();
response.setStatus(200);
response.setContentType("application/xml");
// The content-type literally should be "application/xml; charset=UTF-8"
// but any compliant JVM supplies utf-8 by default;
MTOMAwareResultStreamWriter resultWriter = new MTOMAwareResultStreamWriter("GetObjectAccessControlPolicyResult", outputStream);
@ -377,7 +377,7 @@ public class S3ObjectAction implements ServletAction {
}
/**
* Once versioning is turned on, deleting an object requires specifying a version
* parameter. A deletion marker is set once versioning is turned on in a bucket.
*/
private void executeDeleteObject(HttpServletRequest request, HttpServletResponse response) throws IOException {
@ -468,7 +468,7 @@ public class S3ObjectAction implements ServletAction {
S3GetObjectResponse engineResponse = ServiceProvider.getInstance().getS3Engine().handleRequest(engineRequest);
response.setStatus(engineResponse.getResultCode());
//bucket lookup for a non-existent key
if (engineResponse.getResultCode() == 404)
return;
@ -501,7 +501,7 @@ public class S3ObjectAction implements ServletAction {
// There is a problem with POST since the 'Signature' and 'AccessKey' parameters are not
// determined until we hit this function (i.e., they are encoded in the body of the message
// they are not HTTP request headers). All the values we used to get in the request headers
// are not encoded in the request body.
//
// add ETag header computed as Base64 MD5 whenever object is uploaded or updated
@ -573,7 +573,7 @@ public class S3ObjectAction implements ServletAction {
}
state = 1;
} else if (1 == state && 0 == oneLine.length()) {
// -> data of a body part starts here
state = 2;
} else if (1 == state) {
// -> the name of the 'name-value' pair is encoded in the Content-Disposition header
@ -622,7 +622,7 @@ public class S3ObjectAction implements ServletAction {
/**
* Save all the information about the multipart upload request in the database so once it is finished
* (in the future) we can create the real S3 object.
*
*
* @throws IOException
*/
private void executeInitiateMultipartUpload(HttpServletRequest request, HttpServletResponse response) throws IOException {
@ -724,7 +724,7 @@ public class S3ObjectAction implements ServletAction {
/**
* This function is required to both parsing XML on the request and return XML as part of its result.
*
*
* @param request
* @param response
* @throws IOException
@ -1011,7 +1011,7 @@ public class S3ObjectAction implements ServletAction {
* as defined in rfc2616. Any characters that could cause an invalid HTTP header will
* prevent that meta data from being returned via the REST call (as is defined in the Amazon
* spec). These characters can be defined if using the SOAP API as well as the REST API.
*
*
* @param engineResponse
* @param response
*/
@ -1034,7 +1034,7 @@ public class S3ObjectAction implements ServletAction {
}
}
// -> cannot have HTTP separators in an HTTP header
if (-1 != name.indexOf('(') || -1 != name.indexOf(')') || -1 != name.indexOf('@') || -1 != name.indexOf('<') || -1 != name.indexOf('>') || -1 != name.indexOf('\"') ||
-1 != name.indexOf('[') || -1 != name.indexOf(']') || -1 != name.indexOf('=') || -1 != name.indexOf(',') || -1 != name.indexOf(';') || -1 != name.indexOf(':') ||
-1 != name.indexOf('\\') || -1 != name.indexOf('/') || -1 != name.indexOf(' ') || -1 != name.indexOf('{') || -1 != name.indexOf('}') || -1 != name.indexOf('?') ||
@ -1054,7 +1054,7 @@ public class S3ObjectAction implements ServletAction {
/**
* Extract the name and value of all meta data so it can be written with the
* object that is being 'PUT'.
*
*
* @param request
* @return
*/
@ -1085,13 +1085,13 @@ public class S3ObjectAction implements ServletAction {
}
/**
* Parameters on the query string may or may not be name-value pairs.
* For example: "?acl&versionId=2", notice that "acl" has no value other
* than it is present.
*
*
* @param queryString - from a URL to locate the 'find' parameter
* @param find - name string to return first found
* @return the value matching the found name
*/
private String returnParameter(String queryString, String find) {
int offset = queryString.indexOf(find);
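For reference, a self-contained sketch of the lookup this javadoc describes, treating a value-less parameter such as "acl" as present with an empty value; the helper below is an illustration, not the servlet's private method.

final class QueryStringLookupSketch {
    // Returns the value of 'find' in queryString, "" for a value-less flag, or null if absent.
    static String returnParameter(String queryString, String find) {
        if (queryString == null)
            return null;
        for (String part : queryString.split("&")) {
            int eq = part.indexOf('=');
            String name = (eq < 0) ? part : part.substring(0, eq);
            if (name.equals(find))
                return (eq < 0) ? "" : part.substring(eq + 1);
        }
        return null;
    }
}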
@ -1128,11 +1128,11 @@ public class S3ObjectAction implements ServletAction {
* The Complete Multipart Upload function pass in the request body a list of
* all uploaded body parts. It is required that we verify that list matches
* what was uploaded.
*
*
* @param is
* @param parts
* @return error code, and error string
* @throws ParserConfigurationException, IOException, SAXException
*/
private OrderedPair<Integer, String> verifyParts(InputStream is, S3MultipartPart[] parts) {
try {

View File

@ -107,9 +107,9 @@ import com.cloud.bridge.service.core.s3.S3SetBucketAccessControlPolicyRequest;
import com.cloud.bridge.service.core.s3.S3SetObjectAccessControlPolicyRequest;
import com.cloud.bridge.service.exception.InternalErrorException;
/*
* Implementation of S3 service requests as operations defined by the interface, com.amazon.s3.AmazonS3SkeletonInterface.
* The operations dispatched from this class are of the form of SOAP operations which define business logic to be executed by the request.
* The methods required for S3 services in accordance with the skeleton are either implementations of the following
* getBucketLoggingStatus
* copyObject
@ -130,7 +130,7 @@ import com.cloud.bridge.service.exception.InternalErrorException;
* or throw and Axis2 fault otherwise.
* These skeleton methods can be used as the implementation of services to satisfy SOAP calls, but also to provide the output
* to be serialized by the AXIOM XML processor.
*
*
* */
public class S3SerializableServiceImplementation implements AmazonS3SkeletonInterface {

View File

@ -117,14 +117,14 @@ public class ServiceProvider extends ManagerBase {
}
public long getManagementHostId() {
// we want to limit mhost within its own session, id of the value will be returned
long mhostId = 0;
if (mhost != null)
mhostId = mhost.getId() != null ? mhost.getId().longValue() : 0L;
return mhostId;
}
/**
* We return a 2-tuple to distinguish between two cases:
* (1) there is no entry in the map for bucketName, and (2) there is a null entry
* in the map for bucketName. In case 2, the database was inspected for the
@ -145,7 +145,7 @@ public class ServiceProvider extends ManagerBase {
/**
* The policy parameter can be set to null, which means that there is no policy
* for the bucket so a database lookup is not necessary.
*
*
* @param bucketName
* @param policy
*/

View File

@ -21,7 +21,7 @@ public class EC2CreateKeyPair {
private String keyName;
/**
*
*
*/
public EC2CreateKeyPair() {
// TODO Auto-generated constructor stub

View File

@ -35,7 +35,7 @@ public class EC2CreateVolume {
/**
* Define the size of the volume to create
*
*
* @param size - valid values are [1 .. 1024] and represent gigBytes
*/
public void setSize(String size) {

View File

@ -22,7 +22,7 @@ public class EC2DeleteKeyPair {
private String keyName;
/**
*
*
*/
public EC2DeleteKeyPair() {
// TODO Auto-generated constructor stub

View File

@ -25,7 +25,7 @@ public class EC2DescribeKeyPairsResponse {
protected List<EC2SSHKeyPair> keyPairSet = new ArrayList<EC2SSHKeyPair>();
/**
*
*
*/
public EC2DescribeKeyPairsResponse() {
}

View File

@ -21,7 +21,7 @@ public class EC2DisassociateAddress {
private String publicIp;
/**
*
*
*/
public EC2DisassociateAddress() {
setPublicIp(null);

View File

@ -114,7 +114,7 @@ public class EC2Engine extends ManagerBase {
* Which management server do we talk to?
* Load a mapping from Amazon values for 'instanceType' to cloud defined
* diskOfferingId and serviceOfferingId.
*
*
* @throws IOException
*/
private void loadConfigValues() throws IOException {
@ -179,7 +179,7 @@ public class EC2Engine extends ManagerBase {
/**
* Helper function to manage the api connection
*
*
* @return
*/
private CloudStackApi getApi() {
@ -195,7 +195,7 @@ public class EC2Engine extends ManagerBase {
/**
* Verifies account can access CloudStack
*
*
* @param accessKey
* @param secretKey
* @return
@ -240,7 +240,7 @@ public class EC2Engine extends ManagerBase {
/**
* Creates a security group
*
*
* @param groupName
* @param groupDesc
* @return
@ -261,7 +261,7 @@ public class EC2Engine extends ManagerBase {
/**
* Deletes a security group
*
*
* @param groupName
* @return
*/
@ -281,7 +281,7 @@ public class EC2Engine extends ManagerBase {
/**
* returns a list of security groups
*
*
* @param request
* @return
*/
@ -303,7 +303,7 @@ public class EC2Engine extends ManagerBase {
/**
* CloudStack supports revoke only by using the ruleid of the ingress rule.
* We list all security groups and find the matching group and use the first ruleId we find.
*
*
* @param request
* @return
*/
@ -345,7 +345,7 @@ public class EC2Engine extends ManagerBase {
/**
* authorizeSecurityGroup
*
*
* @param request - ip permission parameters
*/
public boolean authorizeSecurityGroup(EC2AuthorizeRevokeSecurityGroup request) {
@ -388,7 +388,7 @@ public class EC2Engine extends ManagerBase {
/**
* Does the permission from the request (left) match the permission from the cloudStack query (right).
* If the cloudStack rule matches then we return its ruleId.
*
*
* @param permLeft
* @param permRight
* @return ruleId of the cloudstack rule
@ -453,7 +453,7 @@ public class EC2Engine extends ManagerBase {
/**
* Returns a list of all snapshots
*
*
* @param request
* @return
*/
@ -500,7 +500,7 @@ public class EC2Engine extends ManagerBase {
/**
* Creates a snapshot
*
*
* @param volumeId
* @return
*/
@ -535,7 +535,7 @@ public class EC2Engine extends ManagerBase {
/**
* Deletes a snapshot
*
*
* @param snapshotId
* @return
*/
@ -557,7 +557,7 @@ public class EC2Engine extends ManagerBase {
/**
* Modify an existing template
*
*
* @param request
* @return
*/
@ -642,7 +642,7 @@ public class EC2Engine extends ManagerBase {
// handlers
/**
* return password data from the instance
*
*
* @param instanceId
* @return
*/
@ -663,7 +663,7 @@ public class EC2Engine extends ManagerBase {
/**
* Lists SSH KeyPairs on the system
*
*
* @param request
* @return
*/
@ -683,7 +683,7 @@ public class EC2Engine extends ManagerBase {
/**
* Delete SSHKeyPair
*
*
* @param request
* @return
*/
@ -704,7 +704,7 @@ public class EC2Engine extends ManagerBase {
/**
* Create SSHKeyPair
*
*
* @param request
* @return
*/
@ -728,7 +728,7 @@ public class EC2Engine extends ManagerBase {
/**
* Import an existing SSH KeyPair
*
*
* @param request
* @return
*/
@ -751,7 +751,7 @@ public class EC2Engine extends ManagerBase {
/**
* list ip addresses that have been allocated
*
*
* @param request
* @return
*/
@ -771,7 +771,7 @@ public class EC2Engine extends ManagerBase {
/**
* release an IP Address
*
*
* @param request
* @return
*/
@ -794,7 +794,7 @@ public class EC2Engine extends ManagerBase {
/**
* Associate an address with an instance
*
*
* @param request
* @return
*/
@ -825,7 +825,7 @@ public class EC2Engine extends ManagerBase {
/**
* Disassociate an address from an instance
*
*
* @param request
* @return
*/
@ -849,7 +849,7 @@ public class EC2Engine extends ManagerBase {
/**
* Allocate an address
*
*
* @param request
* @return
*/
@ -887,7 +887,7 @@ public class EC2Engine extends ManagerBase {
/**
* List of templates available. We only support the imageSet version of this call or when no search parameters are passed
* which results in asking for all templates.
*
*
* @param request
* @return
*/
@ -921,10 +921,10 @@ public class EC2Engine extends ManagerBase {
* 1) listVolumes&virtualMachineId= -- gets the volumeId
* 2) listVirtualMachines&id= -- gets the templateId
* 3) listTemplates&id= -- gets the osTypeId
*
*
* If we have to start and stop the VM in question then this function is
* going to take a long time to complete.
*
*
* @param request
* @return
*/
@ -985,7 +985,7 @@ public class EC2Engine extends ManagerBase {
/**
* Register a template
*
*
* @param request
* @return
*/
@ -1015,7 +1015,7 @@ public class EC2Engine extends ManagerBase {
* Deregister a template(image)
* Our implementation is different from Amazon in that we do delete the template
* when we deregister it. The cloud API has not deregister call.
*
*
* @param image
* @return
*/
@ -1032,7 +1032,7 @@ public class EC2Engine extends ManagerBase {
/**
* list instances
*
*
* @param request
* @return
*/
@ -1050,7 +1050,7 @@ public class EC2Engine extends ManagerBase {
/**
* list Zones
*
*
* @param request
* @return
*/
@ -1070,7 +1070,7 @@ public class EC2Engine extends ManagerBase {
/**
* list volumes
*
*
* @param request
* @return
*/
@ -1097,7 +1097,7 @@ public class EC2Engine extends ManagerBase {
/**
* Attach a volume to an instance
*
*
* @param request
* @return
*/
@ -1131,7 +1131,7 @@ public class EC2Engine extends ManagerBase {
/**
* Detach a volume from an instance
*
*
* @param request
* @return
*/
@ -1181,7 +1181,7 @@ public class EC2Engine extends ManagerBase {
/**
* Create a volume
*
*
* @param request
* @return
*/
@ -1240,7 +1240,7 @@ public class EC2Engine extends ManagerBase {
/**
* Delete a volume
*
*
* @param request
* @return
*/
@ -1332,7 +1332,7 @@ public class EC2Engine extends ManagerBase {
/**
* Reboot an instance or instances
*
*
* @param request
* @return
*/
@ -1369,7 +1369,7 @@ public class EC2Engine extends ManagerBase {
/**
* Using a template (AMI), launch n instances
*
*
* @param request
* @return
*/
@ -1488,7 +1488,7 @@ public class EC2Engine extends ManagerBase {
/**
* Start an instance or instances
*
*
* @param request
* @return
*/
@ -1528,7 +1528,7 @@ public class EC2Engine extends ManagerBase {
/**
* Stop an instance or instances
*
*
* @param request
* @return
*/
@ -1617,7 +1617,7 @@ public class EC2Engine extends ManagerBase {
* RunInstances includes a min and max count of requested instances to create.
* We have to be able to create the min number for the user or none at all. So
* here we determine what the user has left to create.
*
*
* @return -1 means no limit exists, other positive numbers give max number left that
* the user can create.
*/
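The rule stated above reduces to a small calculation; a sketch under the same convention that -1 means "no limit exists" (the names below are illustrative, not the engine's actual fields).

final class InstanceQuotaSketch {
    // How many more instances may be created; -1 mirrors the "no limit exists" convention above.
    static long remainingAllowance(long accountLimit, long instancesInUse) {
        if (accountLimit < 0)
            return -1;
        return Math.max(0, accountLimit - instancesInUse);
    }

    // RunInstances can proceed only if at least minCount instances can still be created.
    static boolean canSatisfy(long remaining, int minCount) {
        return remaining < 0 || remaining >= minCount;
    }
}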
@ -1655,7 +1655,7 @@ public class EC2Engine extends ManagerBase {
/**
* Performs the cloud API listVirtualMachines one or more times.
*
*
* @param virtualMachineIds - an array of instances we are interested in getting information on
* @param ifs - filter out unwanted instances
*/
@ -1678,7 +1678,7 @@ public class EC2Engine extends ManagerBase {
/**
* Get one or more templates depending on the volumeId parameter.
*
*
* @param volumeId - if interested in one specific volume, null if want to list all volumes
* @param instanceId - if interested in volumes for a specific instance, null if instance is not important
*/
@ -1736,9 +1736,9 @@ public class EC2Engine extends ManagerBase {
* Translate the given zone name into the required zoneId. Query for
* a list of all zones and match the zone name given. Amazon uses zone
* names while the Cloud API often requires the zoneId.
*
*
* @param zoneName - (e.g., 'AH'), if null return the first zone in the available list
*
*
* @return the zoneId that matches the given zone name
*/
private String toZoneId(String zoneName, String domainId) throws Exception {
@ -1766,7 +1766,7 @@ public class EC2Engine extends ManagerBase {
/**
* Convert from the Amazon instanceType strings to Cloud serviceOfferingId
*
*
*/
private CloudStackServiceOfferingVO getCSServiceOfferingId(String instanceType) throws Exception {
@ -1783,7 +1783,7 @@ public class EC2Engine extends ManagerBase {
/**
* Convert from the Cloud serviceOfferingId to the Amazon instanceType strings based
* on the loaded map.
*
*
* @param serviceOfferingId
* @return A valid value for the Amazon defined instanceType
* @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException
@ -1806,7 +1806,7 @@ public class EC2Engine extends ManagerBase {
/**
* Match the value in the 'description' field of the listOsTypes response to get
* the osTypeId.
*
*
* @param osTypeName
* @return the Cloud.com API osTypeId
*/
@ -1828,9 +1828,9 @@ public class EC2Engine extends ManagerBase {
/**
* More than one place we need to access the defined list of zones. If given a specific
* list of zones of interest, then only values from those zones are returned.
*
*
* @param interestedZones - can be null, should be a subset of all zones
*
*
* @return EC2DescribeAvailabilityZonesResponse
*/
private EC2DescribeAvailabilityZonesResponse listZones(String[] interestedZones, String domainId) throws Exception {
@ -1865,11 +1865,11 @@ public class EC2Engine extends ManagerBase {
/**
* Get information on one or more virtual machines depending on the instanceId parameter.
*
*
* @param instanceId - if null then return information on all existing instances, otherwise
* just return information on the matching instance.
* @param instances - a container object to fill with one or more EC2Instance objects
*
*
* @return the same object passed in as the "instances" parameter modified with one or more
* EC2Instance objects loaded.
*/
@ -1938,11 +1938,11 @@ public class EC2Engine extends ManagerBase {
/**
* Get one or more templates depending on the templateId parameter.
*
*
* @param templateId - if null then return information on all existing templates, otherwise
* just return information on the matching template.
* @param images - a container object to fill with one or more EC2Image objects
*
*
* @return the same object passed in as the "images" parameter modified with one or more
* EC2Image objects loaded.
*/
@ -2022,7 +2022,7 @@ public class EC2Engine extends ManagerBase {
/**
* List security groups
*
*
* @param interestedGroups
* @return
* @throws EC2ServiceException
@ -2199,7 +2199,7 @@ public class EC2Engine extends ManagerBase {
/**
* Convert ingress rule to EC2IpPermission records
*
*
* @param response
* @param group
* @return
@ -2234,7 +2234,7 @@ public class EC2Engine extends ManagerBase {
/**
* Find the current account based on the SecretKey
*
*
* @return
* @throws Exception
*/
@ -2265,7 +2265,7 @@ public class EC2Engine extends ManagerBase {
/**
* List networkOfferings by zone with securityGroup enabled
*
*
* @param zoneId
* @return
* @throws Exception
@ -2283,7 +2283,7 @@ public class EC2Engine extends ManagerBase {
/**
* Create a network
*
*
* @param zoneId
* @param offering
* @param owner
@ -2297,7 +2297,7 @@ public class EC2Engine extends ManagerBase {
/**
* List of networks without securityGroup enabled by zone
*
*
* @param zoneId
* @return
* @throws Exception
@ -2349,7 +2349,7 @@ public class EC2Engine extends ManagerBase {
/**
* Find a suitable network to use for deployVM
*
*
* @param zone
* @return
* @throws Exception
@ -2403,7 +2403,7 @@ public class EC2Engine extends ManagerBase {
/**
* Windows has its own device strings.
*
*
* @param hypervisor
* @param deviceId
* @return
@ -2462,7 +2462,7 @@ public class EC2Engine extends ManagerBase {
/**
* Translate the device name string into a Cloud Stack deviceId.
* deviceId 3 is reserved for CDROM and 0 for the ROOT disk
*
*
* @param device string
* @return deviceId value
*/
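A hedged sketch of a translation consistent with the reservations noted above (0 for the ROOT disk, 3 for the CDROM); the suffix-letter mapping itself is an illustrative assumption, not the engine's actual table.

final class DeviceIdSketch {
    // Map a Linux-style device name (e.g. "/dev/sdb") to a data-disk id, skipping the reserved slots.
    static int toDeviceId(String device) {
        char suffix = device.charAt(device.length() - 1);   // 'b' in "/dev/sdb"
        int id = suffix - 'a';                              // 'b' -> 1, 'c' -> 2, ...
        if (id <= 0)
            throw new IllegalArgumentException("device id 0 is reserved for the ROOT disk");
        if (id >= 3)
            id++;                                           // step over 3, reserved for the CDROM
        return id;
    }
}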
@ -2524,7 +2524,7 @@ public class EC2Engine extends ManagerBase {
/**
* Map CloudStack instance state to Amazon state strings
*
*
* @param state
* @return
*/
@ -2606,7 +2606,7 @@ public class EC2Engine extends ManagerBase {
/**
* Stop an instance
* Wait until one specific VM has stopped
*
*
* @param instanceId
* @return
* @throws Exception
@ -2625,7 +2625,7 @@ public class EC2Engine extends ManagerBase {
/**
* Start an existing stopped instance(VM)
*
*
* @param instanceId
* @return
* @throws Exception
@ -2644,7 +2644,7 @@ public class EC2Engine extends ManagerBase {
/**
* Cloud Stack API takes a comma separated list as a parameter.
*
*
* @throws UnsupportedEncodingException
*/
private String constructList(String[] elements) throws UnsupportedEncodingException {

View File

@ -40,9 +40,9 @@ public class EC2Filter {
}
/**
* From Amazon:
* "You can use wildcards with the filter values: * matches zero or more characters, and ? matches
* exactly one character. You can escape special characters using a backslash before the character. For
* example, a value of \*amazon\?\\ searches for the literal string *amazon?\. "
*/
public void addValueEncoded(String param) {
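A self-contained sketch of the wildcard semantics quoted above ('*' matches zero or more characters, '?' exactly one, and a backslash escapes the next character); translating to java.util.regex here is an illustration, not this class's actual implementation.

import java.util.regex.Pattern;

final class FilterWildcardSketch {
    // Convert an EC2 filter value with * and ? wildcards into an equivalent regex.
    static Pattern toPattern(String filterValue) {
        StringBuilder regex = new StringBuilder();
        for (int i = 0; i < filterValue.length(); i++) {
            char c = filterValue.charAt(i);
            if (c == '\\' && i + 1 < filterValue.length()) {
                regex.append(Pattern.quote(String.valueOf(filterValue.charAt(++i))));  // escaped literal
            } else if (c == '*') {
                regex.append(".*");
            } else if (c == '?') {
                regex.append('.');
            } else {
                regex.append(Pattern.quote(String.valueOf(c)));
            }
        }
        return Pattern.compile(regex.toString());
    }
}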

View File

@ -66,10 +66,10 @@ public class EC2GroupFilterSet {
/**
* For a filter to match a security group just one of its values has to match the group.
* For a security group to be included in the response it must pass all the defined filters.
*
* @param sampleList - list of security groups to test against the defined filters
* @return EC2DescribeSecurityGroupsResponse
* @throws ParseException
*/
public EC2DescribeSecurityGroupsResponse evaluate(EC2DescribeSecurityGroupsResponse sampleList) throws ParseException {
EC2DescribeSecurityGroupsResponse resultList = new EC2DescribeSecurityGroupsResponse();

View File

@ -23,7 +23,7 @@ public class EC2ImportKeyPair {
private String publicKeyMaterial;
/**
*
*
*/
public EC2ImportKeyPair() {
// TODO Auto-generated constructor stub

View File

@ -70,10 +70,10 @@ public class EC2InstanceFilterSet {
/**
* For a filter to match an instance just one of its values has to match the instance.
* For an instance to be included in the instance response it must pass all the defined filters.
*
* @param sampleList - list of instances to test against the defined filters
* @return EC2DescribeInstancesResponse
* @throws ParseException
*/
public EC2DescribeInstancesResponse evaluate(EC2DescribeInstancesResponse sampleList) throws ParseException {
EC2DescribeInstancesResponse resultList = new EC2DescribeInstancesResponse();

View File

@ -28,7 +28,7 @@ public class EC2IpPermission {
private String icmpType;
private Integer fromPort;
private Integer toPort;
private List<EC2SecurityGroup> userSet = new ArrayList<EC2SecurityGroup>(); // a list of groups identifying users
private List<String> rangeSet = new ArrayList<String>(); // a list of strings identifying CIDR
public EC2IpPermission() {

View File

@ -65,7 +65,7 @@ public class EC2RegisterImage {
/**
* We redefine the expected format of this field to be:
* "format:zonename:ostypename:hypervisor"
*
*
* @param param
*/
public void setArchitecture(String param) {

View File

@ -69,10 +69,10 @@ public class EC2SnapshotFilterSet {
/**
* For a filter to match a snapshot just one of its values has to match the snapshot.
* For a snapshot to be included in the snapshot response it must pass all the defined filters.
*
* @param sampleList - list of snapshots to test against the defined filters
* @return EC2DescribeSnapshotsResponse
* @throws ParseException
*/
public EC2DescribeSnapshotsResponse evaluate(EC2DescribeSnapshotsResponse sampleList) throws ParseException {
EC2DescribeSnapshotsResponse resultList = new EC2DescribeSnapshotsResponse();

View File

@ -71,10 +71,10 @@ public class EC2VolumeFilterSet {
/**
* For a filter to match a volume just one of its values has to match the volume.
* For a volume to be included in the volume response it must pass all the defined filters.
*
*
* @param sampleList - list of volumes to test against the defined filters
* @return EC2DescribeVolumesResponse
* @throws ParseException
*/
public EC2DescribeVolumesResponse evaluate(EC2DescribeVolumesResponse sampleList) throws ParseException {
EC2DescribeVolumesResponse resultList = new EC2DescribeVolumesResponse();

View File

@ -27,7 +27,7 @@ public class S3BucketPolicy {
/**
* 'NORESULT' is returned when no applicable statement can be found to evaluate
* for the S3 access request. If no evaluated statement results to true then the
* default deny result is returned (allow ACL definitions to override it).
*/
public enum PolicyAccess {
ALLOW, DEFAULT_DENY, DENY
@ -68,13 +68,13 @@ public class S3BucketPolicy {
/**
* This function evaluates all applicable policy statements. Following the "evaluation logic"
* as defined by Amazon the type of access derived from the policy is returned.
*
* @param context - parameters from either the REST or SOAP request
* @param objectToAccess - key to the S3 object in the bucket associated by this policy, should be
* null if access is just to the bucket.
* @param userAccount - the user performing the access request
* @return PolicyAccess type
* @throws Exception
*/
public PolicyAccess eval(S3PolicyContext context, String userAccount) throws Exception {
PolicyAccess result = PolicyAccess.DEFAULT_DENY;
@ -83,7 +83,7 @@ public class S3BucketPolicy {
while (itr.hasNext()) {
S3PolicyStatement oneStatement = itr.next();
if (statementIsRelevant(oneStatement, context.getKeyName(), userAccount, context.getRequestedAction())) {
// -> a missing condition block means the statement is true
S3PolicyConditionBlock block = oneStatement.getConditionBlock();
if (null == block || block.isTrue(context, oneStatement.getSid())) {
result = oneStatement.getEffect();
@ -118,7 +118,7 @@ public class S3BucketPolicy {
/**
* Does the Policy Statement have anything to do with the requested access by the user?
*
*
* @return true - statement is relevant, false it is not
*/
private boolean statementIsRelevant(S3PolicyStatement oneStatement, String objectToAccess, String userAccount, PolicyActions operationRequested) {

View File

@ -75,7 +75,7 @@ public class S3ConditionalHeaders {
/**
* Takes the header value from HTTP "If-Match", for example is:
* If-Match: "xyzzy", "r2d2xxxx", "c3piozzzz"
*
*
* @param ifMatch
*/
public void setMatch(String ifMatch) {
@ -119,7 +119,7 @@ public class S3ConditionalHeaders {
/**
* Has the object been modified since the client has last checked?
*
*
* @param lastModified
* @return a negative value means that the object has not been modified since
* a positive value means that this test should be ignored.
@ -136,7 +136,7 @@ public class S3ConditionalHeaders {
/**
* Has the object been modified since the unmodified date?
*
*
* @param lastModified
* @return a negative value means that the object has been modified since
* a positive value means that this test should be ignored.
@ -154,7 +154,7 @@ public class S3ConditionalHeaders {
/**
* Does the object's contents (its MD5 signature) match what the client thinks
* it is?
*
*
* @param ETag - an MD5 signature of the content of the data being stored in S3
* @return a negative value means that the test has failed,
* a positive value means that the test succeeded or could not be done (so ignore it)
@ -173,7 +173,7 @@ public class S3ConditionalHeaders {
/**
* None of the given ETags in the "If-None-Match" can match the ETag parameter for this
* function to pass.
*
*
* @param ETag - an MD5 signature of the content of the data being stored in S3
* @return a negative value means that the test has failed,
* a positive value means that the test succeeded or could not be done (so ignore it)
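A hedged sketch of the ETag comparisons described for If-Match and If-None-Match, assuming the comma-separated quoted form shown in the earlier example; it is not the class's actual parsing code.

final class ConditionalEtagSketch {
    // True when any ETag listed in an If-Match header equals the object's ETag (or the header is "*").
    static boolean anyMatch(String headerValue, String objectETag) {
        for (String candidate : headerValue.split(",")) {
            String tag = candidate.trim().replace("\"", "");
            if (tag.equals("*") || tag.equals(objectETag))
                return true;
        }
        return false;
    }

    // If-None-Match passes only when none of the listed ETags match.
    static boolean noneMatch(String headerValue, String objectETag) {
        return !anyMatch(headerValue, objectETag);
    }
}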

View File

@ -19,7 +19,7 @@ package com.cloud.bridge.service.core.s3;
import java.util.Calendar;
public class S3CopyObjectResponse extends S3Response {
// -> 2 versions are important here:
// (1) copyVersion: the version of the object's copy
// (2) putVersion: the version assigned to the copy after it is put
protected String copyVersion;

View File

@ -137,7 +137,7 @@ public class S3Engine {
/**
* Return a S3CopyObjectResponse which represents an object being copied from source
* to destination bucket.
* Called from S3ObjectAction when copying an object.
* This can be treated as first a GET followed by a PUT of the object the user wants to copy.
*/
@ -188,7 +188,7 @@ public class S3Engine {
else
putRequest.setMetaEntries(request.getMetaEntries());
putRequest.setAcl(request.getAcl()); // -> if via a SOAP call
putRequest.setCannedAccess(request.getCannedAccess()); // -> if via a REST call
putRequest.setContentLength(originalObject.getContentLength());
putRequest.setData(originalObject.getData());
@ -250,7 +250,7 @@ public class S3Engine {
}
/**
* Return a S3Response which represents the effect of an object being deleted from its bucket.
* Called from S3BucketAction when deleting an object.
*/
@ -291,9 +291,9 @@ public class S3Engine {
bucketAdapter.deleteContainer(host_storagelocation_pair.getSecond(), request.getBucketName());
// Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl, SMeta and policy objects.
// To delete SMeta & SAcl objects:
// (1) Get all the objects in the bucket,
// (2) then all the items in each object,
// (3) then all meta & acl data for each item
Set<SObjectVO> objectsInBucket = sbucket.getObjectsInBucket();
Iterator<SObjectVO> it = objectsInBucket.iterator();
@ -331,7 +331,7 @@ public class S3Engine {
}
/**
* Return a S3ListBucketResponse which represents a list of up to 1000 objects contained in the bucket.
* Called from S3BucketAction for GETting objects and for GETting object versions.
*/
@ -362,7 +362,7 @@ public class S3Engine {
context.setEvalParam(ConditionKeys.Delimiter, delimiter);
verifyAccess(context, "SBucket", sbucket.getId(), SAcl.PERMISSION_READ);
// When executing the query, request one more item so that we know how to set the isTruncated flag
List<SObjectVO> l = null;
if (includeVersions)
@ -388,7 +388,7 @@ public class S3Engine {
}
/**
* Return a S3ListAllMyBucketResponse which represents a list of all buckets owned by the requester.
* Called from S3BucketAction for GETting all buckets.
* To check on bucket policies defined we have to (look for and) evaluate the policy on each
* bucket the user owns.
@ -535,7 +535,7 @@ public class S3Engine {
/**
* The initiator must have permission to write to the bucket in question in order to initiate
* a multipart upload. Also check to make sure the special folder used to store parts of
* a multipart exists for this bucket.
* Called from S3ObjectAction during many stages of multipart upload.
*/
@ -573,7 +573,7 @@ public class S3Engine {
}
/**
* Save the object fragment in a special (i.e., hidden) directory inside the same mount point as
* the bucket location that the final object will be stored in.
* Called from S3ObjectAction during many stages of multipart upload.
* @param request
@ -632,13 +632,13 @@ public class S3Engine {
}
/**
* Create the real object represented by all the parts of the multipart upload.
* Called from S3ObjectAction at completion of multipart upload.
* @param httpResp - Servlet response handle to return the headers of the response (including version header)
* @param request - Normal parameters needed to create a new object (including metadata)
* @param parts - List of files that make up the multipart
* @param outputStream - Response output stream
* N.B. - This method can be long-lasting
* We are required to keep the connection alive by returning whitespace characters back periodically.
*/
@ -698,7 +698,7 @@ public class S3Engine {
}
/**
* Return a S3PutObjectInlineResponse which represents an object being created into a bucket
* Called from S3ObjectAction when PUTting or POSTing an object.
*/
@DB
@ -759,7 +759,7 @@ public class S3Engine {
}
/**
* Return a S3PutObjectResponse which represents an object being created into a bucket
* Called from S3RestServlet when processing a DIME request.
*/
@ -775,7 +775,7 @@ public class S3Engine {
if (bucket == null)
throw new NoSuchObjectException("Bucket " + bucketName + " does not exist");
// Is the caller allowed to write the object?
// The allocObjectItem checks for the bucket policy PutObject permissions
OrderedPair<SObjectVO, SObjectItemVO> object_objectitem_pair = allocObjectItem(bucket, key, meta, acl, null);
OrderedPair<SHostVO, String> host_storagelocation_pair = getBucketStorageHost(bucket);
@ -818,8 +818,8 @@ public class S3Engine {
}
/**
* The ACL of an object is set at the object version level. By default, PUT sets the ACL of the latest
* version of an object. To set the ACL of a different version, use the versionId subresource.
* Called from S3ObjectAction to PUT an object's ACL.
*/
@ -884,7 +884,7 @@ public class S3Engine {
}
/**
* By default, GET returns ACL information about the latest version of an object. To return ACL
* information about a different version, use the versionId subresource
* Called from S3ObjectAction to get an object's ACL.
*/
@ -1028,8 +1028,8 @@ public class S3Engine {
return response;
}
// [D] Return the contents of the object inline
// -> extract the meta data that corresponds to the specific versioned item
List<SMetaVO> itemMetaData = metaDao.getByTarget("SObjectItem", item.getId());
if (null != itemMetaData) {
@ -1149,7 +1149,7 @@ public class S3Engine {
} else {
// If there is no item with a null version then we are done
if (null == item.getVersion()) {
// Otherwise remove the entire object
// Cascade-deleting can delete related SObject/SObjectItem objects, but not SAcl and SMeta objects.
storedPath = item.getStoredPath();
deleteMetaData(item.getId());
@ -1230,11 +1230,11 @@ public class S3Engine {
}
/**
* The 'versionIdMarker' parameter only makes sense if enableVersion is true.
* versionIdMarker is the starting point to return information back. So for example if an
* object has versions 1,2,3,4,5 and the versionIdMarker is '3', then 3,4,5 will be returned
* by this function. If the versionIdMarker is null then all versions are returned.
*
* TODO - how does the versionIdMarker work when there is a deletion marker in the object?
*/
private S3ListBucketObjectEntry[]
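The versionIdMarker semantics spelled out above (versions 1,2,3,4,5 with marker '3' yields 3,4,5; a null marker yields everything) can be illustrated with a small stand-alone sketch. The types here are hypothetical; this is not the private method shown in the hunk.

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of versionIdMarker handling; versions are assumed to be in order.
class VersionMarkerSketch {
    static List<String> selectVersions(List<String> allVersions, String versionIdMarker) {
        if (versionIdMarker == null)
            return new ArrayList<String>(allVersions);   // no marker -> return every version

        List<String> result = new ArrayList<String>();
        boolean reached = false;
        for (String version : allVersions) {
            if (!reached && version.equals(versionIdMarker))
                reached = true;                          // the marker version itself is included
            if (reached)
                result.add(version);
        }
        return result;
    }
}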
@ -1357,7 +1357,7 @@ public class S3Engine {
/**
* Locate the folder to hold upload parts at the same mount point as the upload's final bucket
* location. Create the upload folder dynamically.
*
* @param bucketName
*/
private void createUploadFolder(String bucketName) {
@ -1372,7 +1372,7 @@ public class S3Engine {
* The overrideName is used to create a hidden storage bucket (folder) in the same location
* as the given bucketName. This can be used to create a folder for parts of a multipart
* upload for the associated bucket.
*
* @param bucketName
* @param overrideName
* @return
@ -1417,12 +1417,12 @@ public class S3Engine {
}
/**
* If acl is set then the cannedAccessPolicy parameter should be null and is ignored.
* The cannedAccessPolicy parameter is for REST Put requests only where a simple set of ACLs can be
* created with a single header value. Note that we do not currently support "anonymous" un-authenticated
* access in our implementation.
*
* @throws IOException
*/
@SuppressWarnings("deprecation")
public OrderedPair<SObjectVO, SObjectItemVO> allocObjectItem(SBucketVO bucket, String nameKey, S3MetaDataEntry[] meta, S3AccessControlList acl, String cannedAccessPolicy) {
@ -1540,7 +1540,7 @@ public class S3Engine {
* Note that canned policies can be set when the object's contents are set
*/
public void setCannedAccessControls(String cannedAccessPolicy, String target, long objectId, SBucketVO bucket) {
// Find the permission and symbol for the principal corresponding to the requested cannedAccessPolicy
Triple<Integer, Integer, String> permission_permission_symbol_triple = SAclVO.getCannedAccessControls(cannedAccessPolicy, target, bucket.getOwnerCanonicalId());
if (null == permission_permission_symbol_triple.getThird())
setSingleAcl(target, objectId, permission_permission_symbol_triple.getFirst());
@ -1568,8 +1568,8 @@ public class S3Engine {
}
/**
* The Cloud Stack API Access key is used for the Canonical User Id everywhere (buckets and objects).
*
* @param owner - this can be the Cloud Access Key for a bucket owner or one of the
* following special symbols:
* (a) '*' - any principal authenticated user (i.e., any user with a registered Cloud Access Key)
@ -1585,7 +1585,7 @@ public class S3Engine {
defaultGrant.setPermission(permission1);
defaultAcl.addGrant(defaultGrant);
// -> bucket owner
defaultGrant = new S3Grant();
defaultGrant.setGrantee(SAcl.GRANTEE_USER);
defaultGrant.setCanonicalUserID(owner);
@ -1622,13 +1622,13 @@ public class S3Engine {
/**
* To determine access to a bucket or an object in a bucket, evaluate first a defined
* bucket policy and then any defined ACLs.
*
* @param context - all data needed for bucket policies
* @param target - used for ACL evaluation, object identifier
* @param targetId - used for ACL evaluation
* @param requestedPermission - ACL type access requested
*
* @throws ParseException, SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException
*/
public static void verifyAccess(S3PolicyContext context, String target, long targetId, int requestedPermission) {
switch (verifyPolicy(context)) {
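The "policy first, then ACL" order described above can be sketched as a simple two-step check. The PolicyResult values, exceptions, and method shape here are illustrative assumptions, not the actual S3Engine types or the real verifyAccess body.

// Hypothetical sketch of the evaluation order: bucket policy decides first, ACLs are the fallback.
class AccessCheckSketch {
    enum PolicyResult { ALLOW, DENY, NO_APPLICABLE_POLICY }

    static void verifyAccess(PolicyResult policyOutcome, boolean aclGrantsRequestedPermission) {
        switch (policyOutcome) {
        case ALLOW:
            return;                                    // bucket policy explicitly allows the request
        case DENY:
            throw new SecurityException("Denied by bucket policy");
        case NO_APPLICABLE_POLICY:
        default:
            if (!aclGrantsRequestedPermission)         // no decisive policy -> fall back to the ACL check
                throw new SecurityException("Denied by ACL");
        }
    }
}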
@ -1648,12 +1648,12 @@ public class S3Engine {
/**
* This method verifies that the accessing client has the requested
* permission on the object/bucket/Acl represented by the tuple: <target, targetId>
*
* For cases where an ACL is meant for any authenticated user we place a "*" for the
* Canonical User Id. N.B. - "*" is not a legal Cloud (Bridge) Access key.
*
* For cases where an ACL is meant for any anonymous user (or 'AllUsers') we place a "A" for the
* Canonical User Id. N.B. - "A" is not a legal Cloud (Bridge) Access key.
*/
public static void accessAllowed(String target, long targetId, int requestedPermission) {
if (SAcl.PERMISSION_PASS == requestedPermission)
@ -1679,10 +1679,10 @@ public class S3Engine {
/**
* This method assumes that the bucket has been tested to make sure it exists before
* it is called.
*
* @param context
* @return S3BucketPolicy
* @throws SQLException, ClassNotFoundException, IllegalAccessException, InstantiationException, ParseException
*/
public static S3BucketPolicy loadPolicy(S3PolicyContext context) throws InstantiationException, IllegalAccessException, ClassNotFoundException, SQLException, ParseException {
OrderedPair<S3BucketPolicy, Integer> result = ServiceProvider.getInstance().getBucketPolicy(context.getBucketName());
@ -1795,11 +1795,11 @@ public class S3Engine {
/**
* ifRange is true and ifUnmodifiedSince or IfMatch fails then we return the entire object (indicated by
* returning a -1 as the function result.
*
* @param ifCond - conditional get defined by these tests
* @param lastModified - value used on ifModifiedSince or ifUnmodifiedSince
* @param ETag - value used on ifMatch and ifNoneMatch
* @param ifRange - using an if-Range HTTP functionality
* @return -1 means return the entire object with an HTTP 200 (not a subrange)
*/
private int conditionPassed(S3ConditionalHeaders ifCond, Date lastModified, String ETag, boolean ifRange) {
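The -1 convention described above (If-Range whose precondition fails means "ignore the range, send the whole object with HTTP 200") reduces to a small decision, sketched here with the header checks collapsed into booleans; this does not mirror the real S3ConditionalHeaders API.

// Hypothetical sketch: -1 means "return the entire object", anything else is the subrange status.
class ConditionalGetSketch {
    static int conditionPassed(boolean ifUnmodifiedSinceOk, boolean ifMatchOk,
                               boolean ifRange, int subrangeStatus) {
        if (ifRange && (!ifUnmodifiedSinceOk || !ifMatchOk))
            return -1;              // If-Range precondition failed -> entire object, HTTP 200
        return subrangeStatus;      // otherwise proceed with the (possibly partial) response
    }
}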

View File

@ -25,7 +25,7 @@ import com.cloud.bridge.service.exception.UnsupportedException;
/**
* Each relation holds
* a grantee - which is one of SAcl.GRANTEE_USER, SAcl.GRANTEE_ALLUSERS, SAcl.GRANTEE_AUTHENTICATED
* a permission - which is one of SAcl.PERMISSION_PASS, SAcl.PERMISSION_NONE, SAcl.PERMISSION_READ,
* SAcl.PERMISSION_WRITE, SAcl.PERMISSION_READ_ACL, SAcl.PERMISSION_WRITE_ACL, SAcl.PERMISSION_FULL
* canonicalUserID

View File

@ -26,7 +26,7 @@ import java.util.Map;
* AWS S3 in constructing the URL for requesting RESTful services. The three possibilities are
* (*) hostname followed by bucket as path information (sometimes termed the path style)
* (*) bucketname before hostname, so that bucketname appears addressable as a subdomain (termed the subdomain style)
* (*) bucketname as a DNS resolvable entry so that path information conveys extra parameters (termed the
* virtual hosting style).
* The path information is held as a Map of key-value pairs termed pathArgs.
* Specification as provided at http://docs.amazonwebservices.com/AmazonS3/latest/dev/VirtualHosting.html.

View File

@ -41,7 +41,7 @@ public class S3ListAllMyBucketsEntry {
// java.util.Date d = creationDate.getTime();
// com.cloud.bridge.util.ISO8601SimpleDateTimeFormat sdf = new com.cloud.bridge.util.ISO8601SimpleDateTimeFormat();
// sdf.format(d);
// java.lang.StringBuffer b = com.cloud.bridge.util.ISO8601SimpleDateTimeFormat.format(d); return b;
return creationDate;

View File

@ -54,9 +54,9 @@ public class S3PolicyArnCondition extends S3PolicyCondition {
return keys.get(key);
}
/**
* Convert the key's values into the type depending on what the condition expects.
* @throws ParseException
*/
public void setKey(ConditionKeys key, String[] values) throws ParseException {
if (PolicyConditions.ArnLike == condition || PolicyConditions.ArnNotLike == condition) {

View File

@ -53,11 +53,11 @@ public class S3PolicyBoolCondition extends S3PolicyCondition {
return keys.get(key);
}
/**
* Documentation on Bool conditions is nearly non-existent. Only found that
* the 'SecureTransport' key is relevant, and we have not found any examples.
*
* @throws ParseException
*/
public void setKey(ConditionKeys key, String[] values) throws ParseException {
keys.put(key, values);

View File

@ -44,7 +44,7 @@ public class S3PolicyConditionBlock {
* the entire block evaluate to false. If no conditions are present and the
* condition is relevant to the request, then the default condition is considered
* to be true.
* @throws Exception
*/
public boolean isTrue(S3PolicyContext context, String SID) throws Exception {
Iterator<S3PolicyCondition> itr = conditionList.iterator();
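The block evaluation described above amounts to an AND across the block's conditions, with an empty block treated as true. A minimal sketch with a hypothetical Condition interface (not the real S3PolicyCondition class):

import java.util.List;

// Hypothetical sketch of block evaluation: all conditions must hold; an empty block defaults to true.
class ConditionBlockSketch {
    interface Condition { boolean isTrue(); }

    static boolean blockIsTrue(List<Condition> conditions) {
        for (Condition c : conditions) {
            if (!c.isTrue())
                return false;   // one false condition makes the entire block false
        }
        return true;            // no conditions present -> default condition is true
    }
}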

View File

@ -57,9 +57,9 @@ public class S3PolicyDateCondition extends S3PolicyCondition {
return keys.get(key);
}
/**
* Convert the key's values into the type depending on what the condition expects.
* @throws ParseException
*/
public void setKey(ConditionKeys key, String[] values) throws ParseException {
Calendar[] dates = new Calendar[values.length];
@ -82,7 +82,7 @@ public class S3PolicyDateCondition extends S3PolicyCondition {
* Evaluation logic is as follows:
* 1) An 'AND' operation is used over all defined keys
* 2) An 'OR' operation is used over all key values
*
* Each condition has one or more keys, and each key has one or more values to test.
*/
public boolean isTrue(S3PolicyContext context, String SID) {
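The AND-over-keys / OR-over-values rule spelled out above can be sketched with plain maps; the types are illustrative and do not match the real condition classes.

import java.util.List;
import java.util.Map;

// Hypothetical sketch: every key must match (AND), and a key matches if any of its values matches (OR).
class KeyValueEvaluationSketch {
    static boolean isTrue(Map<String, List<String>> keyValues, Map<String, String> requestContext) {
        for (Map.Entry<String, List<String>> entry : keyValues.entrySet()) {
            String actual = requestContext.get(entry.getKey());
            boolean anyValueMatches = false;
            for (String candidate : entry.getValue()) {
                if (candidate.equals(actual)) {
                    anyValueMatches = true;             // OR across the key's values
                    break;
                }
            }
            if (!anyValueMatches)
                return false;                           // AND across the defined keys
        }
        return true;
    }
}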

View File

@ -54,10 +54,10 @@ public class S3PolicyIPAddressCondition extends S3PolicyCondition {
return keys.get(key);
}
/**
* Convert the key's values into the type depending on what the condition expects.
* @throws ParseException
* @throws IOException
*/
public void setKey(ConditionKeys key, String[] values) throws ParseException, Exception {
IpAddressRange[] addresses = new IpAddressRange[values.length];
@ -77,8 +77,8 @@ public class S3PolicyIPAddressCondition extends S3PolicyCondition {
if (!itr.hasNext())
return false;
// -> returns the Internet Protocol (IP) address of the client or last proxy that sent the request.
// For HTTP servlets, same as the value of the CGI variable REMOTE_ADDR.
IpAddressRange toCompareWith = IpAddressRange.parseRange(context.getRemoveAddr());
if (null == toCompareWith)
return false;

View File

@ -52,10 +52,10 @@ public class S3PolicyNumericCondition extends S3PolicyCondition {
return keys.get(key);
}
/**
* Convert the key's values into the type depending on what
* the condition expects.
* @throws ParseException
*/
public void setKey(ConditionKeys key, String[] values) throws ParseException {
Float[] numbers = new Float[values.length];

View File

@ -54,11 +54,11 @@ public class S3PolicyStringCondition extends S3PolicyCondition {
return keys.get(key);
}
/**
* Convert the key's values into the type depending on what the condition expects.
* To implement "like" tests we use regexes.
*
* @throws ParseException
*/
public void setKey(ConditionKeys key, String[] values) throws ParseException {
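One common way to implement the "like" matching mentioned above is to translate the policy wildcards (* and ?) into a regular expression. A hedged sketch, not necessarily the exact translation S3PolicyStringCondition performs:

import java.util.regex.Pattern;

// Hypothetical sketch: convert a policy-style wildcard pattern into a Java regex.
class WildcardSketch {
    static Pattern toRegex(String wildcard) {
        StringBuilder regex = new StringBuilder();
        for (char c : wildcard.toCharArray()) {
            if (c == '*')       regex.append(".*");                       // '*' matches any run of characters
            else if (c == '?')  regex.append('.');                        // '?' matches a single character
            else                regex.append(Pattern.quote(String.valueOf(c)));
        }
        return Pattern.compile(regex.toString());
    }
}
// e.g. toRegex("arn:aws:s3:::mybucket/*").matcher(value).matches()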

View File

@ -31,10 +31,10 @@ public class AuthenticationUtils {
/**
* The combination of the Issuer and the serial number of a X509 certificate
* must be globally unique. The Issuer can be described by its Distinguished Name (DN).
* The uniqueId is constructed by appending a ", serial=" onto the end of the Issuer's
* DN (thus keeping the DN format).
*
* @param cert
*/
public static String X509CertUniqueId(Certificate cert) {
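The construction described above (Issuer DN plus ", serial=<serial number>") can be sketched with the standard java.security.cert API, assuming the Certificate passed in is an X.509 certificate; this is an illustration, not the AuthenticationUtils method body.

import java.security.cert.Certificate;
import java.security.cert.X509Certificate;

// Sketch: issuer DN plus ", serial=<serial number>" gives a globally unique certificate id.
class CertIdSketch {
    static String uniqueId(Certificate cert) {
        X509Certificate x509 = (X509Certificate) cert;            // assumes an X.509 certificate
        String issuerDn = x509.getIssuerX500Principal().getName();
        return issuerDn + ", serial=" + x509.getSerialNumber();
    }
}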

View File

@ -53,7 +53,7 @@ public class DatabindingConverterUtil extends ConverterUtil {
}
// Otherwise String convertToString(Object any) is handled by invoker (which happens to be superclass).
// No need to reference super explicitly because it is the invoker of static methods
// @see org.apache.axis2.databinding.utils.ConverterUtil
}

View File

@ -46,7 +46,7 @@ public class DateHelper {
// convert the string with this value
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssz");
//this is zero time so we need to add that TZ indicator for
if (dateString.endsWith("Z")) {
dateString = dateString.substring(0, dateString.length() - 1) + "GMT-00:00";
} else { // -> -0700 is valid but we need to change it to -07:00 for SimpleDateFormat

View File

@ -68,7 +68,7 @@ public class EC2RestAuth {
} catch (Exception e) {
}
// -> the time zone is GMT if not defined
try {
formatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
cal.setTime(formatter.parse(created));
@ -108,8 +108,8 @@ public class EC2RestAuth {
/**
* Assuming that a port number is to be included.
*
* @param header - contents of the "Host:" header, skipping the 'Host:' preamble.
*/
public void setHostHeader(String hostHeader) {
if (null == hostHeader)
@ -128,7 +128,7 @@ public class EC2RestAuth {
/**
* The given query string needs to be pulled apart, sorted by parameter name, and reconstructed.
* We sort the query string values via a TreeMap.
*
* @param query - this string still has all URL encoding in place.
*/
public void setQueryString(String query) {
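The TreeMap-based canonicalization described above can be sketched as follows; the sketch ignores details such as repeated parameters and the exact URL-encoding rules, and is not the real setQueryString implementation.

import java.util.Map;
import java.util.TreeMap;

// Hypothetical sketch: split the query string, sort by parameter name, and rebuild it.
class QueryCanonicalizationSketch {
    static String canonicalize(String query) {
        Map<String, String> sorted = new TreeMap<String, String>();   // TreeMap keeps keys sorted by name
        for (String pair : query.split("&")) {
            int eq = pair.indexOf('=');
            if (eq < 0) sorted.put(pair, "");
            else        sorted.put(pair.substring(0, eq), pair.substring(eq + 1));
        }
        StringBuilder out = new StringBuilder();
        for (Map.Entry<String, String> e : sorted.entrySet()) {
            if (out.length() > 0) out.append('&');
            out.append(e.getKey()).append('=').append(e.getValue());
        }
        return out.toString();
    }
}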
@ -177,19 +177,19 @@ public class EC2RestAuth {
}
/**
* The request is authenticated if we can regenerate the same signature given
* on the request. Before calling this function make sure to set the header values
* defined by the public values above.
*
* @param httpVerb - the type of HTTP request (e.g., GET, PUT)
* @param secretKey - value obtained from the AWSAccessKeyId
* @param signature - the signature we are trying to recreate, note can be URL-encoded
* @param method - { "HmacSHA1", "HmacSHA256" }
*
* @throws SignatureException
*
* @return true if request has been authenticated, false otherwise
* @throws UnsupportedEncodingException
*/
public boolean verifySignature(String httpVerb, String secretKey, String signature, String method) throws SignatureException, UnsupportedEncodingException {
@ -218,12 +218,12 @@ public class EC2RestAuth {
/**
* This function generates the single string that will be used to sign with a users
* secret key.
*
* StringToSign = HTTP-Verb + "\n" +
* ValueOfHostHeaderInLowercase + "\n" +
* HTTPRequestURI + "\n" +
* CanonicalizedQueryString
*
* @return The single StringToSign or null.
*/
private String genStringToSign(String httpVerb) {
@ -251,7 +251,7 @@ public class EC2RestAuth {
/**
* Create a signature by the following method:
* new String( Base64( SHA1 or SHA256 ( key, byte array )))
*
* @param signIt - the data to generate a keyed HMAC over
* @param secretKey - the user's unique key for the HMAC operation
* @param useSHA1 - if false use SHA256
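Taken together, the comments above describe the usual AWS-style signing flow: build the StringToSign, compute a keyed HMAC over it, Base64-encode the result, and compare it with the signature supplied on the request. A self-contained sketch under those assumptions (standard JDK classes, not the EC2RestAuth code itself):

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.util.Base64;

// Sketch of the signing scheme described above.
class RequestSigningSketch {
    static String stringToSign(String httpVerb, String host, String uri, String canonicalQuery) {
        return httpVerb + "\n" + host.toLowerCase() + "\n" + uri + "\n" + canonicalQuery;
    }

    static String sign(String stringToSign, String secretKey) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA256");                       // or "HmacSHA1"
        mac.init(new SecretKeySpec(secretKey.getBytes("UTF-8"), "HmacSHA256"));
        byte[] raw = mac.doFinal(stringToSign.getBytes("UTF-8"));
        return Base64.getEncoder().encodeToString(raw);                // new String(Base64(HMAC(key, data)))
    }

    static boolean verify(String suppliedSignature, String httpVerb, String host, String uri,
                          String canonicalQuery, String secretKey) throws Exception {
        String recomputed = sign(stringToSign(httpVerb, host, uri, canonicalQuery), secretKey);
        return recomputed.equals(suppliedSignature);                   // equal -> request authenticated
    }
}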

View File

@ -27,20 +27,20 @@ import java.util.Calendar;
import java.util.TimeZone;
/**
* Format and parse a date string which is expected to be in ISO 8601 DateTimeFormat especially for
* use in XML documents.
* An example is for use with GMTDateTimeUserType to provide parsing of DateTime format strings into
* accurate Java Date representations based on UTC.
* The purpose of this class is to allow the creation of accurate date time representations following
* the ISO 8601 format YYYY-MM-DDThh:MM:ss
* using the letter "T" as the date/time separator
* This representation may be immediately followed by a "Z" (Zulu i.e. at zero offset from GMT) to indicate UTC
* or, otherwise, to a specific time zone. If a time zone (tz) is encoded then this is held as the difference
* between the local time in the tz and UTC, expressed as a positive(+) or negative(-) offset (hhMM) appended
* to the format.
* The default case holds no tz information and assumes that a date time representation referenced to Zulu
* (i.e. zero offset from GMT) is required. When formatting an existing Date transform it into the Zulu timezone
* so that it is explicitly at GMT with zero offset. This provides the default representation for the encoding
* of AWS datetime values.
* For testing, it may be useful to note that, as at 2012, a city whose time is always in the Zulu timezone is
* Reykjavik, Iceland.
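The default Zulu representation described above can be produced with standard JDK classes; a small sketch (the actual ISO8601SimpleDateTimeFormat class may differ in detail):

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

// Sketch: format a Date as ISO 8601 date-time at zero offset from GMT ("Zulu").
class Iso8601Sketch {
    static String toZulu(Date date) {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
        fmt.setTimeZone(TimeZone.getTimeZone("GMT"));   // force the GMT/Zulu timezone
        return fmt.format(date);                        // e.g. 2012-06-01T13:45:30Z
    }
}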

View File

@ -20,7 +20,7 @@ import java.io.IOException;
/**
* Represents a network IP address or a range of addresses.
* A range is useful when representing IP addresses defined in
* A range is useful when representing IP addresses defined in
*/
public class IpAddressRange {
@ -100,10 +100,10 @@ public class IpAddressRange {
return range;
}
/**
* In order to do unsigned math here we must use long types so that high order bits
* are not used as the sign of the number.
*
* @param ipAddress
* @return
*/
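The point about using long for unsigned math can be illustrated with the usual dotted-quad-to-long conversion, sketched here under the assumption of a well-formed IPv4 address (not necessarily the IpAddressRange implementation):

// Sketch: pack an IPv4 dotted-quad into a long so the high-order bit never acts as a sign bit.
class IpToLongSketch {
    static long ipToLong(String ipAddress) {
        String[] octets = ipAddress.split("\\.");
        long result = 0;
        for (String octet : octets) {
            result = (result << 8) | (Long.parseLong(octet) & 0xFF);   // stay in the unsigned range
        }
        return result;                                                 // e.g. 10.0.0.1 -> 167772161
    }
}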

View File

@ -29,12 +29,12 @@ import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
/**
* JsonAccessor provides the functionality to allow navigating a JSON object graph using simple expressions,
* for example, the following property access expressions are all valid ones
*
* rootobj.level1obj[1].property
* this[0].level1obj[1].property
*
*/
public class JsonAccessor {
private JsonElement _json;
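A much-reduced sketch of that kind of navigation over Gson's JsonElement tree follows; it only handles the simple name and [index] steps shown in the example expressions and is not the real JsonAccessor parser.

import com.google.gson.JsonElement;

// Sketch: walk an expression such as "level1obj.property" or "this[0].items[1].name" over a JsonElement.
class JsonPathSketch {
    static JsonElement eval(JsonElement root, String expression) {
        JsonElement current = root;
        for (String step : expression.split("\\.")) {
            int bracket = step.indexOf('[');
            String name = (bracket < 0) ? step : step.substring(0, bracket);
            if (!name.isEmpty() && !name.equals("this"))
                current = current.getAsJsonObject().get(name);          // object member access
            if (bracket >= 0) {
                int index = Integer.parseInt(step.substring(bracket + 1, step.indexOf(']')));
                current = current.getAsJsonArray().get(index);          // array element access
            }
        }
        return current;
    }
}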

View File

@ -16,7 +16,7 @@
// under the License.
package com.cloud.bridge.util;
/**
/**
* Provide getters: getFirst(), getSecond()
* Provide setters: setFirst(val), setSecond(val)
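The class described above amounts to a small generic 2-tuple; a minimal sketch with the listed getters and setters (an illustrative stand-in, not the OrderedPair source itself):

// Minimal sketch of a generic ordered pair of values of types T1 and T2.
class OrderedPairSketch<T1, T2> {
    private T1 first;
    private T2 second;

    OrderedPairSketch(T1 first, T2 second) { this.first = first; this.second = second; }

    T1 getFirst()            { return first; }
    T2 getSecond()           { return second; }
    void setFirst(T1 value)  { this.first = value; }
    void setSecond(T2 value) { this.second = value; }
}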

Some files were not shown because too many files have changed in this diff.