CLOUDSTACK-3950: add "regionsecondaryenabled" in listCapabilitiesCmd response.

Min Chen 2013-08-20 15:59:18 -07:00
parent bcd36508de
commit e65a302f15
3 changed files with 62 additions and 52 deletions
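
The first two diffs below wire the flag from the API command into the serialized response; the third only reformats an unrelated storage-allocator test. Once deployed, the new field can be smoke-tested with a minimal client sketch like the one below. Assumptions: the unauthenticated integration API port (global setting integration.api.port, commonly 8096) is enabled on the management server; the host name and the substring check are illustrative only and not part of this commit.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Hypothetical smoke test: calls listCapabilities and looks for the new key.
// Endpoint, port, and host are placeholder assumptions; production calls go
// through the authenticated /client/api endpoint with a signed request.
public class RegionSecondaryCapabilityCheck {
    public static void main(String[] args) throws Exception {
        URL api = new URL("http://mgmt-server.example:8096/client/api"
                + "?command=listCapabilities&response=json");
        HttpURLConnection conn = (HttpURLConnection) api.openConnection();
        StringBuilder body = new StringBuilder();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            for (String line; (line = in.readLine()) != null; ) {
                body.append(line);
            }
        }
        // The commit exposes the flag under this lowercase key in the
        // listCapabilities JSON response; a crude substring check suffices here.
        System.out.println("response mentions regionsecondaryenabled: "
                + body.toString().contains("\"regionsecondaryenabled\""));
    }
}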

ListCapabilitiesCmd.java

@@ -18,10 +18,11 @@ package org.apache.cloudstack.api.command.user.config;
import java.util.Map;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.response.CapabilitiesResponse;
import com.cloud.user.Account;
@@ -52,6 +53,7 @@ public class ListCapabilitiesCmd extends BaseCmd {
        response.setProjectInviteRequired((Boolean) capabilities.get("projectInviteRequired"));
        response.setAllowUsersCreateProjects((Boolean) capabilities.get("allowusercreateprojects"));
        response.setDiskOffMaxSize((Long) capabilities.get("customDiskOffMaxSize"));
        response.setRegionSecondaryEnabled((Boolean) capabilities.get("regionSecondaryEnabled"));
        if (capabilities.containsKey("apiLimitInterval")) {
            response.setApiLimitInterval((Integer) capabilities.get("apiLimitInterval"));
        }
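
The command only reads the capabilities map; the code that computes and inserts the region-wide secondary flag lives outside this diff. A hypothetical sketch of that producer/consumer contract follows (class, method, and values are invented for illustration; only the map key is taken from the command above).

import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration: some server component (not shown in this diff)
// is assumed to put the flag into the capabilities map under the exact key
// that ListCapabilitiesCmd reads.
public class CapabilitiesMapSketch {
    public static Map<String, Object> buildCapabilities(boolean regionSecondaryEnabled) {
        Map<String, Object> capabilities = new HashMap<>();
        // Key must match the string used in the command above.
        capabilities.put("regionSecondaryEnabled", regionSecondaryEnabled);
        return capabilities;
    }

    public static void main(String[] args) {
        Map<String, Object> caps = buildCapabilities(true);
        // Mirrors the cast performed in the command class.
        boolean enabled = (Boolean) caps.get("regionSecondaryEnabled");
        System.out.println("regionSecondaryEnabled = " + enabled);
    }
}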

CapabilitiesResponse.java

@@ -16,11 +16,12 @@
// under the License.
package org.apache.cloudstack.api.response;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param;
@SuppressWarnings("unused")
public class CapabilitiesResponse extends BaseResponse {
@@ -46,6 +47,9 @@ public class CapabilitiesResponse extends BaseResponse {
            "create disk from disk offering with custom size")
    private Long diskOffMaxSize;

    @SerializedName("regionsecondaryenabled") @Param(description = "true if region wide secondary is enabled, false otherwise")
    private boolean regionSecondaryEnabled;

    @SerializedName("apilimitinterval") @Param(description = "time interval (in seconds) to reset api count")
    private Integer apiLimitInterval;
@@ -81,6 +85,10 @@ public class CapabilitiesResponse extends BaseResponse {
        this.diskOffMaxSize = diskOffMaxSize;
    }

    public void setRegionSecondaryEnabled(boolean regionSecondaryEnabled) {
        this.regionSecondaryEnabled = regionSecondaryEnabled;
    }

    public void setApiLimitInterval(Integer apiLimitInterval) {
        this.apiLimitInterval = apiLimitInterval;
    }
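
For reference, a self-contained sketch of how the @SerializedName annotation surfaces the camelCase field as the lowercase key API clients see. Plain Gson is used here for brevity; CloudStack's real responses go through its own Gson-based response serializer, but the key mapping works the same way.

import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;

// Stand-alone demo: Gson honours @SerializedName, so the boolean field below
// appears to clients as the lowercase "regionsecondaryenabled" key.
public class RegionSecondaryFlagDemo {
    static class Caps {
        @SerializedName("regionsecondaryenabled")
        private boolean regionSecondaryEnabled;

        Caps(boolean enabled) {
            this.regionSecondaryEnabled = enabled;
        }
    }

    public static void main(String[] args) {
        // Prints: {"regionsecondaryenabled":true}
        System.out.println(new Gson().toJson(new Caps(true)));
    }
}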

StorageAllocatorTest.java

@@ -22,21 +22,8 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.configuration.Config;
import com.cloud.configuration.ConfigurationVO;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.user.Account;
import com.cloud.utils.db.DB;
import com.cloud.vm.VMInstanceVO;
import junit.framework.Assert;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -45,10 +32,21 @@ import org.mockito.Mockito;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.framework.config.ConfigurationVO;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.configuration.Config;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter.NetworkType;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
@@ -60,15 +58,17 @@ import com.cloud.org.Cluster.ClusterType;
import com.cloud.org.Managed.ManagedState;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.db.DB;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;
@@ -361,45 +361,45 @@ public class StorageAllocatorTest {
    @Test
    public void testCLOUDSTACK3481() {
        try {
            createDb();

            StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
            pool.setHypervisor(HypervisorType.KVM);
            pool.setScope(ScopeType.ZONE);
            pool.setClusterId(null);
            pool.setPodId(null);
            storagePoolDao.update(pool.getId(), pool);

            DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.KVM);
            VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
            Account account = Mockito.mock(Account.class);
            Mockito.when(account.getAccountId()).thenReturn(1L);
            Mockito.when(vmProfile.getHypervisorType()).thenReturn(HypervisorType.KVM);
            Mockito.when(vmProfile.getOwner()).thenReturn(account);
            Mockito.when(
                    storageMgr.storagePoolHasEnoughSpace(Matchers.anyListOf(Volume.class),
                            Matchers.any(StoragePool.class))).thenReturn(true);
            Mockito.when(storageMgr.storagePoolHasEnoughIops(Matchers.anyListOf(Volume.class),
                    Matchers.any(StoragePool.class))).thenReturn(true);
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    Assert.assertEquals(pools.get(0).getId(), storage.getId());
                    foundAcct++;
                }
            }

            if (foundAcct > 1 || foundAcct == 0) {
                Assert.fail();
            }
        } catch (Exception e) {
            cleanDb();
            Assert.fail();
        }
    }
@Test