backports for 4.11.1 from master (#2621)
* CLOUDSTACK-10147: Disabled XenServer cluster can still deploy VMs. Added code to skip disabled clusters when selecting a host (#2442)
  (cherry picked from commit c3488a51db4bce4ec32c09e6fef78193d360cf3f)
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* CLOUDSTACK-10318: Bug on sorting ACL rules list in Chrome (#2478)
  (cherry picked from commit 4412563f19ec8b808fe4c79e2baf658507a84873)
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* CLOUDSTACK-10284: Creating a snapshot from a VM snapshot generates an error if the hypervisor is not KVM.
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* CLOUDSTACK-10221: Allow IPv6 when creating a Basic Network (#2397)
  Basic Networking has supported IPv6 since CloudStack 4.10, so IPv6 should be allowed to be specified when creating a network.
  Signed-off-by: Wido den Hollander <wido@widodh.nl>
  (cherry picked from commit 9733a10ecda5f1af0f2c0fa863fc976a3e710946)
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* CLOUDSTACK-10214: Unable to remove local primary storage (#2390)
  Allow admins to remove a primary storage pool.
  Cherry-picked from eba2e1d8a1ce4e86b4df144db03e96739da455e5
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* dateutil: consistency of tzdate input and output (#2392)
  Signed-off-by: Yoan Blanc <yoan.blanc@exoscale.ch>
  Signed-off-by: Daan Hoogland <daan.hoogland@shapeblue.com>
  (cherry picked from commit 2ad520282319da9a03061b8c744e51a4ffdf94a2)
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* CLOUDSTACK-10054: Volume download times out in 3600 seconds (#2244)
  (cherry picked from commit bb607d07a97476dc4fb934b3d75df6affba47086)
  Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>

* When creating a new account (via domain admin) it is possible to select "root admin" as the role for the new user (#2606)
  * Create account with domain admin showing 'root admin' role
    Domain admins should not be able to assign the root admin role to new users; therefore, the 'root admin' role (or any other role of the same type) should not be visible to domain admins.

* License and formatting

* Break long sentence into multiple lines

* Fix wording of method 'getCurrentAccount'

* Fix typo in variable name

* [CLOUDSTACK-10259] Missing float part of secondary storage data in listAccounts (a short numeric illustration follows this list)

* [CLOUDSTACK-9338] ACS not accounting resources of VMs with custom service offering
  ACS accounts resources correctly when deploying VMs with custom service offerings. However, other methods (such as updateResourceCount) do not perform this accounting properly, and those methods update the resource count for an account in the database. Therefore, if a user deploys VMs with custom service offerings and later calls updateResourceCount, the method only accounts for VMs with normal service offerings and stores that as the number of resources used by the account. This results in fewer resources being accounted to the account than are actually in use. The problem gets worse because, if the user then deletes these VMs, the allocated resource counts can go negative, breaking all resource limiting for accounts. This is a serious attack vector for public cloud providers.

* [CLOUDSTACK-10230] User should not be able to use a removed "Guest OS type" (#2404)
  * [CLOUDSTACK-10230] User is able to change to a "Guest OS type" that has been removed
    Users are able to change the OS type of a VM to a "Guest OS type" that has been removed. This becomes a security issue when we try to force users to use HVM VMs (Meltdown/Spectre mitigations). A removed "Guest OS type" should not be usable by any user in the cloud.

* Remove trailing lines that are breaking the build due to checkstyle compliance

* Remove unused imports

* Fix classes that were in the wrong folder structure

* Updates to capacity management
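A quick numeric illustration of the CLOUDSTACK-10259 item above. This is a standalone sketch with made-up values, not code from this commit; only the bytesToGiB constant mirrors the one defined in ResourceType. When the per-account secondary storage total is reported as a long number of GiB, integer division silently drops the fractional part, whereas the float division used by the fix preserves it.

// Hypothetical, self-contained sketch of the rounding problem fixed by CLOUDSTACK-10259.
public class SecondaryStorageRounding {
    public static final long bytesToGiB = 1024L * 1024 * 1024;

    public static void main(String[] args) {
        long usedBytes = 1610612736L;                        // 1.5 GiB of secondary storage in use
        long totalAsLong = usedBytes / bytesToGiB;           // integer division -> 1 (the .5 GiB is lost)
        float totalAsFloat = usedBytes / (bytesToGiB * 1f);  // float division -> 1.5
        System.out.println(totalAsLong + " GiB vs. " + totalAsFloat + " GiB");
    }
}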
parent bd0959517b
commit 4534cefa40
@@ -38,8 +38,8 @@ public interface Resource {
private ResourceOwnerType[] supportedOwners;
private int ordinal;
public static final long bytesToKiB = 1024;
public static final long bytesToMiB = 1024 * 1024;
public static final long bytesToGiB = 1024 * 1024 * 1024;
public static final long bytesToMiB = bytesToKiB * 1024;
public static final long bytesToGiB = bytesToMiB * 1024;

ResourceType(String name, int ordinal, ResourceOwnerType... supportedOwners) {
this.name = name;
@@ -17,38 +17,64 @@

package org.apache.cloudstack.acl;

import java.util.List;

import org.apache.cloudstack.acl.RolePermission.Permission;
import org.apache.cloudstack.framework.config.ConfigKey;

import java.util.List;

public interface RoleService {

ConfigKey<Boolean> EnableDynamicApiChecker = new ConfigKey<>("Advanced", Boolean.class, "dynamic.apichecker.enabled", "false",
"If set to true, this enables the dynamic role-based api access checker and disables the default static role-based api access checker.",
true);
"If set to true, this enables the dynamic role-based api access checker and disables the default static role-based api access checker.", true);

boolean isEnabled();
Role findRole(final Long id);
Role createRole(final String name, final RoleType roleType, final String description);
Role updateRole(final Role role, final String name, final RoleType roleType, final String description);
boolean deleteRole(final Role role);

RolePermission findRolePermission(final Long id);
RolePermission findRolePermissionByUuid(final String uuid);
/**
* Searches for a role with the given ID. If the ID is null or less than zero, this method will return null.
* This method will also return null if no role is found with the provided ID.
* Moreover, we will check if the requested role is of 'Admin' type; roles with 'Admin' type should only be visible to 'root admins'.
* Therefore, if a non-'root admin' user tries to search for an 'Admin' role, this method will return null.
*/
Role findRole(Long id);

Role createRole(String name, RoleType roleType, String description);

Role updateRole(Role role, String name, RoleType roleType, String description);

boolean deleteRole(Role role);

RolePermission findRolePermission(Long id);

RolePermission findRolePermissionByUuid(String uuid);

RolePermission createRolePermission(Role role, Rule rule, Permission permission, String description);

RolePermission createRolePermission(final Role role, final Rule rule, final Permission permission, final String description);
/**
* updateRolePermission updates the order/position of a role permission
* @param role The role whose permissions need to be re-ordered
* @param newOrder The new list of ordered role permissions
*/
boolean updateRolePermission(final Role role, final List<RolePermission> newOrder);
boolean updateRolePermission(final Role role, final RolePermission rolePermission, final Permission permission);
boolean deleteRolePermission(final RolePermission rolePermission);
boolean updateRolePermission(Role role, List<RolePermission> newOrder);

boolean updateRolePermission(Role role, RolePermission rolePermission, Permission permission);

boolean deleteRolePermission(RolePermission rolePermission);

/**
* List all roles configured in the database. Roles that have the type {@link RoleType#Admin} will not be shown for users that are not 'root admin'.
*/
List<Role> listRoles();
List<Role> findRolesByName(final String name);
List<Role> findRolesByType(final RoleType roleType);
List<RolePermission> findAllPermissionsBy(final Long roleId);

/**
* Find all roles that have the given {@link String} as part of their name.
* If the user calling the method is not a 'root admin', roles of type {@link RoleType#Admin} will be removed from the returned list.
*/
List<Role> findRolesByName(String name);

/**
* Find all roles by {@link RoleType}. If the role type is {@link RoleType#Admin}, the calling account must be a root admin, otherwise we return an empty list.
*/
List<Role> findRolesByType(RoleType roleType);

List<RolePermission> findAllPermissionsBy(Long roleId);
}
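The new javadoc in the hunk above states the visibility rule (Admin-type roles must only be visible to root admins) but not how a result list would be filtered. The snippet below is only a rough sketch of that rule under the stated assumption; RoleVisibilityFilter, filterVisibleRoles and callerIsRootAdmin are hypothetical names and are not part of this commit.

// Hypothetical illustration only; not the RoleService implementation changed by this commit.
import java.util.List;
import java.util.stream.Collectors;

import org.apache.cloudstack.acl.Role;
import org.apache.cloudstack.acl.RoleType;

class RoleVisibilityFilter {
    /** Drops Admin-type roles from a result list when the caller is not a root admin. */
    static List<Role> filterVisibleRoles(List<Role> roles, boolean callerIsRootAdmin) {
        if (callerIsRootAdmin) {
            return roles;
        }
        return roles.stream()
                .filter(role -> role.getRoleType() != RoleType.Admin)
                .collect(Collectors.toList());
    }
}

An equivalent check is what findRole, findRolesByName, findRolesByType and listRoles are documented to apply in the interface above.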
@@ -17,31 +17,25 @@

package org.apache.cloudstack.api.command.admin.acl;

import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.user.Account;
import com.google.common.base.Strings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.cloudstack.acl.Role;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.RoleResponse;
import org.apache.commons.lang3.StringUtils;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import com.cloud.user.Account;
import com.google.common.base.Strings;

@APICommand(name = ListRolesCmd.APINAME, description = "Lists dynamic roles in CloudStack", responseObject = RoleResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false,
since = "4.9.0",
authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin})
@APICommand(name = ListRolesCmd.APINAME, description = "Lists dynamic roles in CloudStack", responseObject = RoleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.9.0", authorized = {
RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin})
public class ListRolesCmd extends BaseCmd {
public static final String APINAME = "listRoles";

@@ -112,13 +106,13 @@ public class ListRolesCmd extends BaseCmd {
}

@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
final List<Role> roles;
public void execute() {
List<Role> roles;
if (getId() != null && getId() > 0L) {
roles = Collections.singletonList(roleService.findRole(getId()));
} else if (!Strings.isNullOrEmpty(getName())) {
} else if (StringUtils.isNotBlank(getName())) {
roles = roleService.findRolesByName(getName());
} else if (getRoleType() != null){
} else if (getRoleType() != null) {
roles = roleService.findRolesByType(getRoleType());
} else {
roles = roleService.listRoles();
@@ -29,7 +29,6 @@ import org.apache.cloudstack.api.EntityReference;
import com.cloud.serializer.Param;
import com.cloud.user.Account;

@SuppressWarnings("unused")
@EntityReference(value = Account.class)
public class AccountResponse extends BaseResponse implements ResourceLimitAndCountResponse {
@SerializedName(ApiConstants.ID)
@@ -222,7 +221,7 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou

@SerializedName("secondarystoragetotal")
@Param(description = "the total secondary storage space (in GiB) owned by account", since = "4.2.0")
private Long secondaryStorageTotal;
private float secondaryStorageTotal;

@SerializedName("secondarystorageavailable")
@Param(description = "the total secondary storage space (in GiB) available to be used for this account", since = "4.2.0")
@@ -501,7 +500,7 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou
}

@Override
public void setSecondaryStorageTotal(Long secondaryStorageTotal) {
public void setSecondaryStorageTotal(float secondaryStorageTotal) {
this.secondaryStorageTotal = secondaryStorageTotal;
}
@@ -165,7 +165,7 @@ public class DomainResponse extends BaseResponse implements ResourceLimitAndCoun
private String secondaryStorageLimit;

@SerializedName("secondarystoragetotal") @Param(description="the total secondary storage space (in GiB) owned by domain", since="4.2.0")
private Long secondaryStorageTotal;
private float secondaryStorageTotal;

@SerializedName("secondarystorageavailable") @Param(description="the total secondary storage space (in GiB) available to be used for this domain", since="4.2.0")
private String secondaryStorageAvailable;
@@ -399,7 +399,7 @@ public class DomainResponse extends BaseResponse implements ResourceLimitAndCoun
}

@Override
public void setSecondaryStorageTotal(Long secondaryStorageTotal) {
public void setSecondaryStorageTotal(float secondaryStorageTotal) {
this.secondaryStorageTotal = secondaryStorageTotal;
}
@@ -29,7 +29,6 @@ import com.cloud.projects.Project;
import com.cloud.serializer.Param;

@EntityReference(value = Project.class)
@SuppressWarnings("unused")
public class ProjectResponse extends BaseResponse implements ResourceLimitAndCountResponse {

@SerializedName(ApiConstants.ID)
@@ -134,7 +133,7 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou

@SerializedName("secondarystoragetotal")
@Param(description = "the total secondary storage space (in GiB) owned by project", since = "4.2.0")
private Long secondaryStorageTotal;
private float secondaryStorageTotal;

@SerializedName("secondarystorageavailable")
@Param(description = "the total secondary storage space (in GiB) available to be used for this project", since = "4.2.0")
@@ -414,7 +413,7 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou
}

@Override
public void setSecondaryStorageTotal(Long secondaryStorageTotal) {
public void setSecondaryStorageTotal(float secondaryStorageTotal) {
this.secondaryStorageTotal = secondaryStorageTotal;
}
@@ -54,7 +54,7 @@ public interface ResourceLimitAndCountResponse {

public void setSecondaryStorageLimit(String secondaryStorageLimit);

public void setSecondaryStorageTotal(Long secondaryStorageTotal);
public void setSecondaryStorageTotal(float secondaryStorageTotal);

public void setSecondaryStorageAvailable(String secondaryStorageAvailable);
@@ -37,6 +37,11 @@ public class IsoProcessor extends AdapterBase implements Processor {

@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) {
return process(templatePath, format, templateName, 0);
}

@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) {
if (format != null) {
s_logger.debug("We don't handle conversion from " + format + " to ISO.");
return null;
@@ -42,11 +42,15 @@ import com.cloud.utils.script.Script;

public class OVAProcessor extends AdapterBase implements Processor {
private static final Logger s_logger = Logger.getLogger(OVAProcessor.class);

StorageLayer _storage;

@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
return process(templatePath, format, templateName, 0);
}

@Override
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException {
if (format != null) {
if (s_logger.isInfoEnabled()) {
s_logger.info("We currently don't handle conversion from " + format + " to OVA.");
@@ -66,8 +70,7 @@ public class OVAProcessor extends AdapterBase implements Processor {
s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName);
String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension();
File templateFile = new File(templateFileFullPath);

Script command = new Script("tar", 0, s_logger);
Script command = new Script("tar", processTimeout, s_logger);
command.add("--no-same-owner");
command.add("--no-same-permissions");
command.add("-xf", templateFileFullPath);
@@ -44,6 +44,8 @@ public interface Processor extends Adapter {
*/
FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException;

FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException;

public static class FormatInfo {
public ImageFormat format;
public long size;
@ -40,8 +40,13 @@ public class QCOW2Processor extends AdapterBase implements Processor {
|
||||
|
||||
private StorageLayer _storage;
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
|
||||
return process(templatePath, format, templateName, 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException {
|
||||
if (format != null) {
|
||||
s_logger.debug("We currently don't handle conversion from " + format + " to QCOW2.");
|
||||
return null;
|
||||
|
||||
@ -45,8 +45,13 @@ public class RawImageProcessor extends AdapterBase implements Processor {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
|
||||
return process(templatePath, format, templateName, 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException {
|
||||
if (format != null) {
|
||||
s_logger.debug("We currently don't handle conversion from " + format + " to raw image.");
|
||||
return null;
|
||||
|
||||
@ -35,6 +35,11 @@ public class TARProcessor extends AdapterBase implements Processor {
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) {
|
||||
return process(templatePath, format, templateName, 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) {
|
||||
if (format != null) {
|
||||
s_logger.debug("We currently don't handle conversion from " + format + " to TAR.");
|
||||
return null;
|
||||
|
||||
@ -58,6 +58,11 @@ public class VhdProcessor extends AdapterBase implements Processor {
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
|
||||
return process(templatePath, format, templateName, 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException {
|
||||
if (format != null) {
|
||||
s_logger.debug("We currently don't handle conversion from " + format + " to VHD.");
|
||||
return null;
|
||||
|
||||
@ -44,6 +44,11 @@ public class VmdkProcessor extends AdapterBase implements Processor {
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException {
|
||||
return process(templatePath, format, templateName, 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException {
|
||||
if (format != null) {
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("We currently don't handle conversion from " + format + " to VMDK.");
|
||||
|
||||
@ -51,6 +51,8 @@ public class TemplateOrVolumePostUploadCommand {
|
||||
|
||||
private String defaultMaxAccountSecondaryStorage;
|
||||
|
||||
private long processTimeout;
|
||||
|
||||
private long accountId;
|
||||
|
||||
private Integer nfsVersion;
|
||||
@ -206,4 +208,12 @@ public class TemplateOrVolumePostUploadCommand {
|
||||
public void setNfsVersion(Integer nfsVersion) {
|
||||
this.nfsVersion = nfsVersion;
|
||||
}
|
||||
|
||||
public void setProcessTimeout(long processTimeout) {
|
||||
this.processTimeout = processTimeout;
|
||||
}
|
||||
|
||||
public long getProcessTimeout() {
|
||||
return processTimeout;
|
||||
}
|
||||
}
|
||||
|
||||
@ -38,6 +38,7 @@ import java.util.stream.Collectors;
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.utils.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
|
||||
@ -2116,16 +2117,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
||||
|
||||
boolean ipv6 = false;
|
||||
|
||||
if (ip6Gateway != null && ip6Cidr != null) {
|
||||
if (StringUtils.isNotBlank(ip6Gateway) && StringUtils.isNotBlank(ip6Cidr)) {
|
||||
ipv6 = true;
|
||||
}
|
||||
// Validate zone
|
||||
final DataCenterVO zone = _dcDao.findById(zoneId);
|
||||
if (zone.getNetworkType() == NetworkType.Basic) {
|
||||
if (ipv6) {
|
||||
throw new InvalidParameterValueException("IPv6 is not supported in Basic zone");
|
||||
}
|
||||
|
||||
// In Basic zone the network should have aclType=Domain, domainId=1, subdomainAccess=true
|
||||
if (aclType == null || aclType != ACLType.Domain) {
|
||||
throw new InvalidParameterValueException("Only AclType=Domain can be specified for network creation in Basic zone");
|
||||
@ -2188,6 +2185,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
||||
}
|
||||
}
|
||||
|
||||
if (ipv6 && !NetUtils.isValidIp6Cidr(ip6Cidr)) {
|
||||
throw new InvalidParameterValueException("Invalid IPv6 cidr specified");
|
||||
}
|
||||
|
||||
//TODO(VXLAN): Support VNI specified
|
||||
// VlanId can be specified only when network offering supports it
|
||||
final boolean vlanSpecified = vlanId != null;
|
||||
@ -2328,7 +2329,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
||||
userNetwork.setGateway(gateway);
|
||||
}
|
||||
|
||||
if (ip6Cidr != null && ip6Gateway != null) {
|
||||
if (StringUtils.isNotBlank(ip6Gateway) && StringUtils.isNotBlank(ip6Cidr)) {
|
||||
userNetwork.setIp6Cidr(ip6Cidr);
|
||||
userNetwork.setIp6Gateway(ip6Gateway);
|
||||
}
|
||||
|
||||
@@ -57,4 +57,16 @@ public interface ResourceCountDao extends GenericDao<ResourceCountVO, Long> {
Set<Long> listRowsToUpdateForDomain(long domainId, ResourceType type);

long removeEntriesByOwner(long ownerId, ResourceOwnerType ownerType);
}

/**
* Counts the number of CPU cores allocated for the given account.
* Side note: This method is not using the "resource_count" table. It is executing the actual count instead.
*/
long countCpuNumberAllocatedToAccount(long accountId);

/**
* Counts the amount of memory allocated for the given account.
* Side note: This method is not using the "resource_count" table. It is executing the actual count instead.
*/
long countMemoryAllocatedToAccount(long accountId);
}
@ -16,6 +16,9 @@
|
||||
// under the License.
|
||||
package com.cloud.configuration.dao;
|
||||
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
@ -42,6 +45,7 @@ import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
@Component
|
||||
public class ResourceCountDaoImpl extends GenericDaoBase<ResourceCountVO, Long> implements ResourceCountDao {
|
||||
@ -248,4 +252,41 @@ public class ResourceCountDaoImpl extends GenericDaoBase<ResourceCountVO, Long>
|
||||
return 0;
|
||||
}
|
||||
|
||||
private String baseSqlCountComputingResourceAllocatedToAccount = "Select "
|
||||
+ " SUM((CASE "
|
||||
+ " WHEN so.%s is not null THEN so.%s "
|
||||
+ " ELSE CONVERT(vmd.value, UNSIGNED INTEGER) "
|
||||
+ " END)) as total "
|
||||
+ " from vm_instance vm "
|
||||
+ " join service_offering_view so on so.id = vm.service_offering_id "
|
||||
+ " left join user_vm_details vmd on vmd.vm_id = vm.id and vmd.name = '%s' "
|
||||
+ " where vm.type = 'User' and state not in ('Destroyed', 'Error', 'Expunging') and display_vm = true and account_id = ? ";
|
||||
|
||||
@Override
|
||||
public long countCpuNumberAllocatedToAccount(long accountId) {
|
||||
String sqlCountCpuNumberAllocatedToAccount = String.format(baseSqlCountComputingResourceAllocatedToAccount, ResourceType.cpu, ResourceType.cpu, "cpuNumber");
|
||||
return executeSqlCountComputingResourcesForAccount(accountId, sqlCountCpuNumberAllocatedToAccount);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long countMemoryAllocatedToAccount(long accountId) {
|
||||
String serviceOfferingRamSizeField = "ram_size";
|
||||
String sqlCountCpuNumberAllocatedToAccount = String.format(baseSqlCountComputingResourceAllocatedToAccount, serviceOfferingRamSizeField, serviceOfferingRamSizeField, "memory");
|
||||
return executeSqlCountComputingResourcesForAccount(accountId, sqlCountCpuNumberAllocatedToAccount);
|
||||
}
|
||||
|
||||
private long executeSqlCountComputingResourcesForAccount(long accountId, String sqlCountComputingResourcesAllocatedToAccount) {
|
||||
try (TransactionLegacy tx = TransactionLegacy.currentTxn()) {
|
||||
PreparedStatement pstmt = tx.prepareAutoCloseStatement(sqlCountComputingResourcesAllocatedToAccount);
|
||||
pstmt.setLong(1, accountId);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
if (!rs.next()) {
|
||||
throw new CloudRuntimeException(String.format("An unexpected case happened while counting allocated computing resources for account: " + accountId));
|
||||
}
|
||||
return rs.getLong("total");
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -32,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.framework.config.Configurable;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
import org.apache.cloudstack.storage.command.DownloadCommand;
|
||||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
@ -434,13 +435,14 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
|
||||
@DB
|
||||
public Pair<Boolean, Long> getCommandHostDelegation(long hostId, Command cmd) {
|
||||
boolean needDelegation = false;
|
||||
|
||||
if (cmd instanceof StorageSubSystemCommand) {
|
||||
Boolean fullCloneEnabled = VmwareFullClone.value();
|
||||
StorageSubSystemCommand c = (StorageSubSystemCommand)cmd;
|
||||
c.setExecuteInSequence(fullCloneEnabled);
|
||||
}
|
||||
|
||||
if (cmd instanceof DownloadCommand) {
|
||||
cmd.setContextParam(VmwareManager.s_vmwareOVAPackageTimeout.key(), String.valueOf(VmwareManager.s_vmwareOVAPackageTimeout.value()));
|
||||
}
|
||||
//NOTE: the hostid can be a hypervisor host, or a ssvm agent. For copycommand, if it's for volume upload, the hypervisor
|
||||
//type is empty, so we need to check the format of volume at first.
|
||||
if (cmd instanceof CopyCommand) {
|
||||
@ -514,11 +516,11 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
|
||||
cmd.setContextParam("execid", String.valueOf(execLog.getId()));
|
||||
cmd.setContextParam("noderuninfo", String.format("%d-%d", _clusterMgr.getManagementNodeId(), _clusterMgr.getCurrentRunId()));
|
||||
cmd.setContextParam("vCenterSessionTimeout", String.valueOf(_vmwareMgr.getVcenterSessionTimeout()));
|
||||
cmd.setContextParam(VmwareManager.s_vmwareOVAPackageTimeout.key(), String.valueOf(VmwareManager.s_vmwareOVAPackageTimeout.value()));
|
||||
|
||||
if (cmd instanceof BackupSnapshotCommand || cmd instanceof CreatePrivateTemplateFromVolumeCommand ||
|
||||
cmd instanceof CreatePrivateTemplateFromSnapshotCommand || cmd instanceof CopyVolumeCommand || cmd instanceof CopyCommand ||
|
||||
cmd instanceof CreateVolumeOVACommand || cmd instanceof PrepareOVAPackingCommand || cmd instanceof CreateVolumeFromSnapshotCommand) {
|
||||
|
||||
String workerName = _vmwareMgr.composeWorkerName();
|
||||
long checkPointId = 1;
|
||||
// FIXME: Fix long checkPointId = _checkPointMgr.pushCheckPoint(new VmwareCleanupMaid(hostDetails.get("guid"), workerName));
|
||||
|
||||
@ -42,6 +42,9 @@ public interface VmwareManager {
|
||||
static final ConfigKey<String> s_vmwareSearchExcludeFolder = new ConfigKey<String>("Advanced", String.class, "vmware.search.exclude.folders", null,
|
||||
"Comma seperated list of Datastore Folders to exclude from VMWare search", true, ConfigKey.Scope.Global);
|
||||
|
||||
static final ConfigKey<Integer> s_vmwareOVAPackageTimeout = new ConfigKey<Integer>(Integer.class, "vmware.package.ova.timeout", "Advanced", "3600",
|
||||
"Vmware script timeout for ova packaging process", true, ConfigKey.Scope.Global, 1000);
|
||||
|
||||
String composeWorkerName();
|
||||
|
||||
String getSystemVMIsoFileNameOnDatastore();
|
||||
|
||||
@ -136,6 +136,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
|
||||
private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class);
|
||||
|
||||
private static final long SECONDS_PER_MINUTE = 60;
|
||||
|
||||
private int _timeout;
|
||||
|
||||
private String _instance;
|
||||
@ -204,7 +205,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
|
||||
private int _additionalPortRangeSize;
|
||||
private int _routerExtraPublicNics = 2;
|
||||
private int _vCenterSessionTimeout = 1200000; // Timeout in milliseconds
|
||||
|
||||
private String _rootDiskController = DiskControllerType.ide.toString();
|
||||
|
||||
private final String _dataDiskController = DiskControllerType.osdefault.toString();
|
||||
@ -229,9 +229,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
|
||||
|
||||
@Override
|
||||
public ConfigKey<?>[] getConfigKeys() {
|
||||
return new ConfigKey<?>[] {s_vmwareNicHotplugWaitTimeout, s_vmwareCleanOldWorderVMs, templateCleanupInterval, s_vmwareSearchExcludeFolder};
|
||||
return new ConfigKey<?>[] {s_vmwareNicHotplugWaitTimeout, s_vmwareCleanOldWorderVMs, templateCleanupInterval, s_vmwareSearchExcludeFolder, s_vmwareOVAPackageTimeout};
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
s_logger.info("Configure VmwareManagerImpl, manager name: " + name);
|
||||
|
||||
@ -51,7 +51,7 @@ public interface VmwareStorageManager {
|
||||
|
||||
boolean execute(VmwareHostService hostService, CreateEntityDownloadURLCommand cmd);
|
||||
|
||||
public void createOva(String path, String name);
|
||||
public void createOva(String path, String name, int archiveTimeout);
|
||||
|
||||
public String createOvaForTemplate(TemplateObjectTO template);
|
||||
public String createOvaForTemplate(TemplateObjectTO template, int archiveTimeout);
|
||||
}
|
||||
|
||||
@ -98,28 +98,32 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
@Override
|
||||
public boolean execute(VmwareHostService hostService, CreateEntityDownloadURLCommand cmd) {
|
||||
DataTO data = cmd.getData();
|
||||
int timeout = NumbersUtil.parseInt(cmd.getContextParam(VmwareManager.s_vmwareOVAPackageTimeout.key()),
|
||||
Integer.valueOf(VmwareManager.s_vmwareOVAPackageTimeout.defaultValue()) * VmwareManager.s_vmwareOVAPackageTimeout.multiplier());
|
||||
if (data == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
String newPath = null;
|
||||
if (data.getObjectType() == DataObjectType.VOLUME) {
|
||||
newPath = createOvaForVolume((VolumeObjectTO)data);
|
||||
newPath = createOvaForVolume((VolumeObjectTO)data, timeout);
|
||||
} else if (data.getObjectType() == DataObjectType.TEMPLATE) {
|
||||
newPath = createOvaForTemplate((TemplateObjectTO)data);
|
||||
newPath = createOvaForTemplate((TemplateObjectTO)data, timeout);
|
||||
}
|
||||
if (newPath != null) {
|
||||
cmd.setInstallPath(newPath);
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void createOva(String path, String name) {
|
||||
public void createOva(String path, String name, int archiveTimeout) {
|
||||
Script commandSync = new Script(true, "sync", 0, s_logger);
|
||||
commandSync.execute();
|
||||
|
||||
Script command = new Script(false, "tar", 0, s_logger);
|
||||
Script command = new Script(false, "tar", archiveTimeout, s_logger);
|
||||
command.setWorkDir(path);
|
||||
command.add("-cf", name + ".ova");
|
||||
command.add(name + ".ovf"); // OVF file should be the first file in OVA archive
|
||||
@ -155,7 +159,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
}
|
||||
|
||||
@Override
|
||||
public String createOvaForTemplate(TemplateObjectTO template) {
|
||||
public String createOvaForTemplate(TemplateObjectTO template, int archiveTimeout) {
|
||||
DataStoreTO storeTO = template.getDataStore();
|
||||
if (!(storeTO instanceof NfsTO)) {
|
||||
s_logger.debug("Can only handle NFS storage, while creating OVA from template");
|
||||
@ -173,7 +177,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
s_logger.debug("OVA file found at: " + installFullPath);
|
||||
} else {
|
||||
if (new File(installFullPath + ".meta").exists()) {
|
||||
createOVAFromMetafile(installFullPath + ".meta");
|
||||
createOVAFromMetafile(installFullPath + ".meta", archiveTimeout);
|
||||
} else {
|
||||
String msg = "Unable to find OVA or OVA MetaFile to prepare template.";
|
||||
s_logger.error(msg);
|
||||
@ -190,7 +194,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
|
||||
//Fang: new command added;
|
||||
// Important! we need to sync file system before we can safely use tar to work around a linux kernal bug(or feature)
|
||||
public String createOvaForVolume(VolumeObjectTO volume) {
|
||||
public String createOvaForVolume(VolumeObjectTO volume, int archiveTimeout) {
|
||||
DataStoreTO storeTO = volume.getDataStore();
|
||||
if (!(storeTO instanceof NfsTO)) {
|
||||
s_logger.debug("can only handle nfs storage, when create ova from volume");
|
||||
@ -215,15 +219,17 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
} else {
|
||||
Script commandSync = new Script(true, "sync", 0, s_logger);
|
||||
commandSync.execute();
|
||||
|
||||
Script command = new Script(false, "tar", 0, s_logger);
|
||||
Script command = new Script(false, "tar", archiveTimeout, s_logger);
|
||||
command.setWorkDir(installFullPath);
|
||||
command.add("-cf", volumeUuid + ".ova");
|
||||
command.add(volumeUuid + ".ovf"); // OVF file should be the first file in OVA archive
|
||||
command.add(volumeUuid + "-disk0.vmdk");
|
||||
|
||||
command.execute();
|
||||
return volumePath;
|
||||
String result = command.execute();
|
||||
if (result != Script.ERR_TIMEOUT) {
|
||||
return volumePath;
|
||||
}
|
||||
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
s_logger.info("Exception for createVolumeOVA");
|
||||
@ -1046,7 +1052,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
|
||||
// here we use a method to return the ovf and vmdk file names; Another way to do it:
|
||||
// create a new class, and like TemplateLocation.java and create templateOvfInfo.java to handle it;
|
||||
private String createOVAFromMetafile(String metafileName) throws Exception {
|
||||
private String createOVAFromMetafile(String metafileName, int archiveTimeout) throws Exception {
|
||||
File ova_metafile = new File(metafileName);
|
||||
Properties props = null;
|
||||
String ovaFileName = "";
|
||||
@ -1080,7 +1086,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
||||
s_logger.info("ova: " + ovaFileName + ", ovf:" + ovfFileName + ", vmdk:" + disks[0] + ".");
|
||||
Script commandSync = new Script(true, "sync", 0, s_logger);
|
||||
commandSync.execute();
|
||||
Script command = new Script(false, "tar", 0, s_logger);
|
||||
Script command = new Script(false, "tar", archiveTimeout, s_logger);
|
||||
command.setWorkDir(exportDir); // Fang: pass this in to the method?
|
||||
command.add("-cf", ovaFileName);
|
||||
command.add(ovfFileName); // OVF file should be the first file in OVA archive
|
||||
|
||||
@ -145,8 +145,11 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe
|
||||
}
|
||||
|
||||
protected Answer execute(CreateEntityDownloadURLCommand cmd) {
|
||||
_storageMgr.execute(this, cmd);
|
||||
return _resource.defaultAction(cmd);
|
||||
boolean success = _storageMgr.execute(this, cmd);
|
||||
if (success) {
|
||||
return _resource.defaultAction(cmd);
|
||||
}
|
||||
return new Answer(cmd, false, "Failed to download");
|
||||
}
|
||||
|
||||
private Answer execute(PrimaryStorageDownloadCommand cmd) {
|
||||
|
||||
@ -21,6 +21,8 @@ package com.cloud.storage.resource;
|
||||
import java.io.File;
|
||||
import java.util.EnumMap;
|
||||
|
||||
import com.cloud.hypervisor.vmware.manager.VmwareManager;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
@ -95,6 +97,8 @@ public class VmwareStorageSubsystemCommandHandler extends StorageSubsystemComman
|
||||
DataTO destData = cmd.getDestTO();
|
||||
DataStoreTO srcDataStore = srcData.getDataStore();
|
||||
DataStoreTO destDataStore = destData.getDataStore();
|
||||
int timeout = NumbersUtil.parseInt(cmd.getContextParam(VmwareManager.s_vmwareOVAPackageTimeout.key()),
|
||||
Integer.valueOf(VmwareManager.s_vmwareOVAPackageTimeout.defaultValue()) * VmwareManager.s_vmwareOVAPackageTimeout.multiplier());
|
||||
//if copied between s3 and nfs cache, go to resource
|
||||
boolean needDelegation = false;
|
||||
if (destDataStore instanceof NfsTO && destDataStore.getRole() == DataStoreRole.ImageCache) {
|
||||
@ -112,11 +116,11 @@ public class VmwareStorageSubsystemCommandHandler extends StorageSubsystemComman
|
||||
String path = vol.getPath();
|
||||
int index = path.lastIndexOf(File.separator);
|
||||
String name = path.substring(index + 1);
|
||||
storageManager.createOva(parentPath + File.separator + path, name);
|
||||
storageManager.createOva(parentPath + File.separator + path, name, timeout);
|
||||
vol.setPath(path + File.separator + name + ".ova");
|
||||
} else if (srcData.getObjectType() == DataObjectType.TEMPLATE) {
|
||||
// sync template from NFS cache to S3 in NFS migration to S3 case
|
||||
storageManager.createOvaForTemplate((TemplateObjectTO)srcData);
|
||||
storageManager.createOvaForTemplate((TemplateObjectTO)srcData, timeout);
|
||||
} else if (srcData.getObjectType() == DataObjectType.SNAPSHOT) {
|
||||
// pack ova first
|
||||
// sync snapshot from NFS cache to S3 in NFS migration to S3 case
|
||||
@ -126,7 +130,7 @@ public class VmwareStorageSubsystemCommandHandler extends StorageSubsystemComman
|
||||
int index = path.lastIndexOf(File.separator);
|
||||
String name = path.substring(index + 1);
|
||||
String snapDir = path.substring(0, index);
|
||||
storageManager.createOva(parentPath + File.separator + snapDir, name);
|
||||
storageManager.createOva(parentPath + File.separator + snapDir, name, timeout);
|
||||
if (destData.getObjectType() == DataObjectType.TEMPLATE) {
|
||||
//create template from snapshot on src at first, then copy it to s3
|
||||
TemplateObjectTO cacheTemplate = (TemplateObjectTO)destData;
|
||||
@ -169,7 +173,7 @@ public class VmwareStorageSubsystemCommandHandler extends StorageSubsystemComman
|
||||
int index = path.lastIndexOf(File.separator);
|
||||
String name = path.substring(index + 1);
|
||||
String dir = path.substring(0, index);
|
||||
storageManager.createOva(parentPath + File.separator + dir, name);
|
||||
storageManager.createOva(parentPath + File.separator + dir, name, timeout);
|
||||
newSnapshot.setPath(newSnapshot.getPath() + ".ova");
|
||||
newSnapshot.setDataStore(cmd.getCacheTO().getDataStore());
|
||||
CopyCommand newCmd = new CopyCommand(newSnapshot, destData, cmd.getWait(), cmd.executeInSequence());
|
||||
|
||||
@ -43,6 +43,7 @@ import com.cloud.storage.Snapshot.State;
|
||||
import com.cloud.storage.SnapshotVO;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateStoragePoolVO;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
@ -474,7 +475,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
|
||||
if (volumes != null) {
|
||||
for (VolumeVO volume : volumes) {
|
||||
usedIops += volume.getMinIops() != null ? volume.getMinIops() : 0;
|
||||
if (!Volume.State.Creating.equals(volume.getState())) {
|
||||
usedIops += volume.getMinIops() != null ? volume.getMinIops() : 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -214,7 +214,7 @@ public class AccountJoinDaoImpl extends GenericDaoBase<AccountJoinVO, Long> impl
|
||||
//get resource limits for secondary storage space and convert it from Bytes to GiB
|
||||
long secondaryStorageLimit = ApiDBUtils.findCorrectResourceLimit(account.getSecondaryStorageLimit(), account.getId(), ResourceType.secondary_storage);
|
||||
String secondaryStorageLimitDisplay = (fullView || secondaryStorageLimit == -1) ? "Unlimited" : String.valueOf(secondaryStorageLimit / ResourceType.bytesToGiB);
|
||||
long secondaryStorageTotal = (account.getSecondaryStorageTotal() == null) ? 0 : (account.getSecondaryStorageTotal() / ResourceType.bytesToGiB);
|
||||
float secondaryStorageTotal = (account.getSecondaryStorageTotal() == null) ? 0 : (account.getSecondaryStorageTotal() / (ResourceType.bytesToGiB * 1f));
|
||||
String secondaryStorageAvail = (fullView || secondaryStorageLimit == -1) ? "Unlimited" : String.valueOf((secondaryStorageLimit / ResourceType.bytesToGiB)
|
||||
- secondaryStorageTotal);
|
||||
|
||||
|
||||
@ -183,7 +183,7 @@ public class DomainJoinDaoImpl extends GenericDaoBase<DomainJoinVO, Long> implem
|
||||
//get resource limits for secondary storage space and convert it from Bytes to GiB
|
||||
long secondaryStorageLimit = ApiDBUtils.findCorrectResourceLimitForDomain(domain.getSecondaryStorageLimit(), ResourceType.secondary_storage, domain.getId());
|
||||
String secondaryStorageLimitDisplay = (fullView || secondaryStorageLimit == -1) ? "Unlimited" : String.valueOf(secondaryStorageLimit / ResourceType.bytesToGiB);
|
||||
long secondaryStorageTotal = (domain.getSecondaryStorageTotal() == null) ? 0 : (domain.getSecondaryStorageTotal() / ResourceType.bytesToGiB);
|
||||
float secondaryStorageTotal = (domain.getSecondaryStorageTotal() == null) ? 0 : (domain.getSecondaryStorageTotal() / (ResourceType.bytesToGiB * 1f));
|
||||
String secondaryStorageAvail = (fullView || secondaryStorageLimit == -1) ? "Unlimited" : String.valueOf((secondaryStorageLimit / ResourceType.bytesToGiB) - secondaryStorageTotal);
|
||||
response.setSecondaryStorageLimit(secondaryStorageLimitDisplay);
|
||||
response.setSecondaryStorageTotal(secondaryStorageTotal);
|
||||
|
||||
@ -1040,6 +1040,11 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
|
||||
for (Long clusterId : clusterList) {
|
||||
ClusterVO clusterVO = _clusterDao.findById(clusterId);
|
||||
|
||||
if (clusterVO.getAllocationState() == Grouping.AllocationState.Disabled) {
|
||||
s_logger.debug("Cannot deploy in disabled cluster " + clusterId + ", skipping this cluster");
|
||||
avoid.addCluster(clusterVO.getId());
|
||||
}
|
||||
|
||||
if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
|
||||
s_logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster");
|
||||
avoid.addCluster(clusterVO.getId());
|
||||
|
||||
@ -69,7 +69,6 @@ import com.cloud.projects.Project;
|
||||
import com.cloud.projects.ProjectAccount.Role;
|
||||
import com.cloud.projects.dao.ProjectAccountDao;
|
||||
import com.cloud.projects.dao.ProjectDao;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.SnapshotVO;
|
||||
@ -100,9 +99,6 @@ import com.cloud.utils.db.TransactionCallback;
|
||||
import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn;
|
||||
import com.cloud.utils.db.TransactionStatus;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
@ -947,51 +943,11 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
||||
}
|
||||
|
||||
public long countCpusForAccount(long accountId) {
|
||||
GenericSearchBuilder<ServiceOfferingVO, SumCount> cpuSearch = _serviceOfferingDao.createSearchBuilder(SumCount.class);
|
||||
cpuSearch.select("sum", Func.SUM, cpuSearch.entity().getCpu());
|
||||
SearchBuilder<UserVmVO> join1 = _userVmDao.createSearchBuilder();
|
||||
join1.and("accountId", join1.entity().getAccountId(), Op.EQ);
|
||||
join1.and("type", join1.entity().getType(), Op.EQ);
|
||||
join1.and("state", join1.entity().getState(), SearchCriteria.Op.NIN);
|
||||
join1.and("displayVm", join1.entity().isDisplayVm(), Op.EQ);
|
||||
cpuSearch.join("offerings", join1, cpuSearch.entity().getId(), join1.entity().getServiceOfferingId(), JoinBuilder.JoinType.INNER);
|
||||
cpuSearch.done();
|
||||
|
||||
SearchCriteria<SumCount> sc = cpuSearch.create();
|
||||
sc.setJoinParameters("offerings", "accountId", accountId);
|
||||
sc.setJoinParameters("offerings", "type", VirtualMachine.Type.User);
|
||||
sc.setJoinParameters("offerings", "state", new Object[] {State.Destroyed, State.Error, State.Expunging});
|
||||
sc.setJoinParameters("offerings", "displayVm", 1);
|
||||
List<SumCount> cpus = _serviceOfferingDao.customSearch(sc, null);
|
||||
if (cpus != null) {
|
||||
return cpus.get(0).sum;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
return _resourceCountDao.countCpuNumberAllocatedToAccount(accountId);
|
||||
}
|
||||
|
||||
public long calculateMemoryForAccount(long accountId) {
|
||||
GenericSearchBuilder<ServiceOfferingVO, SumCount> memorySearch = _serviceOfferingDao.createSearchBuilder(SumCount.class);
|
||||
memorySearch.select("sum", Func.SUM, memorySearch.entity().getRamSize());
|
||||
SearchBuilder<UserVmVO> join1 = _userVmDao.createSearchBuilder();
|
||||
join1.and("accountId", join1.entity().getAccountId(), Op.EQ);
|
||||
join1.and("type", join1.entity().getType(), Op.EQ);
|
||||
join1.and("state", join1.entity().getState(), SearchCriteria.Op.NIN);
|
||||
join1.and("displayVm", join1.entity().isDisplayVm(), Op.EQ);
|
||||
memorySearch.join("offerings", join1, memorySearch.entity().getId(), join1.entity().getServiceOfferingId(), JoinBuilder.JoinType.INNER);
|
||||
memorySearch.done();
|
||||
|
||||
SearchCriteria<SumCount> sc = memorySearch.create();
|
||||
sc.setJoinParameters("offerings", "accountId", accountId);
|
||||
sc.setJoinParameters("offerings", "type", VirtualMachine.Type.User);
|
||||
sc.setJoinParameters("offerings", "state", new Object[] {State.Destroyed, State.Error, State.Expunging});
|
||||
sc.setJoinParameters("offerings", "displayVm", 1);
|
||||
List<SumCount> memory = _serviceOfferingDao.customSearch(sc, null);
|
||||
if (memory != null) {
|
||||
return memory.get(0).sum;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
return _resourceCountDao.countMemoryAllocatedToAccount(accountId);
|
||||
}
|
||||
|
||||
public long calculateSecondaryStorageForAccount(long accountId) {
|
||||
|
||||
@ -887,10 +887,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
s_logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state");
|
||||
throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id);
|
||||
}
|
||||
if (sPool.isLocal()) {
|
||||
s_logger.warn("Unable to delete local storage id:" + id);
|
||||
throw new InvalidParameterValueException("Unable to delete local storage id: " + id);
|
||||
}
|
||||
|
||||
Pair<Long, Long> vlms = _volsDao.getCountAndTotalByPool(id);
|
||||
if (forced) {
|
||||
@ -1728,6 +1724,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
}
|
||||
|
||||
private boolean checkUsagedSpace(StoragePool pool) {
|
||||
// Managed storage does not currently deal with accounting for physically used space (only provisioned space). Just return true if "pool" is managed.
|
||||
if (pool.isManaged()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
StatsCollector sc = StatsCollector.getInstance();
|
||||
double storageUsedThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(pool.getDataCenterId());
|
||||
if (sc != null) {
|
||||
@ -1806,6 +1807,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
if(s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Destination pool id: " + pool.getId());
|
||||
}
|
||||
|
||||
StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
|
||||
long allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null);
|
||||
long totalAskingSize = 0;
|
||||
@ -1833,70 +1835,114 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
||||
allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
|
||||
}
|
||||
}
|
||||
// A ready state volume is already allocated in a pool. so the asking size is zero for it.
|
||||
// In case the volume is moving across pools or is not ready yet, the asking size has to be computed
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("pool id for the volume with id: " + volumeVO.getId() + " is " + volumeVO.getPoolId());
|
||||
s_logger.debug("Pool ID for the volume with ID " + volumeVO.getId() + " is " + volumeVO.getPoolId());
|
||||
}
|
||||
|
||||
// A ready-state volume is already allocated in a pool, so the asking size is zero for it.
|
||||
// In case the volume is moving across pools or is not ready yet, the asking size has to be computed.
|
||||
if ((volumeVO.getState() != Volume.State.Ready) || (volumeVO.getPoolId() != pool.getId())) {
|
||||
if (ScopeType.ZONE.equals(poolVO.getScope()) && volumeVO.getTemplateId() != null) {
|
||||
VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId());
totalAskingSize += getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeVO, poolVO);

if (tmpl != null && !ImageFormat.ISO.equals(tmpl.getFormat())) {
// Storage plug-ins for zone-wide primary storage can be designed in such a way as to store a template on the
// primary storage once and make use of it in different clusters (via cloning).
// This next call leads to CloudStack asking how many more bytes it will need for the template (if the template is
// already stored on the primary storage, then the answer is 0).

if (clusterId != null && _clusterDao.getSupportsResigning(clusterId)) {
totalAskingSize += getBytesRequiredForTemplate(tmpl, pool);
}
}
}
totalAskingSize += getAskingSizeForTemplateBasedOnClusterAndStoragePool(volumeVO.getTemplateId(), clusterId, poolVO);
}
}

long totalOverProvCapacity;

if (pool.getPoolType().supportsOverProvisioning()) {
BigDecimal overProvFactor = getStorageOverProvisioningFactor(pool.getId());

totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();
s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor "
+ overProvFactor.toString());
s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes());

s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with over-provisioning factor " +
overProvFactor.toString());
s_logger.debug("Total over-provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes());
} else {
totalOverProvCapacity = pool.getCapacityBytes();

s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString());
}

s_logger.debug("Total capacity of the pool " + poolVO.getName() + " id: " + pool.getId() + " is " + totalOverProvCapacity);
s_logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + totalOverProvCapacity);

double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId());

if (s_logger.isDebugEnabled()) {
s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity +
", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " +
storageAllocatedThreshold);
s_logger.debug("Checking pool with ID " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize: " +
totalOverProvCapacity + ", totalAllocatedSize: " + allocatedSizeWithTemplate + ", askingSize: " + totalAskingSize +
", allocated disable threshold: " + storageAllocatedThreshold);
}

double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity);

if (usedPercentage > storageAllocatedThreshold) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() +
" since its allocated percentage: " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " +
s_logger.debug("Insufficient un-allocated capacity on the pool with ID " + pool.getId() + " for volume allocation: " + volumes.toString() +
" since its allocated percentage " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold " +
storageAllocatedThreshold + ", skipping this pool");
}

return false;
}

if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() +
", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " +
s_logger.debug("Insufficient un-allocated capacity on the pool with ID " + pool.getId() + " for volume allocation: " + volumes.toString() +
"; not enough storage, maxSize: " + totalOverProvCapacity + ", totalAllocatedSize: " + allocatedSizeWithTemplate + ", askingSize: " +
totalAskingSize);
}

return false;
}

return true;
}
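For orientation, the check above compares the would-be allocation against the over-provisioned pool capacity and the allocated-capacity disable threshold. A minimal, self-contained sketch with made-up numbers (none of these values come from this change; the threshold corresponds to pool.storage.allocated.capacity.disablethreshold):

// Purely illustrative example of the threshold check; all values are invented.
public class AllocatedThresholdExample {
    public static void main(String[] args) {
        long capacityBytes = 1_000_000_000_000L;              // 1 TB raw pool capacity (assumed)
        double overProvFactor = 2.0;                          // assumed storage over-provisioning factor
        long totalOverProvCapacity = (long) (overProvFactor * capacityBytes); // 2 TB allocatable
        long allocatedSizeWithTemplate = 1_500_000_000_000L;  // already allocated (assumed)
        long totalAskingSize = 300_000_000_000L;              // requested by the new volume(s) (assumed)
        double storageAllocatedThreshold = 0.85;              // assumed threshold value
        double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double) totalOverProvCapacity;
        // usedPercentage is 0.9 here, which exceeds 0.85, so the allocator would skip this pool,
        // mirroring the early "return false" in the method above.
        System.out.println(usedPercentage + " > " + storageAllocatedThreshold + " = " + (usedPercentage > storageAllocatedThreshold));
    }
}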
/**
* Storage plug-ins for managed storage can be designed in such a way as to store a template on the primary storage once and
* make use of it via storage-side cloning.
*
* This method determines how many more bytes it will need for the template (if the template is already stored on the primary storage,
* then the answer is 0).
*/
private long getAskingSizeForTemplateBasedOnClusterAndStoragePool(Long templateId, Long clusterId, StoragePoolVO storagePoolVO) {
if (templateId == null || clusterId == null || storagePoolVO == null || !storagePoolVO.isManaged()) {
return 0;
}

VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(templateId);

if (tmpl == null || ImageFormat.ISO.equals(tmpl.getFormat())) {
return 0;
}

HypervisorType hypervisorType = tmpl.getHypervisorType();

// The getSupportsResigning method is applicable for XenServer as a UUID-resigning patch may or may not be installed on those hypervisor hosts.
if (_clusterDao.getSupportsResigning(clusterId) || HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
return getBytesRequiredForTemplate(tmpl, storagePoolVO);
}

return 0;
}

private long getDataObjectSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();

if (storeDriver instanceof PrimaryDataStoreDriver) {
PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;

VolumeInfo volumeInfo = volFactory.getVolume(volume.getId());

return primaryStoreDriver.getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, pool);
}

return volume.getSize();
}

private DiskOfferingVO getDiskOfferingVO(Volume volume) {
Long diskOfferingId = volume.getDiskOfferingId();

@@ -1915,21 +1961,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return null;
}

private long getDataObjectSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();

if (storeDriver instanceof PrimaryDataStoreDriver) {
PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;

VolumeInfo volumeInfo = volFactory.getVolume(volume.getId());

return primaryStoreDriver.getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, pool);
}

return volume.getSize();
}

private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();

@@ -367,6 +367,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
dataObject.getDataStore().getRole().toString());
command.setLocalPath(volumeStore.getLocalDownloadPath());
//using the existing max upload size configuration
command.setProcessTimeout(NumbersUtil.parseLong(_configDao.getValue("vmware.package.ova.timeout"), 3600));
command.setMaxUploadSize(_configDao.getValue(Config.MaxUploadVolumeSize.key()));
command.setDefaultMaxAccountSecondaryStorage(_configDao.getValue(Config.DefaultMaxAccountSecondaryStorage.key()));
command.setAccountId(vol.getAccountId());

File diff suppressed because it is too large
@@ -18,6 +18,7 @@ package org.apache.cloudstack.acl;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

import javax.inject.Inject;
@@ -37,11 +38,15 @@ import org.apache.cloudstack.api.command.admin.acl.UpdateRolePermissionCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;

import com.cloud.event.ActionEvent;
import com.cloud.event.EventTypes;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.dao.AccountDao;
import com.cloud.utils.ListUtils;
import com.cloud.utils.component.ManagerBase;
@@ -52,18 +57,23 @@ import com.cloud.utils.db.TransactionStatus;
import com.google.common.base.Strings;

public class RoleManagerImpl extends ManagerBase implements RoleService, Configurable, PluggableService {

private Logger logger = Logger.getLogger(getClass());

@Inject
private AccountDao accountDao;
@Inject
private RoleDao roleDao;
@Inject
private RolePermissionsDao rolePermissionsDao;
@Inject
private AccountManager accountManager;

private void checkCallerAccess() {
if (!isEnabled()) {
throw new PermissionDeniedException("Dynamic api checker is not enabled, aborting role operation");
}
Account caller = CallContext.current().getCallingAccount();
Account caller = getCurrentAccount();
if (caller == null || caller.getRoleId() == null) {
throw new PermissionDeniedException("Restricted API called by an invalid user account");
}
@@ -79,11 +89,30 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu
}

@Override
public Role findRole(final Long id) {
public Role findRole(Long id) {
if (id == null || id < 1L) {
logger.trace(String.format("Role ID is invalid [%s]", id));
return null;
}
return roleDao.findById(id);
RoleVO role = roleDao.findById(id);
if (role == null) {
logger.trace(String.format("Role not found [id=%s]", id));
return null;
}
Account account = getCurrentAccount();
if (!accountManager.isRootAdmin(account.getId()) && RoleType.Admin == role.getRoleType()) {
logger.debug(String.format("Role [id=%s, name=%s] is of 'Admin' type and is only visible to 'Root admins'.", id, role.getName()));
return null;
}
return role;
}

/**
* Simple call to {@link CallContext#current()} to retrieve the current calling account.
* This method facilitates unit testing, it avoids mocking static methods.
*/
protected Account getCurrentAccount() {
return CallContext.current().getCallingAccount();
}
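The getCurrentAccount() hook above exists so tests can substitute the calling account without mocking the static CallContext.current(). The new RoleManagerImplTest further down in this change stubs it on a Mockito spy; a trimmed sketch of that setup, assuming the same test dependencies:

import org.junit.Before;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.runners.MockitoJUnitRunner;
import com.cloud.user.Account;

@RunWith(MockitoJUnitRunner.class)
public class RoleManagerImplTestSketch {
    @Spy
    @InjectMocks
    private RoleManagerImpl roleManagerImpl;
    @Mock
    private Account accountMock;

    @Before
    public void beforeTest() {
        // The spy overrides the instance method, so no static mocking of CallContext is needed.
        Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
    }
}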
@Override
@@ -125,7 +154,7 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu
if (roleType != null && roleType == RoleType.Unknown) {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unknown is not a valid role type");
}
RoleVO roleVO = (RoleVO) role;
RoleVO roleVO = (RoleVO)role;
if (!Strings.isNullOrEmpty(name)) {
roleVO.setName(name);
}
@@ -214,26 +243,55 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu
}

@Override
public List<Role> findRolesByName(final String name) {
public List<Role> findRolesByName(String name) {
List<? extends Role> roles = null;
if (!Strings.isNullOrEmpty(name)) {
if (StringUtils.isNotBlank(name)) {
roles = roleDao.findAllByName(name);
}
removeRootAdminRolesIfNeeded(roles);
return ListUtils.toListOfInterface(roles);
}

@Override
public List<Role> findRolesByType(final RoleType roleType) {
List<? extends Role> roles = null;
if (roleType != null) {
roles = roleDao.findAllByRoleType(roleType);
/**
* Removes roles of the given list that have the type '{@link RoleType#Admin}' if the user calling the method is not a 'root admin'.
* The actual removal is executed via {@link #removeRootAdminRoles(List)}. Therefore, if the method is called by a 'root admin', we do nothing here.
*/
protected void removeRootAdminRolesIfNeeded(List<? extends Role> roles) {
Account account = getCurrentAccount();
if (!accountManager.isRootAdmin(account.getId())) {
removeRootAdminRoles(roles);
}
}

/**
* Remove all roles that have the {@link RoleType#Admin}.
*/
protected void removeRootAdminRoles(List<? extends Role> roles) {
if (CollectionUtils.isEmpty(roles)) {
return;
}
Iterator<? extends Role> rolesIterator = roles.iterator();
while (rolesIterator.hasNext()) {
Role role = rolesIterator.next();
if (RoleType.Admin == role.getRoleType()) {
rolesIterator.remove();
}
}
}

@Override
public List<Role> findRolesByType(RoleType roleType) {
if (roleType == null || RoleType.Admin == roleType && !accountManager.isRootAdmin(getCurrentAccount().getId())) {
return Collections.emptyList();
}
List<? extends Role> roles = roleDao.findAllByRoleType(roleType);
return ListUtils.toListOfInterface(roles);
}

@Override
public List<Role> listRoles() {
List<? extends Role> roles = roleDao.listAll();
removeRootAdminRolesIfNeeded(roles);
return ListUtils.toListOfInterface(roles);
}

@@ -253,7 +311,7 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu

@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[]{RoleService.EnableDynamicApiChecker};
return new ConfigKey<?>[] {RoleService.EnableDynamicApiChecker};
}

@Override
@@ -269,4 +327,4 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu
cmdList.add(DeleteRolePermissionCmd.class);
return cmdList;
}
}
}
server/test/com/cloud/vm/UserVmManagerImplTest.java (new file, 224 lines)
@@ -0,0 +1,224 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.vm;

import java.util.ArrayList;
import java.util.HashMap;

import org.apache.cloudstack.api.BaseCmd.HTTPMethod;
import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd;
import org.apache.cloudstack.context.CallContext;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.BDDMockito;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.storage.GuestOSVO;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.uservm.UserVm;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;

@RunWith(PowerMockRunner.class)
public class UserVmManagerImplTest {

@Spy
@InjectMocks
private UserVmManagerImpl userVmManagerImpl = new UserVmManagerImpl();

@Mock
private GuestOSDao guestOSDao;

@Mock
private UserVmDao userVmDao;

@Mock
private UpdateVMCmd updateVmCommand;

@Mock
private AccountManager accountManager;

@Mock
private UserVmDetailsDao userVmDetailVO;

@Mock
private UserVmVO userVmVoMock;

private long vmId = 1l;

@Before
public void beforeTest() {
Mockito.when(updateVmCommand.getId()).thenReturn(vmId);
Mockito.when(userVmDao.findById(Mockito.eq(vmId))).thenReturn(userVmVoMock);
}

@Test
public void validateGuestOsIdForUpdateVirtualMachineCommandTestOsTypeNull() {
Mockito.when(updateVmCommand.getOsTypeId()).thenReturn(null);
userVmManagerImpl.validateGuestOsIdForUpdateVirtualMachineCommand(updateVmCommand);
}

@Test(expected = InvalidParameterValueException.class)
public void validateGuestOsIdForUpdateVirtualMachineCommandTestOsTypeNotFound() {
Mockito.when(updateVmCommand.getOsTypeId()).thenReturn(1l);
userVmManagerImpl.validateGuestOsIdForUpdateVirtualMachineCommand(updateVmCommand);
}

@Test
public void validateGuestOsIdForUpdateVirtualMachineCommandTestOsTypeFound() {
Mockito.when(updateVmCommand.getOsTypeId()).thenReturn(1l);
Mockito.when(guestOSDao.findById(Mockito.eq(1l))).thenReturn(Mockito.mock(GuestOSVO.class));
userVmManagerImpl.validateGuestOsIdForUpdateVirtualMachineCommand(updateVmCommand);
}

@Test(expected = InvalidParameterValueException.class)
public void validateInputsAndPermissionForUpdateVirtualMachineCommandTestVmNotFound() {
Mockito.when(userVmDao.findById(Mockito.eq(vmId))).thenReturn(null);
userVmManagerImpl.validateInputsAndPermissionForUpdateVirtualMachineCommand(updateVmCommand);
}

@Test
@PrepareForTest(CallContext.class)
public void validateInputsAndPermissionForUpdateVirtualMachineCommandTest() {
Mockito.doNothing().when(userVmManagerImpl).validateGuestOsIdForUpdateVirtualMachineCommand(updateVmCommand);

Account accountMock = Mockito.mock(Account.class);
CallContext callContextMock = Mockito.mock(CallContext.class);

PowerMockito.mockStatic(CallContext.class);
BDDMockito.given(CallContext.current()).willReturn(callContextMock);
Mockito.when(callContextMock.getCallingAccount()).thenReturn(accountMock);

Mockito.doNothing().when(accountManager).checkAccess(accountMock, null, true, userVmVoMock);
userVmManagerImpl.validateInputsAndPermissionForUpdateVirtualMachineCommand(updateVmCommand);

Mockito.verify(userVmManagerImpl).validateGuestOsIdForUpdateVirtualMachineCommand(updateVmCommand);
Mockito.verify(accountManager).checkAccess(accountMock, null, true, userVmVoMock);
}

@Test
public void updateVirtualMachineTestDisplayChanged() throws ResourceUnavailableException, InsufficientCapacityException {
configureDoNothingForMethodsThatWeDoNotWantToTest();

Mockito.when(userVmVoMock.isDisplay()).thenReturn(true);
Mockito.doNothing().when(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);

userVmManagerImpl.updateVirtualMachine(updateVmCommand);
verifyMethodsThatAreAlwaysExecuted();

Mockito.verify(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);
Mockito.verify(userVmDetailVO, Mockito.times(0)).removeDetails(vmId);
}

@Test
public void updateVirtualMachineTestCleanUpTrue() throws ResourceUnavailableException, InsufficientCapacityException {
configureDoNothingForMethodsThatWeDoNotWantToTest();

Mockito.when(updateVmCommand.isCleanupDetails()).thenReturn(true);

Mockito.doNothing().when(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);
Mockito.doNothing().when(userVmDetailVO).removeDetails(vmId);

userVmManagerImpl.updateVirtualMachine(updateVmCommand);
verifyMethodsThatAreAlwaysExecuted();
Mockito.verify(userVmDetailVO).removeDetails(vmId);
Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock);
}

@Test
public void updateVirtualMachineTestCleanUpTrueAndDetailEmpty() throws ResourceUnavailableException, InsufficientCapacityException {
prepareAndExecuteMethodDealingWithDetails(true, true);
}

@Test
public void updateVirtualMachineTestCleanUpTrueAndDetailsNotEmpty() throws ResourceUnavailableException, InsufficientCapacityException {
prepareAndExecuteMethodDealingWithDetails(true, false);
}

@Test
public void updateVirtualMachineTestCleanUpFalseAndDetailsNotEmpty() throws ResourceUnavailableException, InsufficientCapacityException {
prepareAndExecuteMethodDealingWithDetails(false, true);
}

@Test
public void updateVirtualMachineTestCleanUpFalseAndDetailsEmpty() throws ResourceUnavailableException, InsufficientCapacityException {
prepareAndExecuteMethodDealingWithDetails(false, false);
}

private void prepareAndExecuteMethodDealingWithDetails(boolean cleanUpDetails, boolean isDetailsEmpty) throws ResourceUnavailableException, InsufficientCapacityException {
configureDoNothingForMethodsThatWeDoNotWantToTest();

HashMap<String, String> details = new HashMap<>();
if(!isDetailsEmpty) {
details.put("", "");
}
Mockito.when(updateVmCommand.getDetails()).thenReturn(details);
Mockito.when(updateVmCommand.isCleanupDetails()).thenReturn(cleanUpDetails);

configureDoNothingForDetailsMethod();

userVmManagerImpl.updateVirtualMachine(updateVmCommand);
verifyMethodsThatAreAlwaysExecuted();

Mockito.verify(userVmVoMock, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).setDetails(details);
Mockito.verify(userVmDetailVO, Mockito.times(cleanUpDetails ? 1: 0)).removeDetails(vmId);
Mockito.verify(userVmDao, Mockito.times(cleanUpDetails || isDetailsEmpty ? 0 : 1)).saveDetails(userVmVoMock);
Mockito.verify(userVmManagerImpl, Mockito.times(0)).updateDisplayVmFlag(false, vmId, userVmVoMock);
}

private void configureDoNothingForDetailsMethod() {
Mockito.doNothing().when(userVmManagerImpl).updateDisplayVmFlag(false, vmId, userVmVoMock);
Mockito.doNothing().when(userVmDetailVO).removeDetails(vmId);
Mockito.doNothing().when(userVmDao).saveDetails(userVmVoMock);
}

@SuppressWarnings("unchecked")
private void verifyMethodsThatAreAlwaysExecuted() throws ResourceUnavailableException, InsufficientCapacityException {
Mockito.verify(userVmManagerImpl).validateInputsAndPermissionForUpdateVirtualMachineCommand(updateVmCommand);
Mockito.verify(userVmManagerImpl).getSecurityGroupIdList(updateVmCommand);
Mockito.verify(userVmManagerImpl).updateVirtualMachine(Mockito.anyLong(), Mockito.anyString(), Mockito.anyString(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyLong(),
Mockito.anyString(), Mockito.anyBoolean(), Mockito.any(HTTPMethod.class), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyListOf(Long.class),
Mockito.anyMap());
}

@SuppressWarnings("unchecked")
private void configureDoNothingForMethodsThatWeDoNotWantToTest() throws ResourceUnavailableException, InsufficientCapacityException {
Mockito.doNothing().when(userVmManagerImpl).validateInputsAndPermissionForUpdateVirtualMachineCommand(updateVmCommand);
Mockito.doReturn(new ArrayList<Long>()).when(userVmManagerImpl).getSecurityGroupIdList(updateVmCommand);
Mockito.doReturn(Mockito.mock(UserVm.class)).when(userVmManagerImpl).updateVirtualMachine(Mockito.anyLong(), Mockito.anyString(), Mockito.anyString(), Mockito.anyBoolean(),
Mockito.anyBoolean(), Mockito.anyLong(),
Mockito.anyString(), Mockito.anyBoolean(), Mockito.any(HTTPMethod.class), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyListOf(Long.class),
Mockito.anyMap());
}
}
@@ -16,6 +16,7 @@
// under the License.
package com.cloud.vm;

import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -44,17 +45,6 @@ import java.util.List;
import java.util.Map;
import java.util.UUID;

import com.cloud.dc.VlanVO;
import com.cloud.dc.dao.VlanDao;
import com.cloud.network.dao.IPAddressVO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;

import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.BaseCmd;
@@ -71,12 +61,23 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.runners.MockitoJUnitRunner;

import com.cloud.capacity.CapacityManager;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.dc.DataCenter.NetworkType;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.VlanVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.VlanDao;
import com.cloud.deploy.DeployDestination;
import com.cloud.event.dao.UsageEventDao;
import com.cloud.exception.ConcurrentOperationException;
@@ -93,6 +94,7 @@ import com.cloud.network.Network.GuestType;
import com.cloud.network.Network.Service;
import com.cloud.network.NetworkModel;
import com.cloud.network.dao.IPAddressDao;
import com.cloud.network.dao.IPAddressVO;
import com.cloud.network.dao.NetworkDao;
import com.cloud.network.dao.NetworkVO;
import com.cloud.network.element.UserDataServiceProvider;
@@ -127,138 +129,107 @@ import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;

@RunWith(MockitoJUnitRunner.class)
public class UserVmManagerTest {

@Spy
UserVmManagerImpl _userVmMgr = new UserVmManagerImpl();
@InjectMocks
private UserVmManagerImpl _userVmMgr;
@Mock
VirtualMachineManager _itMgr;
private VirtualMachineManager _itMgr;
@Mock
VolumeOrchestrationService _storageMgr;
private VolumeOrchestrationService _storageMgr;
@Mock
Account _account;
private Account _account;
@Mock
AccountManager _accountMgr;
private AccountManager _accountMgr;
@Mock
AccountService _accountService;
private AccountService _accountService;
@Mock
ConfigurationManager _configMgr;
private ConfigurationManager _configMgr;
@Mock
CapacityManager _capacityMgr;
private CapacityManager _capacityMgr;
@Mock
AccountDao _accountDao;
private AccountDao _accountDao;
@Mock
ConfigurationDao _configDao;
private ConfigurationDao _configDao;
@Mock
UserDao _userDao;
private UserDao _userDao;
@Mock
UserVmDao _vmDao;
private UserVmDao _vmDao;
@Mock
VMInstanceDao _vmInstanceDao;
private VMInstanceDao _vmInstanceDao;
@Mock
VMTemplateDao _templateDao;
private VMTemplateDao _templateDao;
@Mock
TemplateDataStoreDao _templateStoreDao;
private TemplateDataStoreDao _templateStoreDao;
@Mock
VolumeDao _volsDao;
private VolumeDao _volsDao;
@Mock
RestoreVMCmd _restoreVMCmd;
private RestoreVMCmd _restoreVMCmd;
@Mock
AccountVO _accountMock;
private AccountVO _accountMock;
@Mock
UserVO _userMock;
private UserVO _userMock;
@Mock
UserVmVO _vmMock;
private UserVmVO _vmMock;
@Mock
VMInstanceVO _vmInstance;
private VMInstanceVO _vmInstance;
@Mock
VMTemplateVO _templateMock;
private VMTemplateVO _templateMock;
@Mock
TemplateDataStoreVO _templateDataStoreMock;
private TemplateDataStoreVO _templateDataStoreMock;
@Mock
VolumeVO _volumeMock;
private VolumeVO _volumeMock;
@Mock
List<VolumeVO> _rootVols;
private List<VolumeVO> _rootVols;
@Mock
Account _accountMock2;
private Account _accountMock2;
@Mock
ServiceOfferingDao _offeringDao;
private ServiceOfferingDao _offeringDao;
@Mock
ServiceOfferingVO _offeringVo;
private ServiceOfferingVO _offeringVo;
@Mock
EntityManager _entityMgr;
private EntityManager _entityMgr;
@Mock
ResourceLimitService _resourceLimitMgr;
private ResourceLimitService _resourceLimitMgr;
@Mock
PrimaryDataStoreDao _storagePoolDao;
private PrimaryDataStoreDao _storagePoolDao;
@Mock
UsageEventDao _usageEventDao;
private UsageEventDao _usageEventDao;
@Mock
VMSnapshotDao _vmSnapshotDao;
private VMSnapshotDao _vmSnapshotDao;
@Mock
UpdateVmNicIpCmd _updateVmNicIpCmd;
private UpdateVmNicIpCmd _updateVmNicIpCmd;
@Mock
NicDao _nicDao;
private NicDao _nicDao;
@Mock
VlanDao _vlanDao;
private VlanDao _vlanDao;
@Mock
NicVO _nicMock;
private NicVO _nicMock;
@Mock
NetworkModel _networkModel;
private NetworkModel _networkModel;
@Mock
NetworkDao _networkDao;
private NetworkDao _networkDao;
@Mock
NetworkVO _networkMock;
private NetworkVO _networkMock;
@Mock
DataCenterDao _dcDao;
private DataCenterDao _dcDao;
@Mock
DataCenterVO _dcMock;
private DataCenterVO _dcMock;
@Mock
IpAddressManager _ipAddrMgr;
private IpAddressManager _ipAddrMgr;
@Mock
IPAddressDao _ipAddressDao;
private IPAddressDao _ipAddressDao;
@Mock
NetworkOfferingDao _networkOfferingDao;
private NetworkOfferingDao _networkOfferingDao;
@Mock
NetworkOfferingVO _networkOfferingMock;
private NetworkOfferingVO _networkOfferingMock;
@Mock
NetworkOrchestrationService _networkMgr;
private NetworkOrchestrationService _networkMgr;

@Before
public void setup() {
MockitoAnnotations.initMocks(this);

_userVmMgr._vmDao = _vmDao;
_userVmMgr._vmInstanceDao = _vmInstanceDao;
_userVmMgr._templateDao = _templateDao;
_userVmMgr._templateStoreDao = _templateStoreDao;
_userVmMgr._volsDao = _volsDao;
_userVmMgr._usageEventDao = _usageEventDao;
_userVmMgr._itMgr = _itMgr;
_userVmMgr.volumeMgr = _storageMgr;
_userVmMgr._accountDao = _accountDao;
_userVmMgr._accountService = _accountService;
_userVmMgr._userDao = _userDao;
_userVmMgr._accountMgr = _accountMgr;
_userVmMgr._configMgr = _configMgr;
_userVmMgr._offeringDao = _offeringDao;
_userVmMgr._capacityMgr = _capacityMgr;
_userVmMgr._resourceLimitMgr = _resourceLimitMgr;
_userVmMgr._scaleRetry = 2;
_userVmMgr._entityMgr = _entityMgr;
_userVmMgr._storagePoolDao = _storagePoolDao;
_userVmMgr._vmSnapshotDao = _vmSnapshotDao;
_userVmMgr._configDao = _configDao;
_userVmMgr._nicDao = _nicDao;
_userVmMgr._vlanDao = _vlanDao;
_userVmMgr._networkModel = _networkModel;
_userVmMgr._networkDao = _networkDao;
_userVmMgr._dcDao = _dcDao;
_userVmMgr._ipAddrMgr = _ipAddrMgr;
_userVmMgr._ipAddressDao = _ipAddressDao;
_userVmMgr._networkOfferingDao = _networkOfferingDao;
_userVmMgr._networkMgr = _networkMgr;

doReturn(3L).when(_account).getId();
doReturn(8L).when(_vmMock).getAccountId();
when(_accountDao.findById(anyLong())).thenReturn(_accountMock);
@@ -267,43 +238,40 @@ public class UserVmManagerTest {
when(_vmMock.getId()).thenReturn(314L);
when(_vmInstance.getId()).thenReturn(1L);
when(_vmInstance.getServiceOfferingId()).thenReturn(2L);
List<VMSnapshotVO> mockList = mock(List.class);

List<VMSnapshotVO> mockList = new ArrayList<>();
when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(mockList);
when(mockList.size()).thenReturn(0);
when(_templateStoreDao.findByTemplateZoneReady(anyLong(),anyLong())).thenReturn(_templateDataStoreMock);
when(_templateStoreDao.findByTemplateZoneReady(anyLong(), anyLong())).thenReturn(_templateDataStoreMock);
}

@Test
public void testValidateRootDiskResize()
{
public void testValidateRootDiskResize() {
HypervisorType hypervisorType = HypervisorType.Any;
Long rootDiskSize = Long.valueOf(10);
UserVmVO vm = Mockito.mock(UserVmVO.class);
UserVmVO vm = Mockito.mock(UserVmVO.class);
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
Map<String, String> customParameters = new HashMap<String, String>();
Map<String, String> vmDetals = new HashMap<String, String>();

vmDetals.put("rootDiskController","ide");
vmDetals.put("rootDiskController", "ide");
when(vm.getDetails()).thenReturn(vmDetals);
when(templateVO.getSize()).thenReturn((rootDiskSize<<30)+1);
when(templateVO.getSize()).thenReturn((rootDiskSize << 30) + 1);
//Case 1: >
try{
try {
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
Assert.fail("Function should throw InvalidParameterValueException");
}catch(Exception e){
} catch (Exception e) {
assertThat(e, instanceOf(InvalidParameterValueException.class));
}

//Case 2: =
when(templateVO.getSize()).thenReturn((rootDiskSize<<30));
customParameters.put("rootdisksize","10");
when(templateVO.getSize()).thenReturn((rootDiskSize << 30));
customParameters.put("rootdisksize", "10");
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
assert(!customParameters.containsKey("rootdisksize"));
assert (!customParameters.containsKey("rootdisksize"));

when(templateVO.getSize()).thenReturn((rootDiskSize<<30)-1);
when(templateVO.getSize()).thenReturn((rootDiskSize << 30) - 1);

//Case 3: <
@@ -315,12 +283,12 @@ public class UserVmManagerTest {
try {
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
Assert.fail("Function should throw InvalidParameterValueException");
}catch(Exception e) {
} catch (Exception e) {
assertThat(e, instanceOf(InvalidParameterValueException.class));
}

//Case 3.3: 1->(rootDiskController==scsi)
vmDetals.put("rootDiskController","scsi");
vmDetals.put("rootDiskController", "scsi");
_userVmMgr.validateRootDiskResize(hypervisorType, rootDiskSize, templateVO, vm, customParameters);
}

@@ -344,8 +312,7 @@ public class UserVmManagerTest {

// Test restoreVm when VM is in stopped state
@Test
public void testRestoreVMF2() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
public void testRestoreVMF2() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException {

doReturn(VirtualMachine.State.Stopped).when(_vmMock).getState();
when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
@@ -381,8 +348,7 @@ public class UserVmManagerTest {

// Test restoreVM when VM is in running state
@Test
public void testRestoreVMF3() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
public void testRestoreVMF3() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException {

doReturn(VirtualMachine.State.Running).when(_vmMock).getState();
when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
@@ -418,8 +384,7 @@ public class UserVmManagerTest {

// Test restoreVM on providing new template Id, when VM is in running state
@Test
public void testRestoreVMF4() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
public void testRestoreVMF4() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException {
doReturn(VirtualMachine.State.Running).when(_vmMock).getState();
when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
when(_volsDao.findByInstanceAndType(314L, Volume.Type.ROOT)).thenReturn(_rootVols);
@@ -438,9 +403,9 @@ public class UserVmManagerTest {
doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong());
when(_volumeMock.getId()).thenReturn(3L);
doNothing().when(_volsDao).detachVolume(anyLong());
List<VMSnapshotVO> mockList = mock(List.class);

List<VMSnapshotVO> mockList = new ArrayList<>();
when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(mockList);
when(mockList.size()).thenReturn(0);
when(_templateMock.getUuid()).thenReturn("b1a3626e-72e0-4697-8c7c-a110940cc55d");

Account account = new AccountVO("testaccount", 1L, "networkdomain", (short)0, "uuid");
@@ -458,13 +423,12 @@ public class UserVmManagerTest {
} finally {
CallContext.unregister();
}

verify(_vmMock, times(0)).setIsoId(Mockito.anyLong());
}

// Test restoreVM on providing new ISO Id, when VM(deployed using ISO) is in running state
@Test
public void testRestoreVMF5() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException,
ResourceAllocationException {
public void testRestoreVMF5() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException {
doReturn(VirtualMachine.State.Running).when(_vmMock).getState();
when(_vmDao.findById(anyLong())).thenReturn(_vmMock);
when(_volsDao.findByInstanceAndType(314L, Volume.Type.ROOT)).thenReturn(_rootVols);
@@ -485,9 +449,9 @@ public class UserVmManagerTest {
doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong());
when(_volumeMock.getId()).thenReturn(3L);
doNothing().when(_volsDao).detachVolume(anyLong());
List<VMSnapshotVO> mockList = mock(List.class);
List<VMSnapshotVO> mockList = new ArrayList<>();
when(_vmSnapshotDao.findByVm(anyLong())).thenReturn(mockList);
when(mockList.size()).thenReturn(0);

when(_templateMock.getUuid()).thenReturn("b1a3626e-72e0-4697-8c7c-a110940cc55d");

Account account = new AccountVO("testaccount", 1L, "networkdomain", (short)0, "uuid");
@@ -512,7 +476,7 @@ public class UserVmManagerTest {

// Test scaleVm on incompatible HV.
@Test(expected = InvalidParameterValueException.class)
public void testScaleVMF1() throws Exception {
public void testScaleVMF1() throws Exception {

ScaleVMCmd cmd = new ScaleVMCmd();
Class<?> _class = cmd.getClass();
@@ -527,7 +491,7 @@ public class UserVmManagerTest {

when(_vmInstanceDao.findById(anyLong())).thenReturn(_vmInstance);

// UserContext.current().setEventDetails("Vm Id: "+getId());
// UserContext.current().setEventDetails("Vm Id: "+getId());
Account account = new AccountVO("testaccount", 1L, "networkdomain", (short)0, "uuid");
UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN);
//AccountVO(String accountName, long domainId, String networkDomain, short type, int regionId)
@@ -544,7 +508,7 @@ public class UserVmManagerTest {

// Test scaleVm on equal service offerings.
@Test(expected = InvalidParameterValueException.class)
public void testScaleVMF2() throws Exception {
public void testScaleVMF2() throws Exception {

ScaleVMCmd cmd = new ScaleVMCmd();
Class<?> _class = cmd.getClass();
@@ -566,9 +530,7 @@ public class UserVmManagerTest {

doNothing().when(_itMgr).checkIfCanUpgrade(_vmMock, _offeringVo);

ServiceOffering so1 = getSvcoffering(512);
ServiceOffering so2 = getSvcoffering(256);

ServiceOffering so1 = getSvcoffering(512);
when(_offeringDao.findById(anyLong())).thenReturn((ServiceOfferingVO)so1);
when(_offeringDao.findByIdIncludingRemoved(anyLong(), anyLong())).thenReturn((ServiceOfferingVO)so1);

@@ -585,7 +547,7 @@ public class UserVmManagerTest {

// Test scaleVm for Stopped vm.
//@Test(expected=InvalidParameterValueException.class)
public void testScaleVMF3() throws Exception {
public void testScaleVMF3() throws Exception {

ScaleVMCmd cmd = new ScaleVMCmd();
Class<?> _class = cmd.getClass();
@@ -601,8 +563,8 @@ public class UserVmManagerTest {
when(_vmInstanceDao.findById(anyLong())).thenReturn(_vmInstance);
doReturn(Hypervisor.HypervisorType.XenServer).when(_vmInstance).getHypervisorType();

ServiceOffering so1 = getSvcoffering(512);
ServiceOffering so2 = getSvcoffering(256);
ServiceOffering so1 = getSvcoffering(512);
ServiceOffering so2 = getSvcoffering(256);

when(_entityMgr.findById(eq(ServiceOffering.class), anyLong())).thenReturn(so2);
when(_entityMgr.findById(ServiceOffering.class, 1L)).thenReturn(so1);
@@ -626,7 +588,7 @@ public class UserVmManagerTest {
}

// Test scaleVm for Running vm. Full positive test.
public void testScaleVMF4() throws Exception {
public void testScaleVMF4() throws Exception {

ScaleVMCmd cmd = new ScaleVMCmd();
Class<?> _class = cmd.getClass();
@@ -647,8 +609,8 @@ public class UserVmManagerTest {
when(_vmInstanceDao.findById(anyLong())).thenReturn(_vmInstance);
doReturn(Hypervisor.HypervisorType.XenServer).when(_vmInstance).getHypervisorType();

ServiceOffering so1 = getSvcoffering(512);
ServiceOffering so2 = getSvcoffering(256);
ServiceOffering so1 = getSvcoffering(512);
ServiceOffering so2 = getSvcoffering(256);

when(_entityMgr.findById(eq(ServiceOffering.class), anyLong())).thenReturn(so2);
when(_entityMgr.findById(ServiceOffering.class, 1L)).thenReturn(so1);
@@ -656,7 +618,7 @@ public class UserVmManagerTest {
doReturn(VirtualMachine.State.Running).when(_vmInstance).getState();

//when(ApiDBUtils.getCpuOverprovisioningFactor()).thenReturn(3f);
when(_capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyLong(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false);
when(_capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyLong(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false);
when(_itMgr.reConfigureVm(_vmInstance.getUuid(), so1, false)).thenReturn(_vmInstance);

doReturn(true).when(_itMgr).upgradeVmDb(anyLong(), anyLong());
@@ -675,8 +637,6 @@ public class UserVmManagerTest {
}

private ServiceOfferingVO getSvcoffering(int ramSize) {

long id = 4L;
String name = "name";
String displayText = "displayText";
int cpu = 1;
@@ -686,15 +646,14 @@ public class UserVmManagerTest {
boolean ha = false;
boolean useLocalStorage = false;

ServiceOfferingVO serviceOffering =
new ServiceOfferingVO(name, cpu, ramSize, speed, null, null, ha, displayText, Storage.ProvisioningType.THIN,
useLocalStorage, false, null, false, null, false);
ServiceOfferingVO serviceOffering = new ServiceOfferingVO(name, cpu, ramSize, speed, null, null, ha, displayText, Storage.ProvisioningType.THIN, useLocalStorage, false, null, false, null,
false);
return serviceOffering;
}

// Test Move VM b/w accounts where caller is not ROOT/Domain admin
@Test(expected = InvalidParameterValueException.class)
public void testMoveVmToUser1() throws Exception {
public void testMoveVmToUser1() throws Exception {
AssignVMCmd cmd = new AssignVMCmd();
Class<?> _class = cmd.getClass();

@@ -717,7 +676,7 @@ public class UserVmManagerTest {
CallContext.register(user, caller);
try {

_userVmMgr.moveVMToUser(cmd);
_userVmMgr.moveVMToUser(cmd);
} finally {
CallContext.unregister();
}
@@ -725,7 +684,7 @@ public class UserVmManagerTest {

// Test Move VM b/w accounts where caller doesn't have access to the old or new account
@Test(expected = PermissionDeniedException.class)
public void testMoveVmToUser2() throws Exception {
public void testMoveVmToUser2() throws Exception {
AssignVMCmd cmd = new AssignVMCmd();
Class<?> _class = cmd.getClass();

@@ -756,8 +715,7 @@ public class UserVmManagerTest {

when(_accountMgr.finalizeOwner(any(Account.class), anyString(), anyLong(), anyLong())).thenReturn(newAccount);

doThrow(new PermissionDeniedException("Access check failed")).when(_accountMgr).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class),
any(ControlledEntity.class));
doThrow(new PermissionDeniedException("Access check failed")).when(_accountMgr).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class));

CallContext.register(user, caller);

@@ -805,7 +763,8 @@ public class UserVmManagerTest {
when(_dcMock.getNetworkType()).thenReturn(NetworkType.Advanced);

when(_ipAddrMgr.allocateGuestIP(Mockito.eq(_networkMock), anyString())).thenReturn("10.10.10.10");
doNothing().when(_networkMgr).implementNetworkElementsAndResources(Mockito.any(DeployDestination.class), Mockito.any(ReservationContext.class), Mockito.eq(_networkMock), Mockito.eq(_networkOfferingMock));
doNothing().when(_networkMgr).implementNetworkElementsAndResources(Mockito.any(DeployDestination.class), Mockito.any(ReservationContext.class), Mockito.eq(_networkMock),
Mockito.eq(_networkOfferingMock));
when(_nicDao.persist(any(NicVO.class))).thenReturn(nic);

Account caller = new AccountVO("testaccount", 1, "networkdomain", (short)0, UUID.randomUUID().toString());
server/test/org/apache/cloudstack/acl/RoleManagerImplTest.java (new file, 288 lines)
@ -0,0 +1,288 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.acl;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.cloudstack.acl.dao.RoleDao;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.runners.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class RoleManagerImplTest {
|
||||
|
||||
@Spy
|
||||
@InjectMocks
|
||||
private RoleManagerImpl roleManagerImpl;
|
||||
@Mock
|
||||
private AccountManager accountManagerMock;
|
||||
@Mock
|
||||
private RoleDao roleDaoMock;
|
||||
|
||||
@Mock
|
||||
private Account accountMock;
|
||||
private long accountMockId = 100l;
|
||||
|
||||
@Mock
|
||||
private RoleVO roleVoMock;
|
||||
private long roleMockId = 1l;
|
||||
|
||||
@Before
|
||||
public void beforeTest() {
|
||||
Mockito.doReturn(accountMockId).when(accountMock).getId();
|
||||
Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
|
||||
|
||||
Mockito.doReturn(roleMockId).when(roleVoMock).getId();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestIdNull() {
|
||||
Role returnedRole = roleManagerImpl.findRole(null);
|
||||
Assert.assertNull(returnedRole);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestIdZero() {
|
||||
Role returnedRole = roleManagerImpl.findRole(0l);
|
||||
Assert.assertNull(returnedRole);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestIdNegative() {
|
||||
Role returnedRole = roleManagerImpl.findRole(-1l);
|
||||
Assert.assertNull(returnedRole);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestRoleNotFound() {
|
||||
Mockito.doReturn(null).when(roleDaoMock).findById(roleMockId);
|
||||
Role returnedRole = roleManagerImpl.findRole(roleMockId);
|
||||
Assert.assertNull(returnedRole);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestNotRootAdminAndNotRoleAdminType() {
|
||||
Mockito.doReturn(RoleType.DomainAdmin).when(roleVoMock).getRoleType();
|
||||
Mockito.doReturn(roleVoMock).when(roleDaoMock).findById(roleMockId);
|
||||
Mockito.doReturn(false).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
Role returnedRole = roleManagerImpl.findRole(roleMockId);
|
||||
|
||||
Assert.assertEquals(roleMockId, returnedRole.getId());
|
||||
Mockito.verify(accountManagerMock).isRootAdmin(accountMockId);
|
||||
Mockito.verify(roleVoMock, Mockito.times(1)).getRoleType();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestRootAdminAndNotRoleAdminType() {
|
||||
Mockito.doReturn(RoleType.DomainAdmin).when(roleVoMock).getRoleType();
|
||||
Mockito.doReturn(roleVoMock).when(roleDaoMock).findById(roleMockId);
|
||||
Mockito.doReturn(true).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
Role returnedRole = roleManagerImpl.findRole(roleMockId);
|
||||
|
||||
Assert.assertEquals(roleMockId, returnedRole.getId());
|
||||
Mockito.verify(accountManagerMock).isRootAdmin(accountMockId);
|
||||
Mockito.verify(roleVoMock, Mockito.times(0)).getRoleType();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestRootAdminAndRoleAdminType() {
|
||||
Mockito.doReturn(RoleType.Admin).when(roleVoMock).getRoleType();
|
||||
Mockito.doReturn(roleVoMock).when(roleDaoMock).findById(roleMockId);
|
||||
Mockito.doReturn(true).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
Role returnedRole = roleManagerImpl.findRole(roleMockId);
|
||||
|
||||
Assert.assertEquals(roleMockId, returnedRole.getId());
|
||||
Mockito.verify(accountManagerMock).isRootAdmin(accountMockId);
|
||||
Mockito.verify(roleVoMock, Mockito.times(0)).getRoleType();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRoleTestNotRootAdminAndRoleAdminType() {
|
||||
Mockito.doReturn(RoleType.Admin).when(roleVoMock).getRoleType();
|
||||
Mockito.doReturn(roleVoMock).when(roleDaoMock).findById(roleMockId);
|
||||
Mockito.doReturn(false).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
Role returnedRole = roleManagerImpl.findRole(roleMockId);
|
||||
|
||||
Assert.assertNull(returnedRole);
|
||||
Mockito.verify(accountManagerMock).isRootAdmin(accountMockId);
|
||||
Mockito.verify(roleVoMock, Mockito.times(1)).getRoleType();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByNameTestNullRoleName() {
|
||||
List<Role> rolesFound = roleManagerImpl.findRolesByName(null);
|
||||
|
||||
Assert.assertTrue(CollectionUtils.isEmpty(rolesFound));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByNameTestEmptyRoleName() {
|
||||
List<Role> rolesFound = roleManagerImpl.findRolesByName("");
|
||||
|
||||
Assert.assertTrue(CollectionUtils.isEmpty(rolesFound));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByNameTestBlankRoleName() {
|
||||
List<Role> rolesFound = roleManagerImpl.findRolesByName(" ");
|
||||
|
||||
Assert.assertTrue(CollectionUtils.isEmpty(rolesFound));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByNameTest() {
|
||||
String roleName = "roleName";
|
||||
ArrayList<Role> toBeReturned = new ArrayList<>();
|
||||
Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByName(roleName);
|
||||
|
||||
roleManagerImpl.findRolesByName(roleName);
|
||||
|
||||
Mockito.verify(roleManagerImpl).removeRootAdminRolesIfNeeded(toBeReturned);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeRootAdminRolesIfNeededTestRootAdmin() {
|
||||
Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
|
||||
Mockito.doReturn(true).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
List<Role> roles = new ArrayList<>();
|
||||
roleManagerImpl.removeRootAdminRolesIfNeeded(roles);
|
||||
|
||||
Mockito.verify(roleManagerImpl, Mockito.times(0)).removeRootAdminRoles(roles);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeRootAdminRolesIfNeededTestNonRootAdminUser() {
|
||||
Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
|
||||
Mockito.doReturn(false).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
List<Role> roles = new ArrayList<>();
|
||||
roleManagerImpl.removeRootAdminRolesIfNeeded(roles);
|
||||
|
||||
Mockito.verify(roleManagerImpl, Mockito.times(1)).removeRootAdminRoles(roles);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void removeRootAdminRolesTest() {
|
||||
List<Role> roles = new ArrayList<>();
|
||||
Role roleRootAdmin = Mockito.mock(Role.class);
|
||||
Mockito.doReturn(RoleType.Admin).when(roleRootAdmin).getRoleType();
|
||||
|
||||
Role roleDomainAdmin = Mockito.mock(Role.class);
|
||||
Mockito.doReturn(RoleType.DomainAdmin).when(roleDomainAdmin).getRoleType();
|
||||
|
||||
Role roleResourceAdmin = Mockito.mock(Role.class);
|
||||
Mockito.doReturn(RoleType.ResourceAdmin).when(roleResourceAdmin).getRoleType();
|
||||
|
||||
Role roleUser = Mockito.mock(Role.class);
|
||||
Mockito.doReturn(RoleType.User).when(roleUser).getRoleType();
|
||||
|
||||
roles.add(roleRootAdmin);
|
||||
roles.add(roleDomainAdmin);
|
||||
roles.add(roleResourceAdmin);
|
||||
roles.add(roleUser);
|
||||
|
||||
roleManagerImpl.removeRootAdminRoles(roles);
|
||||
|
||||
Assert.assertEquals(3, roles.size());
|
||||
Assert.assertEquals(roleDomainAdmin, roles.get(0));
|
||||
Assert.assertEquals(roleResourceAdmin, roles.get(1));
|
||||
Assert.assertEquals(roleUser, roles.get(2));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByTypeTestNullRoleType() {
|
||||
List<Role> returnedRoles = roleManagerImpl.findRolesByType(null);
|
||||
|
||||
Assert.assertEquals(0, returnedRoles.size());
|
||||
Mockito.verify(accountManagerMock, Mockito.times(0)).isRootAdmin(Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByTypeTestAdminRoleNonRootAdminUser() {
|
||||
Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
|
||||
Mockito.doReturn(false).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
List<Role> returnedRoles = roleManagerImpl.findRolesByType(RoleType.Admin);
|
||||
|
||||
Assert.assertEquals(0, returnedRoles.size());
|
||||
Mockito.verify(accountManagerMock, Mockito.times(1)).isRootAdmin(Mockito.anyLong());
|
||||
Mockito.verify(roleDaoMock, Mockito.times(0)).findAllByRoleType(Mockito.any(RoleType.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByTypeTestAdminRoleRootAdminUser() {
|
||||
Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
|
||||
Mockito.doReturn(true).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
List<Role> roles = new ArrayList<>();
|
||||
roles.add(Mockito.mock(Role.class));
|
||||
Mockito.doReturn(roles).when(roleDaoMock).findAllByRoleType(RoleType.Admin);
|
||||
List<Role> returnedRoles = roleManagerImpl.findRolesByType(RoleType.Admin);
|
||||
|
||||
Assert.assertEquals(1, returnedRoles.size());
|
||||
Mockito.verify(accountManagerMock, Mockito.times(1)).isRootAdmin(Mockito.anyLong());
|
||||
Mockito.verify(roleDaoMock, Mockito.times(1)).findAllByRoleType(Mockito.any(RoleType.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void findRolesByTypeTestNonAdminRoleRootAdminUser() {
|
||||
Mockito.doReturn(accountMock).when(roleManagerImpl).getCurrentAccount();
|
||||
Mockito.doReturn(true).when(accountManagerMock).isRootAdmin(accountMockId);
|
||||
|
||||
List<Role> roles = new ArrayList<>();
|
||||
roles.add(Mockito.mock(Role.class));
|
||||
Mockito.doReturn(roles).when(roleDaoMock).findAllByRoleType(RoleType.User);
|
||||
List<Role> returnedRoles = roleManagerImpl.findRolesByType(RoleType.User);
|
||||
|
||||
Assert.assertEquals(1, returnedRoles.size());
|
||||
Mockito.verify(accountManagerMock, Mockito.times(0)).isRootAdmin(Mockito.anyLong());
|
||||
Mockito.verify(roleDaoMock, Mockito.times(1)).findAllByRoleType(Mockito.any(RoleType.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void listRolesTest() {
|
||||
List<Role> roles = new ArrayList<>();
|
||||
roles.add(Mockito.mock(Role.class));
|
||||
|
||||
Mockito.doReturn(roles).when(roleDaoMock).listAll();
|
||||
Mockito.doNothing().when(roleManagerImpl).removeRootAdminRolesIfNeeded(roles);
|
||||
|
||||
List<Role> returnedRoles = roleManagerImpl.listRoles();
|
||||
|
||||
Assert.assertEquals(roles.size(), returnedRoles.size());
|
||||
Mockito.verify(roleDaoMock).listAll();
|
||||
Mockito.verify(roleManagerImpl).removeRootAdminRolesIfNeeded(roles);
|
||||
}
|
||||
}
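Taken together, the assertions above pin down the contract of removeRootAdminRoles: entries whose role type is Admin are dropped and the relative order of the remaining roles is preserved. Below is a minimal sketch of a filter that would satisfy these tests, reusing the Role, RoleType and CollectionUtils types already referenced by the test class and assuming java.util.Iterator is imported; the actual implementation in RoleManagerImpl may differ.

    protected void removeRootAdminRoles(List<Role> roles) {
        if (CollectionUtils.isEmpty(roles)) {
            return;                                   // nothing to filter
        }
        Iterator<Role> iterator = roles.iterator();
        while (iterator.hasNext()) {
            // only RoleType.Admin (root admin) is hidden; DomainAdmin, ResourceAdmin and User remain
            if (iterator.next().getRoleType() == RoleType.Admin) {
                iterator.remove();
            }
        }
    }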
|
||||
@ -87,6 +87,8 @@ public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObj
|
||||
|
||||
private static final String HEADER_HOST = "X-Forwarded-Host";
|
||||
|
||||
private static long processTimeout;
|
||||
|
||||
public HttpUploadServerHandler(NfsSecondaryStorageResource storageResource) {
|
||||
this.storageResource = storageResource;
|
||||
}
|
||||
@ -152,7 +154,6 @@ public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObj
|
||||
Map<String, List<String>> uriAttributes = decoderQuery.parameters();
|
||||
uuid = uriAttributes.get("uuid").get(0);
|
||||
logger.info("URI: uuid=" + uuid);
|
||||
|
||||
UploadEntity uploadEntity = null;
|
||||
try {
|
||||
// Validate the request here
|
||||
@ -175,6 +176,7 @@ public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObj
|
||||
}
|
||||
//set the base directory to download the file
|
||||
DiskFileUpload.baseDirectory = uploadEntity.getInstallPathPrefix();
|
||||
this.processTimeout = uploadEntity.getProcessTimeout();
|
||||
logger.info("base directory: " + DiskFileUpload.baseDirectory);
|
||||
try {
|
||||
//initialize the decoder
|
||||
@ -243,7 +245,7 @@ public class HttpUploadServerHandler extends SimpleChannelInboundHandler<HttpObj
|
||||
storageResource.updateStateMapWithError(uuid, errorString);
|
||||
return HttpResponseStatus.BAD_REQUEST;
|
||||
}
|
||||
String status = storageResource.postUpload(uuid, fileUpload.getFile().getName());
|
||||
String status = storageResource.postUpload(uuid, fileUpload.getFile().getName(), processTimeout);
|
||||
if (status != null) {
|
||||
responseContent.append(status);
|
||||
storageResource.updateStateMapWithError(uuid, status);
|
||||
|
||||
@ -251,6 +251,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
||||
protected String createTemplateFromSnapshotXenScript;
|
||||
private HashMap<String, UploadEntity> uploadEntityStateMap = new HashMap<String, UploadEntity>();
|
||||
private String _ssvmPSK = null;
|
||||
private long processTimeout;
|
||||
|
||||
public void setParentPath(String path) {
|
||||
_parent = path;
|
||||
@ -3350,6 +3351,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
||||
throw new InvalidParameterValueException(errorMessage);
|
||||
} else {
|
||||
uuid = cmd.getEntityUUID();
|
||||
processTimeout = cmd.getProcessTimeout();
|
||||
if (isOneTimePostUrlUsed(cmd)) {
|
||||
uploadEntity = uploadEntityStateMap.get(uuid);
|
||||
StringBuilder errorMessage = new StringBuilder("The one time post url is already used");
|
||||
@ -3371,6 +3373,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
||||
uploadEntity = new UploadEntity(uuid, cmd.getEntityId(), UploadEntity.Status.IN_PROGRESS, cmd.getName(), absolutePath);
|
||||
uploadEntity.setMetaDataPopulated(true);
|
||||
uploadEntity.setResourceType(UploadEntity.ResourceType.valueOf(cmd.getType()));
|
||||
uploadEntity.setProcessTimeout(processTimeout);
|
||||
uploadEntity.setFormat(Storage.ImageFormat.valueOf(cmd.getImageFormat()));
|
||||
//relative path without ssvm mount info.
|
||||
uploadEntity.setTemplatePath(absolutePath);
|
||||
@ -3452,7 +3455,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
||||
return (int)Math.ceil(sizeInBytes * 1.0d / (1024 * 1024 * 1024));
|
||||
}
|
||||
|
||||
public String postUpload(String uuid, String filename) {
|
||||
public String postUpload(String uuid, String filename, long processTimeout) {
|
||||
UploadEntity uploadEntity = uploadEntityStateMap.get(uuid);
|
||||
int installTimeoutPerGig = 180 * 60 * 1000;
|
||||
|
||||
@ -3557,7 +3560,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
||||
for (Processor processor : processors.values()) {
|
||||
FormatInfo info = null;
|
||||
try {
|
||||
info = processor.process(resourcePath, null, templateName);
|
||||
info = processor.process(resourcePath, null, templateName, processTimeout * 1000);
|
||||
} catch (InternalErrorException e) {
|
||||
s_logger.error("Template process exception ", e);
|
||||
return e.toString();
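The hunks above extend postUpload with the per-upload process timeout and forward it, converted to milliseconds, into Processor.process. If any caller still relied on the old two-argument signature, a thin delegating overload would keep it compiling; this is only a sketch under that assumption, and DEFAULT_PROCESS_TIMEOUT_SECS is a hypothetical constant that is not part of the patch.

    private static final long DEFAULT_PROCESS_TIMEOUT_SECS = 3600;   // hypothetical fallback value

    public String postUpload(String uuid, String filename) {
        // delegate to the new signature with a default timeout (sketch only)
        return postUpload(uuid, filename, DEFAULT_PROCESS_TIMEOUT_SECS);
    }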
|
||||
|
||||
@ -90,7 +90,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
|
||||
private String _name;
|
||||
StorageLayer _storage;
|
||||
public Map<String, Processor> _processors;
|
||||
|
||||
private long _processTimeout;
|
||||
private Integer _nfsVersion;
|
||||
|
||||
public class Completion implements DownloadCompleteCallback {
|
||||
@ -459,7 +459,7 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
|
||||
|
||||
FormatInfo info = null;
|
||||
try {
|
||||
info = processor.process(resourcePath, null, templateName);
|
||||
info = processor.process(resourcePath, null, templateName, this._processTimeout);
|
||||
} catch (InternalErrorException e) {
|
||||
s_logger.error("Template process exception ", e);
|
||||
return e.toString();
|
||||
@ -677,6 +677,8 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
|
||||
|
||||
@Override
|
||||
public DownloadAnswer handleDownloadCommand(SecondaryStorageResource resource, DownloadCommand cmd) {
|
||||
int timeout = NumbersUtil.parseInt(cmd.getContextParam("vmware.package.ova.timeout"), 3600000);
|
||||
this._processTimeout = timeout;
|
||||
ResourceType resourceType = cmd.getResourceType();
|
||||
if (cmd instanceof DownloadProgressCommand) {
|
||||
return handleDownloadProgressCmd(resource, (DownloadProgressCommand)cmd);
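One detail worth noting across the two paths: postUpload multiplies processTimeout by 1000 before calling Processor.process, so the upload timeout is evidently carried in seconds, while handleDownloadCommand above reads vmware.package.ova.timeout with a default of 3600000 and passes it through unchanged, i.e. already in milliseconds. Making that conversion explicit would look roughly like the sketch below, assuming java.util.concurrent.TimeUnit is imported.

    long uploadTimeoutMillis = TimeUnit.SECONDS.toMillis(processTimeout);   // e.g. 3600 seconds -> 3600000 ms
    long downloadTimeoutMillis = this._processTimeout;                      // already in milliseconds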
|
||||
|
||||
@ -34,6 +34,7 @@ public class UploadEntity {
|
||||
private int maxSizeInGB;
|
||||
private String description;
|
||||
private long contentLength;
|
||||
private long processTimeout;
|
||||
|
||||
public static enum ResourceType {
|
||||
VOLUME, TEMPLATE
|
||||
@ -60,6 +61,14 @@ public class UploadEntity {
|
||||
this.entityId=entityId;
|
||||
}
|
||||
|
||||
public void setProcessTimeout(long processTimeout) {
|
||||
this.processTimeout = processTimeout;
|
||||
}
|
||||
|
||||
public long getProcessTimeout() {
|
||||
return processTimeout;
|
||||
}
|
||||
|
||||
public UploadEntity(){
|
||||
|
||||
}
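The new accessors are trivial but cheap to cover; a hypothetical round-trip test in the style of the role-manager tests earlier in this change (not part of the patch) could look like this:

    @Test
    public void processTimeoutRoundTrip() {
        UploadEntity entity = new UploadEntity();
        entity.setProcessTimeout(3600L);
        Assert.assertEquals(3600L, entity.getProcessTimeout());
    }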
|
||||
|
||||
test/integration/plugins/solidfire/TestCapacityManagement.py (new file, 334 lines)
@ -0,0 +1,334 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import random
|
||||
import SignedAPICall
|
||||
import XenAPI
|
||||
|
||||
from solidfire.factory import ElementFactory
|
||||
|
||||
from util import sf_util
|
||||
|
||||
# All tests inherit from cloudstackTestCase
|
||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||
|
||||
# Import Integration Libraries
|
||||
|
||||
# base - contains all resources as entities and defines create, delete, list operations on them
|
||||
from marvin.lib.base import Account, ServiceOffering, StoragePool, User, VirtualMachine
|
||||
|
||||
# common - commonly used methods for all tests are listed here
|
||||
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts
|
||||
|
||||
# utils - utility classes for common cleanup, external library wrappers, etc.
|
||||
from marvin.lib.utils import cleanup_resources
|
||||
|
||||
# Prerequisites:
|
||||
# Only one zone
|
||||
# Only one pod
|
||||
# Only one cluster
|
||||
#
|
||||
# Running the tests:
|
||||
# If using XenServer, verify the "xen_server_hostname" variable is correct.
|
||||
#
|
||||
# Note:
|
||||
# If you do have more than one cluster, you might need to change this line: cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
|
||||
|
||||
class TestData():
|
||||
# constants
|
||||
account = "account"
|
||||
capacityBytes = "capacitybytes"
|
||||
capacityIops = "capacityiops"
|
||||
clusterId = "clusterId"
|
||||
computeOffering = "computeoffering"
|
||||
computeOffering2 = "computeoffering2"
|
||||
domainId = "domainId"
|
||||
email = "email"
|
||||
firstname = "firstname"
|
||||
hypervisor = "hypervisor"
|
||||
lastname = "lastname"
|
||||
mvip = "mvip"
|
||||
name = "name"
|
||||
password = "password"
|
||||
port = "port"
|
||||
primaryStorage = "primarystorage"
|
||||
primaryStorage2 = "primarystorage2"
|
||||
provider = "provider"
|
||||
scope = "scope"
|
||||
solidFire = "solidfire"
|
||||
storageTag = "SolidFire_SAN_1"
|
||||
storageTag2 = "SolidFire_SAN_2"
|
||||
tags = "tags"
|
||||
url = "url"
|
||||
user = "user"
|
||||
username = "username"
|
||||
xenServer = "xenserver"
|
||||
zoneId = "zoneId"
|
||||
|
||||
hypervisor_type = xenServer
|
||||
xen_server_hostname = "XenServer-6.5-1"
|
||||
|
||||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.xenServer: {
|
||||
TestData.username: "root",
|
||||
TestData.password: "solidfire"
|
||||
},
|
||||
TestData.account: {
|
||||
TestData.email: "test@test.com",
|
||||
TestData.firstname: "John",
|
||||
TestData.lastname: "Doe",
|
||||
TestData.username: "test",
|
||||
TestData.password: "test"
|
||||
},
|
||||
TestData.user: {
|
||||
TestData.email: "user@test.com",
|
||||
TestData.firstname: "Jane",
|
||||
TestData.lastname: "Doe",
|
||||
TestData.username: "testuser",
|
||||
TestData.password: "password"
|
||||
},
|
||||
TestData.primaryStorage: {
|
||||
TestData.name: "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
TestData.provider: "SolidFire",
|
||||
TestData.tags: TestData.storageTag,
|
||||
TestData.capacityIops: 100000,
|
||||
TestData.capacityBytes: 214748364800, # 200 GiB
|
||||
TestData.hypervisor: "Any"
|
||||
},
|
||||
TestData.primaryStorage2: {
|
||||
TestData.name: "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
TestData.provider: "SolidFire",
|
||||
TestData.tags: TestData.storageTag2,
|
||||
TestData.capacityIops: 800,
|
||||
TestData.capacityBytes: 2251799813685248, # 2 PiB
|
||||
TestData.hypervisor: "Any"
|
||||
},
|
||||
TestData.computeOffering: {
|
||||
TestData.name: "SF_CO_1",
|
||||
"displaytext": "SF_CO_1 (Min IOPS = 300; Max IOPS = 600)",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100,
|
||||
"memory": 128,
|
||||
"storagetype": "shared",
|
||||
"customizediops": False,
|
||||
"miniops": "300",
|
||||
"maxiops": "600",
|
||||
"hypervisorsnapshotreserve": 200,
|
||||
TestData.tags: TestData.storageTag
|
||||
},
|
||||
TestData.computeOffering2: {
|
||||
TestData.name: "SF_CO_2",
|
||||
"displaytext": "SF_CO_2 (Min IOPS = 300; Max IOPS = 600)",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100,
|
||||
"memory": 128,
|
||||
"storagetype": "shared",
|
||||
"customizediops": False,
|
||||
"miniops": "300",
|
||||
"maxiops": "600",
|
||||
"hypervisorsnapshotreserve": 200,
|
||||
TestData.tags: TestData.storageTag2
|
||||
},
|
||||
TestData.zoneId: 1,
|
||||
TestData.clusterId: 1,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
|
||||
class TestCapacityManagement(cloudstackTestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
# Set up API client
|
||||
testclient = super(TestCapacityManagement, cls).getClsTestClient()
|
||||
|
||||
cls.apiClient = testclient.getApiClient()
|
||||
cls.configData = testclient.getParsedTestDataConfig()
|
||||
cls.dbConnection = testclient.getDbConnection()
|
||||
|
||||
cls.testdata = TestData().testdata
|
||||
|
||||
sf_util.set_supports_resign(True, cls.dbConnection)
|
||||
|
||||
cls._connect_to_hypervisor()
|
||||
|
||||
# Set up SolidFire connection
|
||||
solidfire = cls.testdata[TestData.solidFire]
|
||||
|
||||
cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
|
||||
|
||||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||
cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
|
||||
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||
|
||||
# Create test account
|
||||
cls.account = Account.create(
|
||||
cls.apiClient,
|
||||
cls.testdata["account"],
|
||||
admin=1
|
||||
)
|
||||
|
||||
# Set up connection to make customized API calls
|
||||
cls.user = User.create(
|
||||
cls.apiClient,
|
||||
cls.testdata["user"],
|
||||
account=cls.account.name,
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
|
||||
url = cls.testdata[TestData.url]
|
||||
|
||||
api_url = "http://" + url + ":8080/client/api"
|
||||
userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)
|
||||
|
||||
cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
|
||||
|
||||
primarystorage = cls.testdata[TestData.primaryStorage]
|
||||
|
||||
cls.primary_storage = StoragePool.create(
|
||||
cls.apiClient,
|
||||
primarystorage,
|
||||
scope=primarystorage[TestData.scope],
|
||||
zoneid=cls.zone.id,
|
||||
provider=primarystorage[TestData.provider],
|
||||
tags=primarystorage[TestData.tags],
|
||||
capacityiops=primarystorage[TestData.capacityIops],
|
||||
capacitybytes=primarystorage[TestData.capacityBytes],
|
||||
hypervisor=primarystorage[TestData.hypervisor]
|
||||
)
|
||||
|
||||
primarystorage2 = cls.testdata[TestData.primaryStorage2]
|
||||
|
||||
cls.primary_storage_2 = StoragePool.create(
|
||||
cls.apiClient,
|
||||
primarystorage2,
|
||||
scope=primarystorage2[TestData.scope],
|
||||
zoneid=cls.zone.id,
|
||||
provider=primarystorage2[TestData.provider],
|
||||
tags=primarystorage2[TestData.tags],
|
||||
capacityiops=primarystorage2[TestData.capacityIops],
|
||||
capacitybytes=primarystorage2[TestData.capacityBytes],
|
||||
hypervisor=primarystorage2[TestData.hypervisor]
|
||||
)
|
||||
|
||||
cls.compute_offering = ServiceOffering.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.computeOffering]
|
||||
)
|
||||
|
||||
cls.compute_offering_2 = ServiceOffering.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.computeOffering2]
|
||||
)
|
||||
|
||||
# Resources that are to be destroyed
|
||||
cls._cleanup = [
|
||||
cls.compute_offering,
|
||||
cls.compute_offering_2,
|
||||
cls.user,
|
||||
cls.account
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
cleanup_resources(cls.apiClient, cls._cleanup)
|
||||
|
||||
cls.primary_storage.delete(cls.apiClient)
|
||||
cls.primary_storage_2.delete(cls.apiClient)
|
||||
|
||||
sf_util.purge_solidfire_volumes(cls.sfe)
|
||||
except Exception as e:
|
||||
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
||||
|
||||
def setUp(self):
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
cleanup_resources(self.apiClient, self.cleanup)
|
||||
|
||||
def test_01_not_enough_storage_space(self):
|
||||
self._run_vms(self.compute_offering.id)
|
||||
|
||||
def test_02_not_enough_storage_performance(self):
|
||||
self._run_vms(self.compute_offering_2.id)
|
||||
|
||||
def _run_vms(self, compute_offering_id):
|
||||
try:
|
||||
# Based on the primary storage's space or performance and the storage requirements
|
||||
# of the compute offering, we should fail to create a VM on the third try.
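# Rough arithmetic, based on the TestData above (illustrative; the exact accounting
# is done by the SolidFire plugin): the performance-limited pool advertises
# capacityiops = 800 while each offering reserves miniops = 300, so two VMs need
# 2 x 300 = 600 <= 800 but a third would need 3 x 300 = 900 > 800. The space-limited
# pool works the same way against capacitybytes (200 GiB), where the reservation
# depends on the template's root disk size and the 200% hypervisorsnapshotreserve.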
|
||||
for _ in range(0, 3):
|
||||
number = random.randint(0, 1000)
|
||||
|
||||
vm_name = {
|
||||
TestData.name: "VM-%d" % number,
|
||||
"displayname": "Test VM %d" % number
|
||||
}
|
||||
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiClient,
|
||||
vm_name,
|
||||
accountid=self.account.name,
|
||||
zoneid=self.zone.id,
|
||||
serviceofferingid=compute_offering_id,
|
||||
templateid=self.template.id,
|
||||
domainid=self.domain.id,
|
||||
startvm=True
|
||||
)
|
||||
|
||||
self.cleanup.append(virtual_machine)
|
||||
except:
|
||||
pass
|
||||
|
||||
self.assertEqual(
|
||||
len(self.cleanup),
|
||||
2,
|
||||
"Only two VMs should have been successfully created."
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _connect_to_hypervisor(cls):
|
||||
host_ip = "https://" + \
|
||||
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_hostname)[0].ipaddress
|
||||
|
||||
cls.xen_session = XenAPI.Session(host_ip)
|
||||
|
||||
xen_server = cls.testdata[TestData.xenServer]
|
||||
|
||||
cls.xen_session.xenapi.login_with_password(xen_server[TestData.username], xen_server[TestData.password])
|
||||
@ -2859,7 +2859,10 @@
|
||||
if (jsonObj.state == "Ready") {
|
||||
allowedActions.push("remove");
|
||||
allowedActions.push("revertToVMSnapshot");
|
||||
allowedActions.push("takeSnapshot");
|
||||
|
||||
if (args && args.context && args.context.instances && args.context.instances[0].hypervisor && args.context.instances[0].hypervisor === "KVM") {
|
||||
allowedActions.push("takeSnapshot");
|
||||
}
|
||||
}
|
||||
|
||||
return allowedActions;
|
||||
|
||||
@ -1334,7 +1334,7 @@
|
||||
url: createURL('listNetworkACLs&aclid=' + args.context.aclLists[0].id),
|
||||
success: function(json) {
|
||||
var items = json.listnetworkaclsresponse.networkacl.sort(function(a, b) {
|
||||
return a.number >= b.number;
|
||||
return a.number - b.number;
|
||||
}).map(function(acl) {
|
||||
if (parseInt(acl.protocol)) { // protocol number
|
||||
acl.protocolnumber = acl.protocol;
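The one-line comparator fix above matters because Array.prototype.sort expects a function that returns a negative, zero, or positive number; "a.number >= b.number" returns a boolean, which coerces to 0/1 and is not a consistent ordering, so engines using a different or unstable sort algorithm can return the rules out of order. The same contract, expressed in Java purely for illustration (assuming the usual java.util imports):

    // a comparator must return negative/zero/positive; comparingInt derives one from the numeric key
    List<Integer> ruleNumbers = new ArrayList<>(Arrays.asList(3, 1, 2));
    ruleNumbers.sort(Comparator.comparingInt(Integer::intValue));   // [1, 2, 3]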
|
||||
|
||||
@ -38,10 +38,14 @@ public class DateUtil {
|
||||
return new Date();
|
||||
}
|
||||
|
||||
// yyyy-MM-ddTHH:mm:ssZxxxx
|
||||
// yyyy-MM-ddTHH:mm:ssZZZZ or yyyy-MM-ddTHH:mm:ssZxxxx
|
||||
public static Date parseTZDateString(String str) throws ParseException {
|
||||
DateFormat dfParse = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'Z");
|
||||
return dfParse.parse(str);
|
||||
try {
|
||||
return s_outputFormat.parse(str);
|
||||
} catch (ParseException e) {
|
||||
final DateFormat dfParse = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'Z");
|
||||
return dfParse.parse(str);
|
||||
}
|
||||
}
|
||||
|
||||
public static Date parseDateString(TimeZone tz, String dateString) {
|
||||
|
||||
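With this change parseTZDateString first tries the class's canonical output format and only falls back to the legacy pattern with a literal 'Z' before the offset; the unit tests near the end of this change exercise both paths. Example inputs, assuming s_outputFormat uses the yyyy-MM-dd'T'HH:mm:ssZ pattern (that field is not visible in this hunk):

    Date canonical = DateUtil.parseTZDateString("2018-05-28T17:05:26+0200");   // parsed by s_outputFormat (assumed pattern)
    Date legacy = DateUtil.parseTZDateString("2018-05-28T17:05:26Z+0200");     // handled by the legacy fallback format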
@ -233,18 +233,23 @@ public class Script implements Callable<String> {
|
||||
}
|
||||
|
||||
while (true) {
|
||||
_logger.debug("Executing while with timeout : " + _timeout);
|
||||
try {
|
||||
if (_process.waitFor() == 0) {
|
||||
_logger.debug("Execution is successful.");
|
||||
if (interpreter != null) {
|
||||
return interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
|
||||
} else {
|
||||
// null return exitValue apparently
|
||||
return String.valueOf(_process.exitValue());
|
||||
//process execution completed within timeout period
|
||||
if (_process.waitFor(_timeout, TimeUnit.MILLISECONDS)) {
|
||||
//process completed successfully
|
||||
if (_process.exitValue() == 0) {
|
||||
_logger.debug("Execution is successful.");
|
||||
if (interpreter != null) {
|
||||
return interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
|
||||
} else {
|
||||
// null return exitValue apparently
|
||||
return String.valueOf(_process.exitValue());
|
||||
}
|
||||
} else { //process failed
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
} //timeout
|
||||
} catch (InterruptedException e) {
|
||||
if (!_isTimeOut) {
|
||||
/*
|
||||
@ -254,24 +259,25 @@ public class Script implements Callable<String> {
|
||||
_logger.debug("We are interrupted but it's not a timeout, just continue");
|
||||
continue;
|
||||
}
|
||||
|
||||
TimedOutLogger log = new TimedOutLogger(_process);
|
||||
Task timedoutTask = new Task(log, ir);
|
||||
|
||||
timedoutTask.run();
|
||||
if (!_passwordCommand) {
|
||||
_logger.warn("Timed out: " + buildCommandLine(command) + ". Output is: " + timedoutTask.getResult());
|
||||
} else {
|
||||
_logger.warn("Timed out: " + buildCommandLine(command));
|
||||
}
|
||||
|
||||
return ERR_TIMEOUT;
|
||||
} finally {
|
||||
if (future != null) {
|
||||
future.cancel(false);
|
||||
}
|
||||
Thread.interrupted();
|
||||
}
|
||||
|
||||
//timeout without completing the process
|
||||
TimedOutLogger log = new TimedOutLogger(_process);
|
||||
Task timedoutTask = new Task(log, ir);
|
||||
|
||||
timedoutTask.run();
|
||||
if (!_passwordCommand) {
|
||||
_logger.warn("Timed out: " + buildCommandLine(command) + ". Output is: " + timedoutTask.getResult());
|
||||
} else {
|
||||
_logger.warn("Timed out: " + buildCommandLine(command));
|
||||
}
|
||||
|
||||
return ERR_TIMEOUT;
|
||||
}
|
||||
|
||||
_logger.debug("Exit value is " + _process.exitValue());
|
||||
@ -300,7 +306,7 @@ public class Script implements Callable<String> {
|
||||
IOUtils.closeQuietly(_process.getErrorStream());
|
||||
IOUtils.closeQuietly(_process.getOutputStream());
|
||||
IOUtils.closeQuietly(_process.getInputStream());
|
||||
_process.destroy();
|
||||
_process.destroyForcibly();
|
||||
}
|
||||
}
|
||||
}
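The rework above swaps the unbounded waitFor() loop for the Java 8 waitFor(timeout, unit) overload and escalates destroy() to destroyForcibly() during cleanup. A self-contained sketch of the same pattern, with an illustrative command and timeout (assumes a POSIX "sleep" binary is available):

    import java.util.concurrent.TimeUnit;

    public class TimedProcessExample {
        public static void main(String[] args) throws Exception {
            Process p = new ProcessBuilder("sleep", "5").start();   // illustrative long-running command
            if (p.waitFor(2, TimeUnit.SECONDS)) {                   // completed within the timeout
                System.out.println("exit value: " + p.exitValue());
            } else {                                                // timed out: kill it hard
                p.destroyForcibly();
                System.out.println("timed out, process killed");
            }
        }
    }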
|
||||
|
||||
@ -27,6 +27,9 @@ import java.util.TimeZone;
|
||||
|
||||
import com.cloud.utils.DateUtil.IntervalType;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class DateUtilTest {
|
||||
|
||||
@ -44,17 +47,25 @@ public class DateUtilTest {
|
||||
if (args.length == 2) {
|
||||
System.out.println("Next run time: " + DateUtil.getNextRunTime(IntervalType.getIntervalType(args[0]), args[1], "GMT", time).toString());
|
||||
}
|
||||
|
||||
time = new Date();
|
||||
DateFormat dfDate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'Z");
|
||||
String str = dfDate.format(time);
|
||||
System.out.println("Formated TZ time string : " + str);
|
||||
try {
|
||||
Date dtParsed = DateUtil.parseTZDateString(str);
|
||||
System.out.println("Parsed TZ time string : " + dtParsed.toString());
|
||||
} catch (ParseException e) {
|
||||
System.err.println("Parsing failed\n string : " + str + "\nexception :" + e.getLocalizedMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void zonedTimeFormatLegacy() throws ParseException {
|
||||
Date time = new Date();
|
||||
DateFormat dfDate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'Z");
|
||||
String str = dfDate.format(time);
|
||||
Date dtParsed = DateUtil.parseTZDateString(str);
|
||||
|
||||
assertEquals(time.toString(), dtParsed.toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void zonedTimeFormat() throws ParseException {
|
||||
Date time = new Date();
|
||||
DateFormat dfDate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
|
||||
String str = dfDate.format(time);
|
||||
Date dtParsed = DateUtil.parseTZDateString(str);
|
||||
|
||||
assertEquals(time.toString(), dtParsed.toString());
|
||||
}
|
||||
}
|
||||
|
||||