CLOUDSTACK-2056: DeploymentPlanner choice via ServiceOffering

- Changes merged from planner_reserve branch
- Exposing deploymentplanner as an optional parameter while creating a service offering
- changes to DeploymentPlanningManagerImpl to make sure host reserve-release happens between conflicting planner usages.
This commit is contained in:
Prachi Damle 2013-05-16 14:53:46 -07:00
parent 15be97772e
commit a2eb7bab1e
50 changed files with 2878 additions and 1091 deletions

View File

@@ -0,0 +1,45 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy;
import java.util.List;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
*/
/**
 * A {@link DeploymentPlanner} that works at cluster granularity: instead of
 * producing a single DeployDestination it produces a list of candidate
 * clusters, leaving host and storage selection to the deployment planning
 * manager.
 */
public interface DeploymentClusterPlanner extends DeploymentPlanner {
    /**
     * Determines the list of clusters in which the given virtual machine could
     * be deployed (ordered by the planner's own heuristic).
     *
     * @param vm
     *            profile of the virtual machine being deployed.
     * @param plan
     *            deployment plan that tells you where it's being deployed to.
     * @param avoid
     *            data centers, pods, clusters, or hosts that must be skipped.
     * @return list of candidate cluster ids; may be empty when no cluster fits.
     * @throws InsufficientServerCapacityException
     *             when capacity is known up front to be insufficient.
     */
    List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid)
        throws InsufficientServerCapacityException;

    /**
     * @return whether this planner uses hosts exclusively ({@code Dedicated})
     *         or can share them with other planners ({@code Shared}).
     */
    PlannerResourceUsage getResourceUsage();
}

View File

@ -35,6 +35,7 @@ import com.cloud.vm.VirtualMachineProfile;
/**
*/
public interface DeploymentPlanner extends Adapter {
/**
* plan is called to determine where a virtual machine should be running.
*
@ -46,6 +47,7 @@ public interface DeploymentPlanner extends Adapter {
* avoid these data centers, pods, clusters, or hosts.
* @return DeployDestination for that virtual machine.
*/
@Deprecated
DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException;
/**
@ -88,6 +90,10 @@ public interface DeploymentPlanner extends Adapter {
userconcentratedpod_firstfit;
}
public enum PlannerResourceUsage {
Shared, Dedicated;
}
public static class ExcludeList {
private Set<Long> _dcIds;
private Set<Long> _podIds;
@ -99,10 +105,22 @@ public interface DeploymentPlanner extends Adapter {
}
/**
 * Initializes the exclude list from caller-supplied id sets, snapshotting each
 * non-null set so later mutation of the caller's collections cannot affect
 * this instance. A null argument leaves the corresponding field null.
 */
public ExcludeList(Set<Long> _dcIds, Set<Long> _podIds, Set<Long> _clusterIds, Set<Long> _hostIds, Set<Long> _poolIds) {
    this._dcIds = (_dcIds == null) ? null : new HashSet<Long>(_dcIds);
    this._podIds = (_podIds == null) ? null : new HashSet<Long>(_podIds);
    this._clusterIds = (_clusterIds == null) ? null : new HashSet<Long>(_clusterIds);
    this._hostIds = (_hostIds == null) ? null : new HashSet<Long>(_hostIds);
    this._poolIds = (_poolIds == null) ? null : new HashSet<Long>(_poolIds);
}
public boolean add(InsufficientCapacityException e) {

View File

@ -423,6 +423,7 @@ public class EventTypes {
public static final String EVENT_INTERNAL_LB_VM_START = "INTERNALLBVM.START";
public static final String EVENT_INTERNAL_LB_VM_STOP = "INTERNALLBVM.STOP";
public static final String EVENT_HOST_RESERVATION_RELEASE = "HOST.RESERVATION.RELEASE";
// Dedicated guest vlan range
public static final String EVENT_GUEST_VLAN_RANGE_DEDICATE = "GUESTVLANRANGE.DEDICATE";
public static final String EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE = "GUESTVLANRANGE.RELEASE";
@ -728,7 +729,6 @@ public class EventTypes {
entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_UPDATE, AutoScaleVmGroup.class.getName());
entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_ENABLE, AutoScaleVmGroup.class.getName());
entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_DISABLE, AutoScaleVmGroup.class.getName());
entityEventDetails.put(EVENT_GUEST_VLAN_RANGE_DEDICATE, GuestVlan.class.getName());
entityEventDetails.put(EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE, GuestVlan.class.getName());
}

View File

@ -108,4 +108,6 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity,
boolean getDefaultUse();
String getSystemVmType();
String getDeploymentPlanner();
}

View File

@ -107,4 +107,6 @@ public interface ResourceService {
List<? extends S3> listS3s(ListS3sCmd cmd);
boolean releaseHostReservation(Long hostId);
}

View File

@ -420,4 +420,6 @@ public interface ManagementService {
*/
List<? extends Capacity> listTopConsumedResources(ListCapacityCmd cmd);
List<String> listDeploymentPlanners();
}

View File

@ -496,6 +496,7 @@ public class ApiConstants {
public static final String AFFINITY_GROUP_NAMES = "affinitygroupnames";
public static final String ASA_INSIDE_PORT_PROFILE = "insideportprofile";
public static final String AFFINITY_GROUP_ID = "affinitygroupid";
public static final String DEPLOYMENT_PLANNER = "deploymentplanner";
public static final String ACL_ID = "aclid";
public static final String NUMBER = "number";

View File

@ -0,0 +1,71 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.config;
import java.util.ArrayList;
import java.util.List;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.BaseListCmd;
import org.apache.cloudstack.api.response.DeploymentPlannersResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.log4j.Logger;
@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class)
@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class)
public class ListDeploymentPlannersCmd extends BaseListCmd {
    public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName());

    private static final String s_name = "listdeploymentplannersresponse";

    // This command takes no API parameters.

    @Override
    public String getCommandName() {
        return s_name;
    }

    /**
     * Builds one DeploymentPlannersResponse per planner name reported by the
     * management server and publishes them wrapped in a ListResponse.
     */
    @Override
    public void execute() {
        ListResponse<DeploymentPlannersResponse> listResponse = new ListResponse<DeploymentPlannersResponse>();
        List<DeploymentPlannersResponse> items = new ArrayList<DeploymentPlannersResponse>();
        for (String plannerName : _mgr.listDeploymentPlanners()) {
            DeploymentPlannersResponse item = new DeploymentPlannersResponse();
            item.setName(plannerName);
            item.setObjectName("deploymentPlanner");
            items.add(item);
        }
        listResponse.setResponses(items);
        listResponse.setResponseName(getCommandName());
        this.setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,105 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.admin.host;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.log4j.Logger;
import com.cloud.async.AsyncJob;
import com.cloud.event.EventTypes;
import com.cloud.user.Account;
import com.cloud.user.UserContext;
@APICommand(name = "releaseHostReservation", description = "Releases host reservation.", responseObject = SuccessResponse.class)
@APICommand(name = "releaseHostReservation", description = "Releases host reservation.", responseObject = SuccessResponse.class)
public class ReleaseHostReservationCmd extends BaseAsyncCmd {
    public static final Logger s_logger = Logger.getLogger(ReleaseHostReservationCmd.class.getName());

    private static final String s_name = "releasehostreservationresponse";

    // ///////////////////////////////////////////////////
    // //////////////// API parameters ////////////////////
    // ///////////////////////////////////////////////////

    /** ID of the host whose planner reservation should be released. */
    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = HostResponse.class,
            required = true, description = "the host ID")
    private Long id;

    public Long getId() {
        return id;
    }

    @Override
    public String getCommandName() {
        return s_name;
    }

    /**
     * The owner is the calling account when the user context has one,
     * otherwise the system account.
     */
    @Override
    public long getEntityOwnerId() {
        Account caller = UserContext.current().getCaller();
        return (caller == null) ? Account.ACCOUNT_ID_SYSTEM : caller.getId();
    }

    @Override
    public String getEventType() {
        return EventTypes.EVENT_HOST_RESERVATION_RELEASE;
    }

    @Override
    public String getEventDescription() {
        return "releasing reservation for host: " + getId();
    }

    @Override
    public AsyncJob.Type getInstanceType() {
        return AsyncJob.Type.Host;
    }

    @Override
    public Long getInstanceId() {
        return getId();
    }

    /** Delegates to the resource service; fails the API call if it declines. */
    @Override
    public void execute() {
        if (!_resourceService.releaseHostReservation(getId())) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release host reservation");
        }
        this.setResponseObject(new SuccessResponse(getCommandName()));
    }
}

View File

@ -84,6 +84,9 @@ public class CreateServiceOfferingCmd extends BaseCmd {
@Parameter(name=ApiConstants.NETWORKRATE, type=CommandType.INTEGER, description="data transfer rate in megabits per second allowed. Supported only for non-System offering and system offerings having \"domainrouter\" systemvmtype")
private Integer networkRate;
@Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "The deployment planner heuristics used to deploy a VM of this offering. If null, value of global config vm.deployment.planner is used")
private String deploymentPlanner;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -148,6 +151,9 @@ public class CreateServiceOfferingCmd extends BaseCmd {
return networkRate;
}
// Accessor for the optional "deploymentplanner" API parameter; null means the
// global config vm.deployment.planner value applies (see the @Parameter doc).
public String getDeploymentPlanner() {
    return deploymentPlanner;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////

View File

@ -18,6 +18,8 @@ package org.apache.cloudstack.api.response;
import java.util.Date;
import javax.persistence.Column;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
@ -82,6 +84,8 @@ public class ServiceOfferingResponse extends BaseResponse {
@SerializedName(ApiConstants.NETWORKRATE) @Param(description="data transfer rate in megabits per second allowed.")
private Integer networkRate;
@SerializedName(ApiConstants.DEPLOYMENT_PLANNER) @Param(description="deployment strategy used to deploy VM.")
private String deploymentPlanner;
public String getId() {
return id;
@ -225,4 +229,12 @@ public class ServiceOfferingResponse extends BaseResponse {
public void setNetworkRate(Integer networkRate) {
this.networkRate = networkRate;
}
// Name of the deployment planner used for this offering, serialized under
// ApiConstants.DEPLOYMENT_PLANNER.
public String getDeploymentPlanner() {
    return deploymentPlanner;
}

public void setDeploymentPlanner(String deploymentPlanner) {
    this.deploymentPlanner = deploymentPlanner;
}
}

View File

@ -540,15 +540,11 @@
Deployment planners
-->
<bean id="UserDispersingPlanner" class="com.cloud.deploy.UserDispersingPlanner">
<property name="name" value="UserDispersing"/>
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
<property name="name" value="UserDispersingPlanner"/>
</bean>
<bean id="UserConcentratedPodPlanner" class="com.cloud.deploy.UserConcentratedPodPlanner">
<property name="name" value="UserConcentratedPod"/>
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
<property name="name" value="UserConcentratedPodPlanner"/>
</bean>
<bean id="clusterBasedAgentLoadBalancerPlanner" class="com.cloud.cluster.agentlb.ClusterBasedAgentLoadBalancerPlanner">
@ -605,10 +601,6 @@
<property name="name" value="OvmGuru"/>
</bean>
<bean id="HypervisorPlannerSelector" class="com.cloud.deploy.HypervisorVmPlannerSelector">
<property name="name" value="HypervisorPlannerSelector"/>
</bean>
<!--
Managers
-->
@ -623,6 +615,7 @@
<property name="UserPasswordEncoders" value="#{userPasswordEncoders.Adapters}" />
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
<property name="AffinityGroupProcessors" value="#{affinityProcessors.Adapters}" />
<property name="Planners" value="#{deploymentPlanners.Adapters}" />
</bean>
<bean id="storageManagerImpl" class="com.cloud.storage.StorageManagerImpl">
@ -630,9 +623,7 @@
</bean>
<bean id="FirstFitPlanner" class="com.cloud.deploy.FirstFitPlanner">
<property name="name" value="First Fit"/>
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
<property name="name" value="FirstFitPlanner"/>
</bean>
<bean id="resourceManagerImpl" class="com.cloud.resource.ResourceManagerImpl" >
@ -833,17 +824,13 @@
</bean>
<bean id="BareMetalPlanner" class="com.cloud.baremetal.manager.BareMetalPlanner">
<property name="name" value="BareMetal Fit"/>
<property name="name" value="BareMetalPlanner"/>
</bean>
<bean id="BaremetalGuru" class="com.cloud.baremetal.manager.BareMetalGuru">
<property name="name" value="BaremetalGuru"/>
</bean>
<bean id="BaremetalPlannerSelector" class="com.cloud.baremetal.manager.BaremetalPlannerSelector">
<property name="name" value="BaremetalPlannerSelector"/>
</bean>
<bean id="BaremetalManager" class="com.cloud.baremetal.manager.BaremetalManagerImpl"/>
<bean id="BaremetalDhcpManager" class="com.cloud.baremetal.networkservice.BaremetalDhcpManagerImpl"/>
<bean id="BaremetalKickStartPxeService" class="com.cloud.baremetal.networkservice.BaremetalKickStartServiceImpl"/>
@ -859,6 +846,8 @@
<bean id="DeploymentPlanningManager" class="com.cloud.deploy.DeploymentPlanningManagerImpl">
<property name="Planners" value="#{deploymentPlanners.Adapters}" />
<property name="AffinityGroupProcessors" value="#{affinityProcessors.Adapters}" />
<property name="StoragePoolAllocators" value="#{storagePoolAllocators.Adapters}" />
<property name="HostAllocators" value="#{hostAllocators.Adapters}" />
</bean>
<bean id="AffinityGroupJoinDaoImpl" class="com.cloud.api.query.dao.AffinityGroupJoinDaoImpl">
@ -868,4 +857,7 @@
<bean id="AffinityGroupVMMapDaoImpl" class="org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDaoImpl">
</bean>
<bean id="PlannerHostReservationDaoImpl" class="com.cloud.deploy.dao.PlannerHostReservationDaoImpl">
</bean>
</beans>

View File

@ -212,6 +212,7 @@ listConfigurations=1
ldapConfig=1
ldapRemove=1
listCapabilities=15
listDeploymentPlanners=1
#### pod commands
createPod=1
@ -261,6 +262,7 @@ listHosts=3
findHostsForMigration=1
addSecondaryStorage=1
updateHostPassword=1
releaseHostReservation=1
#### volume commands
attachVolume=15

View File

@ -174,5 +174,10 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe
return false;
}
@Override
public String getDeploymentPlanner() {
    // Legacy 2.1 offerings carry no planner choice; returning null signals
    // "use the global default planner" (per the deploymentplanner API doc).
    return null;
}
}

View File

@ -68,6 +68,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering
@Column(name="sort_key")
int sortKey;
@Column(name = "deployment_planner")
private String deploymentPlanner = null;
protected ServiceOfferingVO() {
super();
}
@ -104,6 +107,15 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering
this.hostTag = hostTag;
}
/**
 * Constructor that additionally records the deployment planner chosen for
 * this offering; all other fields are delegated to the pre-existing
 * constructor so the two stay consistent.
 *
 * @param deploymentPlanner planner name to use for VMs of this offering, or
 *                          null to fall back to the global default planner.
 */
public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps,
        boolean offerHA, boolean limitResourceUse, boolean volatileVm, String displayText, boolean useLocalStorage,
        boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId,
        String hostTag, String deploymentPlanner) {
    this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, volatileVm,
            displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId, hostTag);
    this.deploymentPlanner = deploymentPlanner;
}
@Override
public boolean getOfferHA() {
return offerHA;
@ -208,4 +220,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering
return volatileVm;
}
@Override
public String getDeploymentPlanner() {
    // Backed by the deployment_planner column; null when the offering relies
    // on the global default planner.
    return deploymentPlanner;
}
}

View File

@ -17,6 +17,10 @@
package com.cloud.upgrade.dao;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import org.apache.log4j.Logger;
import java.io.File;
import java.sql.Connection;
import java.sql.Date;
@ -25,12 +29,7 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.UUID;
import com.cloud.network.vpc.NetworkACL;
import org.apache.log4j.Logger;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
public class Upgrade410to420 implements DbUpgrade {
final static Logger s_logger = Logger.getLogger(Upgrade410to420.class);
@ -70,6 +69,7 @@ public class Upgrade410to420 implements DbUpgrade {
updatePrimaryStore(conn);
addEgressFwRulesForSRXGuestNw(conn);
upgradeEIPNetworkOfferings(conn);
updateGlobalDeploymentPlanner(conn);
upgradeDefaultVpcOffering(conn);
upgradePhysicalNtwksWithInternalLbProvider(conn);
updateNetworkACLs(conn);
@ -563,6 +563,53 @@ public class Upgrade410to420 implements DbUpgrade {
}
}
/**
 * Seeds the new vm.deployment.planner global config from the legacy
 * vm.allocation.algorithm value: the userconcentratedpod_* algorithms map to
 * UserConcentratedPodPlanner, userdispersing maps to UserDispersingPlanner,
 * and everything else (random, firstfit, or unset) maps to FirstFitPlanner.
 *
 * Fix over the original: the select PreparedStatement was leaked because the
 * same variable was reused for the UPDATE statement before being closed; the
 * select and update statements now have separate variables and both are
 * released in the finally block.
 */
private void updateGlobalDeploymentPlanner(Connection conn) {
    PreparedStatement selectStmt = null;
    PreparedStatement updateStmt = null;
    ResultSet rs = null;
    try {
        selectStmt = conn
                .prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'");
        rs = selectStmt.executeQuery();
        while (rs.next()) {
            String globalValue = rs.getString(1);
            String plannerName = "FirstFitPlanner";
            if (globalValue != null) {
                if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_firstfit.toString())
                        || globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_random
                                .toString())) {
                    plannerName = "UserConcentratedPodPlanner";
                } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) {
                    plannerName = "UserDispersingPlanner";
                }
                // random and firstfit both keep the FirstFitPlanner default.
            }
            // update vm.deployment.planner global config
            updateStmt = conn
                    .prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'");
            updateStmt.setString(1, plannerName);
            updateStmt.executeUpdate();
            updateStmt.close();
            updateStmt = null;
        }
    } catch (SQLException e) {
        throw new CloudRuntimeException("Unable to set vm.deployment.planner global config", e);
    } finally {
        try {
            if (rs != null) {
                rs.close();
            }
            if (selectStmt != null) {
                selectStmt.close();
            }
            if (updateStmt != null) {
                updateStmt.close();
            }
        } catch (SQLException e) {
            // best-effort cleanup; close failures are ignored by convention
            // in this upgrade class.
        }
    }
}
private void upgradeDefaultVpcOffering(Connection conn) {
PreparedStatement pstmt = null;
@ -596,8 +643,6 @@ public class Upgrade410to420 implements DbUpgrade {
}
}
private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) {
PreparedStatement pstmt = null;
@ -644,7 +689,6 @@ public class Upgrade410to420 implements DbUpgrade {
} catch (SQLException e) {
}
}
}
private void addHostDetailsIndex(Connection conn) {

View File

@ -116,4 +116,8 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
*/
List<String> listDistinctHostNames(long networkId, VirtualMachine.Type... types);
List<VMInstanceVO> findByHostInStates(Long hostId, State... states);
List<VMInstanceVO> listStartingWithNoHostId();
}

View File

@ -83,6 +83,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
protected GenericSearchBuilder<VMInstanceVO, Long> CountRunningByAccount;
protected SearchBuilder<VMInstanceVO> NetworkTypeSearch;
protected GenericSearchBuilder<VMInstanceVO, String> DistinctHostNameSearch;
protected SearchBuilder<VMInstanceVO> HostAndStateSearch;
protected SearchBuilder<VMInstanceVO> StartingWithNoHostSearch;
@Inject ResourceTagDao _tagsDao;
@Inject NicDao _nicDao;
@ -209,6 +211,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
CountRunningByAccount.and("state", CountRunningByAccount.entity().getState(), SearchCriteria.Op.EQ);
CountRunningByAccount.done();
HostAndStateSearch = createSearchBuilder();
HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ);
HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN);
HostAndStateSearch.done();
StartingWithNoHostSearch = createSearchBuilder();
StartingWithNoHostSearch.and("state", StartingWithNoHostSearch.entity().getState(), Op.EQ);
StartingWithNoHostSearch.and("host", StartingWithNoHostSearch.entity().getHostId(), Op.NULL);
StartingWithNoHostSearch.done();
_updateTimeAttr = _allAttributes.get("updateTime");
assert _updateTimeAttr != null : "Couldn't get this updateTime attribute";
}
@ -625,4 +637,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
return result;
}
/** Lists VM instances on the given host whose state is one of {@code states}. */
@Override
public List<VMInstanceVO> findByHostInStates(Long hostId, State... states) {
    // HostAndStateSearch: equality on host id plus an IN clause on state.
    SearchCriteria<VMInstanceVO> sc = HostAndStateSearch.create();
    sc.setParameters("host", hostId);
    sc.setParameters("states", (Object[]) states);
    return listBy(sc);
}
/** Lists VM instances in the Starting state that have no host assigned yet. */
@Override
public List<VMInstanceVO> listStartingWithNoHostId() {
    SearchCriteria<VMInstanceVO> sc = StartingWithNoHostSearch.create();
    sc.setParameters("state", State.Starting);
    return listBy(sc);
}
}

View File

@ -65,6 +65,14 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
}
List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
// add remaining pools in cluster, that did not match tags, to avoid set
List<StoragePoolVO> allPools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
allPools.removeAll(pools);
for (StoragePoolVO pool : allPools) {
avoid.addPool(pool.getId());
}
if (pools.size() == 0) {
if (s_logger.isDebugEnabled()) {
String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
@ -80,6 +88,8 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, pol, dskCh, plan)) {
suitablePools.add(pol);
} else {
avoid.addPool(pool.getId());
}
}

View File

@ -85,7 +85,9 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (filter(avoid, pol, dskCh, plan)) {
s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
suitablePools.add(pol);
}
} else {
avoid.addPool(pool.getId());
}
}
if (suitablePools.size() == returnUpTo) {
@ -101,8 +103,19 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, pol, dskCh, plan)) {
suitablePools.add(pol);
}
} else {
avoid.addPool(pool.getId());
}
}
// add remaining pools in cluster, that did not match tags, to avoid
// set
List<StoragePoolVO> allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(),
plan.getPodId(), plan.getClusterId(), null);
allPools.removeAll(availablePools);
for (StoragePoolVO pool : allPools) {
avoid.addPool(pool.getId());
}
}
if (s_logger.isDebugEnabled()) {

View File

@ -67,6 +67,13 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
// add remaining pools in zone, that did not match tags, to avoid set
List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
allPools.removeAll(storagePools);
for (StoragePoolVO pool : allPools) {
avoid.addPool(pool.getId());
}
for (StoragePoolVO storage : storagePools) {
if (suitablePools.size() == returnUpTo) {
break;
@ -74,7 +81,9 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
if (filter(avoid, pol, dskCh, plan)) {
suitablePools.add(pol);
}
} else {
avoid.addPool(pol.getId());
}
}
return suitablePools;
}

View File

@ -24,13 +24,12 @@ import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Local(value=DeploymentPlanner.class)
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner {
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class);
@ -141,15 +140,4 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
}
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString()))){
return true;
}
}
return false;
}
}

View File

@ -29,14 +29,13 @@ import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.configuration.Config;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.Pair;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Local(value=DeploymentPlanner.class)
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner {
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class);
@ -191,17 +190,6 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment
}
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
return true;
}
}
return false;
}
float _userDispersionWeight;

View File

@ -1,39 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.baremetal.manager;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import com.cloud.deploy.AbstractDeployPlannerSelector;
import com.cloud.deploy.DeployPlannerSelector;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.vm.UserVmVO;
@Local(value = {DeployPlannerSelector.class})
public class BaremetalPlannerSelector extends AbstractDeployPlannerSelector{
    /**
     * Selects the BareMetalPlanner for bare-metal VMs; returns null for any
     * other hypervisor so other selectors can be consulted.
     */
    @Override
    public String selectPlanner(UserVmVO vm) {
        if (vm.getHypervisorType() == HypervisorType.BareMetal) {
            return "BareMetalPlanner";
        }
        return null;
    }
}

View File

@ -90,6 +90,11 @@
<artifactId>cloud-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-ipc</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-framework-events</artifactId>

View File

@ -169,6 +169,13 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
}
}
// add all hosts that we are not considering to the avoid list
List<HostVO> allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null);
allhostsInCluster.removeAll(clusterHosts);
for (HostVO host : allhostsInCluster) {
avoid.addHost(host.getId());
}
return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account);
}
@ -285,6 +292,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Not using host " + host.getId() + "; numCpusGood: " + numCpusGood + "; cpuFreqGood: " + cpuFreqGood + ", host has capacity?" + hostHasCapacity);
}
avoid.addHost(host.getId());
}
}

View File

@ -73,6 +73,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase<ServiceOfferingJo
offeringResponse.setDomainId(offering.getDomainUuid());
offeringResponse.setNetworkRate(offering.getRateMbps());
offeringResponse.setHostTag(offering.getHostTag());
offeringResponse.setDeploymentPlanner(offering.getDeploymentPlanner());
offeringResponse.setObjectName("serviceoffering");
return offeringResponse;

View File

@ -106,6 +106,9 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit
@Column(name="domain_path")
private String domainPath = null;
@Column(name = "deployment_planner")
private String deploymentPlanner;
public ServiceOfferingJoinVO() {
}
@ -307,5 +310,13 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit
this.vm_type = vm_type;
}
/**
 * @return the name of the deployment planner configured on this service
 *         offering (backed by the deployment_planner column), or null if none
 */
public String getDeploymentPlanner() {
    return this.deploymentPlanner;
}
/**
 * Sets the deployment planner name for this service offering view row.
 *
 * @param deploymentPlanner planner name to store (maps to the
 *                          deployment_planner column); may be null
 */
public void setDeploymentPlanner(String deploymentPlanner) {
    this.deploymentPlanner = deploymentPlanner;
}
}

View File

@ -27,6 +27,8 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.framework.messagebus.MessageBus;
import org.apache.cloudstack.framework.messagebus.PublishScope;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -77,11 +79,14 @@ import com.cloud.utils.db.DB;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.fsm.StateListener;
import com.cloud.vm.UserVmDetailVO;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.Event;
import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotVO;
@ -121,6 +126,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
protected VMSnapshotDao _vmSnapshotDao;
@Inject
protected UserVmDao _userVMDao;
@Inject
protected UserVmDetailsDao _userVmDetailsDao;
@Inject
ClusterDetailsDao _clusterDetailsDao;
@ -132,6 +139,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
long _extraBytesPerVolume = 0;
private float _storageOverProvisioningFactor = 1.0f;
@Inject
MessageBus _messageBus;
private static final String MESSAGE_RESERVED_CAPACITY_FREED_FLAG = "Message.ReservedCapacityFreed.Flag";
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600);
@ -552,6 +564,20 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId());
reservedMemory += so.getRamSize() * 1024L * 1024L;
reservedCpu += so.getCpu() * so.getSpeed();
} else {
// signal if not done already, that the VM has been stopped for skip.counting.hours,
// hence capacity will not be reserved anymore.
UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG);
if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) {
_messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm);
if (vm.getType() == VirtualMachine.Type.User) {
UserVmVO userVM = _userVMDao.findById(vm.getId());
_userVMDao.loadDetails(userVM);
userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true");
_userVMDao.saveDetails(userVM);
}
}
}
}
@ -688,6 +714,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager,
allocateVmCapacity(vm, fromLastHost);
}
if (newState == State.Stopped) {
if (vm.getType() == VirtualMachine.Type.User) {
UserVmVO userVM = _userVMDao.findById(vm.getId());
_userVMDao.loadDetails(userVM);
// free the message sent flag if it exists
userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "false");
_userVMDao.saveDetails(userVM);
}
}
return true;
}

View File

@ -214,6 +214,8 @@ public enum Config {
SecStorageProxy("Advanced", AgentManager.class, String.class, "secstorage.proxy", null, "http proxy used by ssvm, in http://username:password@proxyserver:port format", null),
AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null),
AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null),
HostReservationReleasePeriod("Advanced", ManagementServer.class, Integer.class, "host.reservation.release.period", "300000", "The interval in milliseconds between host reservation release checks", null),
// LB HealthCheck Interval.
LBHealthCheck("Advanced", ManagementServer.class, String.class, "healthcheck.update.interval", "600",
@ -235,6 +237,7 @@ public enum Config {
ApplyAllocationAlgorithmToPods("Advanced", ManagementServer.class, Boolean.class, "apply.allocation.algorithm.to.pods", "false", "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation", "true,false"),
VmUserDispersionWeight("Advanced", ManagementServer.class, Float.class, "vm.user.dispersion.weight", "1", "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 - weight of user dispersion)", null),
VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null),
VmDeploymentPlanner("Advanced", ManagementServer.class, String.class, "vm.deployment.planner", "FirstFitPlanner", "'FirstFitPlanner', 'UserDispersingPlanner', 'UserConcentratedPodPlanner': DeploymentPlanner heuristic that will be used for VM deployment.", null),
EndpointeUrl("Advanced", ManagementServer.class, String.class, "endpointe.url", "http://localhost:8080/client/api", "Endpointe Url", null),
ElasticLoadBalancerEnabled("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.enabled", "false", "Whether the load balancing service is enabled for basic zones", "true,false"),
ElasticLoadBalancerNetwork("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.network", "guest", "Whether the elastic load balancing service public ips are taken from the public or guest network", "guest,public"),

View File

@ -79,10 +79,11 @@ public interface ConfigurationManager extends ConfigurationService, Manager {
* TODO
* @param id
* @param useVirtualNetwork
* @param deploymentPlanner
* @return ID
*/
ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired,
boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate);
boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner);
/**
* Creates a new disk offering

View File

@ -162,6 +162,7 @@ import com.cloud.org.Grouping.AllocationState;
import com.cloud.projects.Project;
import com.cloud.projects.ProjectManager;
import com.cloud.server.ConfigurationServer;
import com.cloud.server.ManagementService;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.DiskOfferingVO;
@ -346,6 +347,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
@Inject
NicIpAliasDao _nicIpAliasDao;
@Inject
public ManagementService _mgr;
// FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao?
@Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao;
@ -2034,17 +2038,29 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
throw new InvalidParameterValueException("Network rate can be specified only for non-System offering and system offerings having \"domainrouter\" systemvmtype");
}
if (cmd.getDeploymentPlanner() != null) {
List<String> planners = _mgr.listDeploymentPlanners();
if (planners != null && !planners.isEmpty()) {
if (!planners.contains(cmd.getDeploymentPlanner())) {
throw new InvalidParameterValueException(
"Invalid name for Deployment Planner specified, please use listDeploymentPlanners to get the valid set");
}
} else {
throw new InvalidParameterValueException("No deployment planners found");
}
}
return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(),
localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate());
localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner());
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering")
public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText,
boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) {
boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) {
tags = cleanupTags(tags);
ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type,
domainId, hostTag);
domainId, hostTag, deploymentPlanner);
if ((offering = _serviceOfferingDao.persist(offering)) != null) {
UserContext.current().setEventDetails("Service offering id=" + offering.getId());

View File

@ -1,84 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy;
import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.UserVmVO;
/**
 * Convenience base class for {@link DeployPlannerSelector} implementations.
 * Provides the component-lifecycle boilerplate (name, config params, run
 * level) and loads the global vm.allocation.algorithm setting at configure
 * time for use by subclasses.
 */
public abstract class AbstractDeployPlannerSelector extends AdapterBase implements DeployPlannerSelector {
    protected Map<String, Object> params;   // configuration parameters handed in by the container
    protected String name;                  // component name assigned at configure time
    protected int runLevel;                 // startup ordering level for this adapter

    @Inject
    protected ConfigurationDao _configDao;
    // Value of the vm.allocation.algorithm global setting; "random" until configure() runs.
    protected String _allocationAlgorithm = "random";

    @Override
    public String getName() {
        return this.name;
    }

    @Override
    public void setName(String name) {
        this.name = name;
    }

    @Override
    public void setConfigParams(Map<String, Object> params) {
        this.params = params;
    }

    @Override
    public Map<String, Object> getConfigParams() {
        return this.params;
    }

    @Override
    public int getRunLevel() {
        return this.runLevel;
    }

    @Override
    public void setRunLevel(int level) {
        this.runLevel = level;
    }

    /**
     * Runs the standard adapter configuration, then caches the
     * vm.allocation.algorithm global setting.
     */
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);
        this._allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
        return true;
    }

    @Override
    public boolean start() {
        return true;
    }

    @Override
    public boolean stop() {
        return true;
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -49,6 +49,7 @@ import com.cloud.dc.Pod;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
@ -81,7 +82,7 @@ import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
@Local(value=DeploymentPlanner.class)
public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner {
private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class);
@Inject protected HostDao _hostDao;
@Inject protected DataCenterDao _dcDao;
@ -103,28 +104,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
@Inject DataStoreManager dataStoreMgr;
@Inject protected ClusterDetailsDao _clusterDetailsDao;
protected List<StoragePoolAllocator> _storagePoolAllocators;
public List<StoragePoolAllocator> getStoragePoolAllocators() {
return _storagePoolAllocators;
}
public void setStoragePoolAllocators(
List<StoragePoolAllocator> _storagePoolAllocators) {
this._storagePoolAllocators = _storagePoolAllocators;
}
protected List<HostAllocator> _hostAllocators;
public List<HostAllocator> getHostAllocators() {
return _hostAllocators;
}
public void setHostAllocators(List<HostAllocator> _hostAllocators) {
this._hostAllocators = _hostAllocators;
}
protected String _allocationAlgorithm = "random";
protected String _globalDeploymentPlanner = "FirstFitPlanner";
@Override
public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
public List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
DeploymentPlan plan, ExcludeList avoid)
throws InsufficientServerCapacityException {
VirtualMachine vm = vmProfile.getVirtualMachine();
@ -138,136 +124,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
return null;
}
ServiceOffering offering = vmProfile.getServiceOffering();
int cpu_requested = offering.getCpu() * offering.getSpeed();
long ram_requested = offering.getRamSize() * 1024L * 1024L;
if (s_logger.isDebugEnabled()) {
s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm);
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No"));
}
String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
if(plan.getHostId() != null && haVmTag == null){
Long hostIdSpecified = plan.getHostId();
if (s_logger.isDebugEnabled()){
s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: "
+ hostIdSpecified);
}
HostVO host = _hostDao.findById(hostIdSpecified);
if (host == null) {
s_logger.debug("The specified host cannot be found");
} else if (avoid.shouldAvoid(host)) {
s_logger.debug("The specified host is in avoid set");
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
}
// search for storage under the zone, pod, cluster of the host.
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile,
lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
List<Volume> readyAndReusedVolumes = result.second();
// choose the potential pool for this VM for this host
if (!suitableVolumeStoragePools.isEmpty()) {
List<Host> suitableHosts = new ArrayList<Host>();
suitableHosts.add(host);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
suitableHosts, suitableVolumeStoragePools);
if (potentialResources != null) {
Pod pod = _podDao.findById(host.getPodId());
Cluster cluster = _clusterDao.findById(host.getClusterId());
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// remove the reused vol<->pool from destination, since
// we don't have to prepare this volume.
for (Volume vol : readyAndReusedVolumes) {
storageVolMap.remove(vol);
}
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
s_logger.debug("Returning Deployment Destination: " + dest);
return dest;
}
}
}
s_logger.debug("Cannnot deploy to specified host, returning.");
return null;
}
if (vm.getLastHostId() != null && haVmTag == null) {
s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());
HostVO host = _hostDao.findById(vm.getLastHostId());
if(host == null){
s_logger.debug("The last host of this VM cannot be found");
}else if(avoid.shouldAvoid(host)){
s_logger.debug("The last host of this VM is in avoid set");
}else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
}else{
if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
long cluster_id = host.getClusterId();
ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){
s_logger.debug("The last host of this VM is UP and has enough capacity");
s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
//search for storage under the zone, pod, cluster of the last host.
DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
List<Volume> readyAndReusedVolumes = result.second();
//choose the potential pool for this VM for this host
if(!suitableVolumeStoragePools.isEmpty()){
List<Host> suitableHosts = new ArrayList<Host>();
suitableHosts.add(host);
Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
if(potentialResources != null){
Pod pod = _podDao.findById(host.getPodId());
Cluster cluster = _clusterDao.findById(host.getClusterId());
Map<Volume, StoragePool> storageVolMap = potentialResources.second();
// remove the reused vol<->pool from destination, since we don't have to prepare this volume.
for(Volume vol : readyAndReusedVolumes){
storageVolMap.remove(vol);
}
DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
s_logger.debug("Returning Deployment Destination: "+ dest);
return dest;
}
}
}else{
s_logger.debug("The last host of this VM does not have enough capacity");
}
}else{
s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "+host.getStatus().name() + ", host resource state is: "+host.getResourceState());
}
}
s_logger.debug("Cannot choose the last host to deploy this VM ");
}
List<Long> clusterList = new ArrayList<Long>();
if (plan.getClusterId() != null) {
Long clusterIdSpecified = plan.getClusterId();
s_logger.debug("Searching resources only under specified Cluster: "+ clusterIdSpecified);
ClusterVO cluster = _clusterDao.findById(plan.getClusterId());
if (cluster != null ){
clusterList.add(clusterIdSpecified);
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
if (avoid.shouldAvoid(cluster)) {
s_logger.debug("The specified cluster is in avoid set, returning.");
} else {
clusterList.add(clusterIdSpecified);
removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
}
return clusterList;
}else{
s_logger.debug("The specified cluster cannot be found, returning.");
avoid.addCluster(plan.getClusterId());
@ -280,11 +149,15 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
HostPodVO pod = _podDao.findById(podIdSpecified);
if (pod != null) {
DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
if(dest == null){
avoid.addPod(plan.getPodId());
if (avoid.shouldAvoid(pod)) {
s_logger.debug("The specified pod is in avoid set, returning.");
} else {
clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
if (clusterList == null) {
avoid.addPod(plan.getPodId());
}
}
return dest;
return clusterList;
} else {
s_logger.debug("The specified Pod cannot be found, returning.");
avoid.addPod(plan.getPodId());
@ -305,7 +178,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
private DeployDestination scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
private List<Long> scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
ServiceOffering offering = vmProfile.getServiceOffering();
int requiredCpu = offering.getCpu() * offering.getSpeed();
@ -341,20 +214,24 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
if(!podsWithCapacity.isEmpty()){
prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan);
if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("No Pods found for destination, returning.");
}
return null;
}
List<Long> clusterList = new ArrayList<Long>();
//loop over pods
for(Long podId : prioritizedPodIds){
s_logger.debug("Checking resources under Pod: "+podId);
DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid);
if(dest != null){
return dest;
List<Long> clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan,
avoid);
if (clustersUnderPod != null) {
clusterList.addAll(clustersUnderPod);
}
avoid.addPod(podId);
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("No Pods found for destination, returning.");
}
return null;
return clusterList;
}else{
if (s_logger.isDebugEnabled()) {
s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning.");
@ -363,7 +240,69 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
}
private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
/**
 * Builds the per-capacity-type disable-threshold map (CPU and memory) from
 * the global configuration. Values are re-read on every call so that an
 * admin changing these settings does not require a management-server
 * restart.
 *
 * @return map of capacity type to its disable threshold fraction
 *         (defaults to 0.85 when the setting is unset or unparsable)
 */
private Map<Short, Float> getCapacityThresholdMap() {
    Map<Short, Float> thresholds = new HashMap<Short, Float>();

    String cpuSetting = _configDao.getValue(Config.CPUCapacityDisableThreshold.key());
    thresholds.put(Capacity.CAPACITY_TYPE_CPU, NumbersUtil.parseFloat(cpuSetting, 0.85F));

    String memorySetting = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key());
    thresholds.put(Capacity.CAPACITY_TYPE_MEMORY, NumbersUtil.parseFloat(memorySetting, 0.85F));

    return thresholds;
}
/**
 * Lists the capacity types whose disable thresholds are enforced during
 * allocation (currently CPU and memory, in that check order).
 */
private List<Short> getCapacitiesForCheckingThreshold() {
    List<Short> capacities = new ArrayList<Short>(2);
    capacities.add(Capacity.CAPACITY_TYPE_CPU);
    capacities.add(Capacity.CAPACITY_TYPE_MEMORY);
    return capacities;
}
/**
 * Prunes from the candidate cluster list every cluster whose allocated CPU
 * or memory would cross its configured disable threshold if this VM were
 * placed there. Pruned clusters are also added to the avoid set so later
 * planning stages skip them.
 *
 * @param clusterListForVmAllocation candidate cluster ids; mutated in place
 * @param avoid                      exclude list updated with pruned clusters
 * @param vmProfile                  profile supplying the service offering
 *                                   (CPU/RAM demand of the VM)
 * @param plan                       deployment plan supplying the zone id
 */
private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid,
        VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan) {

    ServiceOffering offering = vmProfile.getServiceOffering();
    int cpuNeeded = offering.getCpu() * offering.getSpeed();
    long ramNeeded = offering.getRamSize() * 1024L * 1024L;

    // For each tracked capacity type, fetch the clusters over threshold and
    // drop them from the allocation candidates.
    for (short capacity : getCapacitiesForCheckingThreshold()) {
        if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0) {
            return; // nothing left to filter
        }
        List<Long> overThreshold = null;
        if (capacity == Capacity.CAPACITY_TYPE_CPU) {
            overThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
                    plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpuNeeded);
        } else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) {
            overThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
                    plan.getDataCenterId(), Config.MemoryCapacityDisableThreshold.key(), ramNeeded);
        }
        if (overThreshold != null && overThreshold.size() != 0) {
            avoid.addClusterList(overThreshold);
            clusterListForVmAllocation.removeAll(overThreshold);
            s_logger.debug("Cannot allocate cluster list " + overThreshold.toString() + " for vm creation since their allocated percentage" +
                    " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters");
        }
    }
}
private List<Long> scanClustersForDestinationInZoneOrPod(long id, boolean isZone,
VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid) {
VirtualMachine vm = vmProfile.getVirtualMachine();
ServiceOffering offering = vmProfile.getServiceOffering();
@ -396,6 +335,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
prioritizedClusterIds.removeAll(disabledClusters);
}
}
removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan);
}else{
if (s_logger.isDebugEnabled()) {
s_logger.debug("No clusters found having a host with enough capacity, returning.");
@ -404,7 +346,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
if(!prioritizedClusterIds.isEmpty()){
List<Long> clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan);
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
}else{
if (s_logger.isDebugEnabled()) {
s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning.");
@ -452,114 +394,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
return disabledPods;
}
/**
 * Returns the capacity types (CPU first, then memory) that are checked
 * against their disable thresholds when filtering clusters.
 */
private List<Short> getCapacitiesForCheckingThreshold(){
    List<Short> result = new ArrayList<Short>();
    result.add(Capacity.CAPACITY_TYPE_CPU);
    result.add(Capacity.CAPACITY_TYPE_MEMORY);
    return result;
}
/**
 * Removes from the candidate cluster list every cluster whose allocated CPU
 * or memory capacity would cross its disable threshold if this VM were
 * placed there, and adds those clusters to the avoid set.
 *
 * @param clusterListForVmAllocation candidate cluster ids; mutated in place
 * @param avoid                      exclude list updated with removed clusters
 * @param vmProfile                  profile supplying the service offering
 *                                   (CPU/RAM demand of the VM)
 * @param plan                       deployment plan supplying the zone id
 */
private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
    List<Short> capacityList = getCapacitiesForCheckingThreshold();
    List<Long> clustersCrossingThreshold = new ArrayList<Long>();

    ServiceOffering offering = vmProfile.getServiceOffering();
    // Demand of one instance of this offering: CPU in MHz, RAM in bytes.
    int cpu_requested = offering.getCpu() * offering.getSpeed();
    long ram_requested = offering.getRamSize() * 1024L * 1024L;

    // For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation.
    for(short capacity : capacityList){

        // Stop early once every candidate has been pruned.
        if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){
            return;
        }
        if (capacity == Capacity.CAPACITY_TYPE_CPU) {
            clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested);
        }
        else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) {
            clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
                Config.MemoryCapacityDisableThreshold.key(), ram_requested );
        }

        if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){
            // addToAvoid Set
            avoid.addClusterList(clustersCrossingThreshold);
            // Remove clusters crossing disabled threshold
            clusterListForVmAllocation.removeAll(clustersCrossingThreshold);

            s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
                " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters");
        }
    }
}
/**
 * Walks the ordered candidate cluster list and returns the first viable
 * DeployDestination (zone/pod/cluster/host plus a storage pool per volume),
 * or null when no cluster can satisfy the VM.
 *
 * Clusters are rejected (and added to the avoid set) when their hypervisor
 * type differs from the VM's, or when no suitable host or storage pool is
 * found in them. The input list is first pruned of clusters over their
 * capacity disable thresholds.
 *
 * @param clusterList ordered candidate cluster ids; mutated by the
 *                    threshold-pruning step
 * @param vmProfile   profile of the VM being placed
 * @param plan        deployment plan (zone, optional pool, reservation ctx)
 * @param avoid       exclude list, extended as clusters are ruled out
 * @param dc          the data center being planned into
 * @return a fully resolved deployment destination, or null if none found
 */
private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
        DeploymentPlan plan, ExcludeList avoid, DataCenter dc){

    if (s_logger.isTraceEnabled()) {
        s_logger.trace("ClusterId List to consider: " + clusterList);
    }

    // Drop clusters whose CPU/memory allocation would cross the disable threshold.
    removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);

    for(Long clusterId : clusterList){
        Cluster clusterVO = _clusterDao.findById(clusterId);

        if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
            s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster");
            avoid.addCluster(clusterVO.getId());
            continue;
        }

        s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId());
        //search for resources(hosts and storage) under this zone, pod, cluster.
        DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());

        //find suitable hosts under this cluster, need as many hosts as we get.
        List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
        //if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM
        if(suitableHosts != null && !suitableHosts.isEmpty()){
            if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
                // Bare metal VMs need no storage-pool allocation; the first suitable host wins.
                Pod pod = _podDao.findById(clusterVO.getPodId());
                DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
                return dest;
            }

            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
            List<Volume> readyAndReusedVolumes = result.second();

            //choose the potential host and pool for the VM
            if(!suitableVolumeStoragePools.isEmpty()){
                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);

                if(potentialResources != null){
                    Pod pod = _podDao.findById(clusterVO.getPodId());
                    Host host = _hostDao.findById(potentialResources.first().getId());
                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                    // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
                    for(Volume vol : readyAndReusedVolumes){
                        storageVolMap.remove(vol);
                    }
                    DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap );
                    s_logger.debug("Returning Deployment Destination: "+ dest);
                    return dest;
                }
            }else{
                s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId);
            }
        }else{
            s_logger.debug("No suitable hosts found under this Cluster: "+clusterId);
        }
        // Nothing workable in this cluster; exclude it from any further planning passes.
        avoid.addCluster(clusterVO.getId());
    }
    s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. ");
    return null;
}
protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){
//look at the aggregate available cpu and ram per cluster
@ -630,215 +464,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
}
/**
 * Pairs one of the suitable hosts with a storage pool per volume such that the host can
 * access every chosen pool and each pool has enough free space for all volumes placed on it.
 *
 * @param suitableHosts              hosts already vetted by the host allocators, in preference order
 * @param suitableVolumeStoragePools per-volume candidate pool lists from the storage allocators
 * @return the first host that satisfies every volume together with the volume-to-pool
 *         mapping, or null if no host qualifies
 */
protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools){
    s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");

    boolean hostCanAccessPool = false;
    boolean haveEnoughSpace = false;
    Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
    // Order the volumes largest-first so the biggest space demands are placed first.
    // NOTE: the comparator deliberately never returns 0 — returning 0 would make the
    // TreeSet collapse volumes that happen to have equal sizes into one element.
    // Assumes Volume.getSize() is non-null (auto-unboxed here) — TODO confirm.
    TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
        @Override
        public int compare(Volume v1, Volume v2) {
            if(v1.getSize() < v2.getSize())
                return 1;
            else
                return -1;
        }
    });
    volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
    boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
    for(Host potentialHost : suitableHosts){
        // Per-pool accumulation of volumes tentatively placed there, used for the
        // cumulative free-space check when the VM has more than one volume.
        Map<StoragePool,List<Volume>> volumeAllocationMap = new HashMap<StoragePool,List<Volume>>();
        for(Volume vol : volumesOrderBySizeDesc){
            haveEnoughSpace = false;
            s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType());
            List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
            hostCanAccessPool = false;
            for(StoragePool potentialSPool : volumePoolList){
                if(hostCanAccessSPool(potentialHost, potentialSPool)){
                    hostCanAccessPool = true;
                    if(multipleVolume){
                        // Check the pool's capacity against every volume assigned to it so
                        // far, not just the current one.
                        List<Volume> requestVolumes = null;
                        if(volumeAllocationMap.containsKey(potentialSPool))
                            requestVolumes = volumeAllocationMap.get(potentialSPool);
                        else
                            requestVolumes = new ArrayList<Volume>();
                        requestVolumes.add(vol);

                        if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
                            continue;
                        volumeAllocationMap.put(potentialSPool,requestVolumes);
                    }
                    storage.put(vol, potentialSPool);
                    haveEnoughSpace = true;
                    break;
                }
            }
            if(!hostCanAccessPool){
                // This host cannot reach any candidate pool for this volume; try the next host.
                break;
            }
            if(!haveEnoughSpace) {
                s_logger.warn("insufficient capacity to allocate all volumes");
                break;
            }
        }
        // Both flags are only true when every volume found an accessible pool with room.
        if(hostCanAccessPool && haveEnoughSpace){
            s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM");
            return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
        }
    }
    s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
    return null;
}
/**
 * Reports whether the given host can use the given storage pool, based on the
 * presence of a host/pool association row in the pool-host DAO.
 *
 * @param host the candidate host
 * @param pool the candidate storage pool
 * @return true if an association row exists for this (pool, host) pair
 */
protected boolean hostCanAccessSPool(Host host, StoragePool pool) {
    final StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId());
    final boolean accessible = hostPoolLinkage != null;
    s_logger.debug("Host: "+ host.getId() + (accessible ?" can" : " cannot") + " access pool: "+ pool.getId());
    return accessible;
}
/**
 * Asks each configured HostAllocator, in order, for suitable routing hosts within the
 * given plan's scope; the first allocator that returns any candidate wins.
 *
 * Fix: the previous version assigned the allocator's raw result to the local list, so a
 * final allocator returning null caused a NullPointerException on the isEmpty() check.
 * A null/empty allocator result now leaves the (empty) list untouched.
 *
 * @param vmProfile  profile of the VM being placed
 * @param plan       deployment scope to search within
 * @param avoid      hosts/clusters/pods to exclude
 * @param returnUpTo maximum number of hosts to return (HostAllocator.RETURN_UPTO_ALL for all)
 * @return suitable hosts, or an empty list if no allocator found any (never null)
 */
protected List<Host> findSuitableHosts(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
    List<Host> suitableHosts = new ArrayList<Host>();
    for(HostAllocator allocator : _hostAllocators) {
        List<Host> candidates = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
        if (candidates != null && !candidates.isEmpty()) {
            suitableHosts = candidates;
            break;
        }
    }

    if(suitableHosts.isEmpty()){
        s_logger.debug("No suitable hosts found");
    }
    return suitableHosts;
}
/**
 * For each usable volume of the VM, finds candidate storage pools within the plan's scope.
 * Ready volumes whose existing pool still fits the plan are reused as-is and reported in
 * the second element of the returned pair. If any volume ends up with no candidate pool,
 * the whole map is cleared — all volumes must be placeable under this cluster or none are.
 *
 * Fix: the system-VM local-storage check previously called equalsIgnoreCase on the raw
 * config value, throwing a NullPointerException when Config.SystemVMUseLocalStorage was
 * unset; the comparison is now null-safe (a missing value means "do not use local storage").
 *
 * @param vmProfile  profile of the VM whose volumes need pools
 * @param plan       deployment scope (may carry a poolId when the ROOT volume is ready)
 * @param avoid      pools/clusters to exclude
 * @param returnUpTo maximum pools per volume to collect from the allocators
 * @return pair of (volume -> candidate pools) and the list of volumes reused without
 *         preparation; the map is empty when this cluster cannot serve every volume
 */
protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
    List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
    Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
    List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();

    //for each volume find list of suitable storage pools by calling the allocators
    for (VolumeVO toBeCreated : volumesTobeCreated) {
        s_logger.debug("Checking suitable pools for volume (Id, Type): ("+toBeCreated.getId() +"," +toBeCreated.getVolumeType().name() + ")");

        //If the plan specifies a poolId, it means that this VM's ROOT volume is ready and the pool should be reused.
        //In this case, also check if rest of the volumes are ready and can be reused.
        if(plan.getPoolId() != null){
            s_logger.debug("Volume has pool(" + plan.getPoolId() + ") already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId());
            List<StoragePool> suitablePools = new ArrayList<StoragePool>();
            StoragePool pool = null;
            if(toBeCreated.getPoolId() != null){
                s_logger.debug("finding pool by id '" + toBeCreated.getPoolId() + "'");
                pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
            }else{
                s_logger.debug("finding pool by id '" + plan.getPoolId() + "'");
                pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
            }

            if(pool != null){
                if(!pool.isInMaintenance()){
                    if(!avoid.shouldAvoid(pool)){
                        long exstPoolDcId = pool.getDataCenterId();
                        long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
                        long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
                        // NOTE(review): plan.getPodId()/getClusterId() are auto-unboxed here;
                        // presumably callers always populate them at this point — confirm.
                        if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){
                            s_logger.debug("Planner need not allocate a pool for this volume since its READY");
                            suitablePools.add(pool);
                            suitableVolumeStoragePools.put(toBeCreated, suitablePools);
                            // Only volumes past Allocated/Creating are truly reusable without preparation.
                            if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
                                readyAndReusedVolumes.add(toBeCreated);
                            }
                            continue;
                        }else{
                            s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
                        }
                    }else{
                        s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
                    }
                }else{
                    s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
                }
            }else{
                s_logger.debug("Unable to find pool by provided id");
            }
        }

        if(s_logger.isDebugEnabled()){
            s_logger.debug("We need to allocate new storagepool for this volume");
        }
        // Non-root-admins cannot deploy into zones/pods/clusters whose allocation state is disabled.
        if(!isRootAdmin(plan.getReservationContext())){
            if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){
                if(s_logger.isDebugEnabled()){
                    s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
                    s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
                }
                //Cannot find suitable storage pools under this cluster for this volume since allocation_state is disabled.
                //- remove any suitable pools found for other volumes.
                //All volumes should get suitable pools under this cluster; else we cant use this cluster.
                suitableVolumeStoragePools.clear();
                break;
            }
        }

        s_logger.debug("Calling StoragePoolAllocators to find suitable pools");

        DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
        DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());

        boolean useLocalStorage = false;
        if (vmProfile.getType() != VirtualMachine.Type.User) {
            String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key());
            // Null-safe: an unset config entry means "do not use local storage".
            if ("true".equalsIgnoreCase(ssvmUseLocalStorage)) {
                useLocalStorage = true;
            }
        } else {
            useLocalStorage = diskOffering.getUseLocalStorage();

            // TODO: this is a hacking fix for the problem of deploy ISO-based VM on local storage
            // when deploying VM based on ISO, we have a service offering and an additional disk offering, use-local storage flag is actually
            // saved in service offering, overrde the flag from service offering when it is a ROOT disk
            if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
                if(toBeCreated.getVolumeType() == Volume.Type.ROOT)
                    useLocalStorage = true;
            }
        }
        diskProfile.setUseLocalStorage(useLocalStorage);

        // First allocator that yields any pool for this volume wins.
        boolean foundPotentialPools = false;
        for(StoragePoolAllocator allocator : _storagePoolAllocators) {
            final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
            if (suitablePools != null && !suitablePools.isEmpty()) {
                suitableVolumeStoragePools.put(toBeCreated, suitablePools);
                foundPotentialPools = true;
                break;
            }
        }

        if(!foundPotentialPools){
            s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId());
            //No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes.
            //All volumes should get suitable pools under this cluster; else we cant use this cluster.
            suitableVolumeStoragePools.clear();
            break;
        }
    }

    if(suitableVolumeStoragePools.isEmpty()){
        s_logger.debug("No suitable pools found");
    }

    return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
}
private boolean isRootAdmin(ReservationContext reservationContext) {
if(reservationContext != null){
if(reservationContext.getAccount() != null){
@ -859,10 +484,17 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
@Override
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
if(vm.getHypervisorType() != HypervisorType.BareMetal){
//check the allocation strategy
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) {
return true;
// check what the ServiceOffering says. If null, check the global config
ServiceOffering offering = vm.getServiceOffering();
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
if (offering != null && offering.getDeploymentPlanner() != null) {
if (offering.getDeploymentPlanner().equals(this.getName())) {
return true;
}
} else {
if (_globalDeploymentPlanner != null && _globalDeploymentPlanner.equals(this._name)) {
return true;
}
}
}
return false;
@ -872,29 +504,20 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
_allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
_globalDeploymentPlanner = _configDao.getValue(Config.VmDeploymentPlanner.key());
return true;
}
private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){
// Check if the zone exists in the system
DataCenterVO zone = _dcDao.findById(zoneId);
if(zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()){
s_logger.info("Zone is currently disabled, cannot allocate to this zone: "+ zoneId);
return false;
}
Pod pod = _podDao.findById(podId);
if(pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()){
s_logger.info("Pod is currently disabled, cannot allocate to this pod: "+ podId);
return false;
}
@Override
public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan,
ExcludeList avoid) throws InsufficientServerCapacityException {
// TODO Auto-generated method stub
return null;
}
Cluster cluster = _clusterDao.findById(clusterId);
if(cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()){
s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: "+ clusterId);
return false;
}
return true;
@Override
public PlannerResourceUsage getResourceUsage() {
return PlannerResourceUsage.Shared;
}
}

View File

@ -1,54 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy;
import javax.ejb.Local;
import org.apache.log4j.Logger;
import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.vm.UserVmVO;
/**
 * Selects a deployment planner name for non-bare-metal user VMs based on the
 * configured VM allocation algorithm inherited from AbstractDeployPlannerSelector.
 */
@Local(value = {DeployPlannerSelector.class})
public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector {
    private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class);

    @Override
    public String selectPlanner(UserVmVO vm) {
        // Bare metal VMs are handled elsewhere; this selector does not apply.
        if (vm.getHypervisorType() == HypervisorType.BareMetal) {
            return null;
        }
        if (_allocationAlgorithm == null) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("The allocation algorithm is null, cannot select the planner");
            }
            return null;
        }
        // Map the configured allocation algorithm onto the planner implementing it.
        if (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString())
                || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString())) {
            return "FirstFitPlanner";
        }
        if (_allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
            return "UserDispersingPlanner";
        }
        if (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString())
                || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) {
            return "UserConcentratedPodPlanner";
        }
        // Unrecognized algorithm value: no planner chosen.
        return null;
    }
}

View File

@ -0,0 +1,117 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
/**
 * Persistence entity for the op_host_planner_reservation table. Records, per host,
 * which planner resource-usage type (Shared/Dedicated) currently holds the host;
 * a null resourceUsage means the host is not reserved.
 *
 * Fix: setPodId/setClusterId boxed through the Long(long) constructor; they now use
 * autoboxing via Long.valueOf, and the setResourceUsage parameter is renamed to
 * match the field it sets.
 */
@Entity
@Table(name = "op_host_planner_reservation")
public class PlannerHostReservationVO implements InternalIdentity {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id")
    private long id;

    @Column(name = "host_id")
    private Long hostId;

    @Column(name = "data_center_id")
    private Long dataCenterId;

    @Column(name = "pod_id")
    private Long podId;

    @Column(name = "cluster_id")
    private Long clusterId;

    // Null when the host is free; otherwise the planner usage type holding it.
    @Column(name = "resource_usage")
    @Enumerated(EnumType.STRING)
    private PlannerResourceUsage resourceUsage;

    /** Required by the persistence framework. */
    public PlannerHostReservationVO() {
    }

    /** Creates an unreserved entry (resourceUsage left null). */
    public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId) {
        this.hostId = hostId;
        this.dataCenterId = dataCenterId;
        this.podId = podId;
        this.clusterId = clusterId;
    }

    /** Creates an entry reserved for the given planner resource usage. */
    public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId,
            PlannerResourceUsage resourceUsage) {
        this.hostId = hostId;
        this.dataCenterId = dataCenterId;
        this.podId = podId;
        this.clusterId = clusterId;
        this.resourceUsage = resourceUsage;
    }

    @Override
    public long getId() {
        return id;
    }

    public Long getHostId() {
        return hostId;
    }

    public void setHostId(Long hostId) {
        this.hostId = hostId;
    }

    public Long getDataCenterId() {
        return dataCenterId;
    }

    public void setDataCenterId(Long dataCenterId) {
        this.dataCenterId = dataCenterId;
    }

    public Long getPodId() {
        return podId;
    }

    public void setPodId(long podId) {
        this.podId = Long.valueOf(podId);
    }

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(long clusterId) {
        this.clusterId = Long.valueOf(clusterId);
    }

    public PlannerResourceUsage getResourceUsage() {
        return resourceUsage;
    }

    public void setResourceUsage(PlannerResourceUsage resourceUsage) {
        this.resourceUsage = resourceUsage;
    }
}

View File

@ -14,11 +14,17 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy;
package com.cloud.deploy.dao;
import com.cloud.utils.component.Adapter;
import com.cloud.vm.UserVmVO;
import java.util.List;
import com.cloud.deploy.PlannerHostReservationVO;
import com.cloud.utils.db.GenericDao;
public interface PlannerHostReservationDao extends GenericDao<PlannerHostReservationVO, Long> {
PlannerHostReservationVO findByHostId(long hostId);
List<PlannerHostReservationVO> listAllReservedHosts();
public interface DeployPlannerSelector extends Adapter {
String selectPlanner(UserVmVO vm);
}

View File

@ -0,0 +1,63 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.deploy.dao;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.ejb.Local;
import com.cloud.deploy.PlannerHostReservationVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
/**
 * DAO for planner host reservations. Exposes a lookup by host id and a listing
 * of all hosts whose resource_usage column is set (i.e. currently reserved).
 */
@Local(value = { PlannerHostReservationDao.class })
public class PlannerHostReservationDaoImpl extends GenericDaoBase<PlannerHostReservationVO, Long> implements
        PlannerHostReservationDao {

    // Template for lookups by host_id.
    private SearchBuilder<PlannerHostReservationVO> _searchByHost;
    // Template matching rows with a non-null resource_usage (reserved hosts).
    private SearchBuilder<PlannerHostReservationVO> _searchReserved;

    public PlannerHostReservationDaoImpl() {
    }

    @PostConstruct
    protected void init() {
        // Build both reusable search templates once the bean has been wired.
        _searchByHost = createSearchBuilder();
        _searchByHost.and("hostId", _searchByHost.entity().getHostId(), SearchCriteria.Op.EQ);
        _searchByHost.done();

        _searchReserved = createSearchBuilder();
        _searchReserved.and("usage", _searchReserved.entity().getResourceUsage(), SearchCriteria.Op.NNULL);
        _searchReserved.done();
    }

    @Override
    public PlannerHostReservationVO findByHostId(long hostId) {
        final SearchCriteria<PlannerHostReservationVO> criteria = _searchByHost.create();
        criteria.setParameters("hostId", hostId);
        return findOneBy(criteria);
    }

    @Override
    public List<PlannerHostReservationVO> listAllReservedHosts() {
        return listBy(_searchReserved.create());
    }
}

View File

@ -85,6 +85,10 @@ import com.cloud.dc.dao.ClusterVSMMapDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.DataCenterIpAddressDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.PlannerHostReservationVO;
import com.cloud.deploy.dao.PlannerHostReservationDao;
import com.cloud.event.ActionEvent;
import com.cloud.event.EventTypes;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InvalidParameterValueException;
@ -212,6 +216,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
protected HighAvailabilityManager _haMgr;
@Inject
protected StorageService _storageSvr;
@Inject
PlannerHostReservationDao _plannerHostReserveDao;
protected List<? extends Discoverer> _discoverers;
public List<? extends Discoverer> getDiscoverers() {
@ -2851,4 +2857,41 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
ResourceState.Enabled);
return sc.list();
}
/**
 * Releases a planner's reservation on the given host by nulling out its
 * resource_usage marker, under a row lock so concurrent planners cannot race.
 *
 * Fix: corrects the "reservartion" typo in both debug log messages.
 *
 * @param hostId id of the host whose reservation should be released
 * @return true if a reservation row existed and was cleared, false otherwise
 */
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true)
public boolean releaseHostReservation(Long hostId) {
    Transaction txn = Transaction.currentTxn();
    try {
        txn.start();
        PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId);
        if (reservationEntry != null) {
            long id = reservationEntry.getId();
            // Re-read the row under an exclusive lock before mutating it.
            PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true);
            if (hostReservation == null) {
                if (s_logger.isDebugEnabled()) {
                    s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservation call is ignored.");
                }
                txn.rollback();
                return false;
            }
            // A null resource_usage marks the host as unreserved.
            hostReservation.setResourceUsage(null);
            _plannerHostReserveDao.persist(hostReservation);
            txn.commit();
            return true;
        }

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Host reservation for host: " + hostId
                    + " does not even exist. Release reservation call is ignored.");
        }
        return false;
    } catch (CloudRuntimeException e) {
        // NOTE(review): rethrown without an explicit rollback — presumably the
        // transaction framework cleans up the open transaction; confirm.
        throw e;
    } catch (Throwable t) {
        s_logger.error("Unable to release host reservation for host: " + hostId, t);
        txn.rollback();
        return false;
    }
}
}

View File

@ -78,6 +78,7 @@ import org.apache.cloudstack.api.command.admin.host.FindHostsForMigrationCmd;
import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd;
import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd;
import org.apache.cloudstack.api.command.admin.host.ReleaseHostReservationCmd;
import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd;
import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd;
import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd;
@ -462,6 +463,7 @@ import com.cloud.dc.dao.HostPodDao;
import com.cloud.dc.dao.PodVlanMapDao;
import com.cloud.dc.dao.VlanDao;
import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
@ -589,6 +591,7 @@ import com.cloud.vm.dao.VMInstanceDao;
import edu.emory.mathcs.backport.java.util.Arrays;
import edu.emory.mathcs.backport.java.util.Collections;
import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd;
public class ManagementServerImpl extends ManagerBase implements ManagementServer {
@ -726,6 +729,16 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
private List<UserAuthenticator> _userAuthenticators;
private List<UserAuthenticator> _userPasswordEncoders;
protected List<DeploymentPlanner> _planners;
/**
 * @return the deployment planner adapters injected via the Spring context.
 */
public List<DeploymentPlanner> getPlanners() {
    return _planners;
}
/**
 * Setter used by the Spring context (and tests) to inject the planner adapters.
 *
 * @param _planners planners to expose through listDeploymentPlanners()
 */
public void setPlanners(List<DeploymentPlanner> _planners) {
    this._planners = _planners;
}
@Inject ClusterManager _clusterMgr;
private String _hashKey = null;
private String _encryptionKey = null;
@ -2903,7 +2916,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(ListAffinityGroupsCmd.class);
cmdList.add(UpdateVMAffinityGroupCmd.class);
cmdList.add(ListAffinityGroupTypesCmd.class);
cmdList.add(ListDeploymentPlannersCmd.class);
cmdList.add(ReleaseHostReservationCmd.class);
cmdList.add(AddResourceDetailCmd.class);
cmdList.add(RemoveResourceDetailCmd.class);
cmdList.add(ListResourceDetailsCmd.class);
@ -4056,4 +4070,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
}
/**
 * Lists the names of all deployment planner adapters registered with this
 * management server (backing the listDeploymentPlanners API command).
 *
 * @return planner names, one per injected DeploymentPlanner
 */
@Override
public List<String> listDeploymentPlanners() {
    final List<String> plannerNames = new ArrayList<String>(_planners.size());
    for (final DeploymentPlanner planner : _planners) {
        plannerNames.add(planner.getName());
    }
    return plannerNames;
}
}

View File

@ -98,7 +98,6 @@ import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeployDestination;
import com.cloud.deploy.DeployPlannerSelector;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
@ -402,9 +401,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
@Inject
AffinityGroupDao _affinityGroupDao;
@Inject
List<DeployPlannerSelector> plannerSelectors;
protected ScheduledExecutorService _executor = null;
protected int _expungeInterval;
protected int _expungeDelay;
@ -3174,15 +3170,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid());
String plannerName = null;
for (DeployPlannerSelector dps : plannerSelectors) {
plannerName = dps.selectPlanner(vm);
if (plannerName != null) {
break;
}
}
// Get serviceOffering for Virtual Machine
ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId());
String plannerName = offering.getDeploymentPlanner();
if (plannerName == null) {
throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType()));
if (vm.getHypervisorType() == HypervisorType.BareMetal) {
plannerName = "BareMetalPlanner";
} else {
plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
}
}
String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString());

View File

@ -608,4 +608,10 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
return null;
}
/**
 * Mock implementation for unit tests: there is no reservation state to release,
 * so this always reports failure.
 */
@Override
public boolean releaseHostReservation(Long hostId) {
    // TODO Auto-generated method stub
    return false;
}
}

View File

@ -0,0 +1,359 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.vm;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.storage.StorageManager;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.agent.AgentManager;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeployDestination;
import com.cloud.deploy.DeploymentClusterPlanner;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
import com.cloud.deploy.DeploymentPlanningManagerImpl;
import com.cloud.deploy.FirstFitPlanner;
import com.cloud.deploy.PlannerHostReservationVO;
import com.cloud.deploy.dao.PlannerHostReservationDao;
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.framework.messagebus.MessageBus;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.test.utils.SpringUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.FilterType;
import org.springframework.context.annotation.ComponentScan.Filter;
import org.springframework.core.type.classreading.MetadataReader;
import org.springframework.core.type.classreading.MetadataReaderFactory;
import org.springframework.core.type.filter.TypeFilter;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.support.AnnotationConfigContextLoader;
import com.cloud.exception.AffinityConflictException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.user.AccountManager;
import com.cloud.utils.component.ComponentContext;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
/**
 * Unit tests for {@link DeploymentPlanningManagerImpl#planDeployment}, covering its
 * early-exit paths:
 * <ul>
 *   <li>the target data center is in the avoid set;</li>
 *   <li>the planner selected by the service offering cannot handle the request;</li>
 *   <li>the planner returns no candidate clusters.</li>
 * </ul>
 * In each case the expected result is a {@code null} {@link DeployDestination}.
 *
 * All collaborators are Mockito mocks wired through the Spring test context defined in
 * the nested {@link TestConfiguration}.
 */
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(loader = AnnotationConfigContextLoader.class)
public class DeploymentPlanningManagerImplTest {

    /** Class under test (real instance; its collaborators below are mocks). */
    @Inject
    DeploymentPlanningManagerImpl _dpm;

    @Inject
    PlannerHostReservationDao _plannerHostReserveDao;

    @Inject
    VirtualMachineProfileImpl vmProfile;

    @Inject
    AffinityGroupVMMapDao _affinityGroupVMMapDao;

    @Inject
    ExcludeList avoids;

    @Inject
    DataCenterVO dc;

    @Inject
    DataCenterDao _dcDao;

    @Inject
    FirstFitPlanner _planner;

    @Inject
    ClusterDao _clusterDao;

    // Arbitrary fixture ids shared by all tests.
    private static final long domainId = 5L;
    private static final long dataCenterId = 1L;

    @BeforeClass
    public static void setUp() throws ConfigurationException {
        // Nothing to do once per class; per-test stubbing happens in testSetUp().
    }

    /**
     * Stubs every mocked collaborator that planDeployment() touches and registers the
     * mocked FirstFitPlanner as the only available planner.
     */
    @Before
    public void testSetUp() {
        ComponentContext.initComponentsLifeCycle();

        // Host-reservation DAO stubs so planner reserve/release bookkeeping succeeds.
        PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared);
        Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO);
        Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO);

        // No affinity groups for the VM, so affinity processing is a no-op.
        Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L);

        VMInstanceVO vm = new VMInstanceVO();
        Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm);

        Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc);
        Mockito.when(dc.getId()).thenReturn(dataCenterId);

        ClusterVO clusterVO = new ClusterVO();
        clusterVO.setHypervisorType(HypervisorType.XenServer.toString());
        Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO);

        // The planner's name must match the offerings' deployment planner below.
        Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner");
        List<DeploymentPlanner> planners = new ArrayList<DeploymentPlanner>();
        planners.add(_planner);
        _dpm.setPlanners(planners);
    }

    /** Data center is in the avoid set: planning must bail out with a null destination. */
    @Test
    public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException {
        ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
                "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner");
        Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);

        DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
        Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(true);

        DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
        assertNull("DataCenter is in avoid set, destination should be null! ", dest);
    }

    /**
     * The offering asks for UserDispersingPlanner, but the only registered planner
     * declines via canHandle(): the destination must be null.
     */
    @Test
    public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException {
        ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
                "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null,
                "UserDispersingPlanner");
        Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);

        DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
        Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false);
        Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false);

        DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
        assertNull("Planner cannot handle, destination should be null! ", dest);
    }

    /** The planner accepts the request but returns no clusters: destination must be null. */
    @Test
    public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException {
        ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false,
                "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner");
        Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering);

        DataCenterDeployment plan = new DataCenterDeployment(dataCenterId);
        Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false);
        Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true);
        Mockito.when(((DeploymentClusterPlanner) _planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null);

        DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids);
        assertNull("Planner cannot handle, destination should be null! ", dest);
    }

    /**
     * Spring context for the tests: scans only DeploymentPlanningManagerImpl's package
     * (restricted by the {@link Library} filter) and supplies Mockito mocks for every
     * dependency the manager needs.
     */
    @Configuration
    @ComponentScan(basePackageClasses = { DeploymentPlanningManagerImpl.class }, includeFilters = { @Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM) }, useDefaultFilters = false)
    public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration {

        @Bean
        public FirstFitPlanner firstFitPlanner() {
            return Mockito.mock(FirstFitPlanner.class);
        }

        @Bean
        public DeploymentPlanner deploymentPlanner() {
            return Mockito.mock(DeploymentPlanner.class);
        }

        @Bean
        public DataCenterVO dataCenter() {
            return Mockito.mock(DataCenterVO.class);
        }

        @Bean
        public ExcludeList excludeList() {
            return Mockito.mock(ExcludeList.class);
        }

        @Bean
        public VirtualMachineProfileImpl virtualMachineProfileImpl() {
            return Mockito.mock(VirtualMachineProfileImpl.class);
        }

        @Bean
        public ClusterDetailsDao clusterDetailsDao() {
            return Mockito.mock(ClusterDetailsDao.class);
        }

        // Renamed from "cataStoreManager": the factory method name is the bean name,
        // and the mocks here are injected by type, so the rename is safe.
        @Bean
        public DataStoreManager dataStoreManager() {
            return Mockito.mock(DataStoreManager.class);
        }

        @Bean
        public StorageManager storageManager() {
            return Mockito.mock(StorageManager.class);
        }

        @Bean
        public HostDao hostDao() {
            return Mockito.mock(HostDao.class);
        }

        @Bean
        public HostPodDao hostPodDao() {
            return Mockito.mock(HostPodDao.class);
        }

        @Bean
        public ClusterDao clusterDao() {
            return Mockito.mock(ClusterDao.class);
        }

        @Bean
        public GuestOSDao guestOSDao() {
            return Mockito.mock(GuestOSDao.class);
        }

        @Bean
        public GuestOSCategoryDao guestOSCategoryDao() {
            return Mockito.mock(GuestOSCategoryDao.class);
        }

        @Bean
        public CapacityManager capacityManager() {
            return Mockito.mock(CapacityManager.class);
        }

        @Bean
        public StoragePoolHostDao storagePoolHostDao() {
            return Mockito.mock(StoragePoolHostDao.class);
        }

        @Bean
        public VolumeDao volumeDao() {
            return Mockito.mock(VolumeDao.class);
        }

        @Bean
        public ConfigurationDao configurationDao() {
            return Mockito.mock(ConfigurationDao.class);
        }

        @Bean
        public DiskOfferingDao diskOfferingDao() {
            return Mockito.mock(DiskOfferingDao.class);
        }

        @Bean
        public PrimaryDataStoreDao primaryDataStoreDao() {
            return Mockito.mock(PrimaryDataStoreDao.class);
        }

        @Bean
        public CapacityDao capacityDao() {
            return Mockito.mock(CapacityDao.class);
        }

        @Bean
        public PlannerHostReservationDao plannerHostReservationDao() {
            return Mockito.mock(PlannerHostReservationDao.class);
        }

        @Bean
        public AffinityGroupProcessor affinityGroupProcessor() {
            return Mockito.mock(AffinityGroupProcessor.class);
        }

        @Bean
        public AffinityGroupDao affinityGroupDao() {
            return Mockito.mock(AffinityGroupDao.class);
        }

        @Bean
        public AffinityGroupVMMapDao affinityGroupVMMapDao() {
            return Mockito.mock(AffinityGroupVMMapDao.class);
        }

        @Bean
        public AccountManager accountManager() {
            return Mockito.mock(AccountManager.class);
        }

        @Bean
        public AgentManager agentManager() {
            return Mockito.mock(AgentManager.class);
        }

        @Bean
        public MessageBus messageBus() {
            return Mockito.mock(MessageBus.class);
        }

        @Bean
        public UserVmDao userVMDao() {
            return Mockito.mock(UserVmDao.class);
        }

        @Bean
        public VMInstanceDao vmInstanceDao() {
            return Mockito.mock(VMInstanceDao.class);
        }

        @Bean
        public DataCenterDao dataCenterDao() {
            return Mockito.mock(DataCenterDao.class);
        }

        /**
         * Component-scan filter that admits only the classes named in this
         * configuration's {@code basePackageClasses}.
         */
        public static class Library implements TypeFilter {
            @Override
            public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
                ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class);
                return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
            }
        }
    }
}

View File

@ -431,7 +431,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu
*/
@Override
public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA,
boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) {
boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) {
// TODO Auto-generated method stub
return null;
}

View File

@ -82,6 +82,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao;
import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl;
import com.cloud.projects.ProjectManager;
import com.cloud.server.ManagementService;
import com.cloud.service.dao.ServiceOfferingDaoImpl;
import com.cloud.storage.dao.DiskOfferingDaoImpl;
import com.cloud.storage.dao.S3DaoImpl;
@ -156,6 +157,11 @@ useDefaultFilters=false
public class ChildTestConfiguration {
@Bean
public ManagementService managementService() {
return Mockito.mock(ManagementService.class);
}
@Bean
public AccountManager acctMgr() {
return Mockito.mock(AccountManager.class);

View File

@ -0,0 +1,42 @@
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/tx
http://www.springframework.org/schema/tx/spring-tx-3.0.xsd
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-3.0.xsd">
<!-- Enable annotation-driven injection (@Inject/@Autowired) in this test context. -->
<context:annotation-config />
<!-- @DB support -->
<bean id="componentContext" class="com.cloud.utils.component.ComponentContext" />
<bean id="transactionContextBuilder" class="com.cloud.utils.db.TransactionContextBuilder" />
<bean id="actionEventInterceptor" class="com.cloud.event.ActionEventInterceptor" />
<!-- Post-processor that applies the interceptors below to components as they are created. -->
<bean id="instantiatePostProcessor" class="com.cloud.utils.component.ComponentInstantiationPostProcessor">
<property name="Interceptors">
<list>
<ref bean="transactionContextBuilder" />
<ref bean="actionEventInterceptor" />
</list>
</property>
</bean>
<!-- Java-based configuration supplying the remaining (mocked) beans for the test. -->
<bean class="org.apache.cloudstack.affinity.AffinityApiTestConfiguration" />
</beans>

View File

@ -973,9 +973,61 @@ CREATE TABLE `cloud`.`network_asa1000v_map` (
ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `eip_associate_public_ip` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if public IP is associated with user VM creation by default when EIP service is enabled.' AFTER `elastic_ip_service`;
-- Re-enable foreign key checking, at the end of the upgrade path
SET foreign_key_checks = 1;
-- Per-host reservations made by deployment planners. The resource_usage column
-- records whether a host is held for Shared (between planners) or Dedicated
-- (exclusive to one planner) use, so conflicting planner usages can be resolved
-- by a reserve/release cycle.
CREATE TABLE `cloud`.`op_host_planner_reservation` (
`id` bigint unsigned NOT NULL auto_increment,
`data_center_id` bigint unsigned NOT NULL,
`pod_id` bigint unsigned,
`cluster_id` bigint unsigned,
`host_id` bigint unsigned,
`resource_usage` varchar(255) COMMENT 'Shared(between planners) Vs Dedicated (exclusive usage to a planner)',
PRIMARY KEY (`id`),
INDEX `i_op_host_planner_reservation__host_resource_usage`(`host_id`, `resource_usage`),
CONSTRAINT `fk_planner_reservation__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_planner_reservation__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `cloud`.`data_center`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_planner_reservation__pod_id` FOREIGN KEY (`pod_id`) REFERENCES `cloud`.`host_pod_ref`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_planner_reservation__cluster_id` FOREIGN KEY (`cluster_id`) REFERENCES `cloud`.`cluster`(`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- Optional per-offering planner choice; NULL falls back to the global
-- vm.deployment.planner setting (seeded below).
ALTER TABLE `cloud`.`service_offering` ADD COLUMN `deployment_planner` varchar(255) COMMENT 'Planner heuristics used to deploy a VM of this offering; if null global config vm.deployment.planner is used';
-- Global defaults: the fallback planner, and how often host reservations are
-- checked for release (milliseconds).
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.deployment.planner', 'FirstFitPlanner', '[''FirstFitPlanner'', ''UserDispersingPlanner'', ''UserConcentratedPodPlanner'']: DeploymentPlanner heuristic that will be used for VM deployment.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'host.reservation.release.period', '300000', 'The interval in milliseconds between host reservation release checks');
-- Rebuild service_offering_view so it exposes service_offering.deployment_planner
-- alongside the existing offering and domain columns.
DROP VIEW IF EXISTS `cloud`.`service_offering_view`;
CREATE VIEW `cloud`.`service_offering_view` AS
select
service_offering.id,
disk_offering.uuid,
disk_offering.name,
disk_offering.display_text,
disk_offering.created,
disk_offering.tags,
disk_offering.removed,
disk_offering.use_local_storage,
disk_offering.system_use,
service_offering.cpu,
service_offering.speed,
service_offering.ram_size,
service_offering.nw_rate,
service_offering.mc_rate,
service_offering.ha_enabled,
service_offering.limit_cpu_use,
service_offering.host_tag,
service_offering.default_use,
service_offering.vm_type,
service_offering.sort_key,
service_offering.deployment_planner,
domain.id domain_id,
domain.uuid domain_uuid,
domain.name domain_name,
domain.path domain_path
from
`cloud`.`service_offering`
inner join
`cloud`.`disk_offering` ON service_offering.id = disk_offering.id
left join
`cloud`.`domain` ON disk_offering.domain_id = domain.id;
-- Add "default" field to account/user tables
ALTER TABLE `cloud`.`account` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if account is default';
@ -1605,3 +1657,8 @@ CREATE TABLE `cloud`.`nic_ip_alias` (
alter table `cloud`.`vpc_gateways` add column network_acl_id bigint unsigned default 1 NOT NULL;
update `cloud`.`vpc_gateways` set network_acl_id = 2;
-- Re-enable foreign key checking, at the end of the upgrade path
SET foreign_key_checks = 1;

View File

@ -0,0 +1,164 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
import marvin
from marvin import cloudstackTestCase
from marvin.cloudstackTestCase import *
import unittest
import hashlib
import random
class TestDeployVmWithVariedPlanners(cloudstackTestCase):
    """
    This test tests that we can create serviceOfferings with different deployment Planners and deploy virtual machines into a user account
    using these service offerings and builtin template
    """
    def setUp(self):
        """
        Creates a regular user account and two tiny service offerings, one per
        deployment planner (FirstFitPlanner and UserDispersingPlanner).

        CloudStack internally saves its passwords in md5 form and that is how we
        specify it in the API. Python's hashlib library helps us to quickly hash
        strings as follows
        """
        mdf = hashlib.md5()
        mdf.update('password')
        mdf_pass = mdf.hexdigest()
        self.apiClient = self.testClient.getApiClient() #Get ourselves an API client
        self.acct = createAccount.createAccountCmd() #The createAccount command
        self.acct.accounttype = 0 #We need a regular user. admins have accounttype=1
        self.acct.firstname = 'test'
        self.acct.lastname = 'user' #What's up doc?
        self.acct.username = 'testuser'
        self.acct.password = mdf_pass #The md5 hashed password string
        self.acct.email = 'test@domain.com'
        self.acct.account = 'testacct'
        self.acct.domainid = 1 #The default ROOT domain
        self.acctResponse = self.apiClient.createAccount(self.acct)
        # And upon successful creation we'll log a helpful message in our logs
        # using the default debug logger of the test framework
        self.debug("successfully created account: %s, id: \
            %s"%(self.acctResponse.name, \
                 self.acctResponse.id))
        #Create service offering using the FirstFit planner
        self.svcOfferingFirstFit = createServiceOffering.createServiceOfferingCmd()
        self.svcOfferingFirstFit.name = 'Tiny Instance FirstFit'
        self.svcOfferingFirstFit.displaytext = 'Tiny Instance with FirstFitPlanner'
        self.svcOfferingFirstFit.cpuspeed = 100
        self.svcOfferingFirstFit.cpunumber = 1
        self.svcOfferingFirstFit.memory = 256
        self.svcOfferingFirstFit.deploymentplanner = 'FirstFitPlanner'
        self.svcOfferingFirstFitResponse = self.apiClient.createServiceOffering(self.svcOfferingFirstFit)
        self.debug("successfully created serviceofferring name: %s, id: \
            %s, deploymentPlanner: %s"%(self.svcOfferingFirstFitResponse.name, \
                 self.svcOfferingFirstFitResponse.id,self.svcOfferingFirstFitResponse.deploymentplanner))
        #Create service offering using the UserDispersing planner
        self.svcOfferingUserDispersing = createServiceOffering.createServiceOfferingCmd()
        self.svcOfferingUserDispersing.name = 'Tiny Instance UserDispersing'
        self.svcOfferingUserDispersing.displaytext = 'Tiny Instance with UserDispersingPlanner'
        self.svcOfferingUserDispersing.cpuspeed = 100
        self.svcOfferingUserDispersing.cpunumber = 1
        self.svcOfferingUserDispersing.memory = 256
        # BUGFIX: this offering previously reused 'FirstFitPlanner', so the
        # UserDispersingPlanner path was never exercised by this test.
        self.svcOfferingUserDispersing.deploymentplanner = 'UserDispersingPlanner'
        self.svcOfferingUserDispersingResponse = self.apiClient.createServiceOffering(self.svcOfferingUserDispersing)
        self.debug("successfully created serviceofferring name: %s, id: \
            %s, deploymentPlanner: %s"%(self.svcOfferingUserDispersingResponse.name, \
                 self.svcOfferingUserDispersingResponse.id,self.svcOfferingUserDispersingResponse.deploymentplanner))
    def test_DeployVm(self):
        """
        Deploys one VM with each service offering (and therefore each planner)
        and verifies both reach the Running state.

        Let's start by defining the attributes of our VM that we will be
        deploying on CloudStack. We will be assuming a single zone is available
        and is configured and all templates are Ready
        The hardcoded values are used only for brevity.
        """
        # First VM: FirstFitPlanner offering.
        deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd()
        deployVmCmd.zoneid = 1
        deployVmCmd.account = self.acct.account
        deployVmCmd.domainid = self.acct.domainid
        deployVmCmd.templateid = 5 #For default template- CentOS 5.6(64 bit)
        deployVmCmd.serviceofferingid = self.svcOfferingFirstFitResponse.id
        deployVmResponse = self.apiClient.deployVirtualMachine(deployVmCmd)
        self.debug("VM %s was deployed in the job %s"%(deployVmResponse.id, deployVmResponse.jobid))
        # At this point our VM is expected to be Running. Let's find out what
        # listVirtualMachines tells us about VMs in this account
        listVmCmd = listVirtualMachines.listVirtualMachinesCmd()
        listVmCmd.id = deployVmResponse.id
        listVmResponse = self.apiClient.listVirtualMachines(listVmCmd)
        self.assertNotEqual(len(listVmResponse), 0, "Check if the list API \
                            returns a non-empty response")
        vm1 = listVmResponse[0]
        self.assertEqual(vm1.id, deployVmResponse.id, "Check if the VM returned \
                         is the same as the one we deployed")
        self.assertEqual(vm1.state, "Running", "Check if VM has reached \
                         a state of running")
        # Second VM: UserDispersingPlanner offering.
        deployVm2Cmd = deployVirtualMachine.deployVirtualMachineCmd()
        deployVm2Cmd.zoneid = 1
        deployVm2Cmd.account = self.acct.account
        deployVm2Cmd.domainid = self.acct.domainid
        deployVm2Cmd.templateid = 5 #For default template- CentOS 5.6(64 bit)
        # BUGFIX: previously deployed with the FirstFit offering again, leaving
        # the UserDispersing offering untested.
        deployVm2Cmd.serviceofferingid = self.svcOfferingUserDispersingResponse.id
        deployVm2Response = self.apiClient.deployVirtualMachine(deployVm2Cmd)
        self.debug("VM %s was deployed in the job %s"%(deployVm2Response.id, deployVm2Response.jobid))
        # At this point our VM is expected to be Running. Let's find out what
        # listVirtualMachines tells us about VMs in this account
        listVm2Cmd = listVirtualMachines.listVirtualMachinesCmd()
        listVm2Cmd.id = deployVm2Response.id
        listVm2Response = self.apiClient.listVirtualMachines(listVm2Cmd)
        self.assertNotEqual(len(listVm2Response), 0, "Check if the list API \
                            returns a non-empty response")
        vm2 = listVm2Response[0]
        self.assertEqual(vm2.id, deployVm2Response.id, "Check if the VM returned \
                         is the same as the one we deployed")
        self.assertEqual(vm2.state, "Running", "Check if VM has reached \
                         a state of running")
    def tearDown(self): # Teardown will delete the Account as well as the VM once the VM reaches "Running" state
        """
        And finally let us cleanup the resources we created by deleting the
        account. All good unittests are atomic and rerunnable this way
        """
        deleteAcct = deleteAccount.deleteAccountCmd()
        deleteAcct.id = self.acctResponse.id
        self.apiClient.deleteAccount(deleteAcct)
        # Remove both service offerings created in setUp().
        deleteSvcOfferingFirstFit = deleteServiceOffering.deleteServiceOfferingCmd()
        deleteSvcOfferingFirstFit.id = self.svcOfferingFirstFitResponse.id
        self.apiClient.deleteServiceOffering(deleteSvcOfferingFirstFit)
        deleteSvcOfferingUserDispersing = deleteServiceOffering.deleteServiceOfferingCmd()
        deleteSvcOfferingUserDispersing.id = self.svcOfferingUserDispersingResponse.id
        self.apiClient.deleteServiceOffering(deleteSvcOfferingUserDispersing)

View File

@ -142,6 +142,7 @@ known_categories = {
'listNics':'Nic',
'AffinityGroup': 'Affinity Group',
'InternalLoadBalancer': 'Internal LB',
'DeploymentPlanners': 'Configuration',
}