diff --git a/api/src/com/cloud/agent/api/PvlanSetupCommand.java b/api/src/com/cloud/agent/api/PvlanSetupCommand.java new file mode 100644 index 00000000000..ee1f046d6d9 --- /dev/null +++ b/api/src/com/cloud/agent/api/PvlanSetupCommand.java @@ -0,0 +1,121 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api; + +import java.net.URI; + +import com.cloud.utils.net.NetUtils; + +public class PvlanSetupCommand extends Command { + public enum Type { + DHCP, + VM + } + private String op; + private String primary; + private String isolated; + private String vmMac; + private String dhcpName; + private String dhcpMac; + private String dhcpIp; + private Type type; + private String networkTag; + + protected PvlanSetupCommand() {} + + protected PvlanSetupCommand(Type type, String op, URI uri, String networkTag) + { + this.type = type; + this.op = op; + this.primary = NetUtils.getPrimaryPvlanFromUri(uri); + this.isolated = NetUtils.getIsolatedPvlanFromUri(uri); + this.networkTag = networkTag; + } + + static public PvlanSetupCommand createDhcpSetup(String op, URI uri, String networkTag, String dhcpName, String dhcpMac, String dhcpIp) + { + PvlanSetupCommand cmd = new PvlanSetupCommand(Type.DHCP, op, uri, networkTag); + cmd.setDhcpName(dhcpName); + cmd.setDhcpMac(dhcpMac); + cmd.setDhcpIp(dhcpIp); + return cmd; + } + + static public PvlanSetupCommand createVmSetup(String op, URI uri, String networkTag, String vmMac) + { + PvlanSetupCommand cmd = new PvlanSetupCommand(Type.VM, op, uri, networkTag); + cmd.setVmMac(vmMac); + return cmd; + } + + @Override + public boolean executeInSequence() { + return true; + } + + public String getOp() { + return op; + } + + public String getPrimary() { + return primary; + } + + public String getIsolated() { + return isolated; + } + + public String getVmMac() { + return vmMac; + } + + protected void setVmMac(String vmMac) { + this.vmMac = vmMac; + } + + public String getDhcpMac() { + return dhcpMac; + } + + protected void setDhcpMac(String dhcpMac) { + this.dhcpMac = dhcpMac; + } + + public String getDhcpIp() { + return dhcpIp; + } + + protected void setDhcpIp(String dhcpIp) { + this.dhcpIp = dhcpIp; + } + + public Type getType() { + return type; + } + + public String getDhcpName() { + return dhcpName; + } + + public void setDhcpName(String dhcpName) { + this.dhcpName = dhcpName; + } + + public String getNetworkTag() { + return networkTag; + } +} diff --git a/api/src/com/cloud/deploy/DeploymentClusterPlanner.java b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java new file mode 100644 index 00000000000..1a19c71dbfa --- /dev/null +++ b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + +import java.util.List; + +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +/** + */ +public interface DeploymentClusterPlanner extends DeploymentPlanner { + /** + * This is called to determine list of possible clusters where a virtual + * machine can be deployed. + * + * @param vm + * virtual machine. + * @param plan + * deployment plan that tells you where it's being deployed to. + * @param avoid + * avoid these data centers, pods, clusters, or hosts. + * @return DeployDestination for that virtual machine. + */ + List orderClusters(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) + throws InsufficientServerCapacityException; + + PlannerResourceUsage getResourceUsage(); + +} diff --git a/api/src/com/cloud/deploy/DeploymentPlanner.java b/api/src/com/cloud/deploy/DeploymentPlanner.java index 537dd314733..eb56a591f6b 100644 --- a/api/src/com/cloud/deploy/DeploymentPlanner.java +++ b/api/src/com/cloud/deploy/DeploymentPlanner.java @@ -35,6 +35,7 @@ import com.cloud.vm.VirtualMachineProfile; /** */ public interface DeploymentPlanner extends Adapter { + /** * plan is called to determine where a virtual machine should be running. * @@ -46,6 +47,7 @@ public interface DeploymentPlanner extends Adapter { * avoid these data centers, pods, clusters, or hosts. * @return DeployDestination for that virtual machine. 
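* @throws InsufficientServerCapacityException
*             if no destination with enough capacity can be found for the virtual machine.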
*/ + @Deprecated DeployDestination plan(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException; /** @@ -88,6 +90,10 @@ public interface DeploymentPlanner extends Adapter { userconcentratedpod_firstfit; } + public enum PlannerResourceUsage { + Shared, Dedicated; + } + public static class ExcludeList { private Set _dcIds; private Set _podIds; @@ -99,10 +105,22 @@ public interface DeploymentPlanner extends Adapter { } public ExcludeList(Set _dcIds, Set _podIds, Set _clusterIds, Set _hostIds, Set _poolIds) { - this._dcIds = _dcIds; - this._podIds = _podIds; - this._clusterIds = _clusterIds; - this._poolIds = _poolIds; + if (_dcIds != null) { + this._dcIds = new HashSet(_dcIds); + } + if (_podIds != null) { + this._podIds = new HashSet(_podIds); + } + if (_clusterIds != null) { + this._clusterIds = new HashSet(_clusterIds); + } + + if (_hostIds != null) { + this._hostIds = new HashSet(_hostIds); + } + if (_poolIds != null) { + this._poolIds = new HashSet(_poolIds); + } } public boolean add(InsufficientCapacityException e) { diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index d272c99bc02..ee7f5b7d89f 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -134,6 +134,7 @@ public class EventTypes { public static final String EVENT_REMOVE_FROM_GLOBAL_LOAD_BALANCER_RULE = "GLOBAL.LB.REMOVE"; public static final String EVENT_GLOBAL_LOAD_BALANCER_CREATE = "GLOBAL.LB.CREATE"; public static final String EVENT_GLOBAL_LOAD_BALANCER_DELETE = "GLOBAL.LB.DELETE"; + public static final String EVENT_GLOBAL_LOAD_BALANCER_UPDATE = "GLOBAL.LB.UPDATE"; // Account events public static final String EVENT_ACCOUNT_ENABLE = "ACCOUNT.ENABLE"; @@ -422,6 +423,7 @@ public class EventTypes { public static final String EVENT_INTERNAL_LB_VM_START = "INTERNALLBVM.START"; public static final String EVENT_INTERNAL_LB_VM_STOP = "INTERNALLBVM.STOP"; + public static final String EVENT_HOST_RESERVATION_RELEASE = "HOST.RESERVATION.RELEASE"; // Dedicated guest vlan range public static final String EVENT_GUEST_VLAN_RANGE_DEDICATE = "GUESTVLANRANGE.DEDICATE"; public static final String EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE = "GUESTVLANRANGE.RELEASE"; @@ -727,7 +729,6 @@ public class EventTypes { entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_UPDATE, AutoScaleVmGroup.class.getName()); entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_ENABLE, AutoScaleVmGroup.class.getName()); entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_DISABLE, AutoScaleVmGroup.class.getName()); - entityEventDetails.put(EVENT_GUEST_VLAN_RANGE_DEDICATE, GuestVlan.class.getName()); entityEventDetails.put(EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE, GuestVlan.class.getName()); } diff --git a/api/src/com/cloud/host/Status.java b/api/src/com/cloud/host/Status.java index 97b151dc723..dd49122c13b 100755 --- a/api/src/com/cloud/host/Status.java +++ b/api/src/com/cloud/host/Status.java @@ -147,6 +147,7 @@ public enum Status { s_fsm.addTransition(Status.Down, Event.Remove, Status.Removed); s_fsm.addTransition(Status.Down, Event.ManagementServerDown, Status.Down); s_fsm.addTransition(Status.Down, Event.AgentDisconnected, Status.Down); + s_fsm.addTransition(Status.Down, Event.PingTimeout, Status.Down); s_fsm.addTransition(Status.Alert, Event.AgentConnected, Status.Connecting); s_fsm.addTransition(Status.Alert, Event.Ping, Status.Up); s_fsm.addTransition(Status.Alert, Event.Remove, Status.Removed); diff --git 
a/api/src/com/cloud/network/Networks.java b/api/src/com/cloud/network/Networks.java index f085e9f3029..5aede053d50 100755 --- a/api/src/com/cloud/network/Networks.java +++ b/api/src/com/cloud/network/Networks.java @@ -63,6 +63,7 @@ public class Networks { Storage("storage", Integer.class), Lswitch("lswitch", String.class), Mido("mido", String.class), + Pvlan("pvlan", String.class), UnDecided(null, null); private String scheme; diff --git a/api/src/com/cloud/offering/ServiceOffering.java b/api/src/com/cloud/offering/ServiceOffering.java index 165369c5e9b..45d5f38952b 100755 --- a/api/src/com/cloud/offering/ServiceOffering.java +++ b/api/src/com/cloud/offering/ServiceOffering.java @@ -108,4 +108,6 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity, boolean getDefaultUse(); String getSystemVmType(); + + String getDeploymentPlanner(); } diff --git a/api/src/com/cloud/resource/ResourceService.java b/api/src/com/cloud/resource/ResourceService.java index 268bcd61770..ce0df635bfe 100755 --- a/api/src/com/cloud/resource/ResourceService.java +++ b/api/src/com/cloud/resource/ResourceService.java @@ -101,12 +101,12 @@ public interface ResourceService { S3 discoverS3(AddS3Cmd cmd) throws DiscoveryException; - - List getSupportedHypervisorTypes(long zoneId, boolean forVirtualRouter, Long podId); Pair, Integer> listSwifts(ListSwiftsCmd cmd); List listS3s(ListS3sCmd cmd); + boolean releaseHostReservation(Long hostId); + } diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java index efbb1d64def..518a6480c03 100755 --- a/api/src/com/cloud/server/ManagementService.java +++ b/api/src/com/cloud/server/ManagementService.java @@ -388,5 +388,7 @@ public interface ManagementService { * @return List of capacities */ List listTopConsumedResources(ListCapacityCmd cmd); + + List listDeploymentPlanners(); } diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index 1b745cf892d..8d7739c13e1 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -230,6 +230,7 @@ public class ApiConstants { public static final String VLAN_RANGE = "vlanrange"; public static final String REMOVE_VLAN="removevlan"; public static final String VLAN_ID = "vlanid"; + public static final String ISOLATED_PVLAN = "isolatedpvlan"; public static final String VM_AVAILABLE = "vmavailable"; public static final String VM_LIMIT = "vmlimit"; public static final String VM_TOTAL = "vmtotal"; @@ -495,6 +496,7 @@ public class ApiConstants { public static final String AFFINITY_GROUP_NAMES = "affinitygroupnames"; public static final String ASA_INSIDE_PORT_PROFILE = "insideportprofile"; public static final String AFFINITY_GROUP_ID = "affinitygroupid"; + public static final String DEPLOYMENT_PLANNER = "deploymentplanner"; public static final String ACL_ID = "aclid"; public static final String NUMBER = "number"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java b/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java new file mode 100644 index 00000000000..598b620c301 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.config; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.response.DeploymentPlannersResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.log4j.Logger; + +@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class) +public class ListDeploymentPlannersCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName()); + + private static final String s_name = "listdeploymentplannersresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + List planners = _mgr.listDeploymentPlanners(); + ListResponse response = new ListResponse(); + List plannerResponses = new ArrayList(); + + for (String planner : planners) { + DeploymentPlannersResponse plannerResponse = new DeploymentPlannersResponse(); + plannerResponse.setName(planner); + plannerResponse.setObjectName("deploymentPlanner"); + plannerResponses.add(plannerResponse); + } + + response.setResponses(plannerResponses); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java new file mode 100644 index 00000000000..d09cf38cc50 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.host; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.async.AsyncJob; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "releaseHostReservation", description = "Releases host reservation.", responseObject = SuccessResponse.class) +public class ReleaseHostReservationCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ReleaseHostReservationCmd.class.getName()); + + private static final String s_name = "releasehostreservationresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=HostResponse.class, + required=true, description="the host ID") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_HOST_RESERVATION_RELEASE; + } + + @Override + public String getEventDescription() { + return "releasing reservation for host: " + getId(); + } + + @Override + public AsyncJob.Type getInstanceType() { + return AsyncJob.Type.Host; + } + + @Override + public Long getInstanceId() { + return getId(); + } + + @Override + public void execute(){ + boolean result = _resourceService.releaseHostReservation(getId()); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release host reservation"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java index 7eef22a78b4..0d23fd67557 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java @@ -44,7 +44,7 @@ public class ListNetworkIsolationMethodsCmd extends BaseListCmd{ isolationResponses.add(isolationMethod); } } - response.setResponses(isolationResponses, methods.length); + response.setResponses(isolationResponses, isolationResponses.size()); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index 0e35276d914..c155b706fc0 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -84,6 +84,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name=ApiConstants.NETWORKRATE, type=CommandType.INTEGER, description="data transfer rate in megabits per second allowed. Supported only for non-System offering and system offerings having \"domainrouter\" systemvmtype") private Integer networkRate; + @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "The deployment planner heuristics used to deploy a VM of this offering. If null, value of global config vm.deployment.planner is used") + private String deploymentPlanner; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -148,6 +151,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { return networkRate; } + public String getDeploymentPlanner() { + return deploymentPlanner; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java index 667c4c89966..445c4ed37f4 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java @@ -80,6 +80,9 @@ public class CreateNetworkCmd extends BaseCmd { @Parameter(name=ApiConstants.VLAN, type=CommandType.STRING, description="the ID or VID of the network") private String vlan; + @Parameter(name=ApiConstants.ISOLATED_PVLAN, type=CommandType.STRING, description="the isolated private vlan for this network") + private String isolatedPvlan; + @Parameter(name=ApiConstants.NETWORK_DOMAIN, type=CommandType.STRING, description="network domain") private String networkDomain; @@ -141,6 +144,10 @@ public class CreateNetworkCmd extends BaseCmd { return vlan; } + public String getIsolatedPvlan() { + return isolatedPvlan; + } + public String getAccountName() { return accountName; } diff --git a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java index 1c07a0aad53..1575cd34d07 100644 --- a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java @@ -95,7 +95,7 @@ public class AssignToGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { @Override public String 
getEventDescription() { - return "applying load balancer rules " + StringUtils.join(getLoadBalancerRulesIds(), ",") + + return "assign load balancer rules " + StringUtils.join(getLoadBalancerRulesIds(), ",") + " to global load balancer rule " + getGlobalLoadBalancerRuleId(); } diff --git a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java index b08b6aeff17..ac3349516df 100644 --- a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java @@ -85,7 +85,11 @@ public class CreateGlobalLoadBalancerRuleCmd extends BaseAsyncCreateCmd { } public String getAlgorithm() { - return algorithm; + if (algorithm != null) { + return algorithm; + } else { + return GlobalLoadBalancerRule.Algorithm.RoundRobin.name(); + } } public String getGslbMethod() { @@ -158,7 +162,7 @@ public class CreateGlobalLoadBalancerRuleCmd extends BaseAsyncCreateCmd { @Override public String getEventDescription() { - return "creating a global load balancer: " + getName() + " for account: " + getAccountName(); + return "creating a global load balancer rule Id: " + getEntityId(); } diff --git a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java index 424b1072887..fe5decdf5fc 100644 --- a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java @@ -77,12 +77,12 @@ public class DeleteGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { @Override public String getEventType() { - return EventTypes.EVENT_LOAD_BALANCER_DELETE; + return EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE; } @Override public String getEventDescription() { - return "deleting global load balancer: " + getGlobalLoadBalancerId(); + return "deleting global load balancer rule: " + getGlobalLoadBalancerId(); } @Override diff --git a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java index 10694e1633e..4e2c0fd0fb9 100644 --- a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java @@ -17,11 +17,11 @@ package org.apache.cloudstack.api.command.user.region.ha.gslb; +import com.cloud.event.EventTypes; +import com.cloud.region.ha.GlobalLoadBalancerRule; import com.cloud.region.ha.GlobalLoadBalancingRulesService; -import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseListTaggedResourcesCmd; -import org.apache.cloudstack.api.Parameter; +import com.cloud.user.Account; +import org.apache.cloudstack.api.*; import org.apache.cloudstack.api.response.GlobalLoadBalancerResponse; import org.apache.cloudstack.api.response.LoadBalancerResponse; import org.apache.log4j.Logger; @@ -29,7 +29,7 @@ import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = "updateGlobalLoadBalancerRule", description = "update global load 
balancer rules.", responseObject = LoadBalancerResponse.class) -public class UpdateGlobalLoadBalancerRuleCmd extends BaseListTaggedResourcesCmd { +public class UpdateGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(GlobalLoadBalancerResponse.class.getName()); private static final String s_name = "updategloballoadbalancerruleresponse"; @@ -88,9 +88,27 @@ public class UpdateGlobalLoadBalancerRuleCmd extends BaseListTaggedResourcesCmd return s_name; } + @Override + public long getEntityOwnerId() { + GlobalLoadBalancerRule lb = _entityMgr.findById(GlobalLoadBalancerRule.class, getId()); + if (lb != null) { + return lb.getAccountId(); + } + return Account.ACCOUNT_ID_SYSTEM; + } + @Override public void execute() { _gslbService.updateGlobalLoadBalancerRule(this); } + @Override + public String getEventType() { + return EventTypes.EVENT_GLOBAL_LOAD_BALANCER_UPDATE; + } + + @Override + public String getEventDescription() { + return null; + } } diff --git a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index f35e87e3b0f..08ebbb05887 100644 --- a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -18,6 +18,8 @@ package org.apache.cloudstack.api.response; import java.util.Date; +import javax.persistence.Column; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; @@ -82,6 +84,8 @@ public class ServiceOfferingResponse extends BaseResponse { @SerializedName(ApiConstants.NETWORKRATE) @Param(description="data transfer rate in megabits per second allowed.") private Integer networkRate; + @SerializedName(ApiConstants.DEPLOYMENT_PLANNER) @Param(description="deployment strategy used to deploy VM.") + private String deploymentPlanner; public String getId() { return id; @@ -225,4 +229,12 @@ public class ServiceOfferingResponse extends BaseResponse { public void setNetworkRate(Integer networkRate) { this.networkRate = networkRate; } + + public String getDeploymentPlanner() { + return deploymentPlanner; + } + + public void setDeploymentPlanner(String deploymentPlanner) { + this.deploymentPlanner = deploymentPlanner; + } } diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in index 2ec6b59e96b..ba47db3cd94 100644 --- a/client/tomcatconf/applicationContext.xml.in +++ b/client/tomcatconf/applicationContext.xml.in @@ -545,15 +545,11 @@ Deployment planners --> - - - + - - - + @@ -610,10 +606,6 @@ - - - - @@ -628,6 +620,7 @@ + @@ -635,9 +628,7 @@ - - - + @@ -842,17 +833,13 @@ - + - - - - @@ -868,6 +855,8 @@ + + @@ -877,4 +866,7 @@ + + + diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 0520f4a2062..6946c7e4d5f 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -212,6 +212,7 @@ listConfigurations=1 ldapConfig=1 ldapRemove=1 listCapabilities=15 +listDeploymentPlanners=1 #### pod commands createPod=1 @@ -267,6 +268,7 @@ listHosts=3 findHostsForMigration=1 addSecondaryStorage=1 updateHostPassword=1 +releaseHostReservation=1 #### volume commands attachVolume=15 diff --git a/core/src/com/cloud/agent/api/PlugNicCommand.java b/core/src/com/cloud/agent/api/PlugNicCommand.java index b896e4540cb..d10c6808a59 100644 --- 
a/core/src/com/cloud/agent/api/PlugNicCommand.java +++ b/core/src/com/cloud/agent/api/PlugNicCommand.java @@ -17,11 +17,13 @@ package com.cloud.agent.api; import com.cloud.agent.api.to.NicTO; +import com.cloud.vm.VirtualMachine; public class PlugNicCommand extends Command { NicTO nic; String instanceName; + VirtualMachine.Type vmType; public NicTO getNic() { return nic; @@ -35,12 +37,17 @@ public class PlugNicCommand extends Command { protected PlugNicCommand() { } - public PlugNicCommand(NicTO nic, String instanceName) { + public PlugNicCommand(NicTO nic, String instanceName, VirtualMachine.Type vmtype) { this.nic = nic; this.instanceName = instanceName; + this.vmType = vmtype; } public String getVmName() { return instanceName; } + + public VirtualMachine.Type getVMType() { + return vmType; + } } diff --git a/docs/en-US/build-rpm.xml b/docs/en-US/build-rpm.xml index 574065833ff..7caf924bfe4 100644 --- a/docs/en-US/build-rpm.xml +++ b/docs/en-US/build-rpm.xml @@ -41,9 +41,9 @@ under the License. You probably want to ensure that your environment variables will survive a logout/reboot. Be sure to update ~/.bashrc with the PATH and JAVA_HOME variables. - Building RPMs for $PRODUCT; is fairly simple. Assuming you already have the source downloaded and have uncompressed the tarball into a local directory, you're going to be able to generate packages in just a few minutes. + Building RPMs for &PRODUCT; is fairly simple. Assuming you already have the source downloaded and have uncompressed the tarball into a local directory, you're going to be able to generate packages in just a few minutes. Packaging has Changed - If you've created packages for $PRODUCT; previously, you should be aware that the process has changed considerably since the project has moved to using Apache Maven. Please be sure to follow the steps in this section closely. + If you've created packages for &PRODUCT; previously, you should be aware that the process has changed considerably since the project has moved to using Apache Maven. Please be sure to follow the steps in this section closely.
Generating RPMS @@ -69,7 +69,7 @@ under the License. Configuring your systems to use your new yum repository Now that your yum repository is populated with RPMs and metadata - we need to configure the machines that need to install $PRODUCT;. + we need to configure the machines that need to install &PRODUCT;. Create a file named /etc/yum.repos.d/cloudstack.repo with this information: [apache-cloudstack] @@ -79,7 +79,7 @@ under the License. gpgcheck=0 - Completing this step will allow you to easily install $PRODUCT; on a number of machines across the network. + Completing this step will allow you to easily install &PRODUCT; on a number of machines across the network.
diff --git a/docs/en-US/deployment-architecture-overview.xml b/docs/en-US/deployment-architecture-overview.xml index e3103c52c1c..835898ced7f 100644 --- a/docs/en-US/deployment-architecture-overview.xml +++ b/docs/en-US/deployment-architecture-overview.xml @@ -49,7 +49,7 @@ multi-node Management Server installation and up to tens of thousands of hosts using any of several advanced networking setups. For information about deployment options, see the "Choosing a Deployment Architecture" - section of the $PRODUCT; Installation Guide. + section of the &PRODUCT; Installation Guide. diff --git a/docs/en-US/event-framework.xml b/docs/en-US/event-framework.xml index 88c45c9033d..0f62fac1407 100644 --- a/docs/en-US/event-framework.xml +++ b/docs/en-US/event-framework.xml @@ -24,7 +24,7 @@ Event notification framework provides a means for the Management Server components to publish and subscribe to &PRODUCT; events. Event notification is achieved by implementing the concept of event bus abstraction in the Management Server. An event bus is introduced in the - Management Server that allows the &PRODUCT;components and extension plug-ins to subscribe to the + Management Server that allows the &PRODUCT; components and extension plug-ins to subscribe to the events by using the Advanced Message Queuing Protocol (AMQP) client. In &PRODUCT;, a default implementation of event bus is provided as a plug-in that uses the RabbitMQ AMQP client. The AMQP client pushes the published events to a compatible AMQP server. Therefore all the &PRODUCT; diff --git a/docs/en-US/gslb.xml b/docs/en-US/gslb.xml index d5d2d203265..968e8e2cefa 100644 --- a/docs/en-US/gslb.xml +++ b/docs/en-US/gslb.xml @@ -45,7 +45,7 @@ A typical GSLB environment is comprised of the following components: - GSLB Site: In &PRODUCT;terminology, GSLB sites are + GSLB Site: In &PRODUCT; terminology, GSLB sites are represented by zones that are mapped to data centers, each of which has various network appliances. Each GSLB site is managed by a NetScaler appliance that is local to that site. Each of these appliances treats its own site as the local site and all other diff --git a/docs/en-US/ipv6-support.xml b/docs/en-US/ipv6-support.xml index c7f7744393e..bc14c8eab0e 100644 --- a/docs/en-US/ipv6-support.xml +++ b/docs/en-US/ipv6-support.xml @@ -21,7 +21,7 @@ -->
IPv6 Support in &PRODUCT; - &PRODUCT;supports Internet Protocol version 6 (IPv6), the recent version of the Internet + &PRODUCT; supports Internet Protocol version 6 (IPv6), the recent version of the Internet Protocol (IP) that defines routing the network traffic. IPv6 uses a 128-bit address that exponentially expands the current address space that is available to the users. IPv6 addresses consist of eight groups of four hexadecimal digits separated by colons, for example, diff --git a/docs/en-US/vmware-cluster-config-dvswitch.xml b/docs/en-US/vmware-cluster-config-dvswitch.xml index 3468c1bea4e..a3250f4f380 100644 --- a/docs/en-US/vmware-cluster-config-dvswitch.xml +++ b/docs/en-US/vmware-cluster-config-dvswitch.xml @@ -21,7 +21,7 @@ -->
Configuring a vSphere Cluster with VMware Distributed Virtual Switch - &PRODUCT;supports VMware vNetwork Distributed Switch (VDS) for virtual network configuration + &PRODUCT; supports VMware vNetwork Distributed Switch (VDS) for virtual network configuration in a VMware vSphere environment. This section helps you configure VMware VDS in a &PRODUCT; deployment. Each vCenter server instance can support up to 128 VDS instances and each VDS instance can manage up to 500 VMware hosts. diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java index 02f8c2c546c..e0ae778911c 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.engine.datacenter.entity.api.db.dao; +import java.sql.PreparedStatement; +import java.sql.SQLException; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -30,18 +32,19 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; @Component(value="EngineHostDetailsDao") @Local(value=HostDetailsDao.class) public class HostDetailsDaoImpl extends GenericDaoBase implements HostDetailsDao { protected final SearchBuilder HostSearch; protected final SearchBuilder DetailSearch; - + public HostDetailsDaoImpl() { HostSearch = createSearchBuilder(); HostSearch.and("hostId", HostSearch.entity().getHostId(), SearchCriteria.Op.EQ); HostSearch.done(); - + DetailSearch = createSearchBuilder(); DetailSearch.and("hostId", DetailSearch.entity().getHostId(), SearchCriteria.Op.EQ); DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); @@ -53,7 +56,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement SearchCriteria sc = DetailSearch.create(); sc.setParameters("hostId", hostId); sc.setParameters("name", name); - + DetailVO detail = findOneIncludingRemovedBy(sc); if("password".equals(name) && detail != null){ detail.setValue(DBEncryptionUtil.decrypt(detail.getValue())); @@ -65,7 +68,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement public Map findDetails(long hostId) { SearchCriteria sc = HostSearch.create(); sc.setParameters("hostId", hostId); - + List results = search(sc, null); Map details = new HashMap(results.size()); for (DetailVO result : results) { @@ -77,12 +80,12 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement } return details; } - + @Override public void deleteDetails(long hostId) { SearchCriteria sc = HostSearch.create(); sc.setParameters("hostId", hostId); - + List results = search(sc, null); for (DetailVO result : results) { remove(result.getId()); @@ -91,19 +94,27 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement @Override public void persist(long hostId, Map details) { + final String InsertOrUpdateSql = "INSERT INTO `cloud`.`host_details` (host_id, name, value) VALUES (?,?,?) 
ON DUPLICATE KEY UPDATE value=?"; + Transaction txn = Transaction.currentTxn(); txn.start(); - SearchCriteria sc = HostSearch.create(); - sc.setParameters("hostId", hostId); - expunge(sc); - + for (Map.Entry detail : details.entrySet()) { - String value = detail.getValue(); - if("password".equals(detail.getKey())){ - value = DBEncryptionUtil.encrypt(value); - } - DetailVO vo = new DetailVO(hostId, detail.getKey(), value); - persist(vo); + String value = detail.getValue(); + if ("password".equals(detail.getKey())) { + value = DBEncryptionUtil.encrypt(value); + } + try { + PreparedStatement pstmt = txn.prepareAutoCloseStatement(InsertOrUpdateSql); + pstmt.setLong(1, hostId); + pstmt.setString(2, detail.getKey()); + pstmt.setString(3, value); + pstmt.setString(4, value); + pstmt.executeUpdate(); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to persist the host_details key: " + detail.getKey() + + " for host id: " + hostId, e); + } } txn.commit(); } diff --git a/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java index b6a9cef9ee9..47cdeb30633 100644 --- a/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.host.dao; +import java.sql.PreparedStatement; +import java.sql.SQLException; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -30,18 +32,19 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value=HostDetailsDao.class) public class HostDetailsDaoImpl extends GenericDaoBase implements HostDetailsDao { protected final SearchBuilder HostSearch; protected final SearchBuilder DetailSearch; - + public HostDetailsDaoImpl() { HostSearch = createSearchBuilder(); HostSearch.and("hostId", HostSearch.entity().getHostId(), SearchCriteria.Op.EQ); HostSearch.done(); - + DetailSearch = createSearchBuilder(); DetailSearch.and("hostId", DetailSearch.entity().getHostId(), SearchCriteria.Op.EQ); DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); @@ -53,7 +56,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement SearchCriteria sc = DetailSearch.create(); sc.setParameters("hostId", hostId); sc.setParameters("name", name); - + DetailVO detail = findOneIncludingRemovedBy(sc); if("password".equals(name) && detail != null){ detail.setValue(DBEncryptionUtil.decrypt(detail.getValue())); @@ -65,7 +68,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement public Map findDetails(long hostId) { SearchCriteria sc = HostSearch.create(); sc.setParameters("hostId", hostId); - + List results = search(sc, null); Map details = new HashMap(results.size()); for (DetailVO result : results) { @@ -77,12 +80,12 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement } return details; } - + @Override public void deleteDetails(long hostId) { SearchCriteria sc = HostSearch.create(); sc.setParameters("hostId", hostId); - + List results = search(sc, null); for (DetailVO result : results) { remove(result.getId()); @@ -91,19 +94,27 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement @Override public void persist(long hostId, Map details) { + final String InsertOrUpdateSql = "INSERT INTO `cloud`.`host_details` 
(host_id, name, value) VALUES (?,?,?) ON DUPLICATE KEY UPDATE value=?"; + Transaction txn = Transaction.currentTxn(); txn.start(); - SearchCriteria sc = HostSearch.create(); - sc.setParameters("hostId", hostId); - expunge(sc); - + for (Map.Entry detail : details.entrySet()) { - String value = detail.getValue(); - if("password".equals(detail.getKey())){ - value = DBEncryptionUtil.encrypt(value); - } - DetailVO vo = new DetailVO(hostId, detail.getKey(), value); - persist(vo); + String value = detail.getValue(); + if ("password".equals(detail.getKey())) { + value = DBEncryptionUtil.encrypt(value); + } + try { + PreparedStatement pstmt = txn.prepareAutoCloseStatement(InsertOrUpdateSql); + pstmt.setLong(1, hostId); + pstmt.setString(2, detail.getKey()); + pstmt.setString(3, value); + pstmt.setString(4, value); + pstmt.executeUpdate(); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to persist the host_details key: " + detail.getKey() + + " for host id: " + hostId, e); + } } txn.commit(); } diff --git a/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java b/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java index d07be6462f1..7a49e63e5b3 100644 --- a/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java +++ b/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java @@ -174,5 +174,10 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe return false; } + @Override + public String getDeploymentPlanner() { + // TODO Auto-generated method stub + return null; + } } diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDao.java b/engine/schema/src/com/cloud/network/dao/NetworkDao.java index 43cabe751f6..d0a1a256efc 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkDao.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkDao.java @@ -113,4 +113,6 @@ public interface NetworkDao extends GenericDao , StateDao listRedundantNetworks(); List listByAclId(long aclId); + + int getNonSystemNetworkCountByVpcId(long vpcId); } diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java index 5b3b526b640..c55cf28273a 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java @@ -162,6 +162,9 @@ public class NetworkDaoImpl extends GenericDaoBase implements N CountBy.and("offeringId", CountBy.entity().getNetworkOfferingId(), Op.EQ); CountBy.and("vpcId", CountBy.entity().getVpcId(), Op.EQ); CountBy.and("removed", CountBy.entity().getRemoved(), Op.NULL); + SearchBuilder ntwkOffJoin = _ntwkOffDao.createSearchBuilder(); + ntwkOffJoin.and("isSystem", ntwkOffJoin.entity().isSystemOnly(), Op.EQ); + CountBy.join("offerings", ntwkOffJoin, CountBy.entity().getNetworkOfferingId(), ntwkOffJoin.entity().getId(), JoinBuilder.JoinType.INNER); CountBy.done(); PhysicalNetworkSearch = createSearchBuilder(); @@ -627,4 +630,14 @@ public class NetworkDaoImpl extends GenericDaoBase implements N return listBy(sc, null); } + + + @Override + public int getNonSystemNetworkCountByVpcId(long vpcId) { + SearchCriteria sc = CountBy.create(); + sc.setParameters("vpcId", vpcId); + sc.setJoinParameters("offerings", "isSystem", false); + List results = customSearch(sc, null); + return results.get(0); + } } diff --git a/engine/schema/src/com/cloud/service/ServiceOfferingVO.java b/engine/schema/src/com/cloud/service/ServiceOfferingVO.java index 94a73515e6a..fd31d301bc3 100755 --- 
a/engine/schema/src/com/cloud/service/ServiceOfferingVO.java +++ b/engine/schema/src/com/cloud/service/ServiceOfferingVO.java @@ -68,6 +68,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering @Column(name="sort_key") int sortKey; + @Column(name = "deployment_planner") + private String deploymentPlanner = null; + protected ServiceOfferingVO() { super(); } @@ -104,6 +107,15 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering this.hostTag = hostTag; } + public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String displayText, boolean useLocalStorage, + boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId, + String hostTag, String deploymentPlanner) { + this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, volatileVm, + displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId, hostTag); + this.deploymentPlanner = deploymentPlanner; + } + @Override public boolean getOfferHA() { return offerHA; @@ -208,4 +220,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering return volatileVm; } + @Override + public String getDeploymentPlanner() { + return deploymentPlanner; + } + } diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java index f14a3edc1f6..5e108e0a770 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -17,6 +17,10 @@ package com.cloud.upgrade.dao; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import org.apache.log4j.Logger; import java.io.File; import java.sql.Connection; import java.sql.Date; @@ -25,12 +29,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.UUID; - import com.cloud.network.vpc.NetworkACL; -import org.apache.log4j.Logger; - -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; public class Upgrade410to420 implements DbUpgrade { final static Logger s_logger = Logger.getLogger(Upgrade410to420.class); @@ -70,9 +69,12 @@ public class Upgrade410to420 implements DbUpgrade { updatePrimaryStore(conn); addEgressFwRulesForSRXGuestNw(conn); upgradeEIPNetworkOfferings(conn); + updateGlobalDeploymentPlanner(conn); upgradeDefaultVpcOffering(conn); upgradePhysicalNtwksWithInternalLbProvider(conn); updateNetworkACLs(conn); + addHostDetailsIndex(conn); + updateNetworksForPrivateGateways(conn); } private void updateSystemVmTemplates(Connection conn) { @@ -562,10 +564,56 @@ public class Upgrade410to420 implements DbUpgrade { } } } - - - private void upgradeDefaultVpcOffering(Connection conn) { + private void updateGlobalDeploymentPlanner(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + + try { + pstmt = conn + .prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'"); + rs = pstmt.executeQuery(); + while (rs.next()) { + String globalValue = rs.getString(1); + String plannerName = "FirstFitPlanner"; + + if (globalValue != null) { + if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.random.toString())) { + plannerName = "FirstFitPlanner"; + } 
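/* The remaining branches map the legacy vm.allocation.algorithm values onto planner names:
 * firstfit also resolves to FirstFitPlanner, the userconcentratedpod_* algorithms resolve to
 * UserConcentratedPodPlanner, and userdispersing resolves to UserDispersingPlanner; the chosen
 * name is then written to the vm.deployment.planner global setting below. */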
else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())) { + plannerName = "FirstFitPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_firstfit + .toString())) { + plannerName = "UserConcentratedPodPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_random + .toString())) { + plannerName = "UserConcentratedPodPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) { + plannerName = "UserDispersingPlanner"; + } + } + // update vm.deployment.planner global config + pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'"); + pstmt.setString(1, plannerName); + pstmt.executeUpdate(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to set vm.deployment.planner global config", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + + + private void upgradeDefaultVpcOffering(Connection conn) { PreparedStatement pstmt = null; ResultSet rs = null; @@ -581,7 +629,7 @@ public class Upgrade410to420 implements DbUpgrade { pstmt.setString(3, "InternalLbVm"); pstmt.executeUpdate(); } - + } catch (SQLException e) { throw new CloudRuntimeException("Unable update the default VPC offering with the internal lb service", e); } finally { @@ -596,9 +644,7 @@ public class Upgrade410to420 implements DbUpgrade { } } } - - - + private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) { PreparedStatement pstmt = null; @@ -617,7 +663,7 @@ public class Upgrade410to420 implements DbUpgrade { pstmt.setString(1, uuid); pstmt.setLong(2, pNtwkId); pstmt.executeUpdate(); - + //Add internal lb vm to the list of physical network elements PreparedStatement pstmt1 = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? 
AND provider_name='InternalLbVm'"); @@ -631,7 +677,7 @@ public class Upgrade410to420 implements DbUpgrade { pstmt1.executeUpdate(); } } - + } catch (SQLException e) { throw new CloudRuntimeException("Unable existing physical networks with internal lb provider", e); } finally { @@ -645,6 +691,62 @@ public class Upgrade410to420 implements DbUpgrade { } catch (SQLException e) { } } - + } + + private void addHostDetailsIndex(Connection conn) { + s_logger.debug("Checking if host_details index exists, if not we will add it"); + PreparedStatement pstmt = null; + ResultSet rs = null; + try { + pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'"); + rs = pstmt.executeQuery(); + if (rs.next()) { + s_logger.debug("Index already exists on host_details - not adding new one"); + } else { + // add the index + PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER IGNORE TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)"); + pstmtUpdate.executeUpdate(); + s_logger.debug("Index did not exist on host_details - added new one"); + pstmtUpdate.close(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Failed to check/update the host_details index ", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + + + private void updateNetworksForPrivateGateways(Connection conn) { + + PreparedStatement pstmt = null; + ResultSet rs = null; + + try { + //1) get all non removed gateways + pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null"); + rs = pstmt.executeQuery(); + while (rs.next()) { + Long networkId = rs.getLong(1); + Long vpcId = rs.getLong(2); + //2) Update networks with vpc_id if its set to NULL + pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? and vpc_id is NULL and removed is NULL"); + pstmt.setLong(1, vpcId); + pstmt.setLong(2, networkId); + pstmt.executeUpdate(); + + } + } catch (SQLException e) { + throw new CloudRuntimeException("Failed to update private networks with VPC id.", e); + } } } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java index c3fc5a6f6c1..4ed7c27d491 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java @@ -118,18 +118,18 @@ public class UsageNetworkOfferingDaoImpl extends GenericDaoBase, StateDao< * @return list of VMInstanceVO running on that host. */ List listByHostId(long hostId); - + /** * List VMs by zone ID * @param zoneId * @return list of VMInstanceVO in the specified zone */ List listByZoneId(long zoneId); - + /** * List VMs by pod ID * @param podId @@ -59,32 +59,32 @@ public interface VMInstanceDao extends GenericDao, StateDao< * @return list of VMInstanceVO in the specified zone, deployed from the specified template, that are not expunged */ public List listNonExpungedByZoneAndTemplate(long zoneId, long templateId); - + /** * Find vm instance with names like. - * + * * @param name name that fits SQL like. * @return list of VMInstanceVO */ List findVMInstancesLike(String name); - + List findVMInTransition(Date time, State... states); List listByTypes(VirtualMachine.Type... types); - + VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... 
types); - + VMInstanceVO findVMByInstanceName(String name); void updateProxyId(long id, Long proxyId, Date time); List listByHostIdTypes(long hostid, VirtualMachine.Type... types); - + List listUpByHostIdTypes(long hostid, VirtualMachine.Type... types); List listByZoneIdAndType(long zoneId, VirtualMachine.Type type); List listUpByHostId(Long hostId); List listByLastHostId(Long hostId); - + List listByTypeAndState(VirtualMachine.Type type, State state); List listByAccountId(long accountId); @@ -92,9 +92,9 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByClusterId(long clusterId); // this does not pull up VMs which are starting List listLHByClusterId(long clusterId); // get all the VMs even starting one on this cluster - + List listVmsMigratingFromHost(Long hostId); - + public Long countRunningByHostId(long hostId); Pair, Map> listClusterIdsInZoneByVmCount(long zoneId, long accountId); @@ -106,7 +106,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId); Long countRunningByAccount(long accountId); - + List listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types); /** @@ -116,4 +116,8 @@ public interface VMInstanceDao extends GenericDao, StateDao< */ List listDistinctHostNames(long networkId, VirtualMachine.Type... types); + List findByHostInStates(Long hostId, State... states); + + List listStartingWithNoHostId(); + } diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index 7198b7c24e0..ffb1a0b8b3d 100644 --- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -5,7 +5,7 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, @@ -83,30 +83,32 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected GenericSearchBuilder CountRunningByAccount; protected SearchBuilder NetworkTypeSearch; protected GenericSearchBuilder DistinctHostNameSearch; - + protected SearchBuilder HostAndStateSearch; + protected SearchBuilder StartingWithNoHostSearch; + @Inject ResourceTagDao _tagsDao; @Inject NicDao _nicDao; - + protected Attribute _updateTimeAttr; - - private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 = + + private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 = "SELECT host.cluster_id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE "; private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2 = " AND host.type = 'Routing' GROUP BY host.cluster_id ORDER BY 2 ASC "; - + private static final String ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT pod.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host_pod_ref` pod LEFT JOIN `cloud`.`vm_instance` vm ON pod.id = vm.pod_id WHERE pod.data_center_id = ? 
" + " GROUP BY pod.id ORDER BY 2 ASC "; - + private static final String ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT host.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE host.data_center_id = ? " + " AND host.pod_id = ? AND host.cluster_id = ? AND host.type = 'Routing' " + " GROUP BY host.id ORDER BY 2 ASC "; @Inject protected HostDao _hostDao; - + public VMInstanceDaoImpl() { } - + @PostConstruct protected void init() { @@ -114,14 +116,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem IdStatesSearch.and("id", IdStatesSearch.entity().getId(), Op.EQ); IdStatesSearch.and("states", IdStatesSearch.entity().getState(), Op.IN); IdStatesSearch.done(); - + VMClusterSearch = createSearchBuilder(); SearchBuilder hostSearch = _hostDao.createSearchBuilder(); VMClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), VMClusterSearch.entity().getHostId(), JoinType.INNER); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); VMClusterSearch.done(); - + LHVMClusterSearch = createSearchBuilder(); SearchBuilder hostSearch1 = _hostDao.createSearchBuilder(); LHVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), LHVMClusterSearch.entity().getLastHostId(), JoinType.INNER); @@ -129,7 +131,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ); LHVMClusterSearch.done(); - + AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("host", AllFieldsSearch.entity().getHostId(), Op.EQ); AllFieldsSearch.and("lastHost", AllFieldsSearch.entity().getLastHostId(), Op.EQ); @@ -169,23 +171,23 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem IdTypesSearch.and("id", IdTypesSearch.entity().getId(), Op.EQ); IdTypesSearch.and("types", IdTypesSearch.entity().getType(), Op.IN); IdTypesSearch.done(); - + HostIdTypesSearch = createSearchBuilder(); HostIdTypesSearch.and("hostid", HostIdTypesSearch.entity().getHostId(), Op.EQ); HostIdTypesSearch.and("types", HostIdTypesSearch.entity().getType(), Op.IN); HostIdTypesSearch.done(); - + HostIdUpTypesSearch = createSearchBuilder(); HostIdUpTypesSearch.and("hostid", HostIdUpTypesSearch.entity().getHostId(), Op.EQ); HostIdUpTypesSearch.and("types", HostIdUpTypesSearch.entity().getType(), Op.IN); HostIdUpTypesSearch.and("states", HostIdUpTypesSearch.entity().getState(), Op.NIN); HostIdUpTypesSearch.done(); - + HostUpSearch = createSearchBuilder(); HostUpSearch.and("host", HostUpSearch.entity().getHostId(), Op.EQ); HostUpSearch.and("states", HostUpSearch.entity().getState(), Op.IN); HostUpSearch.done(); - + InstanceNameSearch = createSearchBuilder(); InstanceNameSearch.and("instanceName", InstanceNameSearch.entity().getInstanceName(), Op.EQ); InstanceNameSearch.done(); @@ -194,21 +196,31 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem CountVirtualRoutersByAccount.select(null, Func.COUNT, null); CountVirtualRoutersByAccount.and("account", CountVirtualRoutersByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); CountVirtualRoutersByAccount.and("type", CountVirtualRoutersByAccount.entity().getType(), SearchCriteria.Op.EQ); - CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN); + CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN); 
CountVirtualRoutersByAccount.done(); - + CountRunningByHost = createSearchBuilder(Long.class); CountRunningByHost.select(null, Func.COUNT, null); CountRunningByHost.and("host", CountRunningByHost.entity().getHostId(), SearchCriteria.Op.EQ); CountRunningByHost.and("state", CountRunningByHost.entity().getState(), SearchCriteria.Op.EQ); - CountRunningByHost.done(); + CountRunningByHost.done(); CountRunningByAccount = createSearchBuilder(Long.class); CountRunningByAccount.select(null, Func.COUNT, null); CountRunningByAccount.and("account", CountRunningByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); CountRunningByAccount.and("state", CountRunningByAccount.entity().getState(), SearchCriteria.Op.EQ); - CountRunningByAccount.done(); - + CountRunningByAccount.done(); + + HostAndStateSearch = createSearchBuilder(); + HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ); + HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN); + HostAndStateSearch.done(); + + StartingWithNoHostSearch = createSearchBuilder(); + StartingWithNoHostSearch.and("state", StartingWithNoHostSearch.entity().getState(), Op.EQ); + StartingWithNoHostSearch.and("host", StartingWithNoHostSearch.entity().getHostId(), Op.NULL); + StartingWithNoHostSearch.done(); + _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; } @@ -219,7 +231,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("account", accountId); return listBy(sc); } - + @Override public List findVMInstancesLike(String name) { SearchCriteria sc = NameLikeSearch.create(); @@ -234,7 +246,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - + @Override public List listByZoneId(long zoneId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -242,7 +254,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - + @Override public List listByPodId(long podId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -263,7 +275,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setJoinParameters("hostSearch1", "clusterId", clusterId); return listBy(sc); } - + @Override public List listByZoneIdAndType(long zoneId, VirtualMachine.Type type) { SearchCriteria sc = AllFieldsSearch.create(); @@ -271,8 +283,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("type", type.toString()); return listBy(sc); } - - + + @Override public List listNonExpungedByZoneAndTemplate(long zoneId, long templateId) { SearchCriteria sc = ZoneTemplateNonExpungedSearch.create(); @@ -310,7 +322,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging}); return listBy(sc); } - + @Override public List listUpByHostId(Long hostId) { SearchCriteria sc = HostUpSearch.create(); @@ -318,14 +330,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("states", new Object[] {State.Starting, State.Running}); return listBy(sc); } - + @Override public List listByTypes(Type... 
types) { SearchCriteria sc = TypesSearch.create(); sc.setParameters("types", (Object[]) types); return listBy(sc); } - + @Override public List listByTypeAndState(VirtualMachine.Type type, State state) { SearchCriteria sc = AllFieldsSearch.create(); @@ -348,7 +360,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("instanceName", name); return findOneBy(sc); } - + @Override public void updateProxyId(long id, Long proxyId, Date time) { VMInstanceVO vo = createForUpdate(); @@ -369,12 +381,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @SuppressWarnings("unchecked") Pair hosts = (Pair)opaque; Long newHostId = hosts.second(); - + VMInstanceVO vmi = (VMInstanceVO)vm; Long oldHostId = vmi.getHostId(); Long oldUpdated = vmi.getUpdated(); Date oldUpdateDate = vmi.getUpdateTime(); - + SearchCriteria sc = StateChangeSearch.create(); sc.setParameters("id", vmi.getId()); sc.setParameters("states", oldState); @@ -383,7 +395,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem vmi.incrUpdated(); UpdateBuilder ub = getUpdateBuilder(vmi); - + ub.set(vmi, "state", newState); ub.set(vmi, "hostId", newHostId); ub.set(vmi, "podIdToDeployIn", vmi.getPodIdToDeployIn()); @@ -393,7 +405,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem if (result == 0 && s_logger.isDebugEnabled()) { VMInstanceVO vo = findByIdIncludingRemoved(vm.getId()); - + if (vo != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()).append("; time=").append(vo.getUpdateTime()); @@ -407,7 +419,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } return result > 0; } - + @Override public List listByLastHostId(Long hostId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -415,7 +427,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Stopped); return listBy(sc); } - + @Override public Long countAllocatedVirtualRoutersForAccount(long accountId) { SearchCriteria sc = CountVirtualRoutersByAccount.create(); @@ -424,7 +436,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", new Object[] {State.Destroyed, State.Error, State.Expunging}); return customSearch(sc, null).get(0); } - + @Override public List listVmsMigratingFromHost(Long hostId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -432,7 +444,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Migrating); return listBy(sc); } - + @Override public Long countRunningByHostId(long hostId){ SearchCriteria sc = CountRunningByHost.create(); @@ -455,7 +467,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, accountId); pstmt.setLong(2, zoneId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long clusterId = rs.getLong(1); @@ -484,11 +496,11 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, accountId); pstmt.setLong(2, podId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long clusterId = rs.getLong(1); - result.add(clusterId); + result.add(clusterId); clusterVmCountMap.put(clusterId, rs.getDouble(2)); } return new Pair, Map>(result, clusterVmCountMap); @@ -511,11 +523,11 @@ public class 
VMInstanceDaoImpl extends GenericDaoBase implem pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, accountId); pstmt.setLong(2, dataCenterId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long podId = rs.getLong(1); - result.add(podId); + result.add(podId); podVmCountMap.put(podId, rs.getDouble(2)); } return new Pair, Map>(result, podVmCountMap); @@ -523,7 +535,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); } catch (Throwable e) { throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); - } + } } @Override @@ -538,7 +550,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt.setLong(2, dcId); pstmt.setLong(3, podId); pstmt.setLong(4, clusterId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { result.add(rs.getLong(1)); @@ -548,9 +560,9 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); } catch (Throwable e) { throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); - } + } } - + @Override public Long countRunningByAccount(long accountId){ SearchCriteria sc = CountRunningByAccount.create(); @@ -558,18 +570,18 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Running); return customSearch(sc, null).get(0); } - + @Override public List listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types) { if (NetworkTypeSearch == null) { - + SearchBuilder nicSearch = _nicDao.createSearchBuilder(); nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); NetworkTypeSearch = createSearchBuilder(); NetworkTypeSearch.and("types", NetworkTypeSearch.entity().getType(), SearchCriteria.Op.IN); NetworkTypeSearch.and("removed", NetworkTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - NetworkTypeSearch.join("nicSearch", nicSearch, NetworkTypeSearch.entity().getId(), + NetworkTypeSearch.join("nicSearch", nicSearch, NetworkTypeSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); NetworkTypeSearch.done(); } @@ -577,27 +589,27 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem SearchCriteria sc = NetworkTypeSearch.create(); if (types != null && types.length != 0) { sc.setParameters("types", (Object[]) types); - } + } sc.setJoinParameters("nicSearch", "networkId", networkId); return listBy(sc); } - - - + + + @Override public List listDistinctHostNames(long networkId, VirtualMachine.Type... 
types) { if (DistinctHostNameSearch == null) { - + SearchBuilder nicSearch = _nicDao.createSearchBuilder(); nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); DistinctHostNameSearch = createSearchBuilder(String.class); DistinctHostNameSearch.selectField(DistinctHostNameSearch.entity().getHostName()); - + DistinctHostNameSearch.and("types", DistinctHostNameSearch.entity().getType(), SearchCriteria.Op.IN); DistinctHostNameSearch.and("removed", DistinctHostNameSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - DistinctHostNameSearch.join("nicSearch", nicSearch, DistinctHostNameSearch.entity().getId(), + DistinctHostNameSearch.join("nicSearch", nicSearch, DistinctHostNameSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); DistinctHostNameSearch.done(); } @@ -605,12 +617,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem SearchCriteria sc = DistinctHostNameSearch.create(); if (types != null && types.length != 0) { sc.setParameters("types", (Object[]) types); - } + } sc.setJoinParameters("nicSearch", "networkId", networkId); return customSearch(sc, null); } - + @Override @DB public boolean remove(Long id) { @@ -625,4 +637,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return result; } + @Override + public List findByHostInStates(Long hostId, State... states) { + SearchCriteria sc = HostAndStateSearch.create(); + sc.setParameters("host", hostId); + sc.setParameters("states", (Object[]) states); + return listBy(sc); + } + + @Override + public List listStartingWithNoHostId() { + SearchCriteria sc = StartingWithNoHostSearch.create(); + sc.setParameters("state", State.Starting); + return listBy(sc); + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index 0dd55d1d325..5b1f8cd699a 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -50,7 +50,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - + s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool"); List suitablePools = new ArrayList(); @@ -65,6 +65,14 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat } List pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags()); + + // add remaining pools in cluster, that did not match tags, to avoid set + List allPools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null); + allPools.removeAll(pools); + for (StoragePoolVO pool : allPools) { + avoid.addPool(pool.getId()); + } + if (pools.size() == 0) { if (s_logger.isDebugEnabled()) { String storageType = dskCh.useLocalStorage() ? 
ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString(); @@ -72,7 +80,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat } return suitablePools; } - + for (StoragePoolVO pool: pools) { if(suitablePools.size() == returnUpTo){ break; @@ -80,13 +88,15 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { suitablePools.add(pol); + } else { + avoid.addPool(pool.getId()); } } - + if (s_logger.isDebugEnabled()) { s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools"); } - + return suitablePools; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java index 7447d988a58..632ba439cb0 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -74,7 +74,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (!dskCh.useLocalStorage()) { return suitablePools; } - + // data disk and host identified from deploying vm (attach volume case) if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) { List hostPools = _poolHostDao.listByHostId(plan.getHostId()); @@ -85,7 +85,9 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (filter(avoid, pol, dskCh, plan)) { s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); suitablePools.add(pol); - } + } else { + avoid.addPool(pool.getId()); + } } if (suitablePools.size() == returnUpTo) { @@ -101,8 +103,19 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { suitablePools.add(pol); - } + } else { + avoid.addPool(pool.getId()); + } } + + // add remaining pools in cluster, that did not match tags, to avoid + // set + List allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), + plan.getPodId(), plan.getClusterId(), null); + allPools.removeAll(availablePools); + for (StoragePoolVO pool : allPools) { + avoid.addPool(pool.getId()); + } } if (s_logger.isDebugEnabled()) { @@ -111,7 +124,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { return suitablePools; } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 1d3cd819d70..e9769802a37 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -39,18 +39,18 @@ import com.cloud.vm.VirtualMachineProfile; @Component public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class); - @Inject PrimaryDataStoreDao _storagePoolDao; - 
@Inject DataStoreManager dataStoreMgr; - + @Inject PrimaryDataStoreDao _storagePoolDao; + @Inject DataStoreManager dataStoreMgr; + @Override - protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, + protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { Volume volume = _volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new ArrayList(); requestVolumes.add(volume); return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); } - + @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, @@ -64,9 +64,16 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { return suitablePools; } } - + List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags()); - + + // add remaining pools in zone, that did not match tags, to avoid set + List allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null); + allPools.removeAll(storagePools); + for (StoragePoolVO pool : allPools) { + avoid.addPool(pool.getId()); + } + for (StoragePoolVO storage : storagePools) { if (suitablePools.size() == returnUpTo) { break; @@ -74,7 +81,9 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); if (filter(avoid, pol, dskCh, plan)) { suitablePools.add(pol); - } + } else { + avoid.addPool(pol.getId()); + } } return suitablePools; } diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-early-config b/patches/systemvm/debian/config/etc/init.d/cloud-early-config index a457f228653..893a2455bc4 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/patches/systemvm/debian/config/etc/init.d/cloud-early-config @@ -434,6 +434,12 @@ setup_common() { ping -n -c 3 $MGMT_GW & sleep 3 pkill ping + + fi + + local hyp=$(hypervisor) + if [ "$hyp" == "vmware" ]; then + ntpq -p &> /dev/null || vmware-toolbox-cmd timesync enable fi } diff --git a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java index 2ab98566e0a..d917893719e 100644 --- a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java +++ b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
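// A minimal sketch of the avoid-set handling introduced in the ClusterScope, Local and
// ZoneWide storage pool allocators in this patch; identifiers mirror those classes and the
// snippet is illustrative only, assuming the zone-wide lookup shown there:
//
//     List<StoragePoolVO> tagged = _storagePoolDao.findZoneWideStoragePoolsByTags(dcId, dskCh.getTags());
//     List<StoragePoolVO> others = _storagePoolDao.findZoneWideStoragePoolsByTags(dcId, null);
//     others.removeAll(tagged);            // pools that do not carry the requested tags
//     for (StoragePoolVO pool : others) {
//         avoid.addPool(pool.getId());     // excluded so later planning passes skip them
//     }
//     // pools that match the tags but fail filter() (e.g. capacity) are likewise added to avoid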
package com.cloud.deploy; @@ -24,18 +24,17 @@ import javax.ejb.Local; import org.apache.log4j.Logger; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value=DeploymentPlanner.class) -public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner { +public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class); - + /** - * This method should reorder the given list of Cluster Ids by applying any necessary heuristic + * This method should reorder the given list of Cluster Ids by applying any necessary heuristic * for this planner * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account * This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level. @@ -49,7 +48,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo } return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId()); } - + private List applyUserConcentrationPodHeuristicToClusters(long zoneId, List prioritizedClusterIds, long accountId){ //user has VMs in certain pods. - prioritize those pods first //UserConcentratedPod strategy @@ -61,8 +60,8 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo clusterList = prioritizedClusterIds; } return clusterList; - } - + } + private List reorderClustersByPods(List clusterIds, List podIds) { if (s_logger.isDebugEnabled()) { @@ -111,11 +110,11 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo return prioritizedPods; } - + /** - * This method should reorder the given list of Pod Ids by applying any necessary heuristic + * This method should reorder the given list of Pod Ids by applying any necessary heuristic * for this planner - * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account + * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account * @return List ordered list of Pod Ids */ @Override @@ -124,7 +123,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo if(vmProfile.getOwner() == null){ return podIdsByCapacity; } - long accountId = vmProfile.getOwner().getAccountId(); + long accountId = vmProfile.getOwner().getAccountId(); //user has VMs in certain pods. 
- prioritize those pods first //UserConcentratedPod strategy @@ -138,18 +137,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo }else{ return podIdsByCapacity; } - - } - @Override - public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - if(vm.getHypervisorType() != HypervisorType.BareMetal){ - //check the allocation strategy - if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString()))){ - return true; - } - } - return false; } } diff --git a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java index 2db2051389d..2b0b1588802 100755 --- a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java +++ b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java @@ -29,14 +29,13 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import com.cloud.configuration.Config; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value=DeploymentPlanner.class) -public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner { +public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class); @@ -191,17 +190,6 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment } - @Override - public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - if(vm.getHypervisorType() != HypervisorType.BareMetal){ - //check the allocation strategy - if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) { - return true; - } - } - return false; - } - float _userDispersionWeight; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java deleted file mode 100755 index 45fbeb782ab..00000000000 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
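// With the canHandle() overrides removed from UserConcentratedPodPlanner and
// UserDispersingPlanner, those heuristics now only reorder clusters and pods for
// FirstFitPlanner through the DeploymentClusterPlanner contract, which appears to make a
// per-hypervisor selector such as this BareMetal one unnecessary; hence its removal here.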
-package com.cloud.baremetal.manager; - -import java.util.Map; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; - -import com.cloud.deploy.AbstractDeployPlannerSelector; -import com.cloud.deploy.DeployPlannerSelector; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.vm.UserVmVO; -@Local(value = {DeployPlannerSelector.class}) -public class BaremetalPlannerSelector extends AbstractDeployPlannerSelector{ - - @Override - public String selectPlanner(UserVmVO vm) { - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - return "BareMetalPlanner"; - } - return null; - } - -} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 15d3d583516..39375f8ebd3 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -127,6 +127,7 @@ import com.cloud.agent.api.PlugNicAnswer; import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.ReadyAnswer; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.RebootAnswer; @@ -276,6 +277,8 @@ ServerResource { private String _createTmplPath; private String _heartBeatPath; private String _securityGroupPath; + private String _ovsPvlanDhcpHostPath; + private String _ovsPvlanVmPath; private String _routerProxyPath; private String _host; private String _dcId; @@ -597,6 +600,18 @@ ServerResource { "Unable to find the router_proxy.sh"); } + _ovsPvlanDhcpHostPath = Script.findScript(networkScriptsDir, "ovs-pvlan-dhcp-host.sh"); + if ( _ovsPvlanDhcpHostPath == null) { + throw new ConfigurationException( + "Unable to find the ovs-pvlan-dhcp-host.sh"); + } + + _ovsPvlanVmPath = Script.findScript(networkScriptsDir, "ovs-pvlan-vm.sh"); + if ( _ovsPvlanVmPath == null) { + throw new ConfigurationException( + "Unable to find the ovs-pvlan-vm.sh"); + } + String value = (String) params.get("developer"); boolean isDeveloper = Boolean.parseBoolean(value); @@ -1213,6 +1228,8 @@ ServerResource { return execute((NetworkRulesVmSecondaryIpCommand) cmd); } else if (cmd instanceof StorageSubSystemCommand) { return this.storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd); + } else if (cmd instanceof PvlanSetupCommand) { + return execute((PvlanSetupCommand) cmd); } else { s_logger.warn("Unsupported command "); return Answer.createUnsupportedCommandAnswer(cmd); @@ -1526,6 +1543,65 @@ ServerResource { } } + private Answer execute(PvlanSetupCommand cmd) { + String primaryPvlan = cmd.getPrimary(); + String isolatedPvlan = cmd.getIsolated(); + String op = cmd.getOp(); + String dhcpName = cmd.getDhcpName(); + String dhcpMac = cmd.getDhcpMac(); + String dhcpIp = cmd.getDhcpIp(); + String vmMac = cmd.getVmMac(); + boolean add = true; + + String opr = "-A"; + if (op.equals("delete")) { + opr = "-D"; + add = false; + } + + String result = null; + Connect conn; + try { + if (cmd.getType() == PvlanSetupCommand.Type.DHCP) { + Script script = new Script(_ovsPvlanDhcpHostPath, _timeout, s_logger); + if (add) { + conn = LibvirtConnection.getConnectionByVmName(dhcpName); + List ifaces = getInterfaces(conn, dhcpName); + InterfaceDef guestNic = ifaces.get(0); + 
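                    // Argument meanings below are inferred from the values passed rather than from
                    // ovs-pvlan-dhcp-host.sh itself: opr is "-A" to add or "-D" to delete the flows,
                    // "-b" the guest bridge, "-p"/"-i" the primary and isolated pvlan ids,
                    // "-n"/"-d"/"-m" the DHCP VM's name, ip and mac, and "-I" (add path only)
                    // the DHCP VM's first vif as reported by libvirt.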
script.add(opr, "-b", _guestBridgeName, + "-p", primaryPvlan, "-i", isolatedPvlan, "-n", dhcpName, + "-d", dhcpIp, "-m", dhcpMac, "-I", guestNic.getDevName()); + } else { + script.add(opr, "-b", _guestBridgeName, + "-p", primaryPvlan, "-i", isolatedPvlan, "-n", dhcpName, + "-d", dhcpIp, "-m", dhcpMac); + } + result = script.execute(); + if (result != null) { + s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac); + return new Answer(cmd, false, result); + } else { + s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac); + } + } else if (cmd.getType() == PvlanSetupCommand.Type.VM) { + Script script = new Script(_ovsPvlanVmPath, _timeout, s_logger); + script.add(opr, "-b", _guestBridgeName, + "-p", primaryPvlan, "-i", isolatedPvlan, "-v", vmMac); + result = script.execute(); + if (result != null) { + s_logger.warn("Failed to program pvlan for vm with mac " + vmMac); + return new Answer(cmd, false, result); + } else { + s_logger.info("Programmed pvlan for vm with mac " + vmMac); + } + } + } catch (LibvirtException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return new Answer(cmd, true, result); + } + private void VifHotPlug(Connect conn, String vmName, String vlanId, String macAddr) throws InternalErrorException, LibvirtException { NicTO nicTO = new NicTO(); @@ -2760,7 +2836,7 @@ ServerResource { Pair nicStats = getNicStats(_publicBridgeName); HostStatsEntry hostStats = new HostStatsEntry(cmd.getHostId(), cpuUtil, - nicStats.first() / 1000, nicStats.second() / 1000, "host", + nicStats.first() / 1024, nicStats.second() / 1024, "host", totMem, freeMem, 0, 0); return new GetHostStatsAnswer(cmd, hostStats); } @@ -4417,10 +4493,10 @@ ServerResource { if (oldStats != null) { long deltarx = rx - oldStats._rx; if (deltarx > 0) - stats.setNetworkReadKBs(deltarx / 1000); + stats.setNetworkReadKBs(deltarx / 1024); long deltatx = tx - oldStats._tx; if (deltatx > 0) - stats.setNetworkWriteKBs(deltatx / 1000); + stats.setNetworkWriteKBs(deltatx / 1024); } vmStats newStat = new vmStats(); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java index 37761aa5555..eac32485e53 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java @@ -76,10 +76,12 @@ public class OvsVifDriver extends VifDriverBase { } else if (nic.getBroadcastType() == Networks.BroadcastDomainType.Lswitch) { logicalSwitchUuid = nic.getBroadcastUri().getSchemeSpecificPart(); + } else if (nic.getBroadcastType() == Networks.BroadcastDomainType.Pvlan) { + vlanId = NetUtils.getPrimaryPvlanFromUri(nic.getBroadcastUri()); } String trafficLabel = nic.getName(); if (nic.getType() == Networks.TrafficType.Guest) { - if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan + if ((nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan || nic.getBroadcastType() == Networks.BroadcastDomainType.Pvlan) && !vlanId.equalsIgnoreCase("untagged")) { if(trafficLabel != null && !trafficLabel.isEmpty()) { s_logger.debug("creating a vlan dev and bridge for guest traffic per traffic label " + trafficLabel); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 482ec52b19a..d8d7476858a 100755 --- 
a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -78,7 +78,6 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.DeleteVMSnapshotAnswer; import com.cloud.agent.api.DeleteVMSnapshotCommand; -import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.GetDomRVersionAnswer; import com.cloud.agent.api.GetDomRVersionCmd; import com.cloud.agent.api.GetHostStatsAnswer; @@ -109,6 +108,7 @@ import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PoolEjectCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.ReadyAnswer; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.RebootAnswer; @@ -116,8 +116,8 @@ import com.cloud.agent.api.RebootCommand; import com.cloud.agent.api.RebootRouterCommand; import com.cloud.agent.api.RevertToVMSnapshotAnswer; import com.cloud.agent.api.RevertToVMSnapshotCommand; -import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.ScaleVmAnswer; +import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.SetupAnswer; import com.cloud.agent.api.SetupCommand; import com.cloud.agent.api.SetupGuestNetworkAnswer; @@ -132,6 +132,7 @@ import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.UpgradeSnapshotCommand; import com.cloud.agent.api.ValidateSnapshotAnswer; import com.cloud.agent.api.ValidateSnapshotCommand; @@ -166,14 +167,14 @@ import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.agent.api.routing.VpnUsersCfgCommand; import com.cloud.agent.api.storage.CopyVolumeAnswer; import com.cloud.agent.api.storage.CopyVolumeCommand; -import com.cloud.agent.api.storage.CreateVolumeOVACommand; -import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; -import com.cloud.agent.api.storage.PrepareOVAPackingAnswer; -import com.cloud.agent.api.storage.PrepareOVAPackingCommand; import com.cloud.agent.api.storage.CreateAnswer; import com.cloud.agent.api.storage.CreateCommand; import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; +import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; +import com.cloud.agent.api.storage.CreateVolumeOVACommand; import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.PrepareOVAPackingAnswer; +import com.cloud.agent.api.storage.PrepareOVAPackingCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.storage.ResizeVolumeAnswer; @@ -289,30 +290,6 @@ import com.vmware.vim25.VirtualMachineGuestOsIdentifier; import com.vmware.vim25.VirtualMachinePowerState; import com.vmware.vim25.VirtualMachineRuntimeInfo; import com.vmware.vim25.VirtualSCSISharing; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; - -import javax.naming.ConfigurationException; -import java.io.File; -import java.io.IOException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.URI; -import java.nio.channels.SocketChannel; -import java.rmi.RemoteException; -import 
java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.TimeZone; -import java.util.UUID; public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService { @@ -542,6 +519,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd); } else if (clz == ScaleVmCommand.class) { return execute((ScaleVmCommand) cmd); + } else if (clz == PvlanSetupCommand.class) { + return execute((PvlanSetupCommand) cmd); } else { answer = Answer.createUnsupportedCommandAnswer(cmd); } @@ -1084,7 +1063,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String domrGIP = cmd.getAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP); String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); String gw = cmd.getAccessDetail(NetworkElementCommand.GUEST_NETWORK_GATEWAY); - String cidr = Long.toString(NetUtils.getCidrSize(nic.getNetmask()));; + String cidr = Long.toString(NetUtils.getCidrSize(nic.getNetmask())); String domainName = cmd.getNetworkDomain(); String dns = cmd.getDefaultDns1(); if (dns == null || dns.isEmpty()) { @@ -1423,7 +1402,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa NicTO nicTo = cmd.getNic(); VirtualDevice nic; - Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false); + Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, cmd.getVMType());; if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); @@ -1689,8 +1668,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa networkInfo = HypervisorHostHelper.prepareNetwork(_publicTrafficInfo.getVirtualSwitchName(), "cloud.public", vmMo.getRunningHost(), vlanId, null, null, _ops_timeout, true); } else { - networkInfo = HypervisorHostHelper.prepareNetwork(_publicTrafficInfo.getVirtualSwitchName(), "cloud.public", - vmMo.getRunningHost(), vlanId, null, null, _ops_timeout, vSwitchType, _portsPerDvPortGroup, null, false); + networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public", + vmMo.getRunningHost(), vlanId, null, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup, null, false); } int nicIndex = allocPublicNicIndex(vmMo); @@ -2606,7 +2585,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus")); - Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus); + VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); + Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType); if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); @@ -2790,16 +2770,28 @@ public class VmwareResource implements StoragePoolResource, ServerResource, 
Vmwa return poolMors; } + + private String getPvlanInfo(NicTO nicTo) { + if (nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) { + return NetUtils.getIsolatedPvlanFromUri(nicTo.getBroadcastUri()); + } + return null; + } + private String getVlanInfo(NicTO nicTo, String defaultVlan) { if (nicTo.getBroadcastType() == BroadcastDomainType.Native) { return defaultVlan; } - - if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan) { + if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan || nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) { if (nicTo.getBroadcastUri() != null) { + if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan) + // For vlan, the broadcast uri is of the form vlan:// return nicTo.getBroadcastUri().getHost(); + else + // for pvlan, the broacast uri will be of the form pvlan://-i + return NetUtils.getPrimaryPvlanFromUri(nicTo.getBroadcastUri()); } else { - s_logger.warn("BroadcastType is not claimed as VLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan); + s_logger.warn("BroadcastType is not claimed as VLAN or PVLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan); return defaultVlan; } } @@ -2808,7 +2800,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return defaultVlan; } - private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus) throws Exception { + private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception { Pair switchName; TrafficType trafficType; VirtualSwitchType switchType; @@ -2832,12 +2824,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); if (VirtualSwitchType.StandardVirtualSwitch == switchType) { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), - nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, + hostMo, getVlanInfo(nicTo, switchName.second()), nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, !namePrefix.startsWith("cloud.private")); } else { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), + String vlanId = getVlanInfo(nicTo, switchName.second()); + String svlanId = null; + boolean pvlannetwork = (getPvlanInfo(nicTo) == null)?false:true; + if (vmType != null && vmType.equals(VirtualMachine.Type.DomainRouter) && pvlannetwork) { + // plumb this network to the promiscuous vlan. + svlanId = vlanId; + } else { + // plumb this network to the isolated vlan. 
+ svlanId = getPvlanInfo(nicTo); + } + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, vlanId, svlanId, nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, switchType, _portsPerDvPortGroup, nicTo.getGateway(), configureVServiceInNexus); } @@ -3324,7 +3326,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa NicTO[] nics = vm.getNics(); for (NicTO nic : nics) { // prepare network on the host - prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false); + prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false, cmd.getVirtualMachine().getType()); } String secStoreUrl = mgr.getSecondaryStorageStoreUrl(Long.parseLong(_dcId)); @@ -3988,6 +3990,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } + protected Answer execute(PvlanSetupCommand cmd) { + // Pvlan related operations are performed in the start/stop command paths + // for vmware. This function is implemented to support mgmt layer code + // that issue this command. Note that pvlan operations are supported only + // in Distributed Virtual Switch environments for vmware deployments. + return new Answer(cmd, true, "success"); + } + protected Answer execute(UnregisterVMCommand cmd){ if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource UnregisterVMCommand: " + _gson.toJson(cmd)); diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 0828dc48877..ac5056e3411 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -133,6 +133,7 @@ import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PoolEjectCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.ReadyAnswer; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.RebootAnswer; @@ -666,6 +667,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return execute((NetworkRulesVmSecondaryIpCommand)cmd); } else if (clazz == ScaleVmCommand.class) { return execute((ScaleVmCommand) cmd); + } else if (clazz == PvlanSetupCommand.class) { + return execute((PvlanSetupCommand) cmd); } else { return Answer.createUnsupportedCommandAnswer(cmd); } @@ -1082,6 +1085,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } else if (nic.getBroadcastType() == BroadcastDomainType.Lswitch) { // Nicira Logical Switch return network.getNetwork(); + } else if (nic.getBroadcastType() == BroadcastDomainType.Pvlan) { + URI broadcastUri = nic.getBroadcastUri(); + assert broadcastUri.getScheme().equals(BroadcastDomainType.Pvlan.scheme()); + long vlan = Long.parseLong(NetUtils.getPrimaryPvlanFromUri(broadcastUri)); + return enableVlanNetwork(conn, vlan, network); } throw new CloudRuntimeException("Unable to support this type of network broadcast domain: " + nic.getBroadcastUri()); @@ -1117,7 +1125,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vifr = vif.getRecord(conn); s_logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId()); } - + return vif; } @@ -1545,6 +1553,55 @@ 
public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } } + + private Answer execute(PvlanSetupCommand cmd) { + Connection conn = getConnection(); + + String primaryPvlan = cmd.getPrimary(); + String isolatedPvlan = cmd.getIsolated(); + String op = cmd.getOp(); + String dhcpName = cmd.getDhcpName(); + String dhcpMac = cmd.getDhcpMac(); + String dhcpIp = cmd.getDhcpIp(); + String vmMac = cmd.getVmMac(); + String networkTag = cmd.getNetworkTag(); + + XsLocalNetwork nw = null; + String nwNameLabel = null; + try { + nw = getNativeNetworkForTraffic(conn, TrafficType.Guest, networkTag); + nwNameLabel = nw.getNetwork().getNameLabel(conn); + } catch (XenAPIException e) { + s_logger.warn("Fail to get network", e); + return new Answer(cmd, false, e.toString()); + } catch (XmlRpcException e) { + s_logger.warn("Fail to get network", e); + return new Answer(cmd, false, e.toString()); + } + + String result = null; + if (cmd.getType() == PvlanSetupCommand.Type.DHCP) { + result = callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-dhcp", "op", op, "nw-label", nwNameLabel, + "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "dhcp-name", dhcpName, + "dhcp-ip", dhcpIp, "dhcp-mac", dhcpMac); + if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { + s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac); + return new Answer(cmd, false, result); + } else { + s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac); + } + } else if (cmd.getType() == PvlanSetupCommand.Type.VM) { + result = callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-vm", "op", op, "nw-label", nwNameLabel, + "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "vm-mac", vmMac); + if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { + s_logger.warn("Failed to program pvlan for vm with mac " + vmMac); + return new Answer(cmd, false, result); + } else { + s_logger.info("Programmed pvlan for vm with mac " + vmMac); + } + } + return new Answer(cmd, true, result); + } @Override public StartAnswer execute(StartCommand cmd) { diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml index 05c066d6d53..f283ffeb333 100755 --- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml +++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-egress-acl-rule.xml @@ -80,7 +80,7 @@ under the License. @@ -93,7 +93,7 @@ under the License. name="" placement="begin" status="created" - value="%deststartip%"/> + value="%sourcestartip%"/> + value="%sourceendip%"/> @@ -161,8 +161,8 @@ under the License. 
descr=value actiontype="drop" or "permit" protocolvalue = "TCP" or "UDP" - deststartip="destination start ip" - destendip="destination end ip" + sourcestartip="source start ip" + sourceendip="source end ip" deststartport="start port at destination" destendport="end port at destination" --!> diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml index 17cfa54a34e..e6f4cfb63d1 100755 --- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml +++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-no-protocol-rule.xml @@ -54,7 +54,7 @@ under the License. @@ -67,7 +67,7 @@ under the License. name="" placement="begin" status="created" - value="%deststartip%"/> + value="%sourcestartip%"/> + value="%sourceendip%"/> @@ -89,6 +89,6 @@ under the License. aclrulename="dummy" descr=value actiontype="drop" or "permit" - deststartip="destination start ip" - destendip="destination end ip" + sourcestartip="source start ip" + sourceendip="source end ip" --!> diff --git a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml index 436e3eae790..55edd1fa728 100755 --- a/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml +++ b/plugins/network-elements/cisco-vnmc/scripts/network/cisco/create-generic-egress-acl-rule.xml @@ -80,7 +80,7 @@ under the License. @@ -93,7 +93,7 @@ under the License. name="" placement="begin" status="created" - value="%deststartip%"/> + value="%sourcestartip%"/> + value="%sourceendip%"/> @@ -116,6 +116,6 @@ under the License. 
descr=value actiontype="drop" or "permit" protocolvalue = "TCP" or "UDP" or "ICMP" - deststartip="destination start ip" - destendip="destination end ip" + sourcestartip="source start ip" + sourceendip="source end ip" --!> diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnection.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnection.java index fed6724418d..28e2535ca91 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnection.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnection.java @@ -150,13 +150,13 @@ public interface CiscoVnmcConnection { public boolean createTenantVDCEgressAclRule(String tenantName, String identifier, String policyIdentifier, - String protocol, String destStartIp, String destEndIp, + String protocol, String sourceStartIp, String sourceEndIp, String destStartPort, String destEndPort) throws ExecutionException; public boolean createTenantVDCEgressAclRule(String tenantName, String identifier, String policyIdentifier, - String protocol, String destStartIp, String destEndIp) + String protocol, String sourceStartIp, String sourceEndIp) throws ExecutionException; public boolean deleteTenantVDCAclRule(String tenantName, diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java index 0e57cae6ddc..a9e8cf633f9 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java @@ -729,7 +729,7 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection { @Override public boolean createTenantVDCEgressAclRule(String tenantName, String identifier, String policyIdentifier, - String protocol, String destStartIp, String destEndIp, + String protocol, String sourceStartIp, String sourceEndIp, String destStartPort, String destEndPort) throws ExecutionException { String xml = VnmcXml.CREATE_EGRESS_ACL_RULE.getXml(); String service = VnmcXml.CREATE_EGRESS_ACL_RULE.getService(); @@ -740,8 +740,8 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection { xml = replaceXmlValue(xml, "descr", "Egress ACL rule for Tenant VDC " + tenantName); xml = replaceXmlValue(xml, "actiontype", "permit"); xml = replaceXmlValue(xml, "protocolvalue", protocol); - xml = replaceXmlValue(xml, "deststartip", destStartIp); - xml = replaceXmlValue(xml, "destendip", destEndIp); + xml = replaceXmlValue(xml, "sourcestartip", sourceStartIp); + xml = replaceXmlValue(xml, "sourceendip", sourceEndIp); xml = replaceXmlValue(xml, "deststartport", destStartPort); xml = replaceXmlValue(xml, "destendport", destEndPort); @@ -759,7 +759,7 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection { @Override public boolean createTenantVDCEgressAclRule(String tenantName, String identifier, String policyIdentifier, - String protocol, String destStartIp, String destEndIp) throws ExecutionException { + String protocol, String sourceStartIp, String sourceEndIp) throws ExecutionException { String xml = VnmcXml.CREATE_GENERIC_EGRESS_ACL_RULE.getXml(); String service = VnmcXml.CREATE_GENERIC_EGRESS_ACL_RULE.getService(); if (protocol.equalsIgnoreCase("all")) { // any protocol @@ -773,8 +773,8 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection { xml = 
replaceXmlValue(xml, "aclrulename", getNameForAclRule(tenantName, identifier)); xml = replaceXmlValue(xml, "descr", "Egress ACL rule for Tenant VDC " + tenantName); xml = replaceXmlValue(xml, "actiontype", "permit"); - xml = replaceXmlValue(xml, "deststartip", destStartIp); - xml = replaceXmlValue(xml, "destendip", destEndIp); + xml = replaceXmlValue(xml, "sourcestartip", sourceStartIp); + xml = replaceXmlValue(xml, "sourceendip", sourceEndIp); List rules = listChildren(getDnForAclPolicy(tenantName, policyIdentifier)); int order = 100; diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java index 176fdc45062..29bbbe67a31 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java @@ -60,6 +60,7 @@ import com.cloud.utils.Pair; import com.cloud.utils.cisco.n1kv.vsm.NetconfHelper; import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.OperationType; import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.SwitchPortMode; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.net.NetUtils; @@ -280,30 +281,30 @@ public class CiscoVnmcResource implements ServerResource { String policyIdentifier = cmd.getIpAddress().getPublicIp().replace('.', '-'); try { if (!_connection.createTenantVDCNatPolicySet(tenant)) { - throw new Exception("Failed to create NAT policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create NAT policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCSourceNatPolicy(tenant, policyIdentifier)) { - throw new Exception("Failed to create source NAT policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create source NAT policy in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCSourceNatPolicyRef(tenant, policyIdentifier)) { - throw new Exception("Failed to associate source NAT policy with NAT policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate source NAT policy with NAT policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCSourceNatIpPool(tenant, policyIdentifier, cmd.getIpAddress().getPublicIp())) { - throw new Exception("Failed to create source NAT ip pool in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create source NAT ip pool in VNMC for guest network with vlan " + vlanId); } String[] ipRange = getIpRangeFromCidr(cmd.getContextParam(NetworkElementCommand.GUEST_NETWORK_CIDR)); if (!_connection.createTenantVDCSourceNatRule(tenant, policyIdentifier, ipRange[0], ipRange[1])) { - throw new Exception("Failed to create source NAT rule in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create source NAT rule in VNMC for guest network with vlan " + vlanId); } if (!_connection.associateNatPolicySet(tenant)) { - throw new Exception("Failed to associate source NAT policy set with edge security profile in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate source NAT policy set with edge security profile in VNMC for guest network with vlan " + vlanId); } - } catch 
(Throwable e) { + } catch (ExecutionException e) { String msg = "SetSourceNatCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); @@ -337,29 +338,29 @@ public class CiscoVnmcResource implements ServerResource { try { if (!_connection.createTenantVDCAclPolicySet(tenant, true)) { - throw new Exception("Failed to create ACL ingress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL ingress policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicySet(tenant, false)) { - throw new Exception("Failed to create ACL egress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL egress policy set in VNMC for guest network with vlan " + vlanId); } for (String publicIp : publicIpRulesMap.keySet()) { String policyIdentifier = publicIp.replace('.', '-'); if (!_connection.createTenantVDCAclPolicy(tenant, policyIdentifier)) { - throw new Exception("Failed to create ACL policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL policy in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicyRef(tenant, policyIdentifier, true)) { - throw new Exception("Failed to associate ACL policy with ACL ingress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate ACL policy with ACL ingress policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicyRef(tenant, policyIdentifier, false)) { - throw new Exception("Failed to associate ACL policy with ACL egress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate ACL policy with ACL egress policy set in VNMC for guest network with vlan " + vlanId); } for (FirewallRuleTO rule : publicIpRulesMap.get(publicIp)) { if (rule.revoked()) { if (!_connection.deleteTenantVDCAclRule(tenant, Long.toString(rule.getId()), policyIdentifier)) { - throw new Exception("Failed to delete ACL rule in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to delete ACL rule in VNMC for guest network with vlan " + vlanId); } } else { String[] externalIpRange = getIpRangeFromCidr(rule.getSourceCidrList().get(0)); @@ -370,13 +371,13 @@ public class CiscoVnmcResource implements ServerResource { Long.toString(rule.getId()), policyIdentifier, rule.getProtocol().toUpperCase(), externalIpRange[0], externalIpRange[1], Integer.toString(rule.getSrcPortRange()[0]), Integer.toString(rule.getSrcPortRange()[1]))) { - throw new Exception("Failed to create ACL ingress rule in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL ingress rule in VNMC for guest network with vlan " + vlanId); } } else { if (!_connection.createTenantVDCIngressAclRule(tenant, Long.toString(rule.getId()), policyIdentifier, rule.getProtocol().toUpperCase(), externalIpRange[0], externalIpRange[1])) { - throw new Exception("Failed to create ACL ingress rule in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL ingress rule in VNMC for guest network with vlan " + vlanId); } } } else { @@ -387,13 +388,13 @@ public class CiscoVnmcResource implements ServerResource { rule.getProtocol().toUpperCase(), externalIpRange[0], externalIpRange[1], 
Integer.toString(rule.getSrcPortRange()[0]), Integer.toString(rule.getSrcPortRange()[1]))) { - throw new Exception("Failed to create ACL egress rule in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL egress rule in VNMC for guest network with vlan " + vlanId); } } else { if (!_connection.createTenantVDCEgressAclRule(tenant, Long.toString(rule.getId()), policyIdentifier, rule.getProtocol().toUpperCase(), externalIpRange[0], externalIpRange[1])) { - throw new Exception("Failed to create ACL egress rule in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL egress rule in VNMC for guest network with vlan " + vlanId); } } } @@ -402,9 +403,9 @@ public class CiscoVnmcResource implements ServerResource { } if (!_connection.associateAclPolicySet(tenant)) { - throw new Exception("Failed to associate ACL policy set with edge security profile in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate ACL policy set with edge security profile in VNMC for guest network with vlan " + vlanId); } - } catch (Throwable e) { + } catch (ExecutionException e) { String msg = "SetFirewallRulesCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); @@ -438,69 +439,60 @@ public class CiscoVnmcResource implements ServerResource { try { if (!_connection.createTenantVDCNatPolicySet(tenant)) { - throw new Exception("Failed to create NAT policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create NAT policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicySet(tenant, true)) { - throw new Exception("Failed to create ACL ingress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL ingress policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicySet(tenant, false)) { - throw new Exception("Failed to create ACL egress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL egress policy set in VNMC for guest network with vlan " + vlanId); } for (String publicIp : publicIpRulesMap.keySet()) { String policyIdentifier = publicIp.replace('.', '-'); if (!_connection.createTenantVDCDNatPolicy(tenant, policyIdentifier)) { - throw new Exception("Failed to create DNAT policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create DNAT policy in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCDNatPolicyRef(tenant, policyIdentifier)) { - throw new Exception("Failed to associate DNAT policy with NAT policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate DNAT policy with NAT policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicy(tenant, policyIdentifier)) { - throw new Exception("Failed to create ACL policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL policy in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicyRef(tenant, policyIdentifier, true)) { - throw new Exception("Failed to associate ACL policy with ACL ingress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate 
ACL policy with ACL ingress policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicyRef(tenant, policyIdentifier, false)) { - throw new Exception("Failed to associate ACL policy with ACL egress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate ACL policy with ACL egress policy set in VNMC for guest network with vlan " + vlanId); } for (StaticNatRuleTO rule : publicIpRulesMap.get(publicIp)) { if (rule.revoked()) { if (!_connection.deleteTenantVDCDNatRule(tenant, Long.toString(rule.getId()), policyIdentifier)) { - throw new Exception("Failed to delete DNAT rule in VNMC for guest network with vlan " + vlanId); - } - - if (!_connection.deleteTenantVDCAclRule(tenant, Long.toString(rule.getId()), policyIdentifier)) { - throw new Exception("Failed to delete ACL ingress rule for DNAT in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to delete DNAT rule in VNMC for guest network with vlan " + vlanId); } } else { if (!_connection.createTenantVDCDNatIpPool(tenant, Long.toString(rule.getId()), rule.getDstIp())) { - throw new Exception("Failed to create DNAT ip pool in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create DNAT ip pool in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCDNatRule(tenant, Long.toString(rule.getId()), policyIdentifier, rule.getSrcIp())) { - throw new Exception("Failed to create DNAT rule in VNMC for guest network with vlan " + vlanId); - } - - if (!_connection.createTenantVDCAclRuleForDNat(tenant, - Long.toString(rule.getId()), policyIdentifier, rule.getDstIp())) { - throw new Exception("Failed to create ACL rule for DNAT in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create DNAT rule in VNMC for guest network with vlan " + vlanId); } } } } if (!_connection.associateAclPolicySet(tenant)) { - throw new Exception("Failed to associate source NAT policy set with edge security profile in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate source NAT policy set with edge security profile in VNMC for guest network with vlan " + vlanId); } - } catch (Throwable e) { - String msg = "SetSourceNatCommand failed due to " + e.getMessage(); + } catch (ExecutionException e) { + String msg = "SetStaticNatRulesCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -533,77 +525,66 @@ public class CiscoVnmcResource implements ServerResource { try { if (!_connection.createTenantVDCNatPolicySet(tenant)) { - throw new Exception("Failed to create NAT policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create NAT policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicySet(tenant, true)) { - throw new Exception("Failed to create ACL ingress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL ingress policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicySet(tenant, false)) { - throw new Exception("Failed to create ACL egress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL egress policy set in VNMC for guest network with vlan " + vlanId); } for (String publicIp : 
publicIpRulesMap.keySet()) { String policyIdentifier = publicIp.replace('.', '-'); if (!_connection.createTenantVDCPFPolicy(tenant, policyIdentifier)) { - throw new Exception("Failed to create PF policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create PF policy in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCPFPolicyRef(tenant, policyIdentifier)) { - throw new Exception("Failed to associate PF policy with NAT policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate PF policy with NAT policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicy(tenant, policyIdentifier)) { - throw new Exception("Failed to create ACL policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create ACL policy in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicyRef(tenant, policyIdentifier, true)) { - throw new Exception("Failed to associate ACL policy with ACL ingress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate ACL policy with ACL ingress policy set in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCAclPolicyRef(tenant, policyIdentifier, false)) { - throw new Exception("Failed to associate ACL policy with ACL egress policy set in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate ACL policy with ACL egress policy set in VNMC for guest network with vlan " + vlanId); } for (PortForwardingRuleTO rule : publicIpRulesMap.get(publicIp)) { if (rule.revoked()) { if (!_connection.deleteTenantVDCPFRule(tenant, Long.toString(rule.getId()), policyIdentifier)) { - throw new Exception("Failed to delete PF rule in VNMC for guest network with vlan " + vlanId); - } - - if (!_connection.deleteTenantVDCAclRule(tenant, Long.toString(rule.getId()), policyIdentifier)) { - throw new Exception("Failed to delete ACL ingress rule for PF in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to delete PF rule in VNMC for guest network with vlan " + vlanId); } } else { if (!_connection.createTenantVDCPFIpPool(tenant, Long.toString(rule.getId()), rule.getDstIp())) { - throw new Exception("Failed to create PF ip pool in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create PF ip pool in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCPFPortPool(tenant, Long.toString(rule.getId()), Integer.toString(rule.getDstPortRange()[0]), Integer.toString(rule.getDstPortRange()[1]))) { - throw new Exception("Failed to create PF port pool in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create PF port pool in VNMC for guest network with vlan " + vlanId); } if (!_connection.createTenantVDCPFRule(tenant, Long.toString(rule.getId()), policyIdentifier, rule.getProtocol().toUpperCase(), rule.getSrcIp(), Integer.toString(rule.getSrcPortRange()[0]), Integer.toString(rule.getSrcPortRange()[1]))) { - throw new Exception("Failed to create PF rule in VNMC for guest network with vlan " + vlanId); - } - - if (!_connection.createTenantVDCAclRuleForPF(tenant, - Long.toString(rule.getId()), policyIdentifier, - rule.getProtocol().toUpperCase(), rule.getDstIp(), - Integer.toString(rule.getDstPortRange()[0]), 
Integer.toString(rule.getDstPortRange()[1]))) { - throw new Exception("Failed to create ACL rule for PF in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create PF rule in VNMC for guest network with vlan " + vlanId); } } } } if (!_connection.associateAclPolicySet(tenant)) { - throw new Exception("Failed to associate source NAT policy set with edge security profile in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate source NAT policy set with edge security profile in VNMC for guest network with vlan " + vlanId); } - } catch (Throwable e) { - String msg = "SetSourceNatCommand failed due to " + e.getMessage(); + } catch (ExecutionException e) { + String msg = "SetPortForwardingRulesCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -619,24 +600,24 @@ public class CiscoVnmcResource implements ServerResource { return execute(cmd, _numRetries); } - private void createEdgeDeviceProfile(String tenant, List gateways, Long vlanId) throws Exception { + private void createEdgeDeviceProfile(String tenant, List gateways, Long vlanId) throws ExecutionException { // create edge device profile if (!_connection.createTenantVDCEdgeDeviceProfile(tenant)) - throw new Exception("Failed to create tenant edge device profile in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create tenant edge device profile in VNMC for guest network with vlan " + vlanId); // create edge static route policy if (!_connection.createTenantVDCEdgeStaticRoutePolicy(tenant)) - throw new Exception("Failed to create tenant edge static route policy in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create tenant edge static route policy in VNMC for guest network with vlan " + vlanId); // create edge static route for all gateways for (String gateway : gateways) { if (!_connection.createTenantVDCEdgeStaticRoute(tenant, gateway, "0.0.0.0", "0.0.0.0")) - throw new Exception("Failed to create tenant edge static route in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to create tenant edge static route in VNMC for guest network with vlan " + vlanId); } // associate edge if (!_connection.associateTenantVDCEdgeStaticRoutePolicy(tenant)) - throw new Exception("Failed to associate edge static route policy with edge device profile in VNMC for guest network with vlan " + vlanId); + throw new ExecutionException("Failed to associate edge static route policy with edge device profile in VNMC for guest network with vlan " + vlanId); } private Answer execute(CreateLogicalEdgeFirewallCommand cmd, int numRetries) { @@ -644,23 +625,23 @@ public class CiscoVnmcResource implements ServerResource { try { // create tenant if (!_connection.createTenant(tenant)) - throw new Exception("Failed to create tenant in VNMC for guest network with vlan " + cmd.getVlanId()); + throw new ExecutionException("Failed to create tenant in VNMC for guest network with vlan " + cmd.getVlanId()); // create tenant VDC if (!_connection.createTenantVDC(tenant)) - throw new Exception("Failed to create tenant VDC in VNMC for guest network with vlan " + cmd.getVlanId()); + throw new ExecutionException("Failed to create tenant VDC in VNMC for guest network with vlan " + cmd.getVlanId()); // create edge security profile if (!_connection.createTenantVDCEdgeSecurityProfile(tenant)) - throw new Exception("Failed to create tenant edge 
security profile in VNMC for guest network with vlan " + cmd.getVlanId()); + throw new ExecutionException("Failed to create tenant edge security profile in VNMC for guest network with vlan " + cmd.getVlanId()); // create edge device profile and associated route createEdgeDeviceProfile(tenant, cmd.getPublicGateways(), cmd.getVlanId()); // create logical edge firewall if (!_connection.createEdgeFirewall(tenant, cmd.getPublicIp(), cmd.getInternalIp(), cmd.getPublicSubnet(), cmd.getInternalSubnet())) - throw new Exception("Failed to create edge firewall in VNMC for guest network with vlan " + cmd.getVlanId()); - } catch (Throwable e) { + throw new ExecutionException("Failed to create edge firewall in VNMC for guest network with vlan " + cmd.getVlanId()); + } catch (ExecutionException e) { String msg = "CreateLogicalEdgeFirewallCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); @@ -688,7 +669,7 @@ public class CiscoVnmcResource implements ServerResource { s_logger.debug("Created vservice node for ASA appliance in Cisco VSM for vlan " + vlanId); helper.updatePortProfile(cmd.getAsaInPortProfile(), SwitchPortMode.access, params); s_logger.debug("Updated inside port profile for ASA appliance in Cisco VSM with new vlan " + vlanId); - } catch (Throwable e) { + } catch (CloudRuntimeException e) { String msg = "ConfigureVSMForASACommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); @@ -711,18 +692,18 @@ public class CiscoVnmcResource implements ServerResource { try { Map availableAsaAppliances = _connection.listUnAssocAsa1000v(); if (availableAsaAppliances.isEmpty()) { - throw new Exception("No ASA 1000v available to associate with logical edge firewall for guest vlan " + cmd.getVlanId()); + throw new ExecutionException("No ASA 1000v available to associate with logical edge firewall for guest vlan " + cmd.getVlanId()); } String asaInstanceDn = availableAsaAppliances.get(cmd.getAsaMgmtIp()); if (asaInstanceDn == null) { - throw new Exception("Requested ASA 1000v (" + cmd.getAsaMgmtIp() + ") is not available"); + throw new ExecutionException("Requested ASA 1000v (" + cmd.getAsaMgmtIp() + ") is not available"); } if (!_connection.assignAsa1000v(tenant, asaInstanceDn)) { - throw new Exception("Failed to associate ASA 1000v (" + cmd.getAsaMgmtIp() + ") with logical edge firewall for guest vlan " + cmd.getVlanId()); + throw new ExecutionException("Failed to associate ASA 1000v (" + cmd.getAsaMgmtIp() + ") with logical edge firewall for guest vlan " + cmd.getVlanId()); } - } catch (Throwable e) { + } catch (ExecutionException e) { String msg = "AssociateAsaWithLogicalEdgeFirewallCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); @@ -743,7 +724,7 @@ public class CiscoVnmcResource implements ServerResource { String tenant = "vlan-" + cmd.getVlanId(); try { _connection.deleteTenant(tenant); - } catch (Throwable e) { + } catch (ExecutionException e) { String msg = "CleanupLogicalEdgeFirewallCommand failed due to " + e.getMessage(); s_logger.error(msg, e); return new Answer(cmd, false, msg); diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index cc2bcc17a43..850962d05ee 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ 
b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -923,13 +923,13 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } @Override - public boolean applyGlobalLoadBalancerRule(long zoneId, GlobalLoadBalancerConfigCommand gslbConfigCmd) + public boolean applyGlobalLoadBalancerRule(long zoneId, long physicalNetworkId, GlobalLoadBalancerConfigCommand gslbConfigCmd) throws ResourceUnavailableException { long zoneGslbProviderHosId = 0; // find the NetScaler device configured as gslb service provider in the zone - ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId); + ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId, physicalNetworkId); if (nsGslbProvider == null) { String msg = "Unable to find a NetScaler configured as gslb service provider in zone " + zoneId; s_logger.debug(msg); @@ -950,28 +950,37 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl return true; } - private ExternalLoadBalancerDeviceVO findGslbProvider(long zoneId) { + private ExternalLoadBalancerDeviceVO findGslbProvider(long zoneId, long physicalNetworkId) { List pNtwks = _physicalNetworkDao.listByZoneAndTrafficType(zoneId, TrafficType.Guest); - if (pNtwks.isEmpty() || pNtwks.size() > 1) { - throw new InvalidParameterValueException("Unable to get physical network in zone id = " + zoneId); + + if (pNtwks == null || pNtwks.isEmpty()) { + throw new InvalidParameterValueException("Unable to get physical network: " + physicalNetworkId + + " in zone id = " + zoneId); + } else { + for (PhysicalNetwork physicalNetwork : pNtwks) { + if (physicalNetwork.getId() == physicalNetworkId) { + PhysicalNetworkVO physNetwork = pNtwks.get(0); + ExternalLoadBalancerDeviceVO nsGslbProvider = _externalLoadBalancerDeviceDao.findGslbServiceProvider( + physNetwork.getId(), Provider.Netscaler.getName()); + return nsGslbProvider; + } + } } - PhysicalNetworkVO physNetwork = pNtwks.get(0); - ExternalLoadBalancerDeviceVO nsGslbProvider = _externalLoadBalancerDeviceDao.findGslbServiceProvider( - physNetwork.getId(), Provider.Netscaler.getName()); - return nsGslbProvider; + + return null; } @Override - public boolean isServiceEnabledInZone(long zoneId) { + public boolean isServiceEnabledInZone(long zoneId, long physicalNetworkId) { - ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId); + ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId, physicalNetworkId); //return true if a NetScaler device is configured in the zone return (nsGslbProvider != null); } @Override - public String getZoneGslbProviderPublicIp(long zoneId) { - ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId); + public String getZoneGslbProviderPublicIp(long zoneId, long physicalNetworkId) { + ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId, physicalNetworkId); if (nsGslbProvider != null) { return nsGslbProvider.getGslbSitePublicIP(); } @@ -979,8 +988,8 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } @Override - public String getZoneGslbProviderPrivateIp(long zoneId) { - ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId); + public String getZoneGslbProviderPrivateIp(long zoneId, long physicalNetworkId) { + ExternalLoadBalancerDeviceVO nsGslbProvider = findGslbProvider(zoneId, physicalNetworkId); if (nsGslbProvider != null) { return nsGslbProvider.getGslbSitePrivateIP(); } diff --git 
a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java index 98e14618248..c0d4599dc0c 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java @@ -1095,7 +1095,15 @@ public class NetscalerResource implements ServerResource { } vserver.set_name(vserverName); - vserver.set_lbmethod(lbMethod); + if ("RoundRobin".equalsIgnoreCase(lbMethod)) { + vserver.set_lbmethod("ROUNDROBIN"); + } else if ("LeastConn".equalsIgnoreCase(lbMethod)) { + vserver.set_lbmethod("LEASTCONNECTION"); + } else if ("Proximity".equalsIgnoreCase(lbMethod)) { + vserver.set_lbmethod("RTT"); + } else { + throw new ExecutionException("Unsupported LB method"); + } vserver.set_persistencetype(persistenceType); if ("SOURCEIP".equalsIgnoreCase(persistenceType)) { vserver.set_persistenceid(persistenceId); diff --git a/scripts/vm/hypervisor/xenserver/ovs-get-bridge.sh b/scripts/vm/hypervisor/xenserver/ovs-get-bridge.sh new file mode 100755 index 00000000000..f56ddf9020f --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/ovs-get-bridge.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +nw_label=$1 +br=`xe network-list name-label="$nw_label" params=bridge |cut -d ':' -f 2 |tr -d ' ' ` +pbr=`ovs-vsctl br-to-parent $br` +while [ "$br" != "$pbr" ] +do + br=$pbr + pbr=`ovs-vsctl br-to-parent $br` +done +echo $pbr diff --git a/scripts/vm/hypervisor/xenserver/ovs-get-dhcp-iface.sh b/scripts/vm/hypervisor/xenserver/ovs-get-dhcp-iface.sh new file mode 100755 index 00000000000..6b30ee62a06 --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/ovs-get-dhcp-iface.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
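Aside on the NetscalerResource change earlier in this hunk: CloudStack's GSLB algorithm names are now translated into the values the NetScaler vserver expects instead of being passed through verbatim. A compact sketch of that mapping as a stand-alone helper follows; only the three mappings (RoundRobin to ROUNDROBIN, LeastConn to LEASTCONNECTION, Proximity to RTT) come from the patch, the class name and the IllegalArgumentException are illustrative (the resource itself throws ExecutionException).

// Sketch of the GSLB load-balancing method translation shown above.
public final class GslbLbMethodMapper {
    static String toNetscalerLbMethod(String lbMethod) {
        if ("RoundRobin".equalsIgnoreCase(lbMethod)) {
            return "ROUNDROBIN";
        } else if ("LeastConn".equalsIgnoreCase(lbMethod)) {
            return "LEASTCONNECTION";
        } else if ("Proximity".equalsIgnoreCase(lbMethod)) {
            return "RTT"; // proximity-based GSLB maps to NetScaler's round-trip-time method
        }
        throw new IllegalArgumentException("Unsupported LB method: " + lbMethod);
    }

    public static void main(String[] args) {
        System.out.println(toNetscalerLbMethod("LeastConn")); // LEASTCONNECTION
    }
}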
+ +#!/bin/bash + +bridge=$1 +dhcp_name=$2 +dom_id=`xe vm-list is-control-domain=false power-state=running params=dom-id name-label=$dhcp_name|cut -d ':' -f 2 |tr -d ' ' ` +iface="vif${dom_id}.0" +echo $iface diff --git a/scripts/vm/hypervisor/xenserver/ovs-pvlan b/scripts/vm/hypervisor/xenserver/ovs-pvlan new file mode 100755 index 00000000000..c821870d64d --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/ovs-pvlan @@ -0,0 +1,145 @@ +#!/usr/bin/python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import cloudstack_pluginlib as lib +import logging +import os +import sys +import subprocess +import time +import XenAPIPlugin + +sys.path.append("/opt/xensource/sm/") +import util + +from time import localtime as _localtime, asctime as _asctime + +xePath = "/opt/xensource/bin/xe" +lib.setup_logging("/var/log/ovs-pvlan.log") +dhcpSetupPath = "/opt/xensource/bin/ovs-pvlan-dhcp-host.sh" +vmSetupPath = "/opt/xensource/bin/ovs-pvlan-vm.sh" +getDhcpIfacePath = "/opt/xensource/bin/ovs-get-dhcp-iface.sh" +pvlanCleanupPath = "/opt/xensource/bin/ovs-pvlan-cleanup.sh" +getBridgePath = "/opt/xensource/bin/ovs-get-bridge.sh" + +def echo(fn): + def wrapped(*v, **k): + name = fn.__name__ + util.SMlog("#### VMOPS enter %s ####" % name) + res = fn(*v, **k) + util.SMlog("#### VMOPS exit %s ####" % name) + return res + return wrapped + +@echo +def setup_pvlan_dhcp(session, args): + op = args.pop("op") + nw_label = args.pop("nw-label") + primary = args.pop("primary-pvlan") + isolated = args.pop("isolated-pvlan") + dhcp_name = args.pop("dhcp-name") + dhcp_ip = args.pop("dhcp-ip") + dhcp_mac = args.pop("dhcp-mac") + + res = lib.check_switch() + if res != "SUCCESS": + return "FAILURE:%s" % res + + logging.debug("Network is:%s" % (nw_label)) + bridge = lib.do_cmd([getBridgePath, nw_label]) + logging.debug("Determine bridge/switch is :%s" % (bridge)) + + if op == "add": + logging.debug("Try to get dhcp vm %s port on the switch:%s" % (dhcp_name, bridge)) + dhcp_iface = lib.do_cmd([getDhcpIfacePath, bridge, dhcp_name]) + logging.debug("About to setup dhcp vm on the switch:%s" % bridge) + res = lib.do_cmd([dhcpSetupPath, "-A", "-b", bridge, "-p", primary, + "-i", isolated, "-n", dhcp_name, "-d", dhcp_ip, "-m", dhcp_mac, + "-I", dhcp_iface]) + if res: + result = "FAILURE:%s" % res + return result; + logging.debug("Setup dhcp vm on switch program done") + elif op == "delete": + logging.debug("About to remove dhcp the switch:%s" % bridge) + res = lib.do_cmd([dhcpSetupPath, "-D", "-b", bridge, "-p", primary, + "-i", isolated, "-n", dhcp_name, "-d", dhcp_ip, "-m", dhcp_mac]) + if res: + result = "FAILURE:%s" % res + return result; + logging.debug("Remove DHCP on switch program done") + + result = "true" + logging.debug("Setup_pvlan_dhcp completed with result:%s" % result) + return result 
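For reference, setup_pvlan_dhcp above pops its arguments under fixed keys (op, nw-label, primary-pvlan, isolated-pvlan, dhcp-name, dhcp-ip, dhcp-mac). The Java-side sketch below only assembles that argument map; the callPlugin(...) step it would feed is hypothetical and merely stands in for whatever host-plugin invocation the hypervisor resource uses, which is not part of this diff.

// Illustrative only: builds the key/value pairs the ovs-pvlan plugin expects.
import java.util.HashMap;
import java.util.Map;

public final class PvlanDhcpArgsDemo {
    static Map<String, String> buildDhcpArgs(String op, String nwLabel, String primary,
            String isolated, String dhcpName, String dhcpIp, String dhcpMac) {
        Map<String, String> args = new HashMap<>();
        args.put("op", op);                 // "add" or "delete"
        args.put("nw-label", nwLabel);      // network name-label, resolved to a bridge by ovs-get-bridge.sh
        args.put("primary-pvlan", primary);
        args.put("isolated-pvlan", isolated);
        args.put("dhcp-name", dhcpName);
        args.put("dhcp-ip", dhcpIp);
        args.put("dhcp-mac", dhcpMac);
        return args;
    }

    public static void main(String[] args) {
        System.out.println(buildDhcpArgs("add", "guest-net", "100", "101",
                "r-42-VM", "10.1.1.2", "02:00:00:aa:bb:cc"));
    }
}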
+ +@echo +def setup_pvlan_vm(session, args): + op = args.pop("op") + nw_label = args.pop("nw-label") + primary = args.pop("primary-pvlan") + isolated = args.pop("isolated-pvlan") + vm_mac = args.pop("vm-mac") + trunk_port = 1 + + res = lib.check_switch() + if res != "SUCCESS": + return "FAILURE:%s" % res + + bridge = lib.do_cmd([getBridgePath, nw_label]) + logging.debug("Determine bridge/switch is :%s" % (bridge)) + + if op == "add": + logging.debug("About to setup vm on the switch:%s" % bridge) + res = lib.do_cmd([vmSetupPath, "-A", "-b", bridge, "-p", primary, "-i", isolated, "-v", vm_mac]) + if res: + result = "FAILURE:%s" % res + return result; + logging.debug("Setup vm on switch program done") + elif op == "delete": + logging.debug("About to remove vm on the switch:%s" % bridge) + res = lib.do_cmd([vmSetupPath, "-D", "-b", bridge, "-p", primary, "-i", isolated, "-v", vm_mac]) + if res: + result = "FAILURE:%s" % res + return result; + logging.debug("Remove vm on switch program done") + + result = "true" + logging.debug("Setup_pvlan_vm_alone completed with result:%s" % result) + return result + +@echo +def cleanup(session, args): + res = lib.check_switch() + if res != "SUCCESS": + return "FAILURE:%s" % res + + res = lib.do_cmd([pvlanCleanUpPath]) + if res: + result = "FAILURE:%s" % res + return result; + + result = "true" + logging.debug("Setup_pvlan_vm_dhcp completed with result:%s" % result) + return result + +if __name__ == "__main__": + XenAPIPlugin.dispatch({"setup-pvlan-dhcp": setup_pvlan_dhcp, + "setup-pvlan-vm": setup_pvlan_vm, + "cleanup":cleanup}) diff --git a/scripts/vm/hypervisor/xenserver/xenserver60/patch b/scripts/vm/hypervisor/xenserver/xenserver60/patch index 6d819791d3d..26205f2e7e6 100644 --- a/scripts/vm/hypervisor/xenserver/xenserver60/patch +++ b/scripts/vm/hypervisor/xenserver/xenserver60/patch @@ -70,4 +70,9 @@ swift=..,0755,/opt/xensource/bin swiftxen=..,0755,/etc/xapi.d/plugins s3xen=..,0755,/etc/xapi.d/plugins add_to_vcpus_params_live.sh=..,0755,/opt/xensource/bin - +ovs-pvlan=..,0755,/etc/xapi.d/plugins +ovs-pvlan-dhcp-host.sh=../../../network,0755,/opt/xensource/bin +ovs-pvlan-vm.sh=../../../network,0755,/opt/xensource/bin +ovs-pvlan-cleanup.sh=../../../network,0755,/opt/xensource/bin +ovs-get-dhcp-iface.sh=..,0755,/opt/xensource/bin +ovs-get-bridge.sh=..,0755,/opt/xensource/bin diff --git a/scripts/vm/network/ovs-pvlan-cleanup.sh b/scripts/vm/network/ovs-pvlan-cleanup.sh new file mode 100755 index 00000000000..7493bedeff6 --- /dev/null +++ b/scripts/vm/network/ovs-pvlan-cleanup.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +#!/bin/bash + +ovs-ofctl del-flows xenbr0 +ovs-ofctl add-flow xenbr0 priority=0,actions=NORMAL + diff --git a/scripts/vm/network/ovs-pvlan-dhcp-host.sh b/scripts/vm/network/ovs-pvlan-dhcp-host.sh new file mode 100755 index 00000000000..64565ff45d1 --- /dev/null +++ b/scripts/vm/network/ovs-pvlan-dhcp-host.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#!/bin/bash + +usage() { + printf "Usage: %s: (-A|-D) -b -p -i -n -d -m -I -v -h \n" $(basename $0) >&2 + exit 2 +} + +br= +pri_vlan= +sec_iso_vlan= +dhcp_name= +dhcp_ip= +dhcp_mac= +vm_mac= +iface= +op= + +while getopts 'ADb:p:i:d:m:v:n:I:h' OPTION +do + case $OPTION in + A) op="add" + ;; + D) op="del" + ;; + b) br="$OPTARG" + ;; + p) pri_vlan="$OPTARG" + ;; + i) sec_iso_vlan="$OPTARG" + ;; + n) dhcp_name="$OPTARG" + ;; + d) dhcp_ip="$OPTARG" + ;; + m) dhcp_mac="$OPTARG" + ;; + I) iface="$OPTARG" + ;; + v) vm_mac="$OPTARG" + ;; + h) usage + exit 1 + ;; + esac +done + +if [ -z "$op" ] +then + echo Missing operation pararmeter! + exit 1 +fi + +if [ -z "$br" ] +then + echo Missing parameter bridge! + exit 1 +fi + +if [ -z "$pri_vlan" ] +then + echo Missing parameter primary vlan! + exit 1 +fi + +if [ -z "$sec_iso_vlan" ] +then + echo Missing parameter secondary isolate vlan! + exit 1 +fi + +if [ -z "$dhcp_name" ] +then + echo Missing parameter DHCP NAME! + exit 1 +fi + +if [ -z "$dhcp_ip" ] +then + echo Missing parameter DHCP IP! + exit 1 +fi + +if [ -z "$dhcp_mac" ] +then + echo Missing parameter DHCP MAC! + exit 1 +fi + +if [ "$op" == "add" -a -z "$iface" ] +then + echo Missing parameter DHCP VM interface! + exit 1 +fi + +if [ "$op" == "add" ] +then + dhcp_port=`ovs-ofctl show $br | grep $iface | cut -d '(' -f 1|tr -d ' '` + ovs-ofctl add-flow $br priority=200,arp,dl_vlan=$sec_iso_vlan,nw_dst=$dhcp_ip,actions=strip_vlan,output:$dhcp_port + ovs-ofctl add-flow $br priority=150,dl_vlan=$sec_iso_vlan,dl_dst=$dhcp_mac,actions=strip_vlan,output:$dhcp_port + ovs-ofctl add-flow $br priority=100,udp,dl_vlan=$sec_iso_vlan,nw_dst=255.255.255.255,tp_dst=67,actions=strip_vlan,output:$dhcp_port +else + ovs-ofctl del-flows --strict $br priority=200,arp,dl_vlan=$sec_iso_vlan,nw_dst=$dhcp_ip + ovs-ofctl del-flows --strict $br priority=150,dl_vlan=$sec_iso_vlan,dl_dst=$dhcp_mac + ovs-ofctl del-flows --strict $br priority=100,udp,dl_vlan=$sec_iso_vlan,nw_dst=255.255.255.255,tp_dst=67 +fi diff --git a/scripts/vm/network/ovs-pvlan-vm.sh b/scripts/vm/network/ovs-pvlan-vm.sh new file mode 100755 index 00000000000..fd384814cc4 --- /dev/null +++ b/scripts/vm/network/ovs-pvlan-vm.sh @@ -0,0 +1,99 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#!/bin/bash + +usage() { + printf "Usage: %s: (-A|-D) -b -p -i -d -m -v -h \n" $(basename $0) >&2 + exit 2 +} + +br= +pri_vlan= +sec_iso_vlan= +dhcp_ip= +dhcp_mac= +vm_mac= +op= + +while getopts 'ADb:p:i:d:m:v:h' OPTION +do + case $OPTION in + A) op="add" + ;; + D) op="del" + ;; + b) br="$OPTARG" + ;; + p) pri_vlan="$OPTARG" + ;; + i) sec_iso_vlan="$OPTARG" + ;; + d) dhcp_ip="$OPTARG" + ;; + m) dhcp_mac="$OPTARG" + ;; + v) vm_mac="$OPTARG" + ;; + h) usage + exit 1 + ;; + esac +done + +if [ -z "$op" ] +then + echo Missing operation pararmeter! + exit 1 +fi + +if [ -z "$br" ] +then + echo Missing parameter bridge! + exit 1 +fi + +if [ -z "$vm_mac" ] +then + echo Missing parameter VM MAC! + exit 1 +fi + +if [ -z "$pri_vlan" ] +then + echo Missing parameter secondary isolate vlan! + exit 1 +fi + +if [ -z "$sec_iso_vlan" ] +then + echo Missing parameter secondary isolate vlan! + exit 1 +fi + +trunk_port=1 + +if [ "$op" == "add" ] +then + ovs-ofctl add-flow $br priority=50,dl_vlan=0xffff,dl_src=$vm_mac,actions=mod_vlan_vid:$sec_iso_vlan,resubmit:$trunk_port + ovs-ofctl add-flow $br priority=60,dl_vlan=$sec_iso_vlan,dl_src=$vm_mac,actions=output:$trunk_port +else + ovs-ofctl del-flows --strict $br priority=50,dl_vlan=0xffff,dl_src=$vm_mac + ovs-ofctl del-flows --strict $br priority=60,dl_vlan=$sec_iso_vlan,dl_src=$vm_mac +fi + diff --git a/server/pom.xml b/server/pom.xml index 82f56262312..a29f9f1286a 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -90,6 +90,11 @@ cloud-api ${project.version} + + org.apache.cloudstack + cloud-framework-ipc + ${project.version} + org.apache.cloudstack cloud-framework-events diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index b54b1c1f527..b6286aab8da 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -78,7 +78,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { @Inject ConsoleProxyDao _consoleProxyDao = null; @Inject SecondaryStorageVmDao _secStorgaeVmDao = null; @Inject ConfigurationDao _configDao = null; - @Inject GuestOSDao _guestOSDao = null; + @Inject GuestOSDao _guestOSDao = null; @Inject GuestOSCategoryDao _guestOSCategoryDao = null; @Inject VMInstanceDao _vmInstanceDao = null; @Inject ResourceManager _resourceMgr; @@ -88,17 +88,17 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { boolean _checkHvm = true; protected String _allocationAlgorithm = "random"; @Inject CapacityManager _capacityMgr; - - + + @Override public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) { return allocateTo(vmProfile, plan, type, avoid, 
returnUpTo, true); } - + @Override public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) { - + long dcId = plan.getDataCenterId(); Long podId = plan.getPodId(); Long clusterId = plan.getClusterId(); @@ -110,19 +110,19 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not return new ArrayList(); } - + if(s_logger.isDebugEnabled()){ s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId ); } - + String hostTagOnOffering = offering.getHostTag(); String hostTagOnTemplate = template.getTemplateTag(); - + boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false; boolean hasTemplateTag = hostTagOnTemplate != null ? true : false; - + List clusterHosts = new ArrayList(); - + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (haVmTag != null) { clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, haVmTag); @@ -133,31 +133,31 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { List hostsMatchingOfferingTag = new ArrayList(); List hostsMatchingTemplateTag = new ArrayList(); if (hasSvcOfferingTag){ - if (s_logger.isDebugEnabled()){ + if (s_logger.isDebugEnabled()){ s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); } hostsMatchingOfferingTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering); - if (s_logger.isDebugEnabled()){ + if (s_logger.isDebugEnabled()){ s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag); - } + } } if (hasTemplateTag){ - if (s_logger.isDebugEnabled()){ + if (s_logger.isDebugEnabled()){ s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); } - hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()){ + hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); + if (s_logger.isDebugEnabled()){ s_logger.debug("Hosts with tag '" + hostTagOnTemplate+"' are:" + hostsMatchingTemplateTag); - } + } } - + if (hasSvcOfferingTag && hasTemplateTag){ hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag); - clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()){ + clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); + if (s_logger.isDebugEnabled()){ s_logger.debug("Found "+ hostsMatchingOfferingTag.size() +" Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag); } - + clusterHosts = hostsMatchingOfferingTag; } else { if (hasSvcOfferingTag){ @@ -168,7 +168,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } } } - + + // add all hosts that we are not considering to the avoid list + List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null); + allhostsInCluster.removeAll(clusterHosts); + for (HostVO host : allhostsInCluster) { + avoid.addHost(host.getId()); + } + return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account); } @@ -226,11 +233,11 @@ public class FirstFitAllocator extends AdapterBase implements 
HostAllocator { }else if(_allocationAlgorithm.equals("userdispersing")){ hosts = reorderHostsByNumberOfVms(plan, hosts, account); } - + if (s_logger.isDebugEnabled()) { s_logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: "+hosts); } - + // We will try to reorder the host lists such that we give priority to hosts that have // the minimums to support a VM's requirements hosts = prioritizeHosts(template, hosts); @@ -242,7 +249,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (s_logger.isDebugEnabled()) { s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize()); } - + List suitableHosts = new ArrayList(); for (HostVO host : hosts) { @@ -255,7 +262,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } continue; } - + //find number of guest VMs occupying capacity on this host. if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)){ if (s_logger.isDebugEnabled()) { @@ -285,13 +292,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (s_logger.isDebugEnabled()) { s_logger.debug("Not using host " + host.getId() + "; numCpusGood: " + numCpusGood + "; cpuFreqGood: " + cpuFreqGood + ", host has capacity?" + hostHasCapacity); } + avoid.addHost(host.getId()); } } - + if (s_logger.isDebugEnabled()) { s_logger.debug("Host Allocator returning "+suitableHosts.size() +" suitable hosts"); } - + return suitableHosts; } @@ -302,26 +310,26 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { long dcId = plan.getDataCenterId(); Long podId = plan.getPodId(); Long clusterId = plan.getClusterId(); - + List hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId()); if (s_logger.isDebugEnabled()) { s_logger.debug("List of hosts in ascending order of number of VMs: "+ hostIdsByVmCount); } - + //now filter the given list of Hosts by this ordered list - Map hostMap = new HashMap(); + Map hostMap = new HashMap(); for (HostVO host : hosts) { hostMap.put(host.getId(), host); } List matchingHostIds = new ArrayList(hostMap.keySet()); - + hostIdsByVmCount.retainAll(matchingHostIds); - + List reorderedHosts = new ArrayList(); for(Long id: hostIdsByVmCount){ reorderedHosts.add(hostMap.get(id)); } - + return reorderedHosts; } @@ -336,13 +344,13 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (template == null) { return hosts; } - + // Determine the guest OS category of the template String templateGuestOSCategory = getTemplateGuestOSCategory(template); - + List prioritizedHosts = new ArrayList(); List noHvmHosts = new ArrayList(); - + // If a template requires HVM and a host doesn't support HVM, remove it from consideration List hostsToCheck = new ArrayList(); if (template.isRequiresHvm()) { @@ -356,7 +364,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } else { hostsToCheck.addAll(hosts); } - + if (s_logger.isDebugEnabled()) { if (noHvmHosts.size() > 0) { s_logger.debug("Not considering hosts: " + noHvmHosts + " to deploy template: " + template +" as they are not HVM enabled"); @@ -376,10 +384,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { lowPriorityHosts.add(host); } } - + hostsToCheck.removeAll(highPriorityHosts); hostsToCheck.removeAll(lowPriorityHosts); - + // Prioritize the remaining hosts by HVM capability for (HostVO host : hostsToCheck) { if 
(!template.isRequiresHvm() && !hostSupportsHVM(host)) { @@ -390,21 +398,21 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { prioritizedHosts.add(host); } } - + // Merge the lists prioritizedHosts.addAll(0, highPriorityHosts); prioritizedHosts.addAll(lowPriorityHosts); - + return prioritizedHosts; } - + protected boolean hostSupportsHVM(HostVO host) { if ( !_checkHvm ) { return true; } // Determine host capabilities String caps = host.getCapabilities(); - + if (caps != null) { String[] tokens = caps.split(","); for (String token : tokens) { @@ -413,24 +421,24 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } } } - + return false; } - + protected String getHostGuestOSCategory(HostVO host) { DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), "guest.os.category.id"); if (hostDetail != null) { String guestOSCategoryIdString = hostDetail.getValue(); long guestOSCategoryId; - + try { guestOSCategoryId = Long.parseLong(guestOSCategoryIdString); } catch (Exception e) { return null; } - + GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId); - + if (guestOSCategory != null) { return guestOSCategory.getName(); } else { @@ -440,7 +448,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { return null; } } - + protected String getTemplateGuestOSCategory(VMTemplateVO template) { long guestOSId = template.getGuestOSId(); GuestOSVO guestOS = _guestOSDao.findById(guestOSId); @@ -455,7 +463,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { Map configs = _configDao.getConfiguration(params); String opFactor = configs.get("cpu.overprovisioning.factor"); _factor = NumbersUtil.parseFloat(opFactor, 1); - + String allocationAlgorithm = configs.get("vm.allocation.algorithm"); if (allocationAlgorithm != null) { _allocationAlgorithm = allocationAlgorithm; diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index 26f6fe0ff1a..bf31b6803bc 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -1684,4 +1684,9 @@ public class ApiDBUtils { public static List listSiteLoadBalancers(long gslbRuleId) { return _gslbService.listSiteLoadBalancers(gslbRuleId); } + + public static String getDnsNameConfiguredForGslb() { + String providerDnsName = _configDao.getValue(Config.CloudDnsName.key()); + return providerDnsName; + } } diff --git a/server/src/com/cloud/api/ApiDispatcher.java b/server/src/com/cloud/api/ApiDispatcher.java index b4437ce6193..b7d08e2c872 100755 --- a/server/src/com/cloud/api/ApiDispatcher.java +++ b/server/src/com/cloud/api/ApiDispatcher.java @@ -168,7 +168,7 @@ public class ApiDispatcher { pageSize = Long.valueOf((String) pageSizeObj); } - if ((unpackedParams.get(ApiConstants.PAGE) == null) && (pageSize != null && pageSize != BaseListCmd.PAGESIZE_UNLIMITED)) { + if ((unpackedParams.get(ApiConstants.PAGE) == null) && (pageSize != null && !pageSize.equals(BaseListCmd.PAGESIZE_UNLIMITED))) { ServerApiException ex = new ServerApiException(ApiErrorCode.PARAM_ERROR, "\"page\" parameter is required when \"pagesize\" is specified"); ex.setCSErrorCode(CSExceptionErrorCode.getCSErrCode(ex.getClass().getName())); throw ex; diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index fc5ff597811..ae666fe9add 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ 
b/server/src/com/cloud/api/ApiResponseHelper.java @@ -792,7 +792,8 @@ public class ApiResponseHelper implements ResponseGenerator { response.setAlgorithm(globalLoadBalancerRule.getAlgorithm()); response.setStickyMethod(globalLoadBalancerRule.getPersistence()); response.setServiceType(globalLoadBalancerRule.getServiceType()); - response.setServiceDomainName(globalLoadBalancerRule.getGslbDomain()); + response.setServiceDomainName(globalLoadBalancerRule.getGslbDomain() + "." + + ApiDBUtils.getDnsNameConfiguredForGslb()); response.setName(globalLoadBalancerRule.getName()); response.setDescription(globalLoadBalancerRule.getDescription()); response.setRegionIdId(globalLoadBalancerRule.getRegion()); diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index d7ce1cf1f6f..df61391e639 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -2468,7 +2468,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // offerings private boolean isPermissible(Long accountDomainId, Long offeringDomainId) { - if (accountDomainId == offeringDomainId) { + if (accountDomainId.equals(offeringDomainId)) { return true; // account and service offering in same domain } diff --git a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java index 9795fef66fd..ce20562d5f7 100644 --- a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java @@ -73,6 +73,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); @@ -552,6 +564,20 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); reservedMemory += so.getRamSize() * 1024L * 1024L; reservedCpu += so.getCpu() * so.getSpeed(); + } else { + // signal if not done already, that the VM has been stopped for skip.counting.hours, + // hence capacity will not be reserved anymore. 
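(Aside on the pattern in this CapacityManagerImpl hunk: once a stopped VM passes the skip-counting window, the manager publishes a one-shot "VM_ReservedCapacity_Free" event and latches a per-VM detail flag so the message is not re-sent on every capacity scan; the flag is re-armed when the VM is stopped again. A minimal, self-contained sketch of that latch follows, with generic names standing in for the MessageBus and UserVmDetailsDao plumbing used by the real code.)

// Illustrative latch only; the patch itself uses _messageBus.publish(...) and the
// MESSAGE_RESERVED_CAPACITY_FREED_FLAG vm detail shown in the surrounding hunk.
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

public final class ReservedCapacityFreedLatch {
    private final Map<Long, Boolean> flagByVmId = new HashMap<>(); // stands in for the vm detail row
    private final Consumer<Long> publish;                          // stands in for the message bus

    ReservedCapacityFreedLatch(Consumer<Long> publish) { this.publish = publish; }

    void onReservationExpired(long vmId) {
        if (!flagByVmId.getOrDefault(vmId, false)) {   // publish at most once per stop cycle
            publish.accept(vmId);
            flagByVmId.put(vmId, true);
        }
    }

    void onVmStopped(long vmId) {
        flagByVmId.put(vmId, false);                   // re-arm, as the patch does on State.Stopped
    }

    public static void main(String[] args) {
        ReservedCapacityFreedLatch latch = new ReservedCapacityFreedLatch(
                id -> System.out.println("VM_ReservedCapacity_Free for vm " + id));
        latch.onReservationExpired(7L);
        latch.onReservationExpired(7L);                // no second publish
    }
}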
+ UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG); + if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) { + _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm); + + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVM = _userVMDao.findById(vm.getId()); + _userVMDao.loadDetails(userVM); + userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true"); + _userVMDao.saveDetails(userVM); + } + } } } @@ -681,13 +707,25 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if ((newState == State.Starting || newState == State.Migrating || event == Event.AgentReportMigrated) && vm.getHostId() != null) { boolean fromLastHost = false; - if (vm.getLastHostId() == vm.getHostId()) { + if (vm.getHostId().equals(vm.getLastHostId())) { s_logger.debug("VM starting again on the last host it was stopped on"); fromLastHost = true; } allocateVmCapacity(vm, fromLastHost); } + if (newState == State.Stopped) { + if (vm.getType() == VirtualMachine.Type.User) { + + UserVmVO userVM = _userVMDao.findById(vm.getId()); + _userVMDao.loadDetails(userVM); + // free the message sent flag if it exists + userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "false"); + _userVMDao.saveDetails(userVM); + + } + } + return true; } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 77ca2de1923..e1d3751f290 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -214,6 +214,8 @@ public enum Config { SecStorageProxy("Advanced", AgentManager.class, String.class, "secstorage.proxy", null, "http proxy used by ssvm, in http://username:password@proxyserver:port format", null), AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null), AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null), + HostReservationReleasePeriod("Advanced", ManagementServer.class, Integer.class, "host.reservation.release.period", "300000", "The interval in milliseconds between host reservation release checks", null), + // LB HealthCheck Interval. LBHealthCheck("Advanced", ManagementServer.class, String.class, "healthcheck.update.interval", "600", @@ -235,6 +237,7 @@ public enum Config { ApplyAllocationAlgorithmToPods("Advanced", ManagementServer.class, Boolean.class, "apply.allocation.algorithm.to.pods", "false", "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation", "true,false"), VmUserDispersionWeight("Advanced", ManagementServer.class, Float.class, "vm.user.dispersion.weight", "1", "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. 
Weight for capacity heuristic will be (1 - weight of user dispersion)", null), VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null), + VmDeploymentPlanner("Advanced", ManagementServer.class, String.class, "vm.deployment.planner", "FirstFitPlanner", "'FirstFitPlanner', 'UserDispersingPlanner', 'UserConcentratedPodPlanner': DeploymentPlanner heuristic that will be used for VM deployment.", null), EndpointeUrl("Advanced", ManagementServer.class, String.class, "endpointe.url", "http://localhost:8080/client/api", "Endpointe Url", null), ElasticLoadBalancerEnabled("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.enabled", "false", "Whether the load balancing service is enabled for basic zones", "true,false"), ElasticLoadBalancerNetwork("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.network", "guest", "Whether the elastic load balancing service public ips are taken from the public or guest network", "guest,public"), diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java b/server/src/com/cloud/configuration/ConfigurationManager.java index 84ffc3e1aad..d0ae914c20f 100755 --- a/server/src/com/cloud/configuration/ConfigurationManager.java +++ b/server/src/com/cloud/configuration/ConfigurationManager.java @@ -79,10 +79,11 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * TODO * @param id * @param useVirtualNetwork + * @param deploymentPlanner * @return ID */ ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, - boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate); + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner); /** * Creates a new disk offering diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 28375358f64..174a7ad7c2c 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -162,6 +162,7 @@ import com.cloud.org.Grouping.AllocationState; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; import com.cloud.server.ConfigurationServer; +import com.cloud.server.ManagementService; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; @@ -345,6 +346,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Inject NicIpAliasDao _nicIpAliasDao; + @Inject + public ManagementService _mgr; + // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? 
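The createServiceOffering hunk that follows now accepts a per-offering deployment planner and rejects names that are not registered. A standalone sketch of that guard under simplified assumptions: listDeploymentPlanners() here is a plain list of names standing in for the ManagementService call, and the planner names are illustrative.

import java.util.Arrays;
import java.util.List;

// Standalone sketch: validate a user-supplied planner name before persisting the offering.
public class PlannerNameGuard {
    // Stand-in for ManagementService.listDeploymentPlanners(); names are illustrative.
    static List<String> listDeploymentPlanners() {
        return Arrays.asList("FirstFitPlanner", "UserDispersingPlanner", "UserConcentratedPodPlanner");
    }

    static void validatePlanner(String requested) {
        if (requested == null) {
            return; // no override requested; the global vm.deployment.planner setting applies
        }
        List<String> planners = listDeploymentPlanners();
        if (planners == null || planners.isEmpty()) {
            throw new IllegalArgumentException("No deployment planners found");
        }
        if (!planners.contains(requested)) {
            throw new IllegalArgumentException(
                "Invalid name for Deployment Planner specified, please use listDeploymentPlanners to get the valid set");
        }
    }

    public static void main(String[] args) {
        validatePlanner("UserDispersingPlanner");        // accepted
        try {
            validatePlanner("NoSuchPlanner");            // rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}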
@Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao; @@ -2033,17 +2037,29 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Network rate can be specified only for non-System offering and system offerings having \"domainrouter\" systemvmtype"); } + if (cmd.getDeploymentPlanner() != null) { + List planners = _mgr.listDeploymentPlanners(); + if (planners != null && !planners.isEmpty()) { + if (!planners.contains(cmd.getDeploymentPlanner())) { + throw new InvalidParameterValueException( + "Invalid name for Deployment Planner specified, please use listDeploymentPlanners to get the valid set"); + } + } else { + throw new InvalidParameterValueException("No deployment planners found"); + } + } + return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(), - localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate()); + localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner()); } @Override @ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering") public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText, - boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) { tags = cleanupTags(tags); ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type, - domainId, hostTag); + domainId, hostTag, deploymentPlanner); if ((offering = _serviceOfferingDao.persist(offering)) != null) { UserContext.current().setEventDetails("Service offering id=" + offering.getId()); @@ -2490,7 +2506,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if ( vlans != null && vlans.size() > 0 ) { if ( vlanId == null ) { vlanId = vlan.getVlanTag(); - } else if ( vlan.getVlanTag() != vlanId ) { + } else if (!vlan.getVlanTag().equals(vlanId)) { throw new InvalidParameterValueException("there is already one vlan " + vlan.getVlanTag() + " on network :" + + network.getId() + ", only one vlan is allowed on guest network"); } @@ -2656,6 +2672,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (uri != null) { String[] vlan = uri.toString().split("vlan:\\/\\/"); networkVlanId = vlan[1]; + //For pvlan + networkVlanId = networkVlanId.split("-")[0]; } if (vlanId != null) { diff --git a/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java index 3cfdf22bf08..23663334dbd 100644 --- a/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java +++ b/server/src/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java @@ -57,7 +57,7 @@ AgentBasedConsoleProxyManager { if 
(allocatedHost == null) { /*Is there a consoleproxy agent running in the same pod?*/ for (HostVO hv : hosts) { - if (hv.getType() == Host.Type.ConsoleProxy && hv.getPodId() == host.getPodId()) { + if (hv.getType() == Host.Type.ConsoleProxy && hv.getPodId().equals(host.getPodId())) { allocatedHost = hv; break; } diff --git a/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java b/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java deleted file mode 100755 index 7665687be60..00000000000 --- a/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy; - -import java.util.Map; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import com.cloud.configuration.Config; -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.utils.component.AdapterBase; -import com.cloud.vm.UserVmVO; - -public abstract class AbstractDeployPlannerSelector extends AdapterBase implements DeployPlannerSelector { - protected Map params; - protected String name; - protected int runLevel; - - @Inject - protected ConfigurationDao _configDao; - protected String _allocationAlgorithm = "random"; - - @Override - public String getName() { - return name; - } - - @Override - public void setName(String name) { - this.name = name; - } - - @Override - public void setConfigParams(Map params) { - this.params = params; - } - - @Override - public Map getConfigParams() { - return params; - } - - @Override - public int getRunLevel() { - return runLevel; - } - - @Override - public void setRunLevel(int level) { - this.runLevel = level; - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key()); - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } -} diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index c7162a2003f..c45c8d0b807 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -16,32 +16,107 @@ // under the License. 
package com.cloud.deploy; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.TreeSet; import javax.ejb.Local; import javax.inject.Inject; +import javax.naming.ConfigurationException; import org.apache.cloudstack.affinity.AffinityGroupProcessor; -import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; + import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.framework.messagebus.MessageSubscriber; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; + +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.cluster.ManagementServerNode; +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.Pod; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.dao.PlannerHostReservationDao; import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; +import com.cloud.org.Grouping; +import com.cloud.resource.ResourceState; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.AccountManager; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.agent.AgentManager; +import com.cloud.agent.Listener; +import com.cloud.agent.api.AgentControlAnswer; +import com.cloud.agent.api.AgentControlCommand; +import com.cloud.agent.api.Answer; 
+import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; +import com.cloud.agent.manager.allocator.HostAllocator; + @Local(value = { DeploymentPlanningManager.class }) -public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager { +public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener { private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class); @Inject + AgentManager _agentMgr; + @Inject protected UserVmDao _vmDao; @Inject protected VMInstanceDao _vmInstanceDao; @@ -49,6 +124,53 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy protected AffinityGroupDao _affinityGroupDao; @Inject protected AffinityGroupVMMapDao _affinityGroupVMMapDao; + @Inject + DataCenterDao _dcDao; + @Inject + PlannerHostReservationDao _plannerHostReserveDao; + private int _vmCapacityReleaseInterval; + @Inject + MessageBus _messageBus; + private Timer _timer = null; + private long _hostReservationReleasePeriod = 60L * 60L * 1000L; // one hour by default + + private static final long INITIAL_RESERVATION_RELEASE_CHECKER_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds + protected long _nodeId = -1; + + protected List _storagePoolAllocators; + public List getStoragePoolAllocators() { + return _storagePoolAllocators; + } + public void setStoragePoolAllocators( + List _storagePoolAllocators) { + this._storagePoolAllocators = _storagePoolAllocators; + } + + protected List _hostAllocators; + public List getHostAllocators() { + return _hostAllocators; + } + public void setHostAllocators(List _hostAllocators) { + this._hostAllocators = _hostAllocators; + } + + @Inject protected HostDao _hostDao; + @Inject protected HostPodDao _podDao; + @Inject protected ClusterDao _clusterDao; + @Inject protected GuestOSDao _guestOSDao = null; + @Inject protected GuestOSCategoryDao _guestOSCategoryDao = null; + @Inject protected DiskOfferingDao _diskOfferingDao; + @Inject protected StoragePoolHostDao _poolHostDao; + + @Inject protected VolumeDao _volsDao; + @Inject protected CapacityManager _capacityMgr; + @Inject protected ConfigurationDao _configDao; + @Inject protected PrimaryDataStoreDao _storagePoolDao; + @Inject protected CapacityDao _capacityDao; + @Inject protected AccountManager _accountMgr; + @Inject protected StorageManager _storageMgr; + @Inject DataStoreManager dataStoreMgr; + @Inject protected ClusterDetailsDao _clusterDetailsDao; protected List _planners; public List getPlanners() { @@ -87,20 +209,908 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy } // call planners - DeployDestination dest = null; - for (DeploymentPlanner planner : _planners) { - if (planner.canHandle(vmProfile, plan, avoids)) { - dest = planner.plan(vmProfile, plan, avoids); - } else { - continue; + DataCenter dc = _dcDao.findById(vm.getDataCenterId()); + // check if datacenter is in avoid set + if (avoids.shouldAvoid(dc)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("DataCenter id = '" + dc.getId() + + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } - if (dest != null) { - avoids.addHost(dest.getHost().getId()); + return null; + } + + + ServiceOffering offering = vmProfile.getServiceOffering(); + String plannerName = offering.getDeploymentPlanner(); + if (plannerName == null) { + if (vm.getHypervisorType() 
== HypervisorType.BareMetal) { + plannerName = "BareMetalPlanner"; + } else { + plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key()); + } + } + DeploymentPlanner planner = null; + for (DeploymentPlanner plannerInList : _planners) { + if (plannerName.equals(plannerInList.getName())) { + planner = plannerInList; break; } - } + + int cpu_requested = offering.getCpu() * offering.getSpeed(); + long ram_requested = offering.getRamSize() * 1024L * 1024L; + + if (s_logger.isDebugEnabled()) { + s_logger.debug("DeploymentPlanner allocation algorithm: " + planner); + + s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + + ", requested ram: " + ram_requested); + + s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + + (plan.getPoolId() != null ? "Yes" : "No")); + } + + String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); + + if (plan.getHostId() != null && haVmTag == null) { + Long hostIdSpecified = plan.getHostId(); + if (s_logger.isDebugEnabled()) { + s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + + hostIdSpecified); + } + HostVO host = _hostDao.findById(hostIdSpecified); + if (host == null) { + s_logger.debug("The specified host cannot be found"); + } else if (avoids.shouldAvoid(host)) { + s_logger.debug("The specified host is in avoid set"); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); + } + + // search for storage under the zone, pod, cluster of the host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), + host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext()); + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, + lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner)); + if (potentialResources != null) { + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from destination, since + // we don't have to prepare this volume. 
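The planning code above resolves the planner name in three steps: the service offering's own deploymentPlanner if set, a forced BareMetalPlanner for bare-metal VMs, otherwise the vm.deployment.planner global setting. A condensed standalone sketch of that resolution order; the enum and the config value passed in are simplified stand-ins.

// Standalone sketch of the planner-name resolution order used above.
public class PlannerResolution {
    enum HypervisorType { XenServer, KVM, VMware, BareMetal }   // simplified stand-in

    static String resolvePlannerName(String offeringPlanner, HypervisorType hv, String globalDefault) {
        if (offeringPlanner != null) {
            return offeringPlanner;            // per-offering override wins
        }
        if (hv == HypervisorType.BareMetal) {
            return "BareMetalPlanner";         // bare metal always uses its own planner
        }
        return globalDefault;                  // vm.deployment.planner (default FirstFitPlanner)
    }

    public static void main(String[] args) {
        System.out.println(resolvePlannerName(null, HypervisorType.KVM, "FirstFitPlanner"));
        System.out.println(resolvePlannerName("UserDispersingPlanner", HypervisorType.KVM, "FirstFitPlanner"));
        System.out.println(resolvePlannerName(null, HypervisorType.BareMetal, "FirstFitPlanner"));
    }
}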
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } + s_logger.debug("Cannnot deploy to specified host, returning."); + return null; + } + + if (vm.getLastHostId() != null && haVmTag == null) { + s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); + + HostVO host = _hostDao.findById(vm.getLastHostId()); + if (host == null) { + s_logger.debug("The last host of this VM cannot be found"); + } else if (avoids.shouldAvoid(host)) { + s_logger.debug("The last host of this VM is in avoid set"); + } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { + s_logger.debug("The last Host, hostId: " + + host.getId() + + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); + } else { + if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, + "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, + "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, + cpuOvercommitRatio, memoryOvercommitRatio, true)) { + s_logger.debug("The last host of this VM is UP and has enough capacity"); + s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); + // search for storage under the zone, pod, cluster of + // the last host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), + host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + Pair>, List> result = findSuitablePoolsForVolumes( + vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + // choose the potential pool for this VM for this host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner)); + if (potentialResources != null) { + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from + // destination, since we don't have to prepare + // this volume. 
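The last-host fast path above reuses the host the VM last ran on only if it is Up, Enabled, below its guest limit, and has enough CPU and RAM once the cluster's overcommit ratios are applied. A simplified standalone model of that capacity test; the real checkIfHostHasCapacity in CapacityManager also accounts for reserved capacity and other checks, so this is only an approximation of the arithmetic.

// Standalone, simplified model of "does this host still fit the VM" under
// cluster-level overcommit ratios.
public class OvercommitCapacityCheck {
    static boolean hasCapacity(long usedCpuMhz, long totalCpuMhz, float cpuOvercommit,
                               long usedRamBytes, long totalRamBytes, float ramOvercommit,
                               int requestedCpuMhz, long requestedRamBytes) {
        double effectiveCpu = totalCpuMhz * cpuOvercommit;
        double effectiveRam = totalRamBytes * ramOvercommit;
        return usedCpuMhz + requestedCpuMhz <= effectiveCpu
                && usedRamBytes + requestedRamBytes <= effectiveRam;
    }

    public static void main(String[] args) {
        // offering: 2 cores x 1000 MHz, 2 GiB RAM -- mirrors cpu_requested / ram_requested above
        int cpuRequested = 2 * 1000;
        long ramRequested = 2048L * 1024L * 1024L;
        boolean fits = hasCapacity(14000, 16000, 1.0f,
                                   28L << 30, 32L << 30, 1.0f,
                                   cpuRequested, ramRequested);
        System.out.println("last host fits: " + fits);   // true: 2000 MHz and 2 GiB still free
    }
}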
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } else { + s_logger.debug("The last host of this VM does not have enough capacity"); + } + } else { + s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + + host.getStatus().name() + ", host resource state is: " + host.getResourceState()); + } + } + s_logger.debug("Cannot choose the last host to deploy this VM "); + } + + DeployDestination dest = null; + List clusterList = null; + + if (planner != null && planner.canHandle(vmProfile, plan, avoids)) { + while (true) { + + if (planner instanceof DeploymentClusterPlanner) { + + ExcludeList PlannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), + avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), + avoids.getPoolsToAvoid()); + + clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids); + + if (clusterList != null && !clusterList.isEmpty()) { + // planner refactoring. call allocators to list hosts + ExcludeList PlannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), + avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), + avoids.getPoolsToAvoid()); + + resetAvoidSet(PlannerAvoidOutput, PlannerAvoidInput); + + dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, + getPlannerUsage(planner), PlannerAvoidOutput); + if (dest != null) { + return dest; + } + // reset the avoid input to the planners + resetAvoidSet(avoids, PlannerAvoidOutput); + + } else { + return null; + } + } else { + dest = planner.plan(vmProfile, plan, avoids); + if (dest != null) { + long hostId = dest.getHost().getId(); + avoids.addHost(dest.getHost().getId()); + + if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { + // found destination + return dest; + } else { + // find another host - seems some concurrent + // deployment picked it up for dedicated access + continue; + } + } else { + return null; + } + } + } + } + + return dest; } + private void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) { + if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) { + avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid()); + } + if (avoidSet.getPodsToAvoid() != null && removeSet.getPodsToAvoid() != null) { + avoidSet.getPodsToAvoid().removeAll(removeSet.getPodsToAvoid()); + } + if (avoidSet.getClustersToAvoid() != null && removeSet.getClustersToAvoid() != null) { + avoidSet.getClustersToAvoid().removeAll(removeSet.getClustersToAvoid()); + } + if (avoidSet.getHostsToAvoid() != null && removeSet.getHostsToAvoid() != null) { + avoidSet.getHostsToAvoid().removeAll(removeSet.getHostsToAvoid()); + } + if (avoidSet.getPoolsToAvoid() != null && removeSet.getPoolsToAvoid() != null) { + avoidSet.getPoolsToAvoid().removeAll(removeSet.getPoolsToAvoid()); + } + } + + private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner) { + if (planner != null && planner instanceof DeploymentClusterPlanner) { + return ((DeploymentClusterPlanner) planner).getResourceUsage(); + } else { + return DeploymentPlanner.PlannerResourceUsage.Shared; + } + + } + + @DB + private boolean checkIfHostFitsPlannerUsage(long hostId, PlannerResourceUsage resourceUsageRequired) { + // TODO 
Auto-generated method stub + // check if this host has been picked up by some other planner + // exclusively + // if planner can work with shared host, check if this host has + // been marked as 'shared' + // else if planner needs dedicated host, + + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null) { + long id = reservationEntry.getId(); + PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage(); + + if (hostResourceType != null) { + if (hostResourceType == resourceUsageRequired) { + return true; + } else { + s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + + ", since this host has been reserved for planner usage : " + hostResourceType); + return false; + } + } else { + // reserve the host for required resourceType + // let us lock the reservation entry before updating. + final Transaction txn = Transaction.currentTxn(); + + try { + txn.start(); + + final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); + if (lockedEntry == null) { + s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + return false; + } + // check before updating + if (lockedEntry.getResourceUsage() == null) { + lockedEntry.setResourceUsage(resourceUsageRequired); + _plannerHostReserveDao.persist(lockedEntry); + return true; + } else { + // someone updated it earlier. check if we can still use it + if (lockedEntry.getResourceUsage() == resourceUsageRequired) { + return true; + } else { + s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + + ", since this host has been reserved for planner usage : " + hostResourceType); + return false; + } + } + } finally { + txn.commit(); + } + } + + } + + return false; + } + + @DB + public boolean checkHostReservationRelease(Long hostId) { + + if (hostId != null) { + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null && reservationEntry.getResourceUsage() != null) { + + // check if any VMs are starting or running on this host + List vms = _vmInstanceDao.listUpByHostId(hostId); + if (vms.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + + hostId); + } + return false; + } + + List vmsByLastHostId = _vmInstanceDao.listByLastHostId(hostId); + if (vmsByLastHostId.size() > 0) { + // check if any VMs are within skip.counting.hours, if yes + // we + // cannot release the host + for (VMInstanceVO stoppedVM : vmsByLastHostId) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime() + .getTime()) / 1000; + if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found VM: " + stoppedVM + + " Stopped but reserved on host " + hostId); + } + return false; + } + } + } + + // check if any VMs are stopping on or migrating to this host + List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, + State.Stopping, State.Migrating, State.Starting); + if (vmsStoppingMigratingByHostId.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + + " VMs stopping/migrating on host " + hostId); + } + return false; + } + + // check if any VMs are in starting state with no hostId set yet + // - + // just ignore host release to avoid race condition + List 
vmsStartingNoHost = _vmInstanceDao.listStartingWithNoHostId(); + + if (vmsStartingNoHost.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + + " VMs starting as of now and no hostId yet stored"); + } + return false; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + } + + long id = reservationEntry.getId(); + final Transaction txn = Transaction.currentTxn(); + + try { + txn.start(); + + final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); + if (lockedEntry == null) { + s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + return false; + } + // check before updating + if (lockedEntry.getResourceUsage() != null) { + lockedEntry.setResourceUsage(null); + _plannerHostReserveDao.persist(lockedEntry); + return true; + } + } finally { + txn.commit(); + } + } + + } + return false; + } + + class HostReservationReleaseChecker extends TimerTask { + @Override + public void run() { + try { + s_logger.debug("Checking if any host reservation can be released ... "); + checkHostReservations(); + s_logger.debug("Done running HostReservationReleaseChecker ... "); + } catch (Throwable t) { + s_logger.error("Exception in HostReservationReleaseChecker", t); + } + } + } + + private void checkHostReservations() { + List reservedHosts = _plannerHostReserveDao.listAllReservedHosts(); + + for (PlannerHostReservationVO hostReservation : reservedHosts) { + HostVO host = _hostDao.findById(hostReservation.getHostId()); + if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { + checkHostReservationRelease(hostReservation.getHostId()); + } + } + + } + + @Override + public boolean processAnswers(long agentId, long seq, Answer[] answers) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean processCommands(long agentId, long seq, Command[] commands) { + // TODO Auto-generated method stub + return false; + } + + @Override + public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { + if (!(cmd instanceof StartupRoutingCommand)) { + return; + } + + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); + if (reservationEntry == null) { + // record the host in this table + PlannerHostReservationVO newHost = new PlannerHostReservationVO(host.getId(), host.getDataCenterId(), + host.getPodId(), host.getClusterId()); + _plannerHostReserveDao.persist(newHost); + } + + } + + @Override + public boolean processDisconnect(long agentId, Status state) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean isRecurring() { + // TODO Auto-generated method stub + return false; + } + + @Override + public int getTimeout() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public boolean processTimeout(long agentId, long seq) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean configure(final String name, final Map params) throws ConfigurationException { + _agentMgr.registerForHostEvents(this, true, false, true); + _messageBus.subscribe("VM_ReservedCapacity_Free", new MessageSubscriber() { + @Override + 
public void onPublishMessage(String senderAddress, String subject, Object obj) { + VMInstanceVO vm = ((VMInstanceVO) obj); + s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + + ", checking if host reservation can be released for host:" + vm.getLastHostId()); + Long hostId = vm.getLastHostId(); + checkHostReservationRelease(hostId); + } + }); + + _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), + 3600); + + String hostReservationReleasePeriod = _configDao.getValue(Config.HostReservationReleasePeriod.key()); + if (hostReservationReleasePeriod != null) { + _hostReservationReleasePeriod = Long.parseLong(hostReservationReleasePeriod); + if (_hostReservationReleasePeriod <= 0) + _hostReservationReleasePeriod = Long.parseLong(Config.HostReservationReleasePeriod.getDefaultValue()); + } + + _timer = new Timer("HostReservationReleaseChecker"); + + _nodeId = ManagementServerNode.getManagementServerId(); + + return super.configure(name, params); + } + + @Override + public boolean start() { + _timer.schedule(new HostReservationReleaseChecker(), INITIAL_RESERVATION_RELEASE_CHECKER_DELAY, + _hostReservationReleasePeriod); + return true; + } + + @Override + public boolean stop() { + _timer.cancel(); + return true; + } + + // /refactoring planner methods + private DeployDestination checkClustersforDestination(List clusterList, + VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, + DataCenter dc, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList PlannerAvoidOutput) { + + if (s_logger.isTraceEnabled()) { + s_logger.trace("ClusterId List to consider: " + clusterList); + } + + for (Long clusterId : clusterList) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + + if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { + s_logger.debug("Cluster: " + clusterId + + " has HyperVisorType that does not match the VM, skipping this cluster"); + avoid.addCluster(clusterVO.getId()); + continue; + } + + s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + // search for resources(hosts and storage) under this zone, pod, + // cluster. + DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), + clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); + + // find suitable hosts under this cluster, need as many hosts as we + // get. 
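The configure() method above wires a MessageBus subscription and schedules HostReservationReleaseChecker on a java.util.Timer, falling back to the catalog default for host.reservation.release.period when the configured value is not positive. A standalone sketch of that scheduling pattern, with the release check reduced to a log line and the absent-value handling simplified (the manager keeps a separate one-hour fallback when the setting is missing):

import java.util.Timer;
import java.util.TimerTask;

// Standalone sketch: sanitize the configured period, then schedule a periodic TimerTask.
public class ReservationReleaseTimerSketch {
    static final long DEFAULT_PERIOD_MS = 300000L;       // host.reservation.release.period default
    static final long INITIAL_DELAY_MS = 30L * 1000L;    // thirty seconds, as above

    static long sanitizePeriod(String configured) {
        if (configured == null) {
            return DEFAULT_PERIOD_MS;                    // simplified fallback for a missing value
        }
        long period = Long.parseLong(configured);
        return period <= 0 ? DEFAULT_PERIOD_MS : period;
    }

    public static void main(String[] args) throws InterruptedException {
        long period = sanitizePeriod("-1");              // non-positive: falls back to the default
        Timer timer = new Timer("HostReservationReleaseChecker");
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                // in the manager this calls checkHostReservations()
                System.out.println("Checking if any host reservation can be released ...");
            }
        }, INITIAL_DELAY_MS, period);

        Thread.sleep(100);                               // let the demo exit quickly
        timer.cancel();                                  // stop(), as in the manager
    }
}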
+ List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); + // if found suitable hosts in this cluster, find suitable storage + // pools for each volume of the VM + if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { + Pod pod = _podDao.findById(clusterVO.getPodId()); + DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); + return dest; + } + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, + potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential host and pool for the VM + if (!suitableVolumeStoragePools.isEmpty()) { + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired); + + if (potentialResources != null) { + Pod pod = _podDao.findById(clusterVO.getPodId()); + Host host = _hostDao.findById(potentialResources.first().getId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from destination, since + // we don't have to prepare this volume. + for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } else { + s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + } + } else { + s_logger.debug("No suitable hosts found under this Cluster: " + clusterId); + } + + if (canAvoidCluster(clusterVO, avoid, PlannerAvoidOutput)) { + avoid.addCluster(clusterVO.getId()); + } + } + s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); + return null; + } + + private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput) { + + ExcludeList allocatorAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), + avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid()); + + // remove any hosts/pools that the planners might have added + // to get the list of hosts/pools that Allocators flagged as 'avoid' + if (allocatorAvoidOutput.getHostsToAvoid() != null && plannerAvoidOutput.getHostsToAvoid() != null) { + allocatorAvoidOutput.getHostsToAvoid().removeAll(plannerAvoidOutput.getHostsToAvoid()); + } + if (allocatorAvoidOutput.getPoolsToAvoid() != null && plannerAvoidOutput.getPoolsToAvoid() != null) { + allocatorAvoidOutput.getPoolsToAvoid().removeAll(plannerAvoidOutput.getPoolsToAvoid()); + } + + // if all hosts or all pools in the cluster are in avoid set after this + // pass, then put the cluster in avoid set. 
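canAvoidCluster, whose body continues just below, first subtracts the planner's own avoid output from the combined avoid set so that only hosts and pools flagged by the allocators remain, and then treats the cluster as avoidable only when every host or every pool in it is in that remainder. A standalone sketch of the same set arithmetic using plain collections in place of the ExcludeList:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Standalone sketch: decide whether a whole cluster can go into the avoid set,
// based only on what the allocators (not the planner) flagged as unusable.
public class ClusterAvoidSketch {
    static boolean canAvoidCluster(Set<Long> avoidHosts, Set<Long> plannerAvoidHosts,
                                   List<Long> clusterHostIds,
                                   Set<Long> avoidPools, Set<Long> plannerAvoidPools,
                                   List<Long> clusterPoolIds) {
        // keep only the hosts/pools that the allocators added to the avoid set
        Set<Long> allocatorAvoidHosts = new HashSet<>(avoidHosts);
        allocatorAvoidHosts.removeAll(plannerAvoidHosts);
        Set<Long> allocatorAvoidPools = new HashSet<>(avoidPools);
        allocatorAvoidPools.removeAll(plannerAvoidPools);

        boolean avoidAllHosts = allocatorAvoidHosts.containsAll(clusterHostIds);
        boolean avoidAllPools = allocatorAvoidPools.containsAll(clusterPoolIds);
        return avoidAllHosts || avoidAllPools;
    }

    public static void main(String[] args) {
        Set<Long> avoidHosts = new HashSet<>(Arrays.asList(1L, 2L, 3L));
        Set<Long> plannerAvoidHosts = new HashSet<>(Arrays.asList(3L));   // planner reserved host 3
        Set<Long> avoidPools = new HashSet<>(Arrays.asList(10L));
        Set<Long> plannerAvoidPools = new HashSet<>();

        // allocators rejected hosts 1 and 2, which is every host in the cluster
        boolean avoidable = canAvoidCluster(avoidHosts, plannerAvoidHosts, Arrays.asList(1L, 2L),
                                            avoidPools, plannerAvoidPools, Arrays.asList(10L, 11L));
        System.out.println("cluster avoidable: " + avoidable);   // true
    }
}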
+ boolean avoidAllHosts = true, avoidAllPools = true; + + List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), + clusterVO.getPodId(), clusterVO.getDataCenterId(), null); + for (HostVO host : allhostsInCluster) { + if (allocatorAvoidOutput.getHostsToAvoid() == null + || !allocatorAvoidOutput.getHostsToAvoid().contains(host.getId())) { + // there's some host in the cluster that is not yet in avoid set + avoidAllHosts = false; + } + } + + List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), + clusterVO.getPodId(), clusterVO.getId(), null); + for (StoragePoolVO pool : allPoolsInCluster) { + if (allocatorAvoidOutput.getPoolsToAvoid() == null + || !allocatorAvoidOutput.getPoolsToAvoid().contains(pool.getId())) { + // there's some pool in the cluster that is not yet in avoid set + avoidAllPools = false; + } + } + + if (avoidAllHosts || avoidAllPools) { + return true; + } + return false; + } + + protected Pair> findPotentialDeploymentResources(List suitableHosts, + Map> suitableVolumeStoragePools, ExcludeList avoid, + DeploymentPlanner.PlannerResourceUsage resourceUsageRequired) { + s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); + + boolean hostCanAccessPool = false; + boolean haveEnoughSpace = false; + Map storage = new HashMap(); + TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { + @Override + public int compare(Volume v1, Volume v2) { + if (v1.getSize() < v2.getSize()) + return 1; + else + return -1; + } + }); + volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet()); + boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; + for (Host potentialHost : suitableHosts) { + Map> volumeAllocationMap = new HashMap>(); + for (Volume vol : volumesOrderBySizeDesc) { + haveEnoughSpace = false; + s_logger.debug("Checking if host: " + potentialHost.getId() + + " can access any suitable storage pool for volume: " + vol.getVolumeType()); + List volumePoolList = suitableVolumeStoragePools.get(vol); + hostCanAccessPool = false; + for (StoragePool potentialSPool : volumePoolList) { + if (hostCanAccessSPool(potentialHost, potentialSPool)) { + hostCanAccessPool = true; + if (multipleVolume) { + List requestVolumes = null; + if (volumeAllocationMap.containsKey(potentialSPool)) + requestVolumes = volumeAllocationMap.get(potentialSPool); + else + requestVolumes = new ArrayList(); + requestVolumes.add(vol); + + if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) + continue; + volumeAllocationMap.put(potentialSPool, requestVolumes); + } + storage.put(vol, potentialSPool); + haveEnoughSpace = true; + break; + } + } + if (!hostCanAccessPool) { + break; + } + if (!haveEnoughSpace) { + s_logger.warn("insufficient capacity to allocate all volumes"); + break; + } + } + if (hostCanAccessPool && haveEnoughSpace + && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) { + s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + + potentialHost.getName() + " and associated storage pools for this VM"); + return new Pair>(potentialHost, storage); + } else { + avoid.addHost(potentialHost.getId()); + } + } + s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); + return null; + } + + protected boolean hostCanAccessSPool(Host host, StoragePool pool) { + boolean hostCanAccessSPool = false; + + 
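findPotentialDeploymentResources above walks the volumes largest-first (a TreeSet with a size-descending comparator that never returns 0, so volumes of equal size are all retained) before trying to place each one on a pool the candidate host can reach. A standalone sketch of that ordering step with java.util.Comparator and a toy Volume stand-in:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

// Standalone sketch: order volumes by size, largest first, before matching
// each one to a storage pool reachable from the candidate host.
public class VolumeOrderingSketch {
    static class Vol {                       // toy stand-in for VolumeVO
        final long id;
        final long sizeBytes;
        Vol(long id, long sizeBytes) { this.id = id; this.sizeBytes = sizeBytes; }
    }

    public static void main(String[] args) {
        List<Vol> volumes = new ArrayList<Vol>();
        volumes.add(new Vol(1, 20L << 30));   // 20 GiB ROOT
        volumes.add(new Vol(2, 100L << 30));  // 100 GiB DATADISK
        volumes.add(new Vol(3, 100L << 30));  // equal sizes must both survive the ordering

        Collections.sort(volumes, new Comparator<Vol>() {
            @Override
            public int compare(Vol v1, Vol v2) {
                // descending by size; a list sort keeps equal-size volumes, unlike a
                // set whose comparator would treat "equal" as a duplicate
                return Long.compare(v2.sizeBytes, v1.sizeBytes);
            }
        });

        for (Vol v : volumes) {
            System.out.println("place volume " + v.id + " (" + (v.sizeBytes >> 30) + " GiB)");
        }
    }
}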
StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); + if (hostPoolLinkage != null) { + hostCanAccessSPool = true; + } + + s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + + pool.getId()); + return hostCanAccessSPool; + } + + protected List findSuitableHosts(VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + List suitableHosts = new ArrayList(); + for (HostAllocator allocator : _hostAllocators) { + suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); + if (suitableHosts != null && !suitableHosts.isEmpty()) { + break; + } + } + + if (suitableHosts.isEmpty()) { + s_logger.debug("No suitable hosts found"); + } + return suitableHosts; + } + + protected Pair>, List> findSuitablePoolsForVolumes( + VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, + int returnUpTo) { + List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); + Map> suitableVolumeStoragePools = new HashMap>(); + List readyAndReusedVolumes = new ArrayList(); + + // for each volume find list of suitable storage pools by calling the + // allocators + for (VolumeVO toBeCreated : volumesTobeCreated) { + s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + + toBeCreated.getVolumeType().name() + ")"); + + // If the plan specifies a poolId, it means that this VM's ROOT + // volume is ready and the pool should be reused. + // In this case, also check if rest of the volumes are ready and can + // be reused. + if (plan.getPoolId() != null) { + s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + + toBeCreated.getPoolId()); + List suitablePools = new ArrayList(); + StoragePool pool = null; + if (toBeCreated.getPoolId() != null) { + pool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); + } else { + pool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); + } + + if (!pool.isInMaintenance()) { + if (!avoid.shouldAvoid(pool)) { + long exstPoolDcId = pool.getDataCenterId(); + + long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; + long exstPoolClusterId = pool.getClusterId() != null ? 
pool.getClusterId() : -1; + if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId + && plan.getClusterId() == exstPoolClusterId) { + s_logger.debug("Planner need not allocate a pool for this volume since its READY"); + suitablePools.add(pool); + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { + readyAndReusedVolumes.add(toBeCreated); + } + continue; + } else { + s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); + } + } else { + s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); + } + } else { + s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("We need to allocate new storagepool for this volume"); + } + if (!isRootAdmin(plan.getReservationContext())) { + if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); + s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); + } + // Cannot find suitable storage pools under this cluster for + // this volume since allocation_state is disabled. + // - remove any suitable pools found for other volumes. + // All volumes should get suitable pools under this cluster; + // else we cant use this cluster. + suitableVolumeStoragePools.clear(); + break; + } + } + + s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); + + DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); + DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); + + boolean useLocalStorage = false; + if (vmProfile.getType() != VirtualMachine.Type.User) { + String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key()); + if (ssvmUseLocalStorage.equalsIgnoreCase("true")) { + useLocalStorage = true; + } + } else { + useLocalStorage = diskOffering.getUseLocalStorage(); + + // TODO: this is a hacking fix for the problem of deploy + // ISO-based VM on local storage + // when deploying VM based on ISO, we have a service offering + // and an additional disk offering, use-local storage flag is + // actually + // saved in service offering, overrde the flag from service + // offering when it is a ROOT disk + if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { + if (toBeCreated.getVolumeType() == Volume.Type.ROOT) + useLocalStorage = true; + } + } + diskProfile.setUseLocalStorage(useLocalStorage); + + boolean foundPotentialPools = false; + for (StoragePoolAllocator allocator : _storagePoolAllocators) { + final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, + returnUpTo); + if (suitablePools != null && !suitablePools.isEmpty()) { + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + foundPotentialPools = true; + break; + } + } + + if (!foundPotentialPools) { + s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + + plan.getClusterId()); + // No suitable storage pools found under this cluster for this + // volume. - remove any suitable pools found for other volumes. 
+ // All volumes should get suitable pools under this cluster; + // else we cant use this cluster. + suitableVolumeStoragePools.clear(); + break; + } + } + + if (suitableVolumeStoragePools.isEmpty()) { + s_logger.debug("No suitable pools found"); + } + + return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); + } + + private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) { + // Check if the zone exists in the system + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { + s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + return false; + } + + Pod pod = _podDao.findById(podId); + if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { + s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + return false; + } + + Cluster cluster = _clusterDao.findById(clusterId); + if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { + s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); + return false; + } + + return true; + } + + private boolean isRootAdmin(ReservationContext reservationContext) { + if (reservationContext != null) { + if (reservationContext.getAccount() != null) { + return _accountMgr.isRootAdmin(reservationContext.getAccount().getType()); + } else { + return false; + } + } + return false; + } } diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index e8504a991c1..caf8c6e92db 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -49,6 +49,7 @@ import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -81,7 +82,7 @@ import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; @Local(value=DeploymentPlanner.class) -public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { +public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner { private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class); @Inject protected HostDao _hostDao; @Inject protected DataCenterDao _dcDao; @@ -103,28 +104,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Inject DataStoreManager dataStoreMgr; @Inject protected ClusterDetailsDao _clusterDetailsDao; - protected List _storagePoolAllocators; - public List getStoragePoolAllocators() { - return _storagePoolAllocators; - } - public void setStoragePoolAllocators( - List _storagePoolAllocators) { - this._storagePoolAllocators = _storagePoolAllocators; - } - - protected List _hostAllocators; - public List getHostAllocators() { - return _hostAllocators; - } - public void setHostAllocators(List _hostAllocators) { - this._hostAllocators = _hostAllocators; - } protected String _allocationAlgorithm = "random"; + protected String _globalDeploymentPlanner = "FirstFitPlanner"; @Override - public DeployDestination plan(VirtualMachineProfile vmProfile, + public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException 
{ VirtualMachine vm = vmProfile.getVirtualMachine(); @@ -138,136 +124,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return null; } - ServiceOffering offering = vmProfile.getServiceOffering(); - int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - - - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm); - - s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + - ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested); - - s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No")); - } - - String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); - - if(plan.getHostId() != null && haVmTag == null){ - Long hostIdSpecified = plan.getHostId(); - if (s_logger.isDebugEnabled()){ - s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " - + hostIdSpecified); - } - HostVO host = _hostDao.findById(hostIdSpecified); - if (host == null) { - s_logger.debug("The specified host cannot be found"); - } else if (avoid.shouldAvoid(host)) { - s_logger.debug("The specified host is in avoid set"); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId()); - } - - // search for storage under the zone, pod, cluster of the host. - DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), - host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext()); - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, - lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - // choose the potential pool for this VM for this host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools); - if (potentialResources != null) { - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since - // we don't have to prepare this volume. 
- for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } - } - s_logger.debug("Cannnot deploy to specified host, returning."); - return null; - } - - if (vm.getLastHostId() != null && haVmTag == null) { - s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId()); - - HostVO host = _hostDao.findById(vm.getLastHostId()); - if(host == null){ - s_logger.debug("The last host of this VM cannot be found"); - }else if(avoid.shouldAvoid(host)){ - s_logger.debug("The last host of this VM is in avoid set"); - }else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){ - s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - }else{ - if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { - long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){ - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId()); - //search for storage under the zone, pod, cluster of the last host. - DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - //choose the potential pool for this VM for this host - if(!suitableVolumeStoragePools.isEmpty()){ - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools); - if(potentialResources != null){ - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since we don't have to prepare this volume. 
- for(Volume vol : readyAndReusedVolumes){ - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: "+ dest); - return dest; - } - } - }else{ - s_logger.debug("The last host of this VM does not have enough capacity"); - } - }else{ - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "+host.getStatus().name() + ", host resource state is: "+host.getResourceState()); - } - } - s_logger.debug("Cannot choose the last host to deploy this VM "); - } - - List clusterList = new ArrayList(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); s_logger.debug("Searching resources only under specified Cluster: "+ clusterIdSpecified); ClusterVO cluster = _clusterDao.findById(plan.getClusterId()); if (cluster != null ){ - clusterList.add(clusterIdSpecified); - return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); + if (avoid.shouldAvoid(cluster)) { + s_logger.debug("The specified cluster is in avoid set, returning."); + } else { + clusterList.add(clusterIdSpecified); + removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); + } + return clusterList; }else{ s_logger.debug("The specified cluster cannot be found, returning."); avoid.addCluster(plan.getClusterId()); @@ -280,11 +149,15 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { HostPodVO pod = _podDao.findById(podIdSpecified); if (pod != null) { - DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); - if(dest == null){ - avoid.addPod(plan.getPodId()); + if (avoid.shouldAvoid(pod)) { + s_logger.debug("The specified pod is in avoid set, returning."); + } else { + clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); + if (clusterList == null) { + avoid.addPod(plan.getPodId()); + } } - return dest; + return clusterList; } else { s_logger.debug("The specified Pod cannot be found, returning."); avoid.addPod(plan.getPodId()); @@ -305,7 +178,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - private DeployDestination scanPodsForDestination(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ + private List scanPodsForDestination(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ ServiceOffering offering = vmProfile.getServiceOffering(); int requiredCpu = offering.getCpu() * offering.getSpeed(); @@ -341,20 +214,24 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if(!podsWithCapacity.isEmpty()){ prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan); + if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("No Pods found for destination, returning."); + } + return null; + } + List clusterList = new ArrayList(); //loop over pods for(Long podId : prioritizedPodIds){ s_logger.debug("Checking resources under Pod: "+podId); - DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid); - if(dest != null){ - return dest; + List clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, + avoid); + if (clustersUnderPod != null) { + clusterList.addAll(clustersUnderPod); } - avoid.addPod(podId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No Pods found for 
destination, returning."); - } - return null; + return clusterList; }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); @@ -363,7 +240,69 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } } - private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ + private Map getCapacityThresholdMap() { + // Lets build this real time so that the admin wont have to restart MS + // if he changes these values + Map disableThresholdMap = new HashMap(); + + String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key()); + float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F); + disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold); + + String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key()); + float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F); + disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold); + + return disableThresholdMap; + } + + private List getCapacitiesForCheckingThreshold() { + List capacityList = new ArrayList(); + capacityList.add(Capacity.CAPACITY_TYPE_CPU); + capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); + return capacityList; + } + + private void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, + VirtualMachineProfile vmProfile, DeploymentPlan plan) { + + List capacityList = getCapacitiesForCheckingThreshold(); + List clustersCrossingThreshold = new ArrayList(); + + ServiceOffering offering = vmProfile.getServiceOffering(); + int cpu_requested = offering.getCpu() * offering.getSpeed(); + long ram_requested = offering.getRamSize() * 1024L * 1024L; + + // For each capacity get the cluster list crossing the threshold and + // remove it from the clusterList that will be used for vm allocation. 
+ for (short capacity : capacityList) { + + if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0) { + return; + } + if (capacity == Capacity.CAPACITY_TYPE_CPU) { + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, + plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested); + } else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) { + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, + plan.getDataCenterId(), Config.MemoryCapacityDisableThreshold.key(), ram_requested); + } + + if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0) { + // addToAvoid Set + avoid.addClusterList(clustersCrossingThreshold); + // Remove clusters crossing disabled threshold + clusterListForVmAllocation.removeAll(clustersCrossingThreshold); + + s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + + " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters"); + } + + } + } + + private List scanClustersForDestinationInZoneOrPod(long id, boolean isZone, + VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) { VirtualMachine vm = vmProfile.getVirtualMachine(); ServiceOffering offering = vmProfile.getServiceOffering(); @@ -396,6 +335,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { prioritizedClusterIds.removeAll(disabledClusters); } } + + removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan); + }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No clusters found having a host with enough capacity, returning."); @@ -404,7 +346,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } if(!prioritizedClusterIds.isEmpty()){ List clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan); - return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); + return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); @@ -452,114 +394,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return disabledPods; } - private List getCapacitiesForCheckingThreshold(){ - List capacityList = new ArrayList(); - capacityList.add(Capacity.CAPACITY_TYPE_CPU); - capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); - return capacityList; - } - - private void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile vmProfile, DeploymentPlan plan){ - - List capacityList = getCapacitiesForCheckingThreshold(); - List clustersCrossingThreshold = new ArrayList(); - - ServiceOffering offering = vmProfile.getServiceOffering(); - int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - - // For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation. 
- for(short capacity : capacityList){ - - if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ - return; - } - if (capacity == Capacity.CAPACITY_TYPE_CPU) { - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested); - } - else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) { - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), - Config.MemoryCapacityDisableThreshold.key(), ram_requested ); - } - - - if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ - // addToAvoid Set - avoid.addClusterList(clustersCrossingThreshold); - // Remove clusters crossing disabled threshold - clusterListForVmAllocation.removeAll(clustersCrossingThreshold); - - s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + - " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters"); - } - - } - } - - private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, - DeploymentPlan plan, ExcludeList avoid, DataCenter dc){ - - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List to consider: " + clusterList); - } - - removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); - - for(Long clusterId : clusterList){ - Cluster clusterVO = _clusterDao.findById(clusterId); - - if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { - s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); - avoid.addCluster(clusterVO.getId()); - continue; - } - - s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId()); - //search for resources(hosts and storage) under this zone, pod, cluster. - DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); - - //find suitable hosts under this cluster, need as many hosts as we get. - List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - //if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM - if(suitableHosts != null && !suitableHosts.isEmpty()){ - if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { - Pod pod = _podDao.findById(clusterVO.getPodId()); - DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); - return dest; - } - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - //choose the potential host and pool for the VM - if(!suitableVolumeStoragePools.isEmpty()){ - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools); - - if(potentialResources != null){ - Pod pod = _podDao.findById(clusterVO.getPodId()); - Host host = _hostDao.findById(potentialResources.first().getId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since we don't have to prepare this volume. 
- for(Volume vol : readyAndReusedVolumes){ - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap ); - s_logger.debug("Returning Deployment Destination: "+ dest); - return dest; - } - }else{ - s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId); - } - }else{ - s_logger.debug("No suitable hosts found under this Cluster: "+clusterId); - } - avoid.addCluster(clusterVO.getId()); - } - s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); - return null; - } - protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){ //look at the aggregate available cpu and ram per cluster @@ -630,215 +464,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - - protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools){ - s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); - - boolean hostCanAccessPool = false; - boolean haveEnoughSpace = false; - Map storage = new HashMap(); - TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { - @Override - public int compare(Volume v1, Volume v2) { - if(v1.getSize() < v2.getSize()) - return 1; - else - return -1; - } - }); - volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet()); - boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; - for(Host potentialHost : suitableHosts){ - Map> volumeAllocationMap = new HashMap>(); - for(Volume vol : volumesOrderBySizeDesc){ - haveEnoughSpace = false; - s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType()); - List volumePoolList = suitableVolumeStoragePools.get(vol); - hostCanAccessPool = false; - for(StoragePool potentialSPool : volumePoolList){ - if(hostCanAccessSPool(potentialHost, potentialSPool)){ - hostCanAccessPool = true; - if(multipleVolume){ - List requestVolumes = null; - if(volumeAllocationMap.containsKey(potentialSPool)) - requestVolumes = volumeAllocationMap.get(potentialSPool); - else - requestVolumes = new ArrayList(); - requestVolumes.add(vol); - - if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) - continue; - volumeAllocationMap.put(potentialSPool,requestVolumes); - } - storage.put(vol, potentialSPool); - haveEnoughSpace = true; - break; - } - } - if(!hostCanAccessPool){ - break; - } - if(!haveEnoughSpace) { - s_logger.warn("insufficient capacity to allocate all volumes"); - break; - } - } - if(hostCanAccessPool && haveEnoughSpace){ - s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM"); - return new Pair>(potentialHost, storage); - } - } - s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); - return null; - } - - protected boolean hostCanAccessSPool(Host host, StoragePool pool){ - boolean hostCanAccessSPool = false; - - StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); - if(hostPoolLinkage != null){ - hostCanAccessSPool = true; - } - - s_logger.debug("Host: "+ host.getId() + (hostCanAccessSPool ?" 
can" : " cannot") + " access pool: "+ pool.getId()); - return hostCanAccessSPool; - } - - protected List findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){ - List suitableHosts = new ArrayList(); - for(HostAllocator allocator : _hostAllocators) { - suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); - if (suitableHosts != null && !suitableHosts.isEmpty()) { - break; - } - } - - if(suitableHosts.isEmpty()){ - s_logger.debug("No suitable hosts found"); - } - return suitableHosts; - } - - protected Pair>, List> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){ - List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); - Map> suitableVolumeStoragePools = new HashMap>(); - List readyAndReusedVolumes = new ArrayList(); - - //for each volume find list of suitable storage pools by calling the allocators - for (VolumeVO toBeCreated : volumesTobeCreated) { - s_logger.debug("Checking suitable pools for volume (Id, Type): ("+toBeCreated.getId() +"," +toBeCreated.getVolumeType().name() + ")"); - - //If the plan specifies a poolId, it means that this VM's ROOT volume is ready and the pool should be reused. - //In this case, also check if rest of the volumes are ready and can be reused. - if(plan.getPoolId() != null){ - s_logger.debug("Volume has pool(" + plan.getPoolId() + ") already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId()); - List suitablePools = new ArrayList(); - StoragePool pool = null; - if(toBeCreated.getPoolId() != null){ - s_logger.debug("finding pool by id '" + toBeCreated.getPoolId() + "'"); - pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); - }else{ - s_logger.debug("finding pool by id '" + plan.getPoolId() + "'"); - pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); - } - - if(pool != null){ - if(!pool.isInMaintenance()){ - if(!avoid.shouldAvoid(pool)){ - long exstPoolDcId = pool.getDataCenterId(); - - long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; - long exstPoolClusterId = pool.getClusterId() != null ? 
pool.getClusterId() : -1; - if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){ - s_logger.debug("Planner need not allocate a pool for this volume since its READY"); - suitablePools.add(pool); - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { - readyAndReusedVolumes.add(toBeCreated); - } - continue; - }else{ - s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); - } - }else{ - s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); - } - }else{ - s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); - } - }else{ - s_logger.debug("Unable to find pool by provided id"); - } - } - - if(s_logger.isDebugEnabled()){ - s_logger.debug("We need to allocate new storagepool for this volume"); - } - if(!isRootAdmin(plan.getReservationContext())){ - if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){ - if(s_logger.isDebugEnabled()){ - s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); - s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); - } - //Cannot find suitable storage pools under this cluster for this volume since allocation_state is disabled. - //- remove any suitable pools found for other volumes. - //All volumes should get suitable pools under this cluster; else we cant use this cluster. - suitableVolumeStoragePools.clear(); - break; - } - } - - s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); - - DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); - DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); - - boolean useLocalStorage = false; - if (vmProfile.getType() != VirtualMachine.Type.User) { - String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key()); - if (ssvmUseLocalStorage.equalsIgnoreCase("true")) { - useLocalStorage = true; - } - } else { - useLocalStorage = diskOffering.getUseLocalStorage(); - - // TODO: this is a hacking fix for the problem of deploy ISO-based VM on local storage - // when deploying VM based on ISO, we have a service offering and an additional disk offering, use-local storage flag is actually - // saved in service offering, overrde the flag from service offering when it is a ROOT disk - if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { - if(toBeCreated.getVolumeType() == Volume.Type.ROOT) - useLocalStorage = true; - } - } - diskProfile.setUseLocalStorage(useLocalStorage); - - boolean foundPotentialPools = false; - for(StoragePoolAllocator allocator : _storagePoolAllocators) { - final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); - if (suitablePools != null && !suitablePools.isEmpty()) { - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - foundPotentialPools = true; - break; - } - } - - if(!foundPotentialPools){ - s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId()); - //No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes. 
- //All volumes should get suitable pools under this cluster; else we cant use this cluster. - suitableVolumeStoragePools.clear(); - break; - } - } - - if(suitableVolumeStoragePools.isEmpty()){ - s_logger.debug("No suitable pools found"); - } - - return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); - } - - private boolean isRootAdmin(ReservationContext reservationContext) { if(reservationContext != null){ if(reservationContext.getAccount() != null){ @@ -859,10 +484,17 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Override public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - if(vm.getHypervisorType() != HypervisorType.BareMetal){ - //check the allocation strategy - if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) { - return true; + // check what the ServiceOffering says. If null, check the global config + ServiceOffering offering = vm.getServiceOffering(); + if (vm.getHypervisorType() != HypervisorType.BareMetal) { + if (offering != null && offering.getDeploymentPlanner() != null) { + if (offering.getDeploymentPlanner().equals(this.getName())) { + return true; + } + } else { + if (_globalDeploymentPlanner != null && _globalDeploymentPlanner.equals(this._name)) { + return true; + } } } return false; @@ -872,29 +504,20 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key()); + _globalDeploymentPlanner = _configDao.getValue(Config.VmDeploymentPlanner.key()); return true; } - private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){ - // Check if the zone exists in the system - DataCenterVO zone = _dcDao.findById(zoneId); - if(zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()){ - s_logger.info("Zone is currently disabled, cannot allocate to this zone: "+ zoneId); - return false; - } - Pod pod = _podDao.findById(podId); - if(pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()){ - s_logger.info("Pod is currently disabled, cannot allocate to this pod: "+ podId); - return false; - } + @Override + public DeployDestination plan(VirtualMachineProfile vm, DeploymentPlan plan, + ExcludeList avoid) throws InsufficientServerCapacityException { + // TODO Auto-generated method stub + return null; + } - Cluster cluster = _clusterDao.findById(clusterId); - if(cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()){ - s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: "+ clusterId); - return false; - } - - return true; + @Override + public PlannerResourceUsage getResourceUsage() { + return PlannerResourceUsage.Shared; } } diff --git a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java deleted file mode 100755 index ce494051376..00000000000 --- a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java +++ /dev/null @@ -1,54 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy; - -import javax.ejb.Local; - -import org.apache.log4j.Logger; - -import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.vm.UserVmVO; - -@Local(value = {DeployPlannerSelector.class}) -public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { - private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class); - - @Override - public String selectPlanner(UserVmVO vm) { - if (vm.getHypervisorType() != HypervisorType.BareMetal) { - //check the allocation strategy - if (_allocationAlgorithm != null) { - if (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) - || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString())) { - return "FirstFitPlanner"; - } else if (_allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) { - return "UserDispersingPlanner"; - } else if (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) - || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) { - return "UserConcentratedPodPlanner"; - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("The allocation algorithm is null, cannot select the planner"); - } - } - } - - return null; - } -} diff --git a/server/src/com/cloud/deploy/PlannerHostReservationVO.java b/server/src/com/cloud/deploy/PlannerHostReservationVO.java new file mode 100644 index 00000000000..cf5f03177f7 --- /dev/null +++ b/server/src/com/cloud/deploy/PlannerHostReservationVO.java @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
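With HypervisorVmPlannerSelector deleted above, planner choice no longer goes through a separate selector: each planner's canHandle() now answers for itself, preferring the deployment planner named on the VM's service offering and falling back to the global VmDeploymentPlanner setting when the offering does not name one (BareMetal VMs are excluded). The following is a minimal standalone sketch of that precedence, outside the patch; the PlannerSelectionSketch class and its parameter names are illustrative only.

// Illustrative sketch, not part of the patch: the selection precedence that
// FirstFitPlanner.canHandle() implements above.
public class PlannerSelectionSketch {

    static boolean canHandle(String thisPlanner, String offeringPlanner, String globalPlanner, boolean bareMetal) {
        if (bareMetal) {
            return false;                                   // BareMetal VMs are not handled here
        }
        if (offeringPlanner != null) {
            return offeringPlanner.equals(thisPlanner);     // per-service-offering planner wins
        }
        return globalPlanner != null && globalPlanner.equals(thisPlanner); // else the global setting decides
    }

    public static void main(String[] args) {
        // global setting selects this planner when the offering is silent
        System.out.println(canHandle("FirstFitPlanner", null, "FirstFitPlanner", false));                    // true
        // an offering-level planner overrides the global setting
        System.out.println(canHandle("FirstFitPlanner", "UserDispersingPlanner", "FirstFitPlanner", false)); // false
    }
}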
+package com.cloud.deploy; + + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import org.apache.cloudstack.api.InternalIdentity; + +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; + +@Entity +@Table(name = "op_host_planner_reservation") +public class PlannerHostReservationVO implements InternalIdentity { + @Id + @GeneratedValue(strategy=GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="host_id") + private Long hostId; + + @Column(name="data_center_id") + private Long dataCenterId; + + @Column(name="pod_id") + private Long podId; + + @Column(name="cluster_id") + private Long clusterId; + + @Column(name = "resource_usage") + @Enumerated(EnumType.STRING) + private PlannerResourceUsage resourceUsage; + + public PlannerHostReservationVO() { + } + + public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId) { + this.hostId = hostId; + this.dataCenterId = dataCenterId; + this.podId = podId; + this.clusterId = clusterId; + } + + public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId, + PlannerResourceUsage resourceUsage) { + this.hostId = hostId; + this.dataCenterId = dataCenterId; + this.podId = podId; + this.clusterId = clusterId; + this.resourceUsage = resourceUsage; + } + + @Override + public long getId() { + return id; + } + + public Long getHostId() { + return hostId; + } + + public void setHostId(Long hostId) { + this.hostId = hostId; + } + + public Long getDataCenterId() { + return dataCenterId; + } + public void setDataCenterId(Long dataCenterId) { + this.dataCenterId = dataCenterId; + } + + public Long getPodId() { + return podId; + } + public void setPodId(long podId) { + this.podId = new Long(podId); + } + + public Long getClusterId() { + return clusterId; + } + public void setClusterId(long clusterId) { + this.clusterId = new Long(clusterId); + } + + public PlannerResourceUsage getResourceUsage() { + return resourceUsage; + } + + public void setResourceUsage(PlannerResourceUsage resourceType) { + this.resourceUsage = resourceType; + } + +} diff --git a/server/src/com/cloud/deploy/DeployPlannerSelector.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java old mode 100755 new mode 100644 similarity index 67% rename from server/src/com/cloud/deploy/DeployPlannerSelector.java rename to server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java index 062b492d8fc..69118f13896 --- a/server/src/com/cloud/deploy/DeployPlannerSelector.java +++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java @@ -1,24 +1,30 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy; - -import com.cloud.utils.component.Adapter; -import com.cloud.vm.UserVmVO; - -public interface DeployPlannerSelector extends Adapter { - String selectPlanner(UserVmVO vm); -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy.dao; + +import java.util.List; + +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.utils.db.GenericDao; + +public interface PlannerHostReservationDao extends GenericDao { + + PlannerHostReservationVO findByHostId(long hostId); + + List listAllReservedHosts(); + +} diff --git a/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java new file mode 100644 index 00000000000..41e09647d7e --- /dev/null +++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
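Before the DAO implementation that follows, here is a hedged sketch of how the PlannerHostReservationDao just introduced might be used together with PlannerHostReservationVO to track which hosts are reserved for a planner resource-usage type. It assumes the persist()/update() operations inherited from GenericDao; the PlannerReservationSketch class itself is hypothetical and not part of the patch.

// Illustrative sketch, not part of the patch.
import java.util.List;

import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
import com.cloud.deploy.PlannerHostReservationVO;
import com.cloud.deploy.dao.PlannerHostReservationDao;

public class PlannerReservationSketch {
    private final PlannerHostReservationDao reservationDao;

    public PlannerReservationSketch(PlannerHostReservationDao reservationDao) {
        this.reservationDao = reservationDao;   // normally injected into a manager bean
    }

    public void markHostUsage(long hostId, long dcId, Long podId, Long clusterId, PlannerResourceUsage usage) {
        PlannerHostReservationVO reservation = reservationDao.findByHostId(hostId);
        if (reservation == null) {
            // no row yet for this host: create one carrying the usage type
            reservation = new PlannerHostReservationVO(hostId, dcId, podId, clusterId, usage);
            reservationDao.persist(reservation);
        } else {
            // host already tracked: update its resource_usage value
            reservation.setResourceUsage(usage);
            reservationDao.update(reservation.getId(), reservation);
        }
    }

    public List<PlannerHostReservationVO> reservedHosts() {
        // rows whose resource_usage is non-null, matching the NNULL search in the DAO impl below
        return reservationDao.listAllReservedHosts();
    }
}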
+package com.cloud.deploy.dao; + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.ejb.Local; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Local(value = { PlannerHostReservationDao.class }) +public class PlannerHostReservationDaoImpl extends GenericDaoBase implements + PlannerHostReservationDao { + + private SearchBuilder _hostIdSearch; + private SearchBuilder _reservedHostSearch; + + public PlannerHostReservationDaoImpl() { + + } + + @PostConstruct + protected void init() { + _hostIdSearch = createSearchBuilder(); + _hostIdSearch.and("hostId", _hostIdSearch.entity().getHostId(), SearchCriteria.Op.EQ); + _hostIdSearch.done(); + + _reservedHostSearch = createSearchBuilder(); + _reservedHostSearch.and("usage", _reservedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.NNULL); + _reservedHostSearch.done(); + } + + @Override + public PlannerHostReservationVO findByHostId(long hostId) { + SearchCriteria sc = _hostIdSearch.create(); + sc.setParameters("hostId", hostId); + return findOneBy(sc); + } + + @Override + public List listAllReservedHosts() { + SearchCriteria sc = _reservedHostSearch.create(); + return listBy(sc); + } + +} diff --git a/server/src/com/cloud/network/NetworkManager.java b/server/src/com/cloud/network/NetworkManager.java index 15bc61c4206..08198ee40e6 100755 --- a/server/src/com/cloud/network/NetworkManager.java +++ b/server/src/com/cloud/network/NetworkManager.java @@ -130,7 +130,8 @@ public interface NetworkManager { Network createGuestNetwork(long networkOfferingId, String name, String displayText, String gateway, String cidr, String vlanId, String networkDomain, Account owner, Long domainId, PhysicalNetwork physicalNetwork, - long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String ip6Gateway, String ip6Cidr, Boolean displayNetworkEnabled) + long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String ip6Gateway, String ip6Cidr, + Boolean displayNetworkEnabled, String isolatedPvlan) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException; /** diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 88347240959..bf79596b67f 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -283,6 +283,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Long guestNetworkId, boolean sourceNat, boolean assign, String requestedIp, boolean isSystem, Long vpcId) throws InsufficientAddressCapacityException { StringBuilder errorMessage = new StringBuilder("Unable to get ip adress in "); + boolean fetchFromDedicatedRange = false; + List dedicatedVlanDbIds = new ArrayList(); + List nonDedicatedVlanDbIds = new ArrayList(); + Transaction txn = Transaction.currentTxn(); txn.start(); SearchCriteria sc = null; @@ -295,9 +299,37 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L errorMessage.append(" zone id=" + dcId); } - if ( vlanDbIds != null && !vlanDbIds.isEmpty() ) { - sc.setParameters("vlanId", vlanDbIds.toArray()); - errorMessage.append(", vlanId id=" + vlanDbIds.toArray()); + // If owner has dedicated Public IP ranges, fetch IP from the dedicated range + // Otherwise fetch IP from the system pool + List maps = 
_accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId()); + for (AccountVlanMapVO map : maps) { + if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId())) + dedicatedVlanDbIds.add(map.getVlanDbId()); + } + List nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(dcId); + for (VlanVO nonDedicatedVlan : nonDedicatedVlans) { + if (vlanDbIds == null || vlanDbIds.contains(nonDedicatedVlan.getId())) + nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId()); + } + if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) { + fetchFromDedicatedRange = true; + sc.setParameters("vlanId", dedicatedVlanDbIds.toArray()); + errorMessage.append(", vlanId id=" + dedicatedVlanDbIds.toArray()); + } else if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) { + sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray()); + errorMessage.append(", vlanId id=" + nonDedicatedVlanDbIds.toArray()); + } else { + if (podId != null) { + InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException + ("Insufficient address capacity", Pod.class, podId); + ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid()); + throw ex; + } + s_logger.warn(errorMessage.toString()); + InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException + ("Insufficient address capacity", DataCenter.class, dcId); + ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); + throw ex; } sc.setParameters("dc", dcId); @@ -320,6 +352,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L List addrs = _ipAddressDao.lockRows(sc, filter, true); + // If all the dedicated IPs of the owner are in use fetch an IP from the system pool + if (addrs.size() == 0 && fetchFromDedicatedRange) { + if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) { + fetchFromDedicatedRange = false; + sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray()); + errorMessage.append(", vlanId id=" + nonDedicatedVlanDbIds.toArray()); + addrs = _ipAddressDao.lockRows(sc, filter, true); + } + } + if (addrs.size() == 0) { if (podId != null) { InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException @@ -337,6 +379,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L assert (addrs.size() == 1) : "Return size is incorrect: " + addrs.size(); + if (!fetchFromDedicatedRange) { + // Check that the maximum number of public IPs for the given accountId will not be exceeded + try { + _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); + } catch (ResourceAllocationException ex) { + s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); + throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); + } + } + IPAddressVO addr = addrs.get(0); addr.setSourceNat(sourceNat); addr.setAllocatedTime(new Date()); @@ -441,14 +493,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L long ownerId = owner.getId(); - // Check that the maximum number of public IPs for the given accountId will not be exceeded - try { - _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); - } catch (ResourceAllocationException ex) { - s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); - throw new AccountLimitException("Maximum number of public IP addresses for account: " + 
owner.getAccountName() + " has been exceeded."); - } - PublicIp ip = null; Transaction txn = Transaction.currentTxn(); try { @@ -465,15 +509,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L s_logger.debug("lock account " + ownerId + " is acquired"); } - // If account has Account specific ip ranges, try to allocate ip from there - List vlanIds = new ArrayList(); - List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(ownerId); - if (maps != null && !maps.isEmpty()) { - vlanIds.add(maps.get(0).getVlanDbId()); - } - - - ip = fetchNewPublicIp(dcId, null, vlanIds, owner, VlanType.VirtualNetwork, guestNtwkId, + ip = fetchNewPublicIp(dcId, null, null, owner, VlanType.VirtualNetwork, guestNtwkId, isSourceNat, false, null, false, vpcId); IPAddressVO publicIp = ip.ip(); @@ -609,9 +645,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L VlanType vlanType = VlanType.VirtualNetwork; boolean assign = false; - boolean allocateFromDedicatedRange = false; - List dedicatedVlanDbIds = new ArrayList(); - List nonDedicatedVlanDbIds = new ArrayList(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { // zone is of type DataCenter. See DataCenterVO.java. @@ -641,39 +674,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L txn.start(); - // If account has dedicated Public IP ranges, allocate IP from the dedicated range - List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(ipOwner.getId()); - for (AccountVlanMapVO map : maps) { - dedicatedVlanDbIds.add(map.getVlanDbId()); - } - if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) { - allocateFromDedicatedRange = true; - } - - try { - if (allocateFromDedicatedRange) { - ip = fetchNewPublicIp(zone.getId(), null, dedicatedVlanDbIds, ipOwner, vlanType, null, - false, assign, null, isSystem, null); - } - } catch(InsufficientAddressCapacityException e) { - s_logger.warn("All IPs dedicated to account " + ipOwner.getId() + " has been acquired." 
+ - " Now acquiring from the system pool"); - txn.close(); - allocateFromDedicatedRange = false; - } - - if (!allocateFromDedicatedRange) { - // Check that the maximum number of public IPs for the given - // accountId will not be exceeded - _resourceLimitMgr.checkResourceLimit(accountToLock, ResourceType.public_ip); - - List nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(zone.getId()); - for (VlanVO nonDedicatedVlan : nonDedicatedVlans) { - nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId()); - } - ip = fetchNewPublicIp(zone.getId(), null, nonDedicatedVlanDbIds, ipOwner, vlanType, null, false, assign, null, - isSystem, null); - } + ip = fetchNewPublicIp(zone.getId(), null, null, ipOwner, vlanType, null, false, assign, null, + isSystem, null); if (ip == null) { InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException @@ -1899,7 +1901,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L @DB public Network createGuestNetwork(long networkOfferingId, String name, String displayText, String gateway, String cidr, String vlanId, String networkDomain, Account owner, Long domainId, - PhysicalNetwork pNtwk, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String ip6Gateway, String ip6Cidr, Boolean isDisplayNetworkEnabled) + PhysicalNetwork pNtwk, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String ip6Gateway, String ip6Cidr, + Boolean isDisplayNetworkEnabled, String isolatedPvlan) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(networkOfferingId); @@ -1989,6 +1992,9 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (ipv6) { throw new InvalidParameterValueException("IPv6 is not supported with security group!"); } + if (isolatedPvlan != null) { + throw new InvalidParameterValueException("Isolated Private VLAN is not supported with security group!"); + } // Only Account specific Isolated network with sourceNat service disabled are allowed in security group // enabled zone if ( ntwkOff.getGuestType() != GuestType.Shared ){ @@ -2148,13 +2154,20 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } if (vlanId != null) { - userNetwork.setBroadcastUri(URI.create("vlan://" + vlanId)); - userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - } else { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); - } + if (isolatedPvlan == null) { + userNetwork.setBroadcastUri(URI.create("vlan://" + vlanId)); + if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { + userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); + } else { + userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); + } + } else { + if (vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { + throw new InvalidParameterValueException("Cannot support pvlan with untagged primary vlan!"); + } + userNetwork.setBroadcastUri(NetUtils.generateUriForPvlan(vlanId, isolatedPvlan)); + userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan); + } } List networks = setupNetwork(owner, ntwkOff, userNetwork, plan, name, displayText, true, domainId, @@ -2757,7 +2770,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L guestNetwork = createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network" 
, owner.getAccountName() + "-network", null, null, null, null, owner, null, physicalNetwork, zoneId, ACLType.Account, - null, null, null, null, true); + null, null, null, null, true, null); if (guestNetwork == null) { s_logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); throw new CloudRuntimeException("Failed to create a Guest Isolated Networks with SourceNAT " + @@ -2992,6 +3005,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Random _rand = new Random(System.currentTimeMillis()); + @Override public List listVmNics(Long vmId, Long nicId) { List result = null; if (nicId == null) { @@ -3002,6 +3016,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return result; } + @Override public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp) throws InsufficientAddressCapacityException { String ipaddr = null; @@ -3633,8 +3648,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L nic.setGateway(ip.getGateway()); nic.setNetmask(ip.getNetmask()); nic.setIsolationUri(IsolationType.Vlan.toUri(ip.getVlanTag())); - nic.setBroadcastType(BroadcastDomainType.Vlan); - nic.setBroadcastUri(BroadcastDomainType.Vlan.toUri(ip.getVlanTag())); + //nic.setBroadcastType(BroadcastDomainType.Vlan); + //nic.setBroadcastUri(BroadcastDomainType.Vlan.toUri(ip.getVlanTag())); + nic.setBroadcastType(network.getBroadcastDomainType()); + nic.setBroadcastUri(network.getBroadcastUri()); nic.setFormat(AddressFormat.Ip4); nic.setReservationId(String.valueOf(ip.getVlanTag())); nic.setMacAddress(ip.getMacAddress()); diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 5f51a30d389..ed9a8c4ece7 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -952,6 +952,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { String ip6Cidr = cmd.getIp6Cidr(); Boolean displayNetwork = cmd.getDisplayNetwork(); Long aclId = cmd.getAclId(); + String isolatedPvlan = cmd.getIsolatedPvlan(); // Validate network offering NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(networkOfferingId); @@ -1143,6 +1144,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } + if (isolatedPvlan != null && (zone.getNetworkType() != NetworkType.Advanced || ntwkOff.getGuestType() != Network.GuestType.Shared)) { + throw new InvalidParameterValueException("Can only support create Private VLAN network with advance shared network!"); + } + + if (isolatedPvlan != null && ipv6) { + throw new InvalidParameterValueException("Can only support create Private VLAN network with IPv4!"); + } + // Regular user can create Guest Isolated Source Nat enabled network only if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL && (ntwkOff.getTrafficType() != TrafficType.Guest || ntwkOff.getGuestType() != Network.GuestType.Isolated @@ -1175,6 +1184,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Cannot support IPv6 on network offering with external devices!"); } + if (isolatedPvlan != null && providersConfiguredForExternalNetworking(ntwkProviders)) { + throw new InvalidParameterValueException("Cannot support private vlan on network offering with external devices!"); + } + if (cidr != null 
&& providersConfiguredForExternalNetworking(ntwkProviders)) { if (ntwkOff.getGuestType() == GuestType.Shared && (zone.getNetworkType() == NetworkType.Advanced) && isSharedNetworkOfferingWithServices(networkOfferingId)) { @@ -1251,7 +1264,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Unable to find specified NetworkACL"); } - if(vpcId != acl.getVpcId()){ + if(!vpcId.equals(acl.getVpcId())){ throw new InvalidParameterValueException("ACL: "+aclId+" do not belong to the VPC"); } } @@ -1265,8 +1278,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Internal Lb can be enabled on vpc networks only"); } - network = _networkMgr.createGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, - networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, subdomainAccess, vpcId, ip6Gateway, ip6Cidr, displayNetwork); + network = _networkMgr.createGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, + networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, subdomainAccess, vpcId, + ip6Gateway, ip6Cidr, displayNetwork, isolatedPvlan); } if (caller.getType() == Account.ACCOUNT_TYPE_ADMIN && createVlan) { @@ -3813,8 +3827,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (privateNetwork == null) { //create Guest network privateNetwork = _networkMgr.createGuestNetwork(ntwkOff.getId(), networkName, displayText, gateway, cidr, vlan, - null, owner, null, pNtwk, pNtwk.getDataCenterId(), ACLType.Account, null, null, null, null, true); - + null, owner, null, pNtwk, pNtwk.getDataCenterId(), ACLType.Account, null, vpcId, null, null, true, null); s_logger.debug("Created private network " + privateNetwork); } else { s_logger.debug("Private network already exists: " + privateNetwork); diff --git a/server/src/com/cloud/network/StorageNetworkManagerImpl.java b/server/src/com/cloud/network/StorageNetworkManagerImpl.java index 9a173826576..901e2041490 100755 --- a/server/src/com/cloud/network/StorageNetworkManagerImpl.java +++ b/server/src/com/cloud/network/StorageNetworkManagerImpl.java @@ -315,9 +315,10 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet List ranges = _sNwIpRangeDao.listByPodId(podId); for (StorageNetworkIpRangeVO r : ranges) { try { - r = _sNwIpRangeDao.acquireInLockTable(r.getId()); + Long rangeId = r.getId(); + r = _sNwIpRangeDao.acquireInLockTable(rangeId); if (r == null) { - String msg = "Unable to acquire lock on storage network ip range id=" + r.getId() + ", delete failed"; + String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index d66373b56ba..8021e6f0074 100755 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.command.admin.router.CreateVirtualRouterElement import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd; import org.apache.log4j.Logger; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.ConfigurationManager; import 
com.cloud.configuration.dao.ConfigurationDao; @@ -47,6 +48,7 @@ import com.cloud.network.Network.Capability; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; import com.cloud.network.NetworkModel; +import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.PublicIpAddress; @@ -228,7 +230,6 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl throw new ResourceUnavailableException("Can't find at least one running router!", DataCenter.class, network.getDataCenterId()); } - return true; } diff --git a/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java b/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java index fe9e01f558d..eb1b3dc4b24 100644 --- a/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java +++ b/server/src/com/cloud/network/guru/ExternalGuestNetworkGuru.java @@ -83,7 +83,7 @@ public class ExternalGuestNetworkGuru extends GuestNetworkGuru { if (networkType == NetworkType.Advanced && isMyTrafficType(offering.getTrafficType()) && offering.getGuestType() == Network.GuestType.Isolated - && isMyIsolationMethod(physicalNetwork)) { + && isMyIsolationMethod(physicalNetwork) && !offering.isSystemOnly()) { return true; } else { s_logger.trace("We only take care of Guest networks of type " diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManager.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManager.java index a80c560d2bf..9852c47dc85 100644 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManager.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManager.java @@ -33,6 +33,7 @@ import com.cloud.user.User; import com.cloud.uservm.UserVm; import com.cloud.utils.component.Manager; import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; import com.cloud.vm.VirtualMachineProfile; @@ -112,4 +113,4 @@ public interface VirtualNetworkApplianceManager extends Manager, VirtualNetworkA boolean removeDhcpSupportForSubnet(Network network, List routers) throws ResourceUnavailableException; -} \ No newline at end of file +} diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index b8ad29f6216..f5f99ede6fb 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -34,6 +34,7 @@ import com.cloud.agent.api.GetDomRVersionCmd; import com.cloud.agent.api.ModifySshKeysCommand; import com.cloud.agent.api.NetworkUsageAnswer; import com.cloud.agent.api.NetworkUsageCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.check.CheckSshAnswer; @@ -2223,6 +2224,28 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V return dhcpRange; } + private boolean setupDhcpForPvlan(boolean add, DomainRouterVO router, Nic nic) { + if (!nic.getBroadcastUri().getScheme().equals("pvlan")) { + return false; + } + String op = "add"; + if (!add) { + op = "delete"; + } + Network network = _networkDao.findById(nic.getNetworkId()); + String networkTag = _networkModel.getNetworkTag(router.getHypervisorType(), network); + PvlanSetupCommand cmd = 
PvlanSetupCommand.createDhcpSetup(op, nic.getBroadcastUri(), networkTag, router.getInstanceName(), nic.getMacAddress(), nic.getIp4Address()); + Commands cmds = new Commands(cmd); + // In fact we send command to the host of router, we're not programming router but the host + try { + sendCommandsToRouter(router, cmds); + } catch (AgentUnavailableException e) { + s_logger.warn("Agent Unavailable ", e); + return false; + } + return true; + } + @Override public boolean finalizeDeployment(Commands cmds, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) throws ResourceUnavailableException { @@ -2536,13 +2559,20 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V List guestNetworks = new ArrayList(); List routerNics = _nicDao.listByVmId(profile.getId()); - for (Nic routerNic : routerNics) { - Network network = _networkModel.getNetwork(routerNic.getNetworkId()); + for (Nic nic : routerNics) { + Network network = _networkModel.getNetwork(nic.getNetworkId()); if (network.getTrafficType() == TrafficType.Guest) { guestNetworks.add(network); + if (nic.getBroadcastUri().getScheme().equals("pvlan")) { + result = setupDhcpForPvlan(true, router, nic); + } } } + if (!result) { + return result; + } + answer = cmds.getAnswer("getDomRVersion"); if (answer != null && answer instanceof GetDomRVersionAnswer) { GetDomRVersionAnswer versionAnswer = (GetDomRVersionAnswer)answer; @@ -2568,6 +2598,14 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V VMInstanceVO vm = profile.getVirtualMachine(); DomainRouterVO domR = _routerDao.findById(vm.getId()); processStopOrRebootAnswer(domR, answer); + List routerNics = _nicDao.listByVmId(profile.getId()); + for (Nic nic : routerNics) { + Network network = _networkModel.getNetwork(nic.getNetworkId()); + if (network.getTrafficType() == TrafficType.Guest && nic.getBroadcastUri().getScheme().equals("pvlan")) { + setupDhcpForPvlan(false, domR, nic); + } + } + } } diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index 915e2d8afe9..9992b7ca01e 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -27,24 +27,6 @@ import java.util.TreeSet; import javax.ejb.Local; import javax.inject.Inject; -import com.cloud.network.vpc.NetworkACLItem; -import com.cloud.network.vpc.NetworkACLItemDao; -import com.cloud.network.vpc.NetworkACLItemVO; -import com.cloud.network.vpc.NetworkACLManager; -import com.cloud.network.vpc.PrivateGateway; -import com.cloud.network.vpc.PrivateIpAddress; -import com.cloud.network.vpc.PrivateIpVO; -import com.cloud.network.vpc.StaticRoute; -import com.cloud.network.vpc.StaticRouteProfile; -import com.cloud.network.vpc.Vpc; -import com.cloud.network.vpc.VpcGateway; -import com.cloud.network.vpc.VpcManager; -import com.cloud.network.vpc.VpcVO; -import com.cloud.network.vpc.dao.PrivateIpDao; -import com.cloud.network.vpc.dao.StaticRouteDao; -import com.cloud.network.vpc.dao.VpcDao; -import com.cloud.network.vpc.dao.VpcGatewayDao; -import com.cloud.network.vpc.dao.VpcOfferingDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -108,6 +90,24 @@ import com.cloud.network.dao.Site2SiteCustomerGatewayVO; import com.cloud.network.dao.Site2SiteVpnConnectionDao; import 
com.cloud.network.dao.Site2SiteVpnGatewayDao; import com.cloud.network.dao.Site2SiteVpnGatewayVO; +import com.cloud.network.vpc.NetworkACLItem; +import com.cloud.network.vpc.NetworkACLItemDao; +import com.cloud.network.vpc.NetworkACLItemVO; +import com.cloud.network.vpc.NetworkACLManager; +import com.cloud.network.vpc.PrivateGateway; +import com.cloud.network.vpc.PrivateIpAddress; +import com.cloud.network.vpc.PrivateIpVO; +import com.cloud.network.vpc.StaticRoute; +import com.cloud.network.vpc.StaticRouteProfile; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.VpcGateway; +import com.cloud.network.vpc.VpcManager; +import com.cloud.network.vpc.VpcVO; +import com.cloud.network.vpc.dao.PrivateIpDao; +import com.cloud.network.vpc.dao.StaticRouteDao; +import com.cloud.network.vpc.dao.VpcDao; +import com.cloud.network.vpc.dao.VpcGatewayDao; +import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.network.vpn.Site2SiteVpnManager; import com.cloud.offering.NetworkOffering; import com.cloud.user.Account; @@ -127,7 +127,6 @@ import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VirtualMachineProfile.Param; import com.cloud.vm.dao.VMInstanceDao; - @Component @Local(value = {VpcVirtualNetworkApplianceManager.class, VpcVirtualNetworkApplianceService.class}) public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplianceManagerImpl implements VpcVirtualNetworkApplianceManager{ @@ -339,7 +338,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian DomainRouterVO router = _routerDao.findById(vm.getId()); if (router.getState() == State.Running) { try { - PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType()); Commands cmds = new Commands(OnError.Stop); cmds.addCommand("plugnic", plugNicCmd); @@ -748,7 +747,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian // if (rule.getSourceCidrList() == null && (rule.getPurpose() == Purpose.Firewall || rule.getPurpose() == Purpose.NetworkACL)) { // _firewallDao.loadSourceCidrs((FirewallRuleVO)rule); // } - NetworkACLTO ruleTO = new NetworkACLTO((NetworkACLItemVO)rule, guestVlan, rule.getTrafficType()); + NetworkACLTO ruleTO = new NetworkACLTO(rule, guestVlan, rule.getTrafficType()); rulesTO.add(ruleTO); } } @@ -828,7 +827,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian _routerDao.update(routerVO.getId(), routerVO); } } - PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), router.getInstanceName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), router.getInstanceName(), router.getType()); cmds.addCommand(plugNicCmd); VpcVO vpc = _vpcDao.findById(router.getVpcId()); NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(router.getPrivateIpAddress(), router.getInstanceName(), true, publicNic.getIp4Address(), vpc.getCidr()); @@ -851,7 +850,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian for (Pair nicNtwk : guestNics) { Nic guestNic = nicNtwk.first(); //plug guest nic - PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, guestNic.getNetworkId(), null), router.getInstanceName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, guestNic.getNetworkId(), null), router.getInstanceName(), 
router.getType()); cmds.addCommand(plugNicCmd); if (!_networkModel.isPrivateGateway(guestNic)) { @@ -1236,12 +1235,14 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian //1) allocate nic for control and source nat public ip networks = super.createRouterNetworks(owner, isRedundant, plan, null, sourceNatIp); - //2) allocate nic for private gateway if needed - PrivateGateway privateGateway = _vpcMgr.getVpcPrivateGateway(vpcId); - if (privateGateway != null) { - NicProfile privateNic = createPrivateNicProfileForGateway(privateGateway); - Network privateNetwork = _networkModel.getNetwork(privateGateway.getNetworkId()); - networks.add(new Pair((NetworkVO) privateNetwork, privateNic)); + //2) allocate nic for private gateways if needed + List privateGateways = _vpcMgr.getVpcPrivateGateways(vpcId); + if (privateGateways != null && !privateGateways.isEmpty()) { + for (PrivateGateway privateGateway : privateGateways) { + NicProfile privateNic = createPrivateNicProfileForGateway(privateGateway); + Network privateNetwork = _networkModel.getNetwork(privateGateway.getNetworkId()); + networks.add(new Pair((NetworkVO) privateNetwork, privateNic)); + } } //3) allocate nic for guest gateway if needed diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index 00c90d5164e..4d5d98192fa 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -182,7 +182,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ throw new InvalidParameterValueException("Unable to find Vpc associated with the NetworkACL"); } _accountMgr.checkAccess(caller, null, true, vpc); - if(gateway.getVpcId() != acl.getVpcId()){ + if(!gateway.getVpcId().equals(acl.getVpcId())){ throw new InvalidParameterValueException("private gateway: "+privateGatewayId+" and ACL: "+aclId+" do not belong to the same VPC"); } } @@ -225,7 +225,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ } _accountMgr.checkAccess(caller, null, true, vpc); - if(network.getVpcId() != acl.getVpcId()){ + if(!network.getVpcId().equals(acl.getVpcId())){ throw new InvalidParameterValueException("Network: "+networkId+" and ACL: "+aclId+" do not belong to the same VPC"); } } diff --git a/server/src/com/cloud/network/vpc/VpcManager.java b/server/src/com/cloud/network/vpc/VpcManager.java index e8db8d3fd5c..f22e7e4bf83 100644 --- a/server/src/com/cloud/network/vpc/VpcManager.java +++ b/server/src/com/cloud/network/vpc/VpcManager.java @@ -166,5 +166,5 @@ public interface VpcManager extends VpcService{ */ void validateNtwkOffForNtwkInVpc(Long networkId, long newNtwkOffId, String newCidr, String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner); - List getVpcPrivateGateways(long id); + List getVpcPrivateGateways(long vpcId); } diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index f01d81be392..1aab7320fb4 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -711,8 +711,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis public boolean destroyVpc(Vpc vpc, Account caller, Long callerUserId) throws ConcurrentOperationException, ResourceUnavailableException { s_logger.debug("Destroying vpc " + vpc); - //don't allow to delete vpc if it's 
in use by existing networks - int networksCount = _ntwkDao.getNetworkCountByVpcId(vpc.getId()); + //don't allow to delete vpc if it's in use by existing non system networks (system networks are networks of a private gateway of the VPC, + //and they will get removed as a part of VPC cleanup + int networksCount = _ntwkDao.getNonSystemNetworkCountByVpcId(vpc.getId()); if (networksCount > 0) { throw new InvalidParameterValueException("Can't delete VPC " + vpc + " as its used by " + networksCount + " networks"); } @@ -1235,7 +1236,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return false; } - //4) Delete private gateway + //4) Delete private gateways List gateways = getVpcPrivateGateways(vpcId); if (gateways != null) { for (PrivateGateway gateway: gateways) { @@ -1299,8 +1300,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override - public List getVpcPrivateGateways(long id) { - List gateways = _vpcGatewayDao.listByVpcIdAndType(id, VpcGateway.Type.Private); + public List getVpcPrivateGateways(long vpcId) { + List gateways = _vpcGatewayDao.listByVpcIdAndType(vpcId, VpcGateway.Type.Private); if (gateways != null) { List pvtGateway = new ArrayList(); @@ -2024,8 +2025,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis //2) Create network Network guestNetwork = _ntwkMgr.createGuestNetwork(ntwkOffId, name, displayText, gateway, cidr, vlanId, - networkDomain, owner, domainId, pNtwk, zoneId, aclType, subdomainAccess, vpcId, null, null, isDisplayNetworkEnabled); - + networkDomain, owner, domainId, pNtwk, zoneId, aclType, subdomainAccess, vpcId, null, null, isDisplayNetworkEnabled, null); if(guestNetwork != null){ guestNetwork.setNetworkACLId(aclId); diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 270355764e0..e25cfbb89c0 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -92,6 +92,10 @@ import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterIpAddressDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.deploy.dao.PlannerHostReservationDao; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.DiscoveryException; import com.cloud.exception.InvalidParameterValueException; @@ -221,7 +225,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, protected HighAvailabilityManager _haMgr; @Inject protected StorageService _storageSvr; - + @Inject + PlannerHostReservationDao _plannerHostReserveDao; protected List _discoverers; @@ -2531,5 +2536,42 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); sc.addAnd(sc.getEntity().getResourceState(), Op.EQ, ResourceState.Enabled); return sc.list(); + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true) + public boolean releaseHostReservation(Long hostId) { + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null) { + long id = 
reservationEntry.getId(); + PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true); + if (hostReservation == null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservation call is ignored."); + } + txn.rollback(); + return false; + } + hostReservation.setResourceUsage(null); + _plannerHostReserveDao.persist(hostReservation); + txn.commit(); + return true; + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host reservation for host: " + hostId + + " does not even exist. Release reservation call is ignored."); + } + return false; + } catch (CloudRuntimeException e) { + throw e; + } catch (Throwable t) { + s_logger.error("Unable to release host reservation for host: " + hostId, t); + txn.rollback(); + return false; + } } } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 6e8ce86909a..a8185b861ae 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -78,6 +78,7 @@ import org.apache.cloudstack.api.command.admin.host.FindHostsForMigrationCmd; import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; +import org.apache.cloudstack.api.command.admin.host.ReleaseHostReservationCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd; @@ -457,6 +458,7 @@ import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -660,6 +662,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd; public class ManagementServerImpl extends ManagerBase implements ManagementServer { public static final Logger s_logger = Logger.getLogger(ManagementServerImpl.class.getName()); @@ -796,11 +799,21 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe private List _userAuthenticators; private List _userPasswordEncoders; + protected List _planners; + + public List getPlanners() { + return _planners; + } + + public void setPlanners(List _planners) { + this._planners = _planners; + } + @Inject ClusterManager _clusterMgr; private String _hashKey = null; private String _encryptionKey = null; private String _encryptionIV = null; - + @Inject protected AffinityGroupVMMapDao _affinityGroupVMMapDao; @@ -1046,29 +1059,29 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe String zoneType = cmd.getZoneType(); String keyword = cmd.getKeyword(); zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId); - - + + Filter searchFilter = new 
Filter(ClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); - - SearchBuilder sb = _clusterDao.createSearchBuilder(); - sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); - sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); - sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + + SearchBuilder sb = _clusterDao.createSearchBuilder(); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); + sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); sb.and("clusterType", sb.entity().getClusterType(), SearchCriteria.Op.EQ); sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ); - + if(zoneType != null) { SearchBuilder zoneSb = _dcDao.createSearchBuilder(); - zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); + zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER); } - - - SearchCriteria sc = sb.create(); + + + SearchCriteria sc = sb.create(); if (id != null) { - sc.setParameters("id", id); + sc.setParameters("id", id); } if (name != null) { @@ -1096,9 +1109,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if(zoneType != null) { - sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); + sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); } - + if (keyword != null) { SearchCriteria ssc = _clusterDao.createSearchCriteria(); ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); @@ -1222,7 +1235,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (volumePools.isEmpty()) { allHosts.remove(host); } else { - if (host.getClusterId() != srcHost.getClusterId() || usesLocal) { + if (!host.getClusterId().equals(srcHost.getClusterId()) || usesLocal) { requiresStorageMotion.put(host, true); } } @@ -1511,26 +1524,26 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public Pair, Integer> searchForPods(ListPodsByCmd cmd) { String podName = cmd.getPodName(); Long id = cmd.getId(); - Long zoneId = cmd.getZoneId(); + Long zoneId = cmd.getZoneId(); Object keyword = cmd.getKeyword(); Object allocationState = cmd.getAllocationState(); String zoneType = cmd.getZoneType(); zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId); - + Filter searchFilter = new Filter(HostPodVO.class, "dataCenterId", true, cmd.getStartIndex(), cmd.getPageSizeVal()); - SearchBuilder sb = _hostPodDao.createSearchBuilder(); + SearchBuilder sb = _hostPodDao.createSearchBuilder(); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); - sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ); - + if(zoneType != null) { SearchBuilder zoneSb = _dcDao.createSearchBuilder(); - zoneSb.and("zoneNetworkType", 
zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); + zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER); } - + SearchCriteria sc = sb.create(); if (keyword != null) { SearchCriteria ssc = _hostPodDao.createSearchCriteria(); @@ -1543,23 +1556,23 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (id != null) { sc.setParameters("id", id); } - + if (podName != null) { sc.setParameters("name", "%" + podName + "%"); } - + if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); } - + if (allocationState != null) { sc.setParameters("allocationState", allocationState); - } - - if(zoneType != null) { - sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); } - + + if(zoneType != null) { + sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); + } + Pair, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter); return new Pair, Integer>(result.first(), result.second()); } @@ -1868,6 +1881,89 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } */ + private VMTemplateVO updateTemplateOrIso(BaseUpdateTemplateOrIsoCmd cmd) { + Long id = cmd.getId(); + String name = cmd.getTemplateName(); + String displayText = cmd.getDisplayText(); + String format = cmd.getFormat(); + Long guestOSId = cmd.getOsTypeId(); + Boolean passwordEnabled = cmd.isPasswordEnabled(); + Boolean bootable = cmd.isBootable(); + Integer sortKey = cmd.getSortKey(); + Account account = UserContext.current().getCaller(); + + // verify that template exists + VMTemplateVO template = _templateDao.findById(id); + if (template == null || template.getRemoved() != null) { + InvalidParameterValueException ex = new InvalidParameterValueException("unable to find template/iso with specified id"); + ex.addProxyObject(template, id, "templateId"); + throw ex; + } + + // Don't allow to modify system template + if (id.equals(Long.valueOf(1))) { + InvalidParameterValueException ex = new InvalidParameterValueException("Unable to update template/iso of specified id"); + ex.addProxyObject(template, id, "templateId"); + throw ex; + } + + // do a permission check + _accountMgr.checkAccess(account, AccessType.ModifyEntry, true, template); + + boolean updateNeeded = !(name == null && displayText == null && format == null && guestOSId == null && passwordEnabled == null + && bootable == null && sortKey == null); + if (!updateNeeded) { + return template; + } + + template = _templateDao.createForUpdate(id); + + if (name != null) { + template.setName(name); + } + + if (displayText != null) { + template.setDisplayText(displayText); + } + + if (sortKey != null) { + template.setSortKey(sortKey); + } + + ImageFormat imageFormat = null; + if (format != null) { + try { + imageFormat = ImageFormat.valueOf(format.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new InvalidParameterValueException("Image format: " + format + " is incorrect. 
Supported formats are " + + EnumUtils.listValues(ImageFormat.values())); + } + + template.setFormat(imageFormat); + } + + if (guestOSId != null) { + GuestOSVO guestOS = _guestOSDao.findById(guestOSId); + + if (guestOS == null) { + throw new InvalidParameterValueException("Please specify a valid guest OS ID."); + } else { + template.setGuestOSId(guestOSId); + } + } + + if (passwordEnabled != null) { + template.setEnablePassword(passwordEnabled); + } + + if (bootable != null) { + template.setBootable(bootable); + } + + _templateDao.update(id, template); + + return _templateDao.findById(id); + } @Override public Pair, Integer> searchForIPAddresses(ListPublicIpAddressesCmd cmd) { @@ -2332,7 +2428,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public int compare(SummedCapacity arg0, SummedCapacity arg1) { if (arg0.getPercentUsed() < arg1.getPercentUsed()) { return 1; - } else if (arg0.getPercentUsed() == arg1.getPercentUsed()) { + } else if (arg0.getPercentUsed().equals(arg1.getPercentUsed())) { return 0; } return -1; @@ -2826,7 +2922,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(UpdateVMAffinityGroupCmd.class); cmdList.add(ListAffinityGroupTypesCmd.class); cmdList.add(ListNetworkIsolationMethodsCmd.class); - + cmdList.add(ListDeploymentPlannersCmd.class); + cmdList.add(ReleaseHostReservationCmd.class); cmdList.add(AddResourceDetailCmd.class); cmdList.add(RemoveResourceDetailCmd.class); cmdList.add(ListResourceDetailsCmd.class); @@ -3028,10 +3125,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if(zoneType != null) { SearchBuilder zoneSb = _dcDao.createSearchBuilder(); - zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); + zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER); - } - + } + SearchCriteria sc = sb.create(); if (keyword != null) { @@ -3073,9 +3170,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if(zoneType != null) { - sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); + sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); } - + Pair, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter); return new Pair, Integer>(result.first(), result.second()); } @@ -3600,7 +3697,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe // although we may have race conditioning here, database transaction serialization should // give us the same key if (_hashKey == null) { - _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(), + _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(), getBase64EncodedRandomKey(128)); } return _hashKey; @@ -3609,41 +3706,41 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public String getEncryptionKey() { if (_encryptionKey == null) { - _encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(), - Config.EncryptionKey.getCategory(), + _encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(), + Config.EncryptionKey.getCategory(), getBase64EncodedRandomKey(128)); } return _encryptionKey; } - + @Override public String getEncryptionIV() { if (_encryptionIV == null) { - _encryptionIV = 
_configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(), - Config.EncryptionIV.getCategory(), + _encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(), + Config.EncryptionIV.getCategory(), getBase64EncodedRandomKey(128)); } return _encryptionIV; } - + @Override @DB public void resetEncryptionKeyIV() { - + SearchBuilder sb = _configDao.createSearchBuilder(); sb.and("name1", sb.entity().getName(), SearchCriteria.Op.EQ); sb.or("name2", sb.entity().getName(), SearchCriteria.Op.EQ); sb.done(); - + SearchCriteria sc = sb.create(); sc.setParameters("name1", Config.EncryptionKey.key()); sc.setParameters("name2", Config.EncryptionIV.key()); - + _configDao.expunge(sc); _encryptionKey = null; _encryptionIV = null; } - + private static String getBase64EncodedRandomKey(int nBits) { SecureRandom random; try { @@ -3979,4 +4076,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } + + @Override + public List listDeploymentPlanners() { + List plannersAvailable = new ArrayList(); + for (DeploymentPlanner planner : _planners) { + plannersAvailable.add(planner.getName()); + } + + return plannersAvailable; + } + } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index f03e9f99e04..504cdc64948 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -438,6 +438,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C SearchCriteria.Op.EQ); volumeSB.and("removed", volumeSB.entity().getRemoved(), SearchCriteria.Op.NULL); + volumeSB.and("state", volumeSB.entity().getState(), SearchCriteria.Op.NIN); SearchBuilder activeVmSB = _vmInstanceDao .createSearchBuilder(); @@ -449,6 +450,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C SearchCriteria volumeSC = volumeSB.create(); volumeSC.setParameters("poolId", PrimaryDataStoreVO.getId()); + volumeSC.setParameters("state", Volume.State.Expunging, Volume.State.Destroy); volumeSC.setJoinParameters("activeVmSB", "state", State.Starting, State.Running, State.Stopping, State.Migrating); @@ -644,6 +646,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C SearchCriteria.Op.EQ); volumeSearch.and("poolId", volumeSearch.entity().getPoolId(), SearchCriteria.Op.EQ); + volumeSearch.and("state", volumeSearch.entity().getState(), SearchCriteria.Op.EQ); StoragePoolSearch.join("vmVolume", volumeSearch, volumeSearch.entity() .getInstanceId(), StoragePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); @@ -1591,6 +1594,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C SearchCriteria sc = StoragePoolSearch.create(); sc.setJoinParameters("vmVolume", "volumeType", Volume.Type.ROOT); sc.setJoinParameters("vmVolume", "poolId", storagePoolId); + sc.setJoinParameters("vmVolume", "state", Volume.State.Ready); return _vmInstanceDao.search(sc, null); } diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index 7fee53c1271..9c7069a80d2 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -739,7 +739,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, Account caller = UserContext.current().getCaller(); //Verify parameters - if (sourceZoneId == destZoneId) { + if 
(sourceZoneId.equals(destZoneId)) { throw new InvalidParameterValueException("Please specify different source and destination zones."); } @@ -1160,7 +1160,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, Account caller = UserContext.current().getCaller(); Long id = cmd.getId(); - if (id == Long.valueOf(1)) { + if (id.equals(Long.valueOf(1))) { throw new PermissionDeniedException("unable to list permissions for " + cmd.getMediaType() + " with id " + id); } @@ -1252,7 +1252,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); } - if (id == Long.valueOf(1)) { + if (id.equals(Long.valueOf(1))) { throw new InvalidParameterValueException("unable to update permissions for " + mediaType + " with id " + id); } diff --git a/server/src/com/cloud/test/DatabaseConfig.java b/server/src/com/cloud/test/DatabaseConfig.java index 7c10f98abf4..70c81781959 100755 --- a/server/src/com/cloud/test/DatabaseConfig.java +++ b/server/src/com/cloud/test/DatabaseConfig.java @@ -792,14 +792,14 @@ public class DatabaseConfig { } // If a netmask was provided, check that the startIP, endIP, and gateway all belong to the same subnet - if (netmask != null && netmask != "") { + if (netmask != null && !netmask.equals("")) { if (endIP != null) { if (!IPRangeConfig.sameSubnet(startIP, endIP, netmask)) { printError("Start and end IPs for the public IP range must be in the same subnet, as per the provided netmask."); } } - if (gateway != null && gateway != "") { + if (gateway != null && !gateway.equals("")) { if (!IPRangeConfig.sameSubnet(startIP, gateway, netmask)) { printError("The start IP for the public IP range must be in the same subnet as the gateway, as per the provided netmask."); } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index c40993c0329..63947e1ad55 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -73,6 +73,7 @@ import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.PlugNicAnswer; import com.cloud.agent.api.PlugNicCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.UnPlugNicAnswer; @@ -101,7 +102,6 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeployPlannerSelector; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -398,9 +398,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use AffinityGroupVMMapDao _affinityGroupVMMapDao; @Inject AffinityGroupDao _affinityGroupDao; - - @Inject - List plannerSelectors; @Inject TemplateDataFactory templateFactory; @@ -1023,6 +1020,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new CloudRuntimeException("Failed to find a nic profile for the existing default network. 
This is bad and probably means some sort of configuration corruption"); } + Network oldDefaultNetwork = null; + oldDefaultNetwork = _networkModel.getDefaultNetworkForVm(vmId); + long oldNetworkOfferingId = -1L; + + if(oldDefaultNetwork!=null) { + oldNetworkOfferingId = oldDefaultNetwork.getNetworkOfferingId(); + } NicVO existingVO = _nicDao.findById(existing.id); Integer chosenID = nic.getDeviceId(); Integer existingID = existing.getDeviceId(); @@ -1054,6 +1058,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new CloudRuntimeException("Failed to change default nic to " + nic + " and now we have no default"); } else if (newdefault.getId() == nic.getNetworkId()) { s_logger.debug("successfully set default network to " + network + " for " + vmInstance); + String nicIdString = Long.toString(nic.getId()); + long newNetworkOfferingId = network.getNetworkOfferingId(); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vmInstance.getAccountId(), vmInstance.getDataCenterId(), + vmInstance.getId(), nicIdString, oldNetworkOfferingId, null, 1L, VirtualMachine.class.getName(), vmInstance.getUuid()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmInstance.getAccountId(), vmInstance.getDataCenterId(), + vmInstance.getId(), nicIdString, newNetworkOfferingId, null, 1L, VirtualMachine.class.getName(), vmInstance.getUuid()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vmInstance.getAccountId(), vmInstance.getDataCenterId(), + vmInstance.getId(), nicIdString, newNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmInstance.getAccountId(), vmInstance.getDataCenterId(), + vmInstance.getId(), nicIdString, oldNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid()); return _vmDao.findById(vmInstance.getId()); } @@ -1655,7 +1669,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use String description = ""; - if (displayName != vmInstance.getDisplayName()) { + if (!displayName.equals(vmInstance.getDisplayName())) { description += "New display name: " + displayName + ". 
"; } @@ -2191,7 +2205,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use s_logger.debug("Creating network for account " + owner + " from the network offering id=" +requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, - null, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true); + null, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null); defaultNetwork = _networkDao.findById(newNetwork.getId()); } else if (virtualNetworks.size() > 1) { throw new InvalidParameterValueException( @@ -2769,6 +2783,37 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use return true; } + private boolean setupVmForPvlan(boolean add, Long hostId, NicVO nic) { + if (!nic.getBroadcastUri().getScheme().equals("pvlan")) { + return false; + } + String op = "add"; + if (!add) { + // "delete" would remove all the rules(if using ovs) related to this vm + op = "delete"; + } + Network network = _networkDao.findById(nic.getNetworkId()); + Host host = _hostDao.findById(hostId); + String networkTag = _networkModel.getNetworkTag(host.getHypervisorType(), network); + PvlanSetupCommand cmd = PvlanSetupCommand.createVmSetup(op, nic.getBroadcastUri(), networkTag, nic.getMacAddress()); + Answer answer = null; + try { + answer = _agentMgr.send(hostId, cmd); + } catch (OperationTimedoutException e) { + s_logger.warn("Timed Out", e); + return false; + } catch (AgentUnavailableException e) { + s_logger.warn("Agent Unavailable ", e); + return false; + } + + boolean result = true; + if (answer == null || !answer.getResult()) { + result = false; + } + return result; + } + @Override public boolean finalizeDeployment(Commands cmds, VirtualMachineProfile profile, DeployDestination dest, @@ -2830,8 +2875,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use originalIp = nic.getIp4Address(); guestNic = nic; guestNetwork = network; + // In vmware, we will be effecting pvlan settings in portgroups in StartCommand. 
+ if (profile.getHypervisorType() != HypervisorType.VMware) { + if (nic.getBroadcastUri().getScheme().equals("pvlan")) { + if (!setupVmForPvlan(true, hostId, nic)) { + return false; + } + } } } + } boolean ipChanged = false; if (originalIp != null && !originalIp.equalsIgnoreCase(returnedIp)) { if (returnedIp != null && guestNic != null) { @@ -2960,6 +3013,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + " stop due to exception ", ex); } } + + VMInstanceVO vm = profile.getVirtualMachine(); + List nics = _nicDao.listByVmId(vm.getId()); + for (NicVO nic : nics) { + NetworkVO network = _networkDao.findById(nic.getNetworkId()); + if (network.getTrafficType() == TrafficType.Guest) { + if (nic.getBroadcastUri().getScheme().equals("pvlan")) { + setupVmForPvlan(false, vm.getHostId(), nic); + } + } + } } public String generateRandomPassword() { @@ -3087,15 +3151,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); - String plannerName = null; - for (DeployPlannerSelector dps : plannerSelectors) { - plannerName = dps.selectPlanner(vm); - if (plannerName != null) { - break; - } - } + // Get serviceOffering for Virtual Machine + ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); + String plannerName = offering.getDeploymentPlanner(); if (plannerName == null) { - throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType())); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + plannerName = "BareMetalPlanner"; + } else { + plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key()); + } } String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString()); @@ -3624,7 +3688,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use List vmVolumes = _volsDao.findUsableVolumesForInstance(vm.getId()); Map volToPoolObjectMap = new HashMap(); - if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId() == srcHost.getClusterId()) { + if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId().equals(srcHost.getClusterId())) { if (volumeToPool.isEmpty()) { // If the destination host is in the same cluster and volumes do not have to be migrated across pools // then fail the call. migrateVirtualMachine api should have been used. 
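The planner lookup above replaces the injected DeployPlannerSelector list with the deployment planner recorded on the service offering. A minimal sketch of that fallback order, written as a hypothetical helper (the method name and signature are illustrative assumptions, not part of the patch):

    // Hypothetical helper summarizing how the planner name is resolved above.
    private String resolvePlannerName(ServiceOfferingVO offering, HypervisorType hypervisorType) {
        // 1) honor a planner explicitly set on the service offering
        String plannerName = offering.getDeploymentPlanner();
        if (plannerName == null) {
            if (hypervisorType == HypervisorType.BareMetal) {
                // 2) bare metal deployments always go through the BareMetalPlanner
                plannerName = "BareMetalPlanner";
            } else {
                // 3) otherwise fall back to the global default planner configuration
                plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key());
            }
        }
        return plannerName;
    }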
@@ -3739,7 +3803,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + cmd.getAccountName() + " is disabled."); } - //check caller has access to both the old and new account + //check caller has access to both the old and new account _accountMgr.checkAccess(caller, null, true, oldAccount); _accountMgr.checkAccess(caller, null, true, newAccount); @@ -4019,7 +4083,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, - null, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true); + null, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null); // if the network offering has persistent set to true, implement the network if (requiredOfferings.get(0).getIsPersistent()) { DeployDestination dest = new DeployDestination(zone, null, null, null); @@ -4252,7 +4316,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use UserVmVO vmVO = _vmDao.findById(vm.getId()); if (vmVO.getState() == State.Running) { try { - PlugNicCommand plugNicCmd = new PlugNicCommand(nic,vm.getName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(nic,vm.getName(), vm.getType()); Commands cmds = new Commands(OnError.Stop); cmds.addCommand("plugnic",plugNicCmd); _agentMgr.send(dest.getHost().getId(),cmds); diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java index 6f930ba473b..79c85830b9b 100755 --- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -1322,7 +1322,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (migrationResult) { //if the vm is migrated to different pod in basic mode, need to reallocate ip - if (vm.getPodIdToDeployIn() != destPool.getPodId()) { + if (!vm.getPodIdToDeployIn().equals(destPool.getPodId())) { DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), destPool.getPodId(), null, null, null, null); VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, null, null, null, null); _networkMgr.reallocate(vmProfile, plan); diff --git a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index 56c46b01c79..a1865c64af7 100644 --- a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -35,6 +35,7 @@ import com.cloud.region.ha.GlobalLoadBalancingRulesService; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.UserContext; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @@ -154,7 +155,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR long gslbRuleId = assignToGslbCmd.getGlobalLoadBalancerRuleId(); GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { - throw new 
InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRule.getUuid()); + throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } _accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule); @@ -173,6 +174,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR List oldLbRuleIds = new ArrayList(); List oldZones = new ArrayList(); List newZones = new ArrayList(oldZones); + List> physicalNetworks = new ArrayList>(); // get the list of load balancer rules id's that are assigned currently to GSLB rule and corresponding zone id's List gslbLbMapVos = _gslbLbMapDao.listByGslbRuleId(gslbRuleId); @@ -217,12 +219,14 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } newZones.add(network.getDataCenterId()); + physicalNetworks.add(new Pair(network.getDataCenterId(), network.getPhysicalNetworkId())); } - // check each of the zone has a GSLB service provider configured - for (Long zoneId: newZones) { - if (!checkGslbServiceEnabledInZone(zoneId)) { - throw new InvalidParameterValueException("GSLB service is not enabled in the Zone"); + // for each physical network, check if a GSLB service provider is configured + for (Pair physicalNetwork: physicalNetworks) { + if (!checkGslbServiceEnabledInZone(physicalNetwork.first(), physicalNetwork.second())) { + throw new InvalidParameterValueException("GSLB service is not enabled in the Zone:" + + physicalNetwork.first() + " and physical network " + physicalNetwork.second()); } } @@ -278,7 +282,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR long gslbRuleId = removeFromGslbCmd.getGlobalLoadBalancerRuleId(); GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { - throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRule.getUuid()); + throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } _accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule); @@ -543,8 +547,8 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR GlobalLoadBalancerConfigCommand gslbConfigCmd = new GlobalLoadBalancerConfigCommand(gslbFqdn, lbMethod, persistenceMethod, serviceType, gslbRuleId, revoke); - // list of the zones participating in global load balancing - List gslbSiteIds = new ArrayList(); + // list of the physical networks participating in global load balancing + List> gslbSiteIds = new ArrayList>(); // map of the zone and info corresponding to the load balancer configured in the zone Map zoneSiteLoadbalancerMap = new HashMap(); @@ -559,37 +563,38 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR LoadBalancerVO loadBalancer = _lbDao.findById(gslbLbMapVo.getLoadBalancerId()); Network network = _networkDao.findById(loadBalancer.getNetworkId()); long dataCenterId = network.getDataCenterId(); + long physicalNetworkId = network.getPhysicalNetworkId(); - gslbSiteIds.add(dataCenterId); + gslbSiteIds.add(new Pair(dataCenterId, physicalNetworkId)); IPAddressVO ip = _ipAddressDao.findById(loadBalancer.getSourceIpAddressId()); SiteLoadBalancerConfig siteLb = new SiteLoadBalancerConfig(gslbLbMapVo.isRevoke(), serviceType, ip.getAddress().addr(), Integer.toString(loadBalancer.getDefaultPortStart()), dataCenterId); - 
siteLb.setGslbProviderPublicIp(_gslbProvider.getZoneGslbProviderPublicIp(dataCenterId)); - siteLb.setGslbProviderPrivateIp(_gslbProvider.getZoneGslbProviderPrivateIp(dataCenterId)); + siteLb.setGslbProviderPublicIp(_gslbProvider.getZoneGslbProviderPublicIp(dataCenterId, physicalNetworkId)); + siteLb.setGslbProviderPrivateIp(_gslbProvider.getZoneGslbProviderPrivateIp(dataCenterId, physicalNetworkId)); zoneSiteLoadbalancerMap.put(network.getDataCenterId(), siteLb); } // loop through all the zones, participating in GSLB, and send GSLB config command // to the corresponding GSLB service provider in that zone - for (long zoneId: gslbSiteIds) { + for (Pair zoneId: gslbSiteIds) { List slbs = new ArrayList(); // set site as 'local' for the site in that zone - for (long innerLoopZoneId: gslbSiteIds) { - SiteLoadBalancerConfig siteLb = zoneSiteLoadbalancerMap.get(innerLoopZoneId); - siteLb.setLocal(zoneId == innerLoopZoneId); + for (Pair innerLoopZoneId: gslbSiteIds) { + SiteLoadBalancerConfig siteLb = zoneSiteLoadbalancerMap.get(innerLoopZoneId.first()); + siteLb.setLocal(zoneId.first() == innerLoopZoneId.first()); slbs.add(siteLb); } gslbConfigCmd.setSiteLoadBalancers(slbs); try { - _gslbProvider.applyGlobalLoadBalancerRule(zoneId, gslbConfigCmd); + _gslbProvider.applyGlobalLoadBalancerRule(zoneId.first(), zoneId.second(), gslbConfigCmd); } catch (ResourceUnavailableException e) { s_logger.warn("Failed to configure GSLB rul in the zone " + zoneId + " due to " + e.getMessage()); throw new CloudRuntimeException("Failed to configure GSLB rul in the zone"); @@ -599,13 +604,13 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR return true; } - private boolean checkGslbServiceEnabledInZone(long zoneId) { + private boolean checkGslbServiceEnabledInZone(long zoneId, long physicalNetworkId) { if (_gslbProvider == null) { throw new CloudRuntimeException("No GSLB provider is available"); } - return _gslbProvider.isServiceEnabledInZone(zoneId); + return _gslbProvider.isServiceEnabledInZone(zoneId, physicalNetworkId); } @Override diff --git a/server/src/org/apache/cloudstack/region/gslb/GslbServiceProvider.java b/server/src/org/apache/cloudstack/region/gslb/GslbServiceProvider.java index 4338d65eff2..0413edff978 100755 --- a/server/src/org/apache/cloudstack/region/gslb/GslbServiceProvider.java +++ b/server/src/org/apache/cloudstack/region/gslb/GslbServiceProvider.java @@ -24,13 +24,13 @@ import org.apache.cloudstack.region.RegionServiceProvider; public interface GslbServiceProvider extends RegionServiceProvider { - public boolean isServiceEnabledInZone(long zoneId); + public boolean isServiceEnabledInZone(long zoneId, long physicalNetworkId); - public String getZoneGslbProviderPublicIp(long zoneId); + public String getZoneGslbProviderPublicIp(long zoneId, long physicalNetworkId); - public String getZoneGslbProviderPrivateIp(long zoneId); + public String getZoneGslbProviderPrivateIp(long zoneId, long physicalNetworkId); - public boolean applyGlobalLoadBalancerRule(long zoneId, GlobalLoadBalancerConfigCommand gslbConfigCmd) + public boolean applyGlobalLoadBalancerRule(long zoneId, long physicalNetworkId, GlobalLoadBalancerConfigCommand gslbConfigCmd) throws ResourceUnavailableException; } diff --git a/server/test/com/cloud/network/MockNetworkManagerImpl.java b/server/test/com/cloud/network/MockNetworkManagerImpl.java index 7ba34c76aa0..87431ab54ab 100755 --- a/server/test/com/cloud/network/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/network/MockNetworkManagerImpl.java 
@@ -272,7 +272,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage @Override public Network createGuestNetwork(long networkOfferingId, String name, String displayText, String gateway, String cidr, String vlanId, String networkDomain, Account owner, Long domainId, - PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String gatewayv6, String cidrv6, Boolean displayNetworkEnabled) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { + PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String gatewayv6, String cidrv6, Boolean displayNetworkEnabled, String isolatedPvlan) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { // TODO Auto-generated method stub return null; } diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java index 819120bfa8b..a1c52aa9dea 100644 --- a/server/test/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java @@ -609,4 +609,10 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return null; } + @Override + public boolean releaseHostReservation(Long hostId) { + // TODO Auto-generated method stub + return false; + } + } diff --git a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java new file mode 100644 index 00000000000..e3b7d311ba7 --- /dev/null +++ b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java @@ -0,0 +1,359 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
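// The DeploymentPlanningManagerImplTest below wires DeploymentPlanningManagerImpl against mocked dependencies
// (planner host reservation DAO, affinity group map, data center and cluster DAOs, and a mocked FirstFitPlanner):
// dataCenterAvoidTest expects planDeployment to return null when the data center is in the avoid set, and
// plannerCannotHandleTest expects null when the selected planner reports it cannot handle the deployment.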
+package com.cloud.vm; + +import static org.junit.Assert.*; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import com.cloud.service.ServiceOfferingVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.agent.AgentManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentClusterPlanner; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.DeploymentPlanningManagerImpl; +import com.cloud.deploy.FirstFitPlanner; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.deploy.dao.PlannerHostReservationDao; +import org.apache.cloudstack.affinity.AffinityGroupProcessor; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.user.AccountManager; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class DeploymentPlanningManagerImplTest { + + @Inject + DeploymentPlanningManagerImpl _dpm; + + @Inject + PlannerHostReservationDao _plannerHostReserveDao; + + @Inject VirtualMachineProfileImpl vmProfile; + + @Inject + AffinityGroupVMMapDao _affinityGroupVMMapDao; + + @Inject + 
ExcludeList avoids; + + @Inject + DataCenterVO dc; + + @Inject + DataCenterDao _dcDao; + + @Inject + FirstFitPlanner _planner; + + @Inject + ClusterDao _clusterDao; + + private static long domainId = 5L; + + private static long dataCenterId = 1L; + + + @BeforeClass + public static void setUp() throws ConfigurationException { + } + + @Before + public void testSetUp() { + ComponentContext.initComponentsLifeCycle(); + + PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared); + Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO); + Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO); + Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L); + + VMInstanceVO vm = new VMInstanceVO(); + Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm); + + Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + Mockito.when(dc.getId()).thenReturn(dataCenterId); + + ClusterVO clusterVO = new ClusterVO(); + clusterVO.setHypervisorType(HypervisorType.XenServer.toString()); + Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO); + + Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner"); + List planners = new ArrayList(); + planners.add(_planner); + _dpm.setPlanners(planners); + + } + + @Test + public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, + "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + + Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(true); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("DataCenter is in avoid set, destination should be null! ", dest); + } + + @Test + public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, + "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, + "UserDispersingPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false); + + Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("Planner cannot handle, destination should be null! 
", dest); + } + + @Test + public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, + "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false); + Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true); + + Mockito.when(((DeploymentClusterPlanner) _planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("Planner cannot handle, destination should be null! ", dest); + } + + + @Configuration + @ComponentScan(basePackageClasses = { DeploymentPlanningManagerImpl.class }, includeFilters = { @Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM) }, useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public FirstFitPlanner firstFitPlanner() { + return Mockito.mock(FirstFitPlanner.class); + } + + @Bean + public DeploymentPlanner deploymentPlanner() { + return Mockito.mock(DeploymentPlanner.class); + } + + @Bean + public DataCenterVO dataCenter() { + return Mockito.mock(DataCenterVO.class); + } + + @Bean + public ExcludeList excludeList() { + return Mockito.mock(ExcludeList.class); + } + + @Bean + public VirtualMachineProfileImpl virtualMachineProfileImpl() { + return Mockito.mock(VirtualMachineProfileImpl.class); + } + + @Bean + public ClusterDetailsDao clusterDetailsDao() { + return Mockito.mock(ClusterDetailsDao.class); + } + + @Bean + public DataStoreManager cataStoreManager() { + return Mockito.mock(DataStoreManager.class); + } + + @Bean + public StorageManager storageManager() { + return Mockito.mock(StorageManager.class); + } + + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + + @Bean + public HostPodDao hostPodDao() { + return Mockito.mock(HostPodDao.class); + } + + @Bean + public ClusterDao clusterDao() { + return Mockito.mock(ClusterDao.class); + } + + @Bean + public GuestOSDao guestOSDao() { + return Mockito.mock(GuestOSDao.class); + } + + @Bean + public GuestOSCategoryDao guestOSCategoryDao() { + return Mockito.mock(GuestOSCategoryDao.class); + } + + @Bean + public CapacityManager capacityManager() { + return Mockito.mock(CapacityManager.class); + } + + @Bean + public StoragePoolHostDao storagePoolHostDao() { + return Mockito.mock(StoragePoolHostDao.class); + } + + @Bean + public VolumeDao volumeDao() { + return Mockito.mock(VolumeDao.class); + } + + @Bean + public ConfigurationDao configurationDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public DiskOfferingDao diskOfferingDao() { + return Mockito.mock(DiskOfferingDao.class); + } + + @Bean + public PrimaryDataStoreDao primaryDataStoreDao() { + return Mockito.mock(PrimaryDataStoreDao.class); + } + + @Bean + public CapacityDao capacityDao() { + return Mockito.mock(CapacityDao.class); + } + + @Bean + public PlannerHostReservationDao plannerHostReservationDao() { + return Mockito.mock(PlannerHostReservationDao.class); + } + + @Bean + public AffinityGroupProcessor affinityGroupProcessor() { + return Mockito.mock(AffinityGroupProcessor.class); + } + 
+ @Bean + public AffinityGroupDao affinityGroupDao() { + return Mockito.mock(AffinityGroupDao.class); + } + + @Bean + public AffinityGroupVMMapDao affinityGroupVMMapDao() { + return Mockito.mock(AffinityGroupVMMapDao.class); + } + + @Bean + public AccountManager accountManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public AgentManager agentManager() { + return Mockito.mock(AgentManager.class); + } + + @Bean + public MessageBus messageBus() { + return Mockito.mock(MessageBus.class); + } + + + @Bean + public UserVmDao userVMDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public VMInstanceDao vmInstanceDao() { + return Mockito.mock(VMInstanceDao.class); + } + + @Bean + public DataCenterDao dataCenterDao() { + return Mockito.mock(DataCenterDao.class); + } + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index b64278c9709..ba18fa1c11d 100755 --- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -431,7 +431,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA, - boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) { // TODO Auto-generated method stub return null; } diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index 97775b10c36..3e6a08bdbf3 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -868,7 +868,8 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage @Override public Network createGuestNetwork(long networkOfferingId, String name, String displayText, String gateway, String cidr, String vlanId, String networkDomain, Account owner, Long domainId, - PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String gatewayv6, String cidrv6, Boolean displayNetworkEnabled) + PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String gatewayv6, String cidrv6, + Boolean displayNetworkEnabled, String isolatedPvlan) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { // TODO Auto-generated method stub return null; diff --git a/server/test/com/cloud/vpc/MockVpcManagerImpl.java b/server/test/com/cloud/vpc/MockVpcManagerImpl.java index 9812750e479..921321f52da 100644 --- a/server/test/com/cloud/vpc/MockVpcManagerImpl.java +++ b/server/test/com/cloud/vpc/MockVpcManagerImpl.java @@ -379,7 +379,7 @@ public class MockVpcManagerImpl extends ManagerBase implements VpcManager { } @Override - public 
List getVpcPrivateGateways(long id) { + public List getVpcPrivateGateways(long vpcId) { return null; } diff --git a/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java index 4747e702e65..ec1a0173aa8 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkDaoImpl.java @@ -367,4 +367,9 @@ public class MockNetworkDaoImpl extends GenericDaoBase implemen return null; } + + @Override + public int getNonSystemNetworkCountByVpcId(long vpcId) { + return 0; + } } diff --git a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index 4727cfba99f..f5cf4422c50 100644 --- a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -90,6 +90,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.projects.ProjectManager; import com.cloud.server.ConfigurationServer; +import com.cloud.server.ManagementService; import com.cloud.service.dao.ServiceOfferingDaoImpl; import com.cloud.storage.dao.DiskOfferingDaoImpl; import com.cloud.storage.dao.S3DaoImpl; @@ -168,162 +169,167 @@ useDefaultFilters=false ) public class ChildTestConfiguration { - + + @Bean + public ManagementService managementService() { + return Mockito.mock(ManagementService.class); + } + @Bean public AccountManager acctMgr() { return Mockito.mock(AccountManager.class); } - + @Bean public NetworkService ntwkSvc() { return Mockito.mock(NetworkService.class); } - + @Bean public NetworkModel ntwkMdl() { return Mockito.mock(NetworkModel.class); } - + @Bean public AlertManager alertMgr() { return Mockito.mock(AlertManager.class); } - + @Bean public SecurityChecker securityChkr() { return Mockito.mock(SecurityChecker.class); } - + @Bean public ResourceLimitService resourceSvc() { return Mockito.mock(ResourceLimitService.class); } - + @Bean public ProjectManager projectMgr() { return Mockito.mock(ProjectManager.class); } - + @Bean public SecondaryStorageVmManager ssvmMgr() { return Mockito.mock(SecondaryStorageVmManager.class); } - + @Bean public SwiftManager swiftMgr() { return Mockito.mock(SwiftManager.class); } - + @Bean public S3Manager s3Mgr() { return Mockito.mock(S3Manager.class); } - + @Bean public VpcManager vpcMgr() { return Mockito.mock(VpcManager.class); } - + @Bean public UserVmDao userVMDao() { return Mockito.mock(UserVmDao.class); } - + @Bean public RulesManager rulesMgr() { return Mockito.mock(RulesManager.class); } - + @Bean public LoadBalancingRulesManager lbRulesMgr() { return Mockito.mock(LoadBalancingRulesManager.class); } - + @Bean public RemoteAccessVpnService vpnMgr() { return Mockito.mock(RemoteAccessVpnService.class); } - + @Bean public NetworkGuru ntwkGuru() { return Mockito.mock(NetworkGuru.class); } - + @Bean public NetworkElement ntwkElement() { return Mockito.mock(NetworkElement.class); } - + @Bean public IpDeployer ipDeployer() { return Mockito.mock(IpDeployer.class); } - + @Bean public DhcpServiceProvider dhcpProvider() { return Mockito.mock(DhcpServiceProvider.class); } - + @Bean public FirewallManager firewallMgr() { return Mockito.mock(FirewallManager.class); } - + @Bean public AgentManager agentMgr() { return Mockito.mock(AgentManager.class); } - + @Bean public StorageNetworkManager storageNtwkMgr() { return 
Mockito.mock(StorageNetworkManager.class); } - + @Bean public NetworkACLManager ntwkAclMgr() { return Mockito.mock(NetworkACLManager.class); } - + @Bean public Ipv6AddressManager ipv6Mgr() { return Mockito.mock(Ipv6AddressManager.class); } - + @Bean public ConfigurationDao configDao() { return Mockito.mock(ConfigurationDao.class); } - + @Bean public UserContext userContext() { return Mockito.mock(UserContext.class); } - + @Bean public UserContextInitializer userContextInitializer() { return Mockito.mock(UserContextInitializer.class); } - + @Bean public NetworkManager networkManager() { return Mockito.mock(NetworkManager.class); } - + @Bean public NetworkOfferingDao networkOfferingDao() { return Mockito.mock(NetworkOfferingDao.class); } - + @Bean public NetworkDao networkDao() { return Mockito.mock(NetworkDao.class); } - + @Bean public NetworkOfferingServiceMapDao networkOfferingServiceMapDao() { return Mockito.mock(NetworkOfferingServiceMapDao.class); } - + @Bean public DataCenterLinkLocalIpAddressDao datacenterLinkLocalIpAddressDao() { return Mockito.mock(DataCenterLinkLocalIpAddressDao.class); @@ -359,5 +365,5 @@ public class ChildTestConfiguration { } } - + } diff --git a/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java b/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java index 700fe8f7dde..1c281a08bed 100644 --- a/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java +++ b/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java @@ -730,6 +730,9 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { Field dcID = NetworkVO.class.getDeclaredField("dataCenterId"); dcID.setAccessible(true); dcID.set(networkVo, new Long(1)); + Field phyNetworkId = NetworkVO.class.getDeclaredField("physicalNetworkId"); + phyNetworkId.setAccessible(true); + phyNetworkId.set(networkVo, new Long(200)); when(gslbServiceImpl._networkDao.findById(new Long(1))).thenReturn(networkVo); GlobalLoadBalancerLbRuleMapVO gslbLbMap = new GlobalLoadBalancerLbRuleMapVO(1, 1); diff --git a/setup/bindir/cloud-sysvmadm.in b/setup/bindir/cloud-sysvmadm.in index 0a7b454ef95..3cb7858150b 100755 --- a/setup/bindir/cloud-sysvmadm.in +++ b/setup/bindir/cloud-sysvmadm.in @@ -23,7 +23,7 @@ #set -x usage() { - printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-e]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -e - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. 
Default is cloud.log under current directory.\n\n" $(basename $0) >&2 + printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-n] [-z]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -n - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n -z - do restart only for the instances in the specific zone. If not specified, restart will apply to instances in all zones\n\n" $(basename $0) >&2 } @@ -37,9 +37,12 @@ password= help= maxthreads=10 LOGFILE=cloud.log +zone="" +inzone="" -while getopts 'sarhnd:m:u:p:t:l:' OPTION + +while getopts 'sarhnd:m:u:p:t:l:z:' OPTION do case $OPTION in s) system=1 @@ -63,6 +66,9 @@ do t) maxthreads="$OPTARG" ;; l) LOGFILE="$OPTARG" + ;; + z) zone=" AND data_center_id=""$OPTARG" + inzone=" in zone id=""$OPTARG" esac done @@ -70,14 +76,14 @@ done stop_start_system() { -secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"SecondaryStorageVm\""`) -console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"ConsoleProxy\""`) +secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"SecondaryStorageVm\"$zone"`) +console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"ConsoleProxy\"$zone"`) length_secondary=(${#secondary[@]}) length_console=(${#console[@]}) -echo -e "\nStopping and starting $length_secondary secondary storage vm(s)..." -echo -e "Stopping and starting $length_secondary secondary storage vm(s)..." >>$LOGFILE +echo -e "\nStopping and starting $length_secondary secondary storage vm(s)$inzone..." +echo -e "Stopping and starting $length_secondary secondary storage vm(s)$inzone..." >>$LOGFILE for d in "${secondary[@]}"; do echo "INFO: Stopping secondary storage vm with id $d" >>$LOGFILE @@ -98,12 +104,12 @@ done if [ "$length_secondary" == "0" ];then echo -e "No running secondary storage vms found \n" else - echo -e "Done stopping and starting secondary storage vm(s)" - echo -e "Done stopping and starting secondary storage vm(s)." >>$LOGFILE + echo -e "Done stopping and starting secondary storage vm(s)$inzone" + echo -e "Done stopping and starting secondary storage vm(s)$inzone." >>$LOGFILE fi -echo -e "\nStopping and starting $length_console console proxy vm(s)..." -echo -e "Stopping and starting $length_console console proxy vm(s)..." >>$LOGFILE +echo -e "\nStopping and starting $length_console console proxy vm(s)$inzone..." +echo -e "Stopping and starting $length_console console proxy vm(s)$inzone..." 
>>$LOGFILE for d in "${console[@]}"; do echo "INFO: Stopping console proxy with id $d" >>$LOGFILE @@ -124,17 +130,17 @@ done if [ "$length_console" == "0" ];then echo -e "No running console proxy vms found \n" else - echo "Done stopping and starting console proxy vm(s)." - echo "Done stopping and starting console proxy vm(s)." >>$LOGFILE + echo "Done stopping and starting console proxy vm(s) $inzone." + echo "Done stopping and starting console proxy vm(s) $inzone." >>$LOGFILE fi } stop_start_router() { - router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\""`) + router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone"`) length_router=(${#router[@]}) - echo -e "\nStopping and starting $length_router running routing vm(s)... " - echo -e "Stopping and starting $length_router running routing vm(s)... " >>$LOGFILE + echo -e "\nStopping and starting $length_router running routing vm(s)$inzone... " + echo -e "Stopping and starting $length_router running routing vm(s)$inzone... " >>$LOGFILE #Spawn reboot router in parallel - run commands in chunks - number of threads is configurable @@ -185,8 +191,8 @@ stop_start_router() { sleep 10 done - echo -e "Done restarting router(s). \n" - echo -e "Done restarting router(s). \n" >>$LOGFILE + echo -e "Done restarting router(s)$inzone. \n" + echo -e "Done restarting router(s)$inzone. \n" >>$LOGFILE fi } @@ -231,11 +237,11 @@ reboot_router(){ restart_networks(){ networks=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select n.id - from networks n, network_offerings no where n.network_offering_id = no.id and no.system_only = 0 and n.removed is null"`) + from networks n, network_offerings no where n.network_offering_id = no.id and no.system_only = 0 and n.removed is null$zone"`) length_networks=(${#networks[@]}) - echo -e "\nRestarting networks... " - echo -e "Restarting networks... " >>$LOGFILE + echo -e "\nRestarting $length_networks networks$inzone... " + echo -e "Restarting $length_networks networks$inzone... " >>$LOGFILE #Spawn restart network in parallel - run commands in chunks - number of threads is configurable @@ -287,8 +293,8 @@ restart_networks(){ sleep 10 done - echo -e "Done restarting networks. \n" - echo -e "Done restarting networks. \n" >>$LOGFILE + echo -e "Done restarting networks$inzone. \n" + echo -e "Done restarting networks$inzone. \n" >>$LOGFILE fi } diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index d9ed7a77ba5..556b7f319dc 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -1104,9 +1104,61 @@ CREATE TABLE `cloud`.`network_asa1000v_map` ( ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `eip_associate_public_ip` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if public IP is associated with user VM creation by default when EIP service is enabled.' 
AFTER `elastic_ip_service`; --- Re-enable foreign key checking, at the end of the upgrade path -SET foreign_key_checks = 1; +CREATE TABLE `cloud`.`op_host_planner_reservation` ( + `id` bigint unsigned NOT NULL auto_increment, + `data_center_id` bigint unsigned NOT NULL, + `pod_id` bigint unsigned, + `cluster_id` bigint unsigned, + `host_id` bigint unsigned, + `resource_usage` varchar(255) COMMENT 'Shared(between planners) Vs Dedicated (exclusive usage to a planner)', + PRIMARY KEY (`id`), + INDEX `i_op_host_planner_reservation__host_resource_usage`(`host_id`, `resource_usage`), + CONSTRAINT `fk_planner_reservation__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_planner_reservation__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `cloud`.`data_center`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_planner_reservation__pod_id` FOREIGN KEY (`pod_id`) REFERENCES `cloud`.`host_pod_ref`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_planner_reservation__cluster_id` FOREIGN KEY (`cluster_id`) REFERENCES `cloud`.`cluster`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`service_offering` ADD COLUMN `deployment_planner` varchar(255) COMMENT 'Planner heuristics used to deploy a VM of this offering; if null global config vm.deployment.planner is used'; + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.deployment.planner', 'FirstFitPlanner', '[''FirstFitPlanner'', ''UserDispersingPlanner'', ''UserConcentratedPodPlanner'']: DeploymentPlanner heuristic that will be used for VM deployment.'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'host.reservation.release.period', '300000', 'The interval in milliseconds between host reservation release checks'); + +DROP VIEW IF EXISTS `cloud`.`service_offering_view`; +CREATE VIEW `cloud`.`service_offering_view` AS + select + service_offering.id, + disk_offering.uuid, + disk_offering.name, + disk_offering.display_text, + disk_offering.created, + disk_offering.tags, + disk_offering.removed, + disk_offering.use_local_storage, + disk_offering.system_use, + service_offering.cpu, + service_offering.speed, + service_offering.ram_size, + service_offering.nw_rate, + service_offering.mc_rate, + service_offering.ha_enabled, + service_offering.limit_cpu_use, + service_offering.host_tag, + service_offering.default_use, + service_offering.vm_type, + service_offering.sort_key, + service_offering.deployment_planner, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path + from + `cloud`.`service_offering` + inner join + `cloud`.`disk_offering` ON service_offering.id = disk_offering.id + left join + `cloud`.`domain` ON disk_offering.domain_id = domain.id; -- Add "default" field to account/user tables ALTER TABLE `cloud`.`account` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if account is default'; @@ -1837,3 +1889,8 @@ CREATE TABLE `cloud`.`nic_ip_alias` ( alter table `cloud`.`vpc_gateways` add column network_acl_id bigint unsigned default 1 NOT NULL; update `cloud`.`vpc_gateways` set network_acl_id = 2; + +-- Re-enable foreign key checking, at the end of the upgrade path +SET foreign_key_checks = 1; + + diff --git a/test/integration/component/test_accounts.py b/test/integration/component/test_accounts.py index 9cbefe55fdb..b2038a9bd3b 100644 --- a/test/integration/component/test_accounts.py +++ 
b/test/integration/component/test_accounts.py @@ -78,7 +78,6 @@ class Services: "template": { "displaytext": "Public Template", "name": "Public template", - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", "hypervisor": 'XenServer', "format": 'VHD', @@ -243,7 +242,7 @@ class TestRemoveUserFromAccount(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id @@ -568,7 +567,7 @@ class TestNonRootAdminsPrivileges(cloudstackTestCase): self.apiclient, self.services["account"] ) - self.debug("Created account: %s" % account_2.account.name) + self.debug("Created account: %s" % account_2.name) self.cleanup.append(account_2) accounts_response = list_accounts( @@ -886,7 +885,7 @@ class TesttemplateHierarchy(cloudstackTestCase): cls.template = Template.register( cls.api_client, cls.services["template"], - account=cls.account_1.account.name, + account=cls.account_1.name, domainid=cls.domain_1.id ) cls._cleanup = [ @@ -935,7 +934,7 @@ class TesttemplateHierarchy(cloudstackTestCase): templates = list_templates( self.apiclient, templatefilter='self', - account=self.account_1.account.name, + account=self.account_1.name, domainid=self.domain_1.id ) self.assertEqual( @@ -960,7 +959,7 @@ class TesttemplateHierarchy(cloudstackTestCase): templates = list_templates( self.apiclient, templatefilter='self', - account=self.account_2.account.name, + account=self.account_2.name, domainid=self.domain_2.id ) self.assertEqual( @@ -1033,15 +1032,15 @@ class TestAddVmToSubDomain(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.vm_1 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], templateid=cls.template.id, - accountid=cls.account_1.account.name, - domainid=cls.account_1.account.domainid, + accountid=cls.account_1.name, + domainid=cls.account_1.domainid, serviceofferingid=cls.service_offering.id ) @@ -1049,8 +1048,8 @@ class TestAddVmToSubDomain(cloudstackTestCase): cls.api_client, cls.services["virtual_machine"], templateid=cls.template.id, - accountid=cls.account_2.account.name, - domainid=cls.account_2.account.domainid, + accountid=cls.account_2.name, + domainid=cls.account_2.domainid, serviceofferingid=cls.service_offering.id ) cls._cleanup = [ @@ -1625,7 +1624,7 @@ class TestDomainForceRemove(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1719,31 +1718,31 @@ class TestDomainForceRemove(cloudstackTestCase): ) self.debug("Deploying virtual machine in account 1: %s" % - self.account_1.account.name) + self.account_1.name) vm_1 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.debug("Deploying virtual machine in account 2: %s" % - self.account_2.account.name) + self.account_2.name) vm_2 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], 
templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) networks = Network.list( self.apiclient, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, listall=True ) self.assertEqual( @@ -1753,13 +1752,13 @@ class TestDomainForceRemove(cloudstackTestCase): ) network_1 = networks[0] self.debug("Default network in account 1: %s is %s" % ( - self.account_1.account.name, + self.account_1.name, network_1.name)) src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=network_1.id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, listall=True, issourcenat=True, ) @@ -1823,8 +1822,8 @@ class TestDomainForceRemove(cloudstackTestCase): self.debug("Checking if the resources in domain are deleted or not..") accounts = Account.list( self.apiclient, - name=self.account_1.account.name, - domainid=self.account_1.account.domainid, + name=self.account_1.name, + domainid=self.account_1.domainid, listall=True ) @@ -1894,31 +1893,31 @@ class TestDomainForceRemove(cloudstackTestCase): self.cleanup.append(self.service_offering) self.debug("Deploying virtual machine in account 1: %s" % - self.account_1.account.name) + self.account_1.name) vm_1 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.debug("Deploying virtual machine in account 2: %s" % - self.account_2.account.name) + self.account_2.name) vm_2 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) networks = Network.list( self.apiclient, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, listall=True ) self.assertEqual( @@ -1928,13 +1927,13 @@ class TestDomainForceRemove(cloudstackTestCase): ) network_1 = networks[0] self.debug("Default network in account 1: %s is %s" % ( - self.account_1.account.name, + self.account_1.name, network_1.name)) src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=network_1.id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, listall=True, issourcenat=True, ) diff --git a/test/integration/component/test_resource_limits.py b/test/integration/component/test_resource_limits.py index 1d876b6195f..ea79c07c376 100644 --- a/test/integration/component/test_resource_limits.py +++ b/test/integration/component/test_resource_limits.py @@ -191,25 +191,25 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Updating instance resource limit for account: %s" % - self.account_1.account.name) + self.account_1.name) # Set usage_vm=1 for Account 1 update_resource_limit( self.apiclient, 0, # Instance - account=self.account_1.account.name, 
- domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, max=1 ) self.debug( "Deploying VM instance in account: %s" % - self.account_1.account.name) + self.account_1.name) virtual_machine = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine) @@ -227,20 +227,20 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.debug( "Deploying VM instance in account: %s" % - self.account_2.account.name) + self.account_2.name) # Start 2 instances for account_2 virtual_machine_1 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_1) @@ -253,13 +253,13 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Deploying VM instance in account: %s" % - self.account_2.account.name) + self.account_2.name) virtual_machine_2 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_2) @@ -287,25 +287,25 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Updating public IP resource limit for account: %s" % - self.account_1.account.name) + self.account_1.name) # Set usage_vm=1 for Account 1 update_resource_limit( self.apiclient, 1, # Public Ip - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, max=2 ) self.debug( "Deploying VM instance in account: %s" % - self.account_1.account.name) + self.account_1.name) virtual_machine_1 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_1) @@ -318,14 +318,14 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Deploying VM instance in account: %s" % - self.account_2.account.name) + self.account_2.name) # Create VM for second account virtual_machine_2 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_2) @@ -431,25 +431,25 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Updating public IP resource limit for account: %s" % - 
self.account_1.account.name) + self.account_1.name) # Set usage_vm=1 for Account 1 update_resource_limit( self.apiclient, 3, # Snapshot - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, max=1 ) self.debug( "Deploying VM instance in account: %s" % - self.account_1.account.name) + self.account_1.name) virtual_machine_1 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_1) @@ -462,14 +462,14 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Deploying VM instance in account: %s" % - self.account_1.account.name) + self.account_1.name) # Create VM for second account virtual_machine_2 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_2) @@ -498,8 +498,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): # Create a snapshot from the ROOTDISK (Account 1) snapshot_1 = Snapshot.create(self.apiclient, volumes[0].id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, ) self.cleanup.append(snapshot_1) # Verify Snapshot state @@ -516,8 +516,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): with self.assertRaises(Exception): Snapshot.create(self.apiclient, volumes[0].id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, ) # Get the Root disk of VM @@ -538,8 +538,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): # Create a snapshot from the ROOTDISK (Account 2) snapshot_2 = Snapshot.create(self.apiclient, volumes[0].id, - account=self.account_2.account.name, - domainid=self.account_2.account.domainid, + account=self.account_2.name, + domainid=self.account_2.domainid, ) self.cleanup.append(snapshot_2) # Verify Snapshot state @@ -556,8 +556,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): # Create a second snapshot from the ROOTDISK (Account 2) snapshot_3 = Snapshot.create(self.apiclient, volumes[0].id, - account=self.account_2.account.name, - domainid=self.account_2.account.domainid, + account=self.account_2.name, + domainid=self.account_2.domainid, ) self.cleanup.append(snapshot_3) # Verify Snapshot state @@ -587,25 +587,25 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Updating volume resource limit for account: %s" % - self.account_1.account.name) + self.account_1.name) # Set usage_vm=1 for Account 1 update_resource_limit( self.apiclient, 2, # Volume - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, max=2 ) self.debug( - "Deploying VM for account: %s" % self.account_1.account.name) + "Deploying VM for account: %s" % self.account_1.name) virtual_machine_1 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_1.account.name, - 
domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_1) @@ -617,15 +617,15 @@ class TestResourceLimitsAccount(cloudstackTestCase): ) self.debug( - "Deploying VM for account: %s" % self.account_2.account.name) + "Deploying VM for account: %s" % self.account_2.name) # Create VM for second account virtual_machine_2 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_2) @@ -637,13 +637,13 @@ class TestResourceLimitsAccount(cloudstackTestCase): ) self.debug( - "Create a data volume for account: %s" % self.account_1.account.name) + "Create a data volume for account: %s" % self.account_1.name) volume_1 = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, diskofferingid=self.disk_offering.id ) self.cleanup.append(volume_1) @@ -663,20 +663,20 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.apiclient, self.services["volume"], zoneid=self.zone.id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, diskofferingid=self.disk_offering.id ) self.debug( - "Create a data volume for account: %s" % self.account_2.account.name) + "Create a data volume for account: %s" % self.account_2.name) # Create volume for Account 2 volume_2 = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, - account=self.account_2.account.name, - domainid=self.account_2.account.domainid, + account=self.account_2.name, + domainid=self.account_2.domainid, diskofferingid=self.disk_offering.id ) self.cleanup.append(volume_2) @@ -691,14 +691,14 @@ class TestResourceLimitsAccount(cloudstackTestCase): ) self.debug( - "Create a data volume for account: %s" % self.account_2.account.name) + "Create a data volume for account: %s" % self.account_2.name) # Create a second volume from the ROOTDISK (Account 2) volume_3 = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, - account=self.account_2.account.name, - domainid=self.account_2.account.domainid, + account=self.account_2.name, + domainid=self.account_2.domainid, diskofferingid=self.disk_offering.id ) self.cleanup.append(volume_3) @@ -727,25 +727,25 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Updating template resource limit for account: %s" % - self.account_1.account.name) + self.account_1.name) # Set usage_vm=1 for Account 1 update_resource_limit( self.apiclient, 4, # Template - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, max=1 ) self.debug( "Updating volume resource limit for account: %s" % - self.account_1.account.name) + self.account_1.name) virtual_machine_1 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_1.account.name, - domainid=self.account_1.account.domainid, + accountid=self.account_1.name, + domainid=self.account_1.domainid, 
serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_1) @@ -758,14 +758,14 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.debug( "Deploying virtual machine for account: %s" % - self.account_2.account.name) + self.account_2.name) # Create VM for second account virtual_machine_2 = VirtualMachine.create( self.apiclient, self.services["server"], templateid=self.template.id, - accountid=self.account_2.account.name, - domainid=self.account_2.account.domainid, + accountid=self.account_2.name, + domainid=self.account_2.domainid, serviceofferingid=self.service_offering.id ) self.cleanup.append(virtual_machine_2) @@ -798,8 +798,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.apiclient, self.services["template"], volumeid=volume.id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, ) self.cleanup.append(template_1) @@ -816,8 +816,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.apiclient, self.services["template"], volumeid=volume.id, - account=self.account_1.account.name, - domainid=self.account_1.account.domainid, + account=self.account_1.name, + domainid=self.account_1.domainid, ) virtual_machine_2.stop(self.apiclient) # Get the Root disk of VM @@ -841,8 +841,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.apiclient, self.services["template"], volumeid=volume.id, - account=self.account_2.account.name, - domainid=self.account_2.account.domainid, + account=self.account_2.name, + domainid=self.account_2.domainid, ) self.cleanup.append(template_2) @@ -859,8 +859,8 @@ class TestResourceLimitsAccount(cloudstackTestCase): self.apiclient, self.services["template"], volumeid=volume.id, - account=self.account_2.account.name, - domainid=self.account_2.account.domainid, + account=self.account_2.name, + domainid=self.account_2.domainid, ) self.cleanup.append(template_3) diff --git a/test/integration/component/test_storage_motion.py b/test/integration/component/test_storage_motion.py index c05d79e6861..cf110d34e61 100644 --- a/test/integration/component/test_storage_motion.py +++ b/test/integration/component/test_storage_motion.py @@ -178,6 +178,12 @@ class TestStorageMotion(cloudstackTestCase): # Migrate to a host that requires storage motion hosts[:] = [host for host in hosts if host.requiresStorageMotion] + self.assert_(hosts is not None, msg="No valid hosts for storage motion") + self.assert_(len(hosts)>0, msg="No valid hosts for storage motion. Skipping") + if hosts is None or len(hosts) == 0: + self.skipTest("No valid hosts for storage motion. 
Skipping") + + host = hosts[0] self.debug("Migrating VM-ID: %s to Host: %s" % ( self.virtual_machine.id, diff --git a/test/integration/component/test_vm_passwdenabled.py b/test/integration/component/test_vm_passwdenabled.py index 65b068dc2d2..e89253c407a 100644 --- a/test/integration/component/test_vm_passwdenabled.py +++ b/test/integration/component/test_vm_passwdenabled.py @@ -83,7 +83,7 @@ class TestVMPasswordEnabled(cloudstackTestCase): # Get Zone, Domain and templates domain = get_domain(cls.api_client, cls.services) zone = get_zone(cls.api_client, cls.services) - cls.services['mode'] = cls.zone.networktype + cls.services['mode'] = zone.networktype template = get_template( cls.api_client, zone.id, diff --git a/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py new file mode 100644 index 00000000000..d904a4cb7d8 --- /dev/null +++ b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py @@ -0,0 +1,164 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#!/usr/bin/env python + +import marvin +from marvin import cloudstackTestCase +from marvin.cloudstackTestCase import * + +import unittest +import hashlib +import random + +class TestDeployVmWithVariedPlanners(cloudstackTestCase): + """ + This test tests that we can create serviceOfferings with different deployment Planners and deploy virtual machines into a user account + using these service offerings and builtin template + """ + def setUp(self): + """ + CloudStack internally saves its passwords in md5 form and that is how we + specify it in the API. Python's hashlib library helps us to quickly hash + strings as follows + """ + mdf = hashlib.md5() + mdf.update('password') + mdf_pass = mdf.hexdigest() + + self.apiClient = self.testClient.getApiClient() #Get ourselves an API client + + self.acct = createAccount.createAccountCmd() #The createAccount command + self.acct.accounttype = 0 #We need a regular user. admins have accounttype=1 + self.acct.firstname = 'test' + self.acct.lastname = 'user' #What's up doc? 
+ self.acct.username = 'testuser' + self.acct.password = mdf_pass #The md5 hashed password string + self.acct.email = 'test@domain.com' + self.acct.account = 'testacct' + self.acct.domainid = 1 #The default ROOT domain + self.acctResponse = self.apiClient.createAccount(self.acct) + # And upon successful creation we'll log a helpful message in our logs + # using the default debug logger of the test framework + self.debug("successfully created account: %s, id: \ + %s"%(self.acctResponse.name, \ + self.acctResponse.id)) + + #Create service offerings with varied planners + self.svcOfferingFirstFit = createServiceOffering.createServiceOfferingCmd() + self.svcOfferingFirstFit.name = 'Tiny Instance FirstFit' + self.svcOfferingFirstFit.displaytext = 'Tiny Instance with FirstFitPlanner' + self.svcOfferingFirstFit.cpuspeed = 100 + self.svcOfferingFirstFit.cpunumber = 1 + self.svcOfferingFirstFit.memory = 256 + self.svcOfferingFirstFit.deploymentplanner = 'FirstFitPlanner' + self.svcOfferingFirstFitResponse = self.apiClient.createServiceOffering(self.svcOfferingFirstFit) + + self.debug("successfully created serviceoffering name: %s, id: \ + %s, deploymentPlanner: %s"%(self.svcOfferingFirstFitResponse.name, \ + self.svcOfferingFirstFitResponse.id,self.svcOfferingFirstFitResponse.deploymentplanner)) + + #Create service offerings with varied planners + self.svcOfferingUserDispersing = createServiceOffering.createServiceOfferingCmd() + self.svcOfferingUserDispersing.name = 'Tiny Instance UserDispersing' + self.svcOfferingUserDispersing.displaytext = 'Tiny Instance with UserDispersingPlanner' + self.svcOfferingUserDispersing.cpuspeed = 100 + self.svcOfferingUserDispersing.cpunumber = 1 + self.svcOfferingUserDispersing.memory = 256 + self.svcOfferingUserDispersing.deploymentplanner = 'UserDispersingPlanner' + self.svcOfferingUserDispersingResponse = self.apiClient.createServiceOffering(self.svcOfferingUserDispersing) + + self.debug("successfully created serviceoffering name: %s, id: \ + %s, deploymentPlanner: %s"%(self.svcOfferingUserDispersingResponse.name, \ + self.svcOfferingUserDispersingResponse.id,self.svcOfferingUserDispersingResponse.deploymentplanner)) + + def test_DeployVm(self): + """ + Let's start by defining the attributes of our VM that we will be + deploying on CloudStack. We will be assuming a single zone is available + and is configured and all templates are Ready + + The hardcoded values are used only for brevity. + """ + deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd() + deployVmCmd.zoneid = 1 + deployVmCmd.account = self.acct.account + deployVmCmd.domainid = self.acct.domainid + deployVmCmd.templateid = 5 #For default template- CentOS 5.6(64 bit) + deployVmCmd.serviceofferingid = self.svcOfferingFirstFitResponse.id + + deployVmResponse = self.apiClient.deployVirtualMachine(deployVmCmd) + self.debug("VM %s was deployed in the job %s"%(deployVmResponse.id, deployVmResponse.jobid)) + + # At this point our VM is expected to be Running.
Let's find out what + # listVirtualMachines tells us about VMs in this account + + listVmCmd = listVirtualMachines.listVirtualMachinesCmd() + listVmCmd.id = deployVmResponse.id + listVmResponse = self.apiClient.listVirtualMachines(listVmCmd) + + self.assertNotEqual(len(listVmResponse), 0, "Check if the list API \ + returns a non-empty response") + + vm1 = listVmResponse[0] + + self.assertEqual(vm1.id, deployVmResponse.id, "Check if the VM returned \ + is the same as the one we deployed") + self.assertEqual(vm1.state, "Running", "Check if VM has reached \ + a state of running") + + + deployVm2Cmd = deployVirtualMachine.deployVirtualMachineCmd() + deployVm2Cmd.zoneid = 1 + deployVm2Cmd.account = self.acct.account + deployVm2Cmd.domainid = self.acct.domainid + deployVm2Cmd.templateid = 5 #For default template- CentOS 5.6(64 bit) + deployVm2Cmd.serviceofferingid = self.svcOfferingFirstFitResponse.id + + deployVm2Response = self.apiClient.deployVirtualMachine(deployVm2Cmd) + self.debug("VM %s was deployed in the job %s"%(deployVm2Response.id, deployVm2Response.jobid)) + + # At this point our VM is expected to be Running. Let's find out what + # listVirtualMachines tells us about VMs in this account + + listVm2Cmd = listVirtualMachines.listVirtualMachinesCmd() + listVm2Cmd.id = deployVm2Response.id + listVm2Response = self.apiClient.listVirtualMachines(listVm2Cmd) + self.assertNotEqual(len(listVm2Response), 0, "Check if the list API \ + returns a non-empty response") + vm2 = listVm2Response[0] + self.assertEqual(vm2.id, deployVm2Response.id, "Check if the VM returned \ + is the same as the one we deployed") + self.assertEqual(vm2.state, "Running", "Check if VM has reached \ + a state of running") + + + def tearDown(self): # Teardown will delete the Account as well as the VM once the VM reaches "Running" state + """ + And finally let us cleanup the resources we created by deleting the + account. All good unittests are atomic and rerunnable this way + """ + deleteAcct = deleteAccount.deleteAccountCmd() + deleteAcct.id = self.acctResponse.id + self.apiClient.deleteAccount(deleteAcct) + deleteSvcOfferingFirstFit = deleteServiceOffering.deleteServiceOfferingCmd() + deleteSvcOfferingFirstFit.id = self.svcOfferingFirstFitResponse.id + self.apiClient.deleteServiceOffering(deleteSvcOfferingFirstFit); + deleteSvcOfferingUserDispersing = deleteServiceOffering.deleteServiceOfferingCmd() + deleteSvcOfferingUserDispersing.id = self.svcOfferingUserDispersingResponse.id + self.apiClient.deleteServiceOffering(deleteSvcOfferingUserDispersing); + \ No newline at end of file diff --git a/test/integration/smoke/test_pvlan.py b/test/integration/smoke/test_pvlan.py new file mode 100644 index 00000000000..4eb76e1cdb7 --- /dev/null +++ b/test/integration/smoke/test_pvlan.py @@ -0,0 +1,86 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" test for private vlan isolation
+"""
+#Import Local Modules
+import marvin
+from marvin.cloudstackTestCase import *
+from marvin.cloudstackAPI import *
+from marvin import remoteSSHClient
+from marvin.integration.lib.utils import *
+from marvin.integration.lib.base import *
+from marvin.integration.lib.common import *
+from nose.plugins.attrib import attr
+import telnetlib
+
+#Import System modules
+import time
+_multiprocess_shared_ = True
+
+class TestPVLAN(cloudstackTestCase):
+
+    zoneId = 1
+    networkOfferingId = 7
+    vlan = 1234
+    isolatedpvlan = 567
+
+    def setUp(self):
+        self.apiClient = self.testClient.getApiClient()
+
+    def test_create_pvlan_network(self):
+        self.debug("Test create pvlan network")
+        createNetworkCmd = createNetwork.createNetworkCmd()
+        createNetworkCmd.name = "pvlan network"
+        createNetworkCmd.displaytext = "pvlan network"
+        createNetworkCmd.netmask = "255.255.255.0"
+        createNetworkCmd.gateway = "10.10.10.1"
+        createNetworkCmd.startip = "10.10.10.10"
+        createNetworkCmd.endip = "10.10.10.20"
+        createNetworkCmd.vlan = "1234"
+        createNetworkCmd.isolatedpvlan = "567"
+        createNetworkCmd.zoneid = self.zoneId
+        createNetworkCmd.networkofferingid = self.networkOfferingId
+        createNetworkResponse = self.apiClient.createNetwork(createNetworkCmd)
+        self.networkId = createNetworkResponse.id
+        self.broadcasttype = createNetworkResponse.broadcastdomaintype
+        self.broadcasturi = createNetworkResponse.broadcasturi
+
+        self.assertIsNotNone(createNetworkResponse.id, "Network failed to create")
+        self.assertEqual(createNetworkResponse.broadcastdomaintype, "Pvlan", "Broadcast domain type should be Pvlan")
+        self.assertEqual(createNetworkResponse.broadcasturi, "pvlan://1234-i567", "Unexpected broadcast URI")
+
+        self.debug("Clean up test pvlan network")
+        deleteNetworkCmd = deleteNetwork.deleteNetworkCmd()
+        deleteNetworkCmd.id = self.networkId
+        self.apiClient.deleteNetwork(deleteNetworkCmd)
+
+        # Test invalid parameters
+
+        # CLOUDSTACK-2392: Should not allow creating a pvlan network with ipv6
+        createNetworkCmd.ip6gateway="fc00:1234::1"
+        createNetworkCmd.ip6cidr="fc00:1234::/64"
+        createNetworkCmd.startipv6="fc00:1234::10"
+        createNetworkCmd.endipv6="fc00:1234::20"
+        err = 0
+        try:
+            createNetworkResponse = self.apiClient.createNetwork(createNetworkCmd)
+        except Exception as e:
+            err = 1
+            self.debug("Try alloc with ipv6, got: %s" % e)
+        self.assertEqual(err, 1, "Should not allow creating a PVLAN network with IPv6")
+
+
diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py
index 476855671f0..c01bf475050 100644
--- a/tools/apidoc/gen_toc.py
+++ b/tools/apidoc/gen_toc.py
@@ -146,6 +146,7 @@ known_categories = {
     'deleteImageStore': 'Image Store',
     'createCacheStore': 'Image Store',
     'InternalLoadBalancer': 'Internal LB',
+    'DeploymentPlanners': 'Configuration',
 }
diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh
index c39d38a4e76..0216c067a45 100644
--- a/tools/appliance/build.sh
+++ b/tools/appliance/build.sh
@@ -34,7 +34,7 @@ bundle
 # Clean and start building the appliance
 veewee vbox destroy $appliance
-veewee vbox build $appliance --nogui
+veewee vbox build $appliance --nogui --auto
 veewee vbox halt $appliance

 while [[ `vboxmanage list runningvms | grep $appliance | wc -l` -ne 0 ]];
diff --git a/ui/scripts/system.js b/ui/scripts/system.js
index 527d80b16c3..ea443717fec 100644
--- a/ui/scripts/system.js
+++ b/ui/scripts/system.js
@@ -1300,6 +1300,7 @@
                name: { label: 'label.name' },
                type: { label: 'label.type' },
                vlan: { label:
'label.vlan.id' }, + broadcasturi: { label: 'broadcast URI' }, cidr: { label: 'IPv4 CIDR' }, ip6cidr: { label: 'IPv6 CIDR'} //scope: { label: 'label.scope' } @@ -1335,7 +1336,10 @@ label: 'label.vlan.id', docID: 'helpGuestNetworkZoneVLANID' }, - + isolatedpvlanId: { + label: 'Private VLAN ID' + }, + scope: { label: 'label.scope', docID: 'helpGuestNetworkZoneScope', @@ -1549,11 +1553,15 @@ if(this.id == selectedNetworkOfferingId) { if(this.specifyvlan == false) { $form.find('.form-item[rel=vlanId]').hide(); - cloudStack.dialog.createFormField.validation.required.remove($form.find('.form-item[rel=vlanId]')); //make vlanId optional + cloudStack.dialog.createFormField.validation.required.remove($form.find('.form-item[rel=vlanId]')); //make vlanId optional + + $form.find('.form-item[rel=isolatedpvlanId]').hide(); } else { $form.find('.form-item[rel=vlanId]').css('display', 'inline-block'); - cloudStack.dialog.createFormField.validation.required.add($form.find('.form-item[rel=vlanId]')); //make vlanId required + cloudStack.dialog.createFormField.validation.required.add($form.find('.form-item[rel=vlanId]')); //make vlanId required + + $form.find('.form-item[rel=isolatedpvlanId]').css('display', 'inline-block'); } return false; //break each loop } @@ -1639,7 +1647,10 @@ if(($form.find('.form-item[rel=vlanId]').css("display") != "none") && (args.data.vlanId != null && args.data.vlanId.length > 0)) array1.push("&vlan=" + todb(args.data.vlanId)); - + + if(($form.find('.form-item[rel=isolatedpvlanId]').css("display") != "none") && (args.data.isolatedpvlanId != null && args.data.isolatedpvlanId.length > 0)) + array1.push("&isolatedpvlan=" + todb(args.data.isolatedpvlanId)); + if($form.find('.form-item[rel=domainId]').css("display") != "none") { array1.push("&domainId=" + args.data.domainId); @@ -2007,6 +2018,7 @@ } }, vlan: { label: 'label.vlan.id' }, + broadcasturi: { label: 'broadcast URI' }, scope: { label: 'label.scope' }, networkofferingdisplaytext: { label: 'label.network.offering' }, networkofferingid: { diff --git a/utils/src/com/cloud/utils/net/NetUtils.java b/utils/src/com/cloud/utils/net/NetUtils.java index 9f28d5b36b2..9551c262e54 100755 --- a/utils/src/com/cloud/utils/net/NetUtils.java +++ b/utils/src/com/cloud/utils/net/NetUtils.java @@ -24,6 +24,7 @@ import java.net.InetAddress; import java.net.InterfaceAddress; import java.net.NetworkInterface; import java.net.SocketException; +import java.net.URI; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Formatter; @@ -1294,4 +1295,29 @@ public class NetUtils { } return resultIp; } + + public static URI generateUriForPvlan(String primaryVlan, String isolatedPvlan) { + return URI.create("pvlan://" + primaryVlan + "-i" + isolatedPvlan); + } + + public static String getPrimaryPvlanFromUri(URI uri) { + String[] vlans = uri.getHost().split("-"); + if (vlans.length < 1) { + return null; + } + return vlans[0]; + } + + public static String getIsolatedPvlanFromUri(URI uri) { + String[] vlans = uri.getHost().split("-"); + if (vlans.length < 2) { + return null; + } + for (String vlan : vlans) { + if (vlan.startsWith("i")) { + return vlan.replace("i", " ").trim(); + } + } + return null; + } } diff --git a/utils/test/com/cloud/utils/net/NetUtilsTest.java b/utils/test/com/cloud/utils/net/NetUtilsTest.java index 28bd71f18d7..16d3402f0e6 100644 --- a/utils/test/com/cloud/utils/net/NetUtilsTest.java +++ b/utils/test/com/cloud/utils/net/NetUtilsTest.java @@ -17,6 +17,7 @@ package com.cloud.utils.net; import java.math.BigInteger; 
+import java.net.URI; import java.util.SortedSet; import java.util.TreeSet; @@ -128,4 +129,11 @@ public class NetUtilsTest extends TestCase { assertFalse(NetUtils.isIp6InRange("1234:5678:abcd::1", null)); assertTrue(NetUtils.isIp6InRange("1234:5678:abcd::1", "1234:5678::1-1234:5679::1")); } + + public void testPvlan() { + URI uri = NetUtils.generateUriForPvlan("123", "456"); + assertTrue(uri.toString().equals("pvlan://123-i456")); + assertTrue(NetUtils.getPrimaryPvlanFromUri(uri).equals("123")); + assertTrue(NetUtils.getIsolatedPvlanFromUri(uri).equals("456")); + } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java index 247be2a5fab..b00b97ca3ae 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java @@ -17,13 +17,20 @@ package com.cloud.hypervisor.vmware.mo; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.DVPortgroupConfigSpec; +import com.vmware.vim25.DVSConfigInfo; import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.TaskInfo; +import com.vmware.vim25.VMwareDVSConfigInfo; +import com.vmware.vim25.VMwareDVSConfigSpec; +import com.vmware.vim25.VMwareDVSPvlanMapEntry; public class DistributedVirtualSwitchMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(DistributedVirtualSwitchMO.class); @@ -46,4 +53,74 @@ public class DistributedVirtualSwitchMO extends BaseMO { // TODO(sateesh): Update numPorts _context.getService().reconfigureDVPortgroupTask(dvPortGroupMor, dvPortGroupSpec); } + + public void updateVMWareDVSwitch(ManagedObjectReference dvSwitchMor, VMwareDVSConfigSpec dvsSpec) throws Exception { + _context.getService().reconfigureDvsTask(dvSwitchMor, dvsSpec); + } + + public TaskInfo updateVMWareDVSwitchGetTask(ManagedObjectReference dvSwitchMor, VMwareDVSConfigSpec dvsSpec) throws Exception { + ManagedObjectReference task = _context.getService().reconfigureDvsTask(dvSwitchMor, dvsSpec); + TaskInfo info = (TaskInfo) (_context.getVimClient().getDynamicProperty(task, "info")); + boolean waitvalue = _context.getVimClient().waitForTask(task); + return info; + } + + public String getDVSConfigVersion(ManagedObjectReference dvSwitchMor) throws Exception { + assert (dvSwitchMor != null); + DVSConfigInfo dvsConfigInfo = (DVSConfigInfo)_context.getVimClient().getDynamicProperty(dvSwitchMor, "config"); + return dvsConfigInfo.getConfigVersion(); + } + + public Map retrieveVlanPvlan(int vlanid, int secondaryvlanid, ManagedObjectReference dvSwitchMor) throws Exception { + assert (dvSwitchMor != null); + + Map result = new HashMap(); + + VMwareDVSConfigInfo configinfo = (VMwareDVSConfigInfo)_context.getVimClient().getDynamicProperty(dvSwitchMor, "config"); + List pvlanconfig = null; + pvlanconfig = configinfo.getPvlanConfig(); + + if (null == pvlanconfig || 0 == pvlanconfig.size()) { + return result; + } + // Iterate through the pvlanMapList and check if the specified vlan id and pvlan id exist. If they do, set the fields in result accordingly. 
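+        // For illustration (not exhaustive, assuming only these entries exist on the switch): if the
+        // DVS already carries the map entries (primary=100, secondary=100, promiscuous) and
+        // (primary=100, secondary=200, isolated), then retrieveVlanPvlan(100, 200, mor) returns
+        // {100=promiscuous, 200=isolated}, while retrieveVlanPvlan(200, 300, mor) returns
+        // {200=isolated}, telling the caller that 200 cannot be reused as a primary (promiscuous) VLAN.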
+ + for (VMwareDVSPvlanMapEntry mapEntry : pvlanconfig) { + int entryVlanid = mapEntry.getPrimaryVlanId(); + int entryPvlanid = mapEntry.getSecondaryVlanId(); + if (entryVlanid == entryPvlanid) { + // promiscuous + if (vlanid == entryVlanid) { + // pvlan type will always be promiscuous in this case. + result.put(vlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } else if ((vlanid != secondaryvlanid) && secondaryvlanid == entryVlanid) { + result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } + } else { + if (vlanid == entryVlanid) { + // vlan id in entry is promiscuous + result.put(vlanid, HypervisorHostHelper.PvlanType.promiscuous); + } else if (vlanid == entryPvlanid) { + result.put(vlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } + if ((vlanid != secondaryvlanid) && secondaryvlanid == entryVlanid) { + //promiscuous + result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.promiscuous); + } else if (secondaryvlanid == entryPvlanid) { + result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } + + } + // If we already know that the vlanid is being used as a non primary vlan, it's futile to + // go over the entire list. Return. + if (result.containsKey(vlanid) && result.get(vlanid) != HypervisorHostHelper.PvlanType.promiscuous) + return result; + + // If we've already found both vlanid and pvlanid, we have enough info to make a decision. Return. + if (result.containsKey(vlanid) && result.containsKey(secondaryvlanid)) + return result; + } + return result; + } + } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 7f323c5e400..20f84784157 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -39,6 +39,7 @@ import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.SwitchPortMode; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.BoolPolicy; import com.vmware.vim25.DVPortSetting; import com.vmware.vim25.DVPortgroupConfigInfo; @@ -59,7 +60,11 @@ import com.vmware.vim25.ObjectContent; import com.vmware.vim25.OvfCreateImportSpecParams; import com.vmware.vim25.OvfCreateImportSpecResult; import com.vmware.vim25.OvfFileItem; +import com.vmware.vim25.TaskInfo; +import com.vmware.vim25.VMwareDVSConfigSpec; import com.vmware.vim25.VMwareDVSPortSetting; +import com.vmware.vim25.VMwareDVSPvlanConfigSpec; +import com.vmware.vim25.VMwareDVSPvlanMapEntry; import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualDeviceConfigSpecOperation; import com.vmware.vim25.VirtualLsiLogicController; @@ -67,6 +72,7 @@ import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineFileInfo; import com.vmware.vim25.VirtualMachineVideoCard; import com.vmware.vim25.VirtualSCSISharing; +import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec; @@ -124,12 +130,17 @@ public class HypervisorHostHelper { } } - public static String composeCloudNetworkName(String prefix, String vlanId, Integer networkRateMbps, String vSwitchName) { + public 
static String composeCloudNetworkName(String prefix, String vlanId, String svlanId, Integer networkRateMbps, String vSwitchName) { StringBuffer sb = new StringBuffer(prefix); - if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) + if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { sb.append(".untagged"); - else + } else { sb.append(".").append(vlanId); + if (svlanId != null) { + sb.append(".").append("s" + svlanId); + } + + } if(networkRateMbps != null && networkRateMbps.intValue() > 0) sb.append(".").append(String.valueOf(networkRateMbps)); @@ -412,7 +423,7 @@ public class HypervisorHostHelper { */ public static Pair prepareNetwork(String physicalNetwork, String namePrefix, - HostMO hostMo, String vlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs, + HostMO hostMo, String vlanId, String secondaryvlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs, VirtualSwitchType vSwitchType, int numPorts, String gateway, boolean configureVServiceInNexus) throws Exception { ManagedObjectReference morNetwork = null; VmwareContext context = hostMo.getContext(); @@ -428,20 +439,28 @@ public class HypervisorHostHelper { boolean createGCTag = false; String networkName; Integer vid = null; + Integer spvlanid = null; // secondary pvlan id if(vlanId != null && !UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { createGCTag = true; vid = Integer.parseInt(vlanId); } - networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, physicalNetwork); + if (secondaryvlanId != null) { + spvlanid = Integer.parseInt(secondaryvlanId); + } + networkName = composeCloudNetworkName(namePrefix, vlanId, secondaryvlanId, networkRateMbps, physicalNetwork); if (vSwitchType == VirtualSwitchType.VMwareDistributedVirtualSwitch) { + VMwareDVSConfigSpec dvsSpec = null; DVSTrafficShapingPolicy shapingPolicy; - VmwareDistributedVirtualSwitchVlanSpec vlanSpec; + VmwareDistributedVirtualSwitchVlanSpec vlanSpec = null; + VmwareDistributedVirtualSwitchPvlanSpec pvlanSpec = null; + //VMwareDVSPvlanConfigSpec pvlanSpec = null; DVSSecurityPolicy secPolicy; VMwareDVSPortSetting dvsPortSetting; DVPortgroupConfigSpec dvPortGroupSpec; DVPortgroupConfigInfo dvPortgroupInfo; + //DVSConfigInfo dvsInfo; dvSwitchName = physicalNetwork; // TODO(sateesh): Remove this after ensuring proper default value for vSwitchName throughout traffic types @@ -462,13 +481,95 @@ public class HypervisorHostHelper { dvSwitchMo = new DistributedVirtualSwitchMO(context, morDvSwitch); shapingPolicy = getDVSShapingPolicy(networkRateMbps); - if (vid != null) { - vlanSpec = createDVPortVlanIdSpec(vid); - } else { - vlanSpec = createDVPortVlanSpec(); - } secPolicy = createDVSSecurityPolicy(); + + // First, if both vlan id and pvlan id are provided, we need to + // reconfigure the DVSwitch to have a tuple of + // type isolated. + if (vid != null && spvlanid != null) { + // First check if the vlan/pvlan pair already exists on this dvswitch. + + Map vlanmap = dvSwitchMo.retrieveVlanPvlan(vid, spvlanid, morDvSwitch); + if (vlanmap.size() != 0) { + // Then either vid or pvlanid or both are already being used. + if (vlanmap.containsKey(vid) && vlanmap.get(vid) != HypervisorHostHelper.PvlanType.promiscuous) { + // This VLAN ID is already setup as a non-promiscuous vlan id on the DVS. Throw an exception. 
+ String msg = "VLAN ID " + vid + " is already in use as a " + vlanmap.get(vid).toString() + " VLAN on the DVSwitch"; + s_logger.error(msg); + throw new Exception(msg); + } + if ((vid != spvlanid) && vlanmap.containsKey(spvlanid) && vlanmap.get(spvlanid) != HypervisorHostHelper.PvlanType.isolated) { + // This PVLAN ID is already setup as a non-isolated vlan id on the DVS. Throw an exception. + String msg = "PVLAN ID " + spvlanid + " is already in use as a " + vlanmap.get(spvlanid).toString() + " VLAN in the DVSwitch"; + s_logger.error(msg); + throw new Exception(msg); + } + } + + // First create a DVSconfig spec. + dvsSpec = new VMwareDVSConfigSpec(); + // Next, add the required primary and secondary vlan config specs to the dvs config spec. + if (!vlanmap.containsKey(vid)) { + VMwareDVSPvlanConfigSpec ppvlanConfigSpec = createDVPortPvlanConfigSpec(vid, vid, PvlanType.promiscuous, PvlanOperation.add); + dvsSpec.getPvlanConfigSpec().add(ppvlanConfigSpec); + } + if ( !vid.equals(spvlanid) && !vlanmap.containsKey(spvlanid)) { + VMwareDVSPvlanConfigSpec spvlanConfigSpec = createDVPortPvlanConfigSpec(vid, spvlanid, PvlanType.isolated, PvlanOperation.add); + dvsSpec.getPvlanConfigSpec().add(spvlanConfigSpec); + } + + if (dvsSpec.getPvlanConfigSpec().size() > 0) { + // We have something to configure on the DVS... so send it the command. + // When reconfiguring a vmware DVSwitch, we need to send in the configVersion in the spec. + // Let's retrieve this switch's configVersion first. + String dvsConfigVersion = dvSwitchMo.getDVSConfigVersion(morDvSwitch); + dvsSpec.setConfigVersion(dvsConfigVersion); + // Reconfigure the dvs using this spec. + + try { + TaskInfo reconfigTask = dvSwitchMo.updateVMWareDVSwitchGetTask(morDvSwitch, dvsSpec); + } catch (Exception e) { + if(e instanceof AlreadyExistsFaultMsg) { + s_logger.info("Specified vlan id (" + vid + ") private vlan id (" + spvlanid + ") tuple already configured on VMWare DVSwitch"); + // Do nothing, good if the tuple's already configured on the dvswitch. + } else { + // Rethrow the exception + s_logger.error("Failed to configure vlan/pvlan tuple on VMware DVSwitch: " + vid + "/" + spvlanid + ", failure message: " + e.getMessage()); + e.printStackTrace(); + throw e; + } + } + } + // Else the vlan/pvlan pair already exists on the DVSwitch, and we needn't configure it again. + } + + // Next, create the port group. For this, we need to create a VLAN spec. + if (vid == null) { + vlanSpec = createDVPortVlanSpec(); + } else { + if (spvlanid == null) { + // Create vlan spec. + vlanSpec = createDVPortVlanIdSpec(vid); + } else { + // Create a pvlan spec. The pvlan spec is different from the pvlan config spec + // that we created earlier. The pvlan config spec is used to configure the switch + // with a tuple. The pvlan spec is used + // to configure a port group (i.e., a network) with a secondary vlan id. We don't + // need to mention more than the secondary vlan id because one secondary vlan id + // can be associated with only one primary vlan id. Give vCenter the secondary vlan id, + // and it will find out the associated primary vlan id and do the rest of the + // port group configuration. + pvlanSpec = createDVPortPvlanIdSpec(spvlanid); + } + } + + // NOTE - VmwareDistributedVirtualSwitchPvlanSpec extends VmwareDistributedVirtualSwitchVlanSpec. 
+ if (pvlanSpec != null) { + dvsPortSetting = createVmwareDVPortSettingSpec(shapingPolicy, secPolicy, pvlanSpec); + } else { dvsPortSetting = createVmwareDVPortSettingSpec(shapingPolicy, secPolicy, vlanSpec); + } + dvPortGroupSpec = createDvPortGroupSpec(networkName, dvsPortSetting, numPorts); if (!dataCenterMo.hasDvPortGroup(networkName)) { @@ -627,7 +728,6 @@ public class HypervisorHostHelper { dvsPortSetting.setSecurityPolicy(secPolicy); dvsPortSetting.setInShapingPolicy(shapingPolicy); dvsPortSetting.setOutShapingPolicy(shapingPolicy); - return dvsPortSetting; } @@ -658,6 +758,35 @@ public class HypervisorHostHelper { return shapingPolicy; } + public static VmwareDistributedVirtualSwitchPvlanSpec createDVPortPvlanIdSpec(int pvlanId) { + VmwareDistributedVirtualSwitchPvlanSpec pvlanIdSpec = new VmwareDistributedVirtualSwitchPvlanSpec(); + pvlanIdSpec.setPvlanId(pvlanId); + return pvlanIdSpec; + } + + public enum PvlanOperation { + add, + edit, + remove + } + + public enum PvlanType { + promiscuous, + isolated, + community, // We don't use Community + } + + public static VMwareDVSPvlanConfigSpec createDVPortPvlanConfigSpec(int vlanId, int secondaryVlanId, PvlanType pvlantype, PvlanOperation operation) { + VMwareDVSPvlanConfigSpec pvlanConfigSpec = new VMwareDVSPvlanConfigSpec(); + VMwareDVSPvlanMapEntry map = new VMwareDVSPvlanMapEntry(); + map.setPvlanType(pvlantype.toString()); + map.setPrimaryVlanId(vlanId); + map.setSecondaryVlanId(secondaryVlanId); + pvlanConfigSpec.setPvlanEntry(map); + + pvlanConfigSpec.setOperation(operation.toString()); + return pvlanConfigSpec; + } public static VmwareDistributedVirtualSwitchVlanIdSpec createDVPortVlanIdSpec(int vlanId) { VmwareDistributedVirtualSwitchVlanIdSpec vlanIdSpec = new VmwareDistributedVirtualSwitchVlanIdSpec(); vlanIdSpec.setVlanId(vlanId); @@ -706,7 +835,7 @@ public class HypervisorHostHelper { vid = Integer.parseInt(vlanId); } - networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, vSwitchName); + networkName = composeCloudNetworkName(namePrefix, vlanId, null, networkRateMbps, vSwitchName); HostNetworkSecurityPolicy secPolicy = null; if (namePrefix.equalsIgnoreCase("cloud.private")) { secPolicy = new HostNetworkSecurityPolicy(); @@ -1036,6 +1165,7 @@ public class HypervisorHostHelper { context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate () { + @Override public void action(Long param) { progressReporter.reportProgress((int)(param * 100 / totalBytes)); }