diff --git a/.gitignore b/.gitignore index 016554a5f75..929227d8604 100644 --- a/.gitignore +++ b/.gitignore @@ -81,3 +81,4 @@ debian/*.debhelper replace.properties.tmp build-indep-stamp configure-stamp +*_flymake.js \ No newline at end of file diff --git a/INSTALL.md b/INSTALL.md index 4f93900ddfe..b0e1a7617b0 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,5 +1,5 @@ -This document describes how to develop, build, package and install Apache CloudStack -(Incubating). For more information please refer to the project's website: +This document describes how to develop, build, package and install Apache +CloudStack. For more information please refer to: http://cloudstack.apache.org @@ -10,7 +10,7 @@ Refer to the [wiki](http://cwiki.apache.org/confluence/display/CLOUDSTACK/Index) for the latest information, especially: - [Setting up development environment](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Setting+up+CloudStack+Development+Environment) for Apache CloudStack. - - [Building](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Building) Apache CloudStack. + - [Building](https://cwiki.apache.org/confluence/display/CLOUDSTACK/How+to+build+on+master+branch) Apache CloudStack. ## Setting up Development Environment @@ -21,12 +21,12 @@ Install tools and dependencies used for development: $ yum install git ant ant-devel java-1.6.0-openjdk java-1.6.0-openjdk-devel mysql mysql-server tomcat6 mkisofs gcc python MySQL-python openssh-clients wget -Set up Maven (3.0.4): +Set up Maven (3.0.5): - $ wget http://www.us.apache.org/dist/maven/maven-3/3.0.4/binaries/apache-maven-3.0.4-bin.tar.gz + $ wget http://www.us.apache.org/dist/maven/maven-3/3.0.5/binaries/apache-maven-3.0.5-bin.tar.gz $ cd /usr/local/ # or any path - $ tar -zxvf apache-maven-3.0.4-bin.tar.gz - $ echo export M2_HOME=/usr/local/apache-maven-3.0.4 >> ~/.bashrc # or .zshrc or .profile + $ tar -zxvf apache-maven-3.0.5-bin.tar.gz + $ echo export M2_HOME=/usr/local/apache-maven-3.0.5 >> ~/.bashrc # or .zshrc or .profile $ echo export PATH=${M2_HOME}/bin:${PATH} >> ~/.bashrc # or .zshrc or .profile Note: Tomcat 6.0.35 has some known issue with Apache CloudStack, please use Tomcat @@ -77,10 +77,10 @@ Start the MySQL service: You may get the source code from the repository hosted on Apache: - $ git clone https://git-wip-us.apache.org/repos/asf/cloudstack.git + $ git clone git://git.apache.org/cloudstack.git Or, you may fork a repository from the official Apache CloudStack mirror by -Apache on [Github](https://github.com/apache/incubator-cloudstack) +Apache on [Github](https://github.com/apache/cloudstack) To keep yourself updated on a branch, do: @@ -92,7 +92,6 @@ For example, for master: ## Building - Clean and build: $ mvn clean install -P systemvm,developer diff --git a/agent-simulator/tomcatconf/components-simulator.xml.in b/agent-simulator/tomcatconf/components-simulator.xml.in deleted file mode 100755 index 1b7df46ad2b..00000000000 --- a/agent-simulator/tomcatconf/components-simulator.xml.in +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - true - - - - - - - - - - - - - - - - - - - - - - - - - - - true - - - - diff --git a/agent/bindir/cloud-setup-agent.in b/agent/bindir/cloud-setup-agent.in index 5e2ba09406d..6932672b962 100755 --- a/agent/bindir/cloud-setup-agent.in +++ b/agent/bindir/cloud-setup-agent.in @@ -95,6 +95,11 @@ if __name__ == '__main__': parser.add_option("--prvNic", dest="prvNic", help="Private traffic interface") parser.add_option("--guestNic", dest="guestNic", help="Guest traffic interface") + old_config = 
configFileOps("@AGENTSYSCONFDIR@/agent.properties") + bridgeType = old_config.getEntry("network.bridge.type").lower() + if bridgeType: + glbEnv.bridgeType = bridgeType + (options, args) = parser.parse_args() if options.auto is None: userInputs = getUserInputs() @@ -104,7 +109,7 @@ if __name__ == '__main__': glbEnv.pod = userInputs[3] glbEnv.cluster = userInputs[4] #generate UUID - glbEnv.uuid = configFileOps("@AGENTSYSCONFDIR@/agent.properties").getEntry("guid") + glbEnv.uuid = old_config.getEntry("guid") if glbEnv.uuid == "": glbEnv.uuid = bash("uuidgen").getStdout() else: diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 7dc4ba8a18c..60030ae4f11 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -84,6 +84,11 @@ domr.scripts.dir=scripts/network/domr/kvm # set the hypervisor type, values are: kvm, lxc # hypervisor.type=kvm +# set the hypervisor URI. Usually there is no need for changing this +# For KVM: qemu:///system +# For LXC: lxc:/// +# hypervisor.uri=qemu:///system + # settings to enable direct networking in libvirt, should not be used # on hosts that run system vms, values for mode are: private, bridge, vepa # libvirt.vif.driver=com.cloud.hypervisor.kvm.resource.DirectVifDriver diff --git a/api/src/com/cloud/agent/api/to/DnsmasqTO.java b/api/src/com/cloud/agent/api/to/DnsmasqTO.java index f99878c2fed..c7be04d4900 100644 --- a/api/src/com/cloud/agent/api/to/DnsmasqTO.java +++ b/api/src/com/cloud/agent/api/to/DnsmasqTO.java @@ -20,11 +20,14 @@ public class DnsmasqTO { String routerIp; String gateway; String netmask; + String startIpOfSubnet; - public DnsmasqTO(String routerIp, String gateway, String netmask) { + public DnsmasqTO(String routerIp, String gateway, String netmask, String StartIpOfSubnet) { this.routerIp = routerIp; + this.startIpOfSubnet = StartIpOfSubnet; this.gateway = gateway; this.netmask =netmask; + } public void setRouterIp(String routerIp){ @@ -39,6 +42,10 @@ public class DnsmasqTO { this.netmask = netmask ; } + public void setStartIpOfSubnet( String ipOfSubNet) { + startIpOfSubnet = ipOfSubNet; + } + public String getRouterIp() { return routerIp; } @@ -50,4 +57,8 @@ public class DnsmasqTO { public String getNetmask() { return netmask; } + public String getStartIpOfSubnet() { + return startIpOfSubnet; + } + } diff --git a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java index 2644c04971b..e6240ffda10 100644 --- a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java @@ -52,6 +52,7 @@ public class VirtualMachineTO { boolean rebootOnCrash; boolean enableHA; boolean limitCpuUse; + boolean enableDynamicallyScaleVm; String vncPassword; String vncAddr; Map params; @@ -102,6 +103,14 @@ public class VirtualMachineTO { this.id = id; } + public boolean isEnableDynamicallyScaleVm() { + return enableDynamicallyScaleVm; + } + + public void setEnableDynamicallyScaleVm(boolean enableDynamicallyScaleVm) { + this.enableDynamicallyScaleVm = enableDynamicallyScaleVm; + } + public String getName() { return name; } diff --git a/api/src/com/cloud/dc/DedicatedResources.java b/api/src/com/cloud/dc/DedicatedResources.java new file mode 100755 index 00000000000..e8e5ab3dffc --- /dev/null +++ b/api/src/com/cloud/dc/DedicatedResources.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.dc; + +import org.apache.cloudstack.acl.InfrastructureEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface DedicatedResources extends InfrastructureEntity, InternalIdentity, Identity{ + long getId(); + Long getDataCenterId(); + Long getPodId(); + Long getClusterId(); + Long getHostId(); + Long getDomainId(); + Long getAccountId(); + String getUuid(); + +} diff --git a/api/src/com/cloud/deploy/DeploymentClusterPlanner.java b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java index 1a19c71dbfa..8b15ea56e8f 100644 --- a/api/src/com/cloud/deploy/DeploymentClusterPlanner.java +++ b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java @@ -18,6 +18,7 @@ package com.cloud.deploy; import java.util.List; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -40,6 +41,7 @@ public interface DeploymentClusterPlanner extends DeploymentPlanner { List orderClusters(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException; - PlannerResourceUsage getResourceUsage(); + PlannerResourceUsage getResourceUsage(VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException; } diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index fcac8e8e65d..ed4ba1254f0 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -103,7 +103,7 @@ public class EventTypes { public static final String EVENT_NET_IP_ASSIGN = "NET.IPASSIGN"; public static final String EVENT_NET_IP_RELEASE = "NET.IPRELEASE"; public static final String EVENT_PORTABLE_IP_ASSIGN = "PORTABLE.IPASSIGN"; - public static final String EVENT_PORTABLE_IP_RELEASE = "PORTABLEIPRELEASE"; + public static final String EVENT_PORTABLE_IP_RELEASE = "PORTABLE.IPRELEASE"; public static final String EVENT_NET_RULE_ADD = "NET.RULEADD"; public static final String EVENT_NET_RULE_DELETE = "NET.RULEDELETE"; public static final String EVENT_NET_RULE_MODIFY = "NET.RULEMODIFY"; @@ -436,6 +436,11 @@ public class EventTypes { public static final String EVENT_PORTABLE_IP_RANGE_CREATE = "PORTABLE.IP.RANGE.CREATE"; public static final String EVENT_PORTABLE_IP_RANGE_DELETE = "PORTABLE.IP.RANGE.DELETE"; + public static final String EVENT_PORTABLE_IP_TRANSFER = "PORTABLE.IP.TRANSFER"; + + // Dedicated Resources + public static final String EVENT_DEDICATE_RESOURCE = "DEDICATE.RESOURCE"; + public static final String EVENT_DEDICATE_RESOURCE_RELEASE = "DEDICATE.RESOURCE.RELEASE"; static { diff --git a/api/src/com/cloud/network/NetworkService.java 
b/api/src/com/cloud/network/NetworkService.java index 59702a2864e..405cecd8847 100755 --- a/api/src/com/cloud/network/NetworkService.java +++ b/api/src/com/cloud/network/NetworkService.java @@ -55,7 +55,7 @@ public interface NetworkService { IpAddress allocatePortableIP(Account ipOwner, int regionId, Long zoneId, Long networkId, Long vpcId) throws ResourceAllocationException, InsufficientAddressCapacityException, ConcurrentOperationException; - boolean releasePortableIpAddress(long ipAddressId) throws InsufficientAddressCapacityException; + boolean releasePortableIpAddress(long ipAddressId); Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException; diff --git a/api/src/com/cloud/region/ha/GlobalLoadBalancingRulesService.java b/api/src/com/cloud/region/ha/GlobalLoadBalancingRulesService.java index 186faf75f78..9375544ac28 100644 --- a/api/src/com/cloud/region/ha/GlobalLoadBalancingRulesService.java +++ b/api/src/com/cloud/region/ha/GlobalLoadBalancingRulesService.java @@ -33,6 +33,9 @@ public interface GlobalLoadBalancingRulesService { GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalancerRuleCmd updateGslbCmd); + boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) + throws com.cloud.exception.ResourceUnavailableException; + /* * methods for managing sites participating in global load balancing */ diff --git a/api/src/com/cloud/user/AccountService.java b/api/src/com/cloud/user/AccountService.java index 903eebc5bf8..8153a3f1af6 100755 --- a/api/src/com/cloud/user/AccountService.java +++ b/api/src/com/cloud/user/AccountService.java @@ -16,22 +16,15 @@ // under the License. package com.cloud.user; -import java.util.List; import java.util.Map; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; -import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; -import org.apache.cloudstack.api.command.admin.user.RegisterCmd; -import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; - import org.apache.cloudstack.api.command.admin.user.RegisterCmd; import com.cloud.domain.Domain; import com.cloud.exception.PermissionDeniedException; -import com.cloud.utils.Pair; public interface AccountService { @@ -83,13 +76,11 @@ public interface AccountService { Account finalizeOwner(Account caller, String accountName, Long domainId, Long projectId); - Pair, Long> finalizeAccountDomainForList(Account caller, String accountName, Long domainId, Long projectId); - Account getActiveAccountByName(String accountName, Long domainId); - Account getActiveAccountById(Long accountId); + Account getActiveAccountById(long accountId); - Account getAccount(Long accountId); + Account getAccount(long accountId); User getActiveUser(long userId); diff --git a/api/src/com/cloud/vm/VmDiskStats.java b/api/src/com/cloud/vm/VmDiskStats.java new file mode 100644 index 00000000000..0cf82d0047d --- /dev/null +++ b/api/src/com/cloud/vm/VmDiskStats.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm; + +public interface VmDiskStats { + // vm related disk stats + + public Long getIORead(); + + public Long getIOWrite(); + + public Long getBytesRead(); + + public Long getBytesWrite(); + +} diff --git a/api/src/com/cloud/vm/VmStats.java b/api/src/com/cloud/vm/VmStats.java index 7d0bd61b9d8..d284db0f64a 100644 --- a/api/src/com/cloud/vm/VmStats.java +++ b/api/src/com/cloud/vm/VmStats.java @@ -23,5 +23,13 @@ public interface VmStats { public double getNetworkReadKBs(); public double getNetworkWriteKBs(); + + public double getDiskReadIOs(); + + public double getDiskWriteIOs(); + + public double getDiskReadKBs(); + + public double getDiskWriteKBs(); } diff --git a/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java b/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java index 26c32c89c1f..7423c4864d5 100644 --- a/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java +++ b/api/src/org/apache/cloudstack/affinity/AffinityGroupService.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.affinity; import java.util.List; -import com.cloud.exception.ResourceInUseException; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; @@ -45,10 +44,8 @@ public interface AffinityGroupService { * @param account * @param domainId * @param affinityGroupName - * @throws ResourceInUseException */ - boolean deleteAffinityGroup(Long affinityGroupId, String account, Long domainId, String affinityGroupName) - throws ResourceInUseException; + boolean deleteAffinityGroup(Long affinityGroupId, String account, Long domainId, String affinityGroupName); /** Lists Affinity Groups in your account * @param account diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index 1e9435f6a8e..ab1402ccde9 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -206,6 +206,7 @@ public class ApiConstants { public static final String STATE = "state"; public static final String STATUS = "status"; public static final String STORAGE_TYPE = "storagetype"; + public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled"; public static final String SYSTEM_VM_TYPE = "systemvmtype"; public static final String TAGS = "tags"; public static final String TARGET_IQN = "targetiqn"; @@ -304,6 +305,8 @@ public class ApiConstants { public static final String TEMPLATE_TAG = "templatetag"; public static final String HYPERVISOR_VERSION = "hypervisorversion"; public static final String MAX_GUESTS_LIMIT = "maxguestslimit"; + public static final String MAX_DATA_VOLUMES_LIMIT = "maxdatavolumeslimit"; + public static final String MAX_HOSTS_PER_CLUSTER = "maxhostspercluster"; public static final String PROJECT_ID = "projectid"; public static final String PROJECT_IDS = "projectids"; public static final String PROJECT = "project"; @@ -402,6 +405,7 @@ public class ApiConstants { 
public static final String VSM_CONFIG_MODE = "vsmconfigmode"; public static final String VSM_CONFIG_STATE = "vsmconfigstate"; public static final String VSM_DEVICE_STATE = "vsmdevicestate"; + public static final String VCENTER = "vcenter"; public static final String ADD_VSM_FLAG = "addvsmflag"; public static final String END_POINT = "endpoint"; public static final String REGION_ID = "regionid"; diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java index 5b3b264e4a1..1a8fdc79fb7 100644 --- a/api/src/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/org/apache/cloudstack/api/BaseCmd.java @@ -521,7 +521,7 @@ public abstract class BaseCmd { return project.getProjectAccountId(); } else { PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the project with specified projectId in state=" + project.getState() + " as it's no longer active"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(project.getUuid(), "projectId"); throw ex; } } else { diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java index 575a2ff97de..d8d07cb56fb 100644 --- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java @@ -16,30 +16,6 @@ // under the License. package org.apache.cloudstack.api; -import java.text.DecimalFormat; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; - -import com.cloud.vm.NicSecondaryIp; -import org.apache.cloudstack.affinity.AffinityGroup; -import org.apache.cloudstack.affinity.AffinityGroupResponse; -import com.cloud.network.vpc.NetworkACL; -import com.cloud.network.vpc.NetworkACLItem; -import com.cloud.network.vpc.PrivateGateway; -import com.cloud.network.vpc.StaticRoute; -import com.cloud.network.vpc.Vpc; -import com.cloud.network.vpc.VpcOffering; -import org.apache.cloudstack.api.ApiConstants.HostDetails; -import org.apache.cloudstack.api.ApiConstants.VMDetails; -import org.apache.cloudstack.api.command.user.job.QueryAsyncJobResultCmd; -import org.apache.cloudstack.api.response.*; -import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; -import org.apache.cloudstack.region.Region; -import org.apache.cloudstack.region.PortableIp; -import org.apache.cloudstack.region.PortableIpRange; -import org.apache.cloudstack.usage.Usage; - import com.cloud.async.AsyncJob; import com.cloud.capacity.Capacity; import com.cloud.configuration.Configuration; @@ -81,6 +57,12 @@ import com.cloud.network.rules.StaticNatRule; import com.cloud.network.rules.StickinessPolicy; import com.cloud.network.security.SecurityGroup; import com.cloud.network.security.SecurityRule; +import com.cloud.network.vpc.NetworkACL; +import com.cloud.network.vpc.NetworkACLItem; +import com.cloud.network.vpc.PrivateGateway; +import com.cloud.network.vpc.StaticRoute; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.VpcOffering; import com.cloud.offering.DiskOffering; import com.cloud.offering.NetworkOffering; import com.cloud.offering.ServiceOffering; @@ -91,7 +73,6 @@ import com.cloud.projects.ProjectInvitation; import com.cloud.region.ha.GlobalLoadBalancerRule; import com.cloud.server.ResourceTag; import com.cloud.storage.GuestOS; -import com.cloud.storage.ImageStore; import com.cloud.storage.S3; import com.cloud.storage.Snapshot; import com.cloud.storage.StoragePool; @@ -110,6 +91,108 @@ import com.cloud.vm.Nic; import 
com.cloud.vm.NicSecondaryIp; import com.cloud.vm.VirtualMachine; import com.cloud.vm.snapshot.VMSnapshot; +import org.apache.cloudstack.affinity.AffinityGroup; +import org.apache.cloudstack.affinity.AffinityGroupResponse; +import org.apache.cloudstack.api.ApiConstants.HostDetails; +import org.apache.cloudstack.api.ApiConstants.VMDetails; +import org.apache.cloudstack.api.command.user.job.QueryAsyncJobResultCmd; +import org.apache.cloudstack.api.response.AccountResponse; +import org.apache.cloudstack.api.response.ApplicationLoadBalancerResponse; +import org.apache.cloudstack.api.response.AsyncJobResponse; +import org.apache.cloudstack.api.response.AutoScalePolicyResponse; +import org.apache.cloudstack.api.response.AutoScaleVmGroupResponse; +import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; +import org.apache.cloudstack.api.response.CapacityResponse; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.ConditionResponse; +import org.apache.cloudstack.api.response.ConfigurationResponse; +import org.apache.cloudstack.api.response.CounterResponse; +import org.apache.cloudstack.api.response.CreateCmdResponse; +import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.DomainRouterResponse; +import org.apache.cloudstack.api.response.EventResponse; +import org.apache.cloudstack.api.response.ExtractResponse; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.cloudstack.api.response.GlobalLoadBalancerResponse; +import org.apache.cloudstack.api.response.GuestOSResponse; +import org.apache.cloudstack.api.response.GuestVlanRangeResponse; +import org.apache.cloudstack.api.response.HostForMigrationResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.HypervisorCapabilitiesResponse; +import org.apache.cloudstack.api.response.IPAddressResponse; +import org.apache.cloudstack.api.response.ImageStoreResponse; +import org.apache.cloudstack.api.response.InstanceGroupResponse; +import org.apache.cloudstack.api.response.InternalLoadBalancerElementResponse; +import org.apache.cloudstack.api.response.IpForwardingRuleResponse; +import org.apache.cloudstack.api.response.IsolationMethodResponse; +import org.apache.cloudstack.api.response.LBHealthCheckResponse; +import org.apache.cloudstack.api.response.LBStickinessResponse; +import org.apache.cloudstack.api.response.LDAPConfigResponse; +import org.apache.cloudstack.api.response.LoadBalancerResponse; +import org.apache.cloudstack.api.response.NetworkACLItemResponse; +import org.apache.cloudstack.api.response.NetworkACLResponse; +import org.apache.cloudstack.api.response.NetworkOfferingResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.NicResponse; +import org.apache.cloudstack.api.response.NicSecondaryIpResponse; +import org.apache.cloudstack.api.response.PhysicalNetworkResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.PortableIpRangeResponse; +import org.apache.cloudstack.api.response.PortableIpResponse; +import org.apache.cloudstack.api.response.PrivateGatewayResponse; +import org.apache.cloudstack.api.response.ProjectAccountResponse; +import org.apache.cloudstack.api.response.ProjectInvitationResponse; +import 
org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ProviderResponse; +import org.apache.cloudstack.api.response.RegionResponse; +import org.apache.cloudstack.api.response.RemoteAccessVpnResponse; +import org.apache.cloudstack.api.response.ResourceCountResponse; +import org.apache.cloudstack.api.response.ResourceLimitResponse; +import org.apache.cloudstack.api.response.ResourceTagResponse; +import org.apache.cloudstack.api.response.S3Response; +import org.apache.cloudstack.api.response.SecurityGroupResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.ServiceResponse; +import org.apache.cloudstack.api.response.Site2SiteCustomerGatewayResponse; +import org.apache.cloudstack.api.response.Site2SiteVpnConnectionResponse; +import org.apache.cloudstack.api.response.Site2SiteVpnGatewayResponse; +import org.apache.cloudstack.api.response.SnapshotPolicyResponse; +import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.SnapshotScheduleResponse; +import org.apache.cloudstack.api.response.StaticRouteResponse; +import org.apache.cloudstack.api.response.StorageNetworkIpRangeResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.api.response.SwiftResponse; +import org.apache.cloudstack.api.response.SystemVmInstanceResponse; +import org.apache.cloudstack.api.response.SystemVmResponse; +import org.apache.cloudstack.api.response.TemplatePermissionsResponse; +import org.apache.cloudstack.api.response.TemplateResponse; +import org.apache.cloudstack.api.response.TrafficMonitorResponse; +import org.apache.cloudstack.api.response.TrafficTypeResponse; +import org.apache.cloudstack.api.response.UsageRecordResponse; +import org.apache.cloudstack.api.response.UserResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.api.response.VMSnapshotResponse; +import org.apache.cloudstack.api.response.VirtualRouterProviderResponse; +import org.apache.cloudstack.api.response.VlanIpRangeResponse; +import org.apache.cloudstack.api.response.VolumeResponse; +import org.apache.cloudstack.api.response.VpcOfferingResponse; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.cloudstack.api.response.VpnUsersResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; +import org.apache.cloudstack.region.PortableIp; +import org.apache.cloudstack.region.PortableIpRange; +import org.apache.cloudstack.region.Region; +import org.apache.cloudstack.usage.Usage; + +import com.cloud.storage.ImageStore; +import java.text.DecimalFormat; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; public interface ResponseGenerator { UserResponse createUserResponse(UserAccount user); @@ -177,7 +260,7 @@ public interface ResponseGenerator { StoragePoolResponse createStoragePoolResponse(StoragePool pool); - StoragePoolForMigrationResponse createStoragePoolForMigrationResponse(StoragePool pool); + StoragePoolResponse createStoragePoolForMigrationResponse(StoragePool pool); ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities); diff --git a/api/src/org/apache/cloudstack/api/ServerApiException.java b/api/src/org/apache/cloudstack/api/ServerApiException.java index 4b0fae58548..1a740d56c90 100644 --- a/api/src/org/apache/cloudstack/api/ServerApiException.java +++ 
b/api/src/org/apache/cloudstack/api/ServerApiException.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import com.cloud.exception.CloudException; import com.cloud.utils.exception.CSExceptionErrorCode; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionProxyObject; @SuppressWarnings("serial") public class ServerApiException extends CloudRuntimeException { @@ -45,7 +46,7 @@ public class ServerApiException extends CloudRuntimeException { _description = description; if (cause instanceof CloudRuntimeException) { CloudRuntimeException rt = (CloudRuntimeException) cause; - ArrayList idList = rt.getIdProxyList(); + ArrayList idList = rt.getIdProxyList(); if (idList != null) { for (int i = 0; i < idList.size(); i++) { addProxyObject(idList.get(i)); diff --git a/api/src/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java b/api/src/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java index e2fe8a7b1ea..8728f915dcb 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.api.command.admin.config; +import com.cloud.hypervisor.HypervisorCapabilities; +import com.cloud.user.Account; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -23,13 +25,9 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HypervisorCapabilitiesResponse; -import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.log4j.Logger; -import com.cloud.hypervisor.HypervisorCapabilities; -import com.cloud.user.Account; - -@APICommand(name = "updateHypervisorCapabilities", description="Updates a hypervisor capabilities.", responseObject=ServiceOfferingResponse.class, since="3.0.0") +@APICommand(name = "updateHypervisorCapabilities", description="Updates a hypervisor capabilities.", responseObject=HypervisorCapabilitiesResponse.class, since="3.0.0") public class UpdateHypervisorCapabilitiesCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(UpdateHypervisorCapabilitiesCmd.class.getName()); private static final String s_name = "updatehypervisorcapabilitiesresponse"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java b/api/src/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java index 7c3d1e95e57..86f30067b18 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java @@ -17,10 +17,13 @@ package org.apache.cloudstack.api.command.admin.internallb; -import java.util.List; - -import javax.inject.Inject; - +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.VirtualRouterProvider; +import com.cloud.user.Account; +import com.cloud.user.UserContext; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -31,13 +34,8 @@ import org.apache.cloudstack.api.response.InternalLoadBalancerElementResponse; import org.apache.cloudstack.network.element.InternalLoadBalancerElementService; import org.apache.log4j.Logger; -import com.cloud.event.EventTypes; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.network.VirtualRouterProvider; -import com.cloud.user.Account; -import com.cloud.user.UserContext; +import javax.inject.Inject; +import java.util.List; @APICommand(name = "configureInternalLoadBalancerElement", responseObject=InternalLoadBalancerElementResponse.class, description="Configures an Internal Load Balancer element.", since="4.2.0") @@ -98,11 +96,8 @@ public class ConfigureInternalLoadBalancerElementCmd extends BaseAsyncCmd { @Override public void execute() throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException{ - s_logger.debug("hello alena"); UserContext.current().setEventDetails("Internal load balancer element: " + id); - s_logger.debug("hello alena"); VirtualRouterProvider result = _service.get(0).configureInternalLoadBalancerElement(getId(), getEnabled()); - s_logger.debug("hello alena"); if (result != null){ InternalLoadBalancerElementResponse routerResponse = _responseGenerator.createInternalLbElementResponse(result); routerResponse.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index 5178d685889..74eb2b9bf8f 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -71,15 +71,19 @@ public class CreateStoragePoolCmd extends BaseCmd { @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, required=true, description="the Zone ID for the storage pool") private Long zoneId; - + @Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING, required=false, description="the storage provider name") private String storageProviderName; - + @Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING, required=false, description="the scope of the storage: cluster or zone") private String scope; + @Parameter(name=ApiConstants.HYPERVISOR, type=CommandType.STRING, required=false, + description="hypervisor type of the hosts in zone that will be attached to this storage pool. 
KVM, VMware supported as of now.") + private String hypervisor; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -111,18 +115,18 @@ public class CreateStoragePoolCmd extends BaseCmd { public Long getZoneId() { return zoneId; } - + public String getStorageProviderName() { return this.storageProviderName; } - + public String getScope() { - return this.scope; + return this.scope; } - ///////////////////////////////////////////////////// - /////////////// API Implementation/////////////////// - ///////////////////////////////////////////////////// + public String getHypervisor() { + return hypervisor; + } @Override public String getCommandName() { diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java index 37d007c0376..ed6ca04c16f 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java @@ -16,24 +16,23 @@ // under the License. package org.apache.cloudstack.api.command.admin.storage; -import java.util.ArrayList; -import java.util.List; - +import com.cloud.async.AsyncJob; +import com.cloud.storage.StoragePool; +import com.cloud.utils.Pair; import org.apache.cloudstack.api.APICommand; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.VolumeResponse; -import com.cloud.async.AsyncJob; -import com.cloud.storage.StoragePool; -import com.cloud.utils.Pair; +import org.apache.log4j.Logger; + +import java.util.ArrayList; +import java.util.List; @APICommand(name = "findStoragePoolsForMigration", description="Lists storage pools available for migration of a volume.", - responseObject=StoragePoolForMigrationResponse.class) + responseObject=StoragePoolResponse.class) public class FindStoragePoolsForMigrationCmd extends BaseListCmd { public static final Logger s_logger = Logger.getLogger(FindStoragePoolsForMigrationCmd.class.getName()); @@ -72,13 +71,13 @@ public class FindStoragePoolsForMigrationCmd extends BaseListCmd { public void execute() { Pair, List> pools = _mgr.listStoragePoolsForMigrationOfVolume(getId()); - ListResponse response = new ListResponse(); - List poolResponses = new ArrayList(); + ListResponse response = new ListResponse(); + List poolResponses = new ArrayList(); List allPools = pools.first(); List suitablePoolList = pools.second(); for (StoragePool pool : allPools) { - StoragePoolForMigrationResponse poolResponse = _responseGenerator.createStoragePoolForMigrationResponse(pool); + StoragePoolResponse poolResponse = _responseGenerator.createStoragePoolForMigrationResponse(pool); Boolean suitableForMigration = false; for (StoragePool suitablePool : suitablePoolList) { if (suitablePool.getId() == pool.getId()) { diff --git a/api/src/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java index 9bbae064376..6eaac38d29a 100644 --- 
a/api/src/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java @@ -38,7 +38,7 @@ public class UpdateVPCOfferingCmd extends BaseAsyncCmd{ //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = VpcOfferingResponse.class, + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = VpcOfferingResponse.class, required=true, description="the id of the VPC offering") private Long id; diff --git a/api/src/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java b/api/src/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java index ebc22723585..58735f281e9 100644 --- a/api/src/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java @@ -101,7 +101,7 @@ public class AddAccountToProjectCmd extends BaseAsyncCmd { //verify input parameters if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project with specified id"); - ex.addProxyObject(project, getProjectId(), "projectId"); + ex.addProxyObject(getProjectId().toString(), "projectId"); throw ex; } diff --git a/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java b/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java index f37e82060fb..b99ca6347e0 100644 --- a/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java @@ -191,7 +191,11 @@ public class AssociateIPAddrCmd extends BaseAsyncCreateCmd { @Override public String getEventType() { - return EventTypes.EVENT_NET_IP_ASSIGN; + if (isPortable()) { + return EventTypes.EVENT_PORTABLE_IP_ASSIGN; + } else { + return EventTypes.EVENT_NET_IP_ASSIGN; + } } @Override diff --git a/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java b/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java index 41691ea86d0..3d431f4ac03 100644 --- a/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java @@ -78,7 +78,7 @@ public class DisassociateIPAddrCmd extends BaseAsyncCmd { if (!isPortable(id)) { result = _networkService.releaseIpAddress(getIpAddressId()); } else { - result = _networkService.releaseIpAddress(getIpAddressId()); + result = _networkService.releasePortableIpAddress(getIpAddressId()); } if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); @@ -90,7 +90,11 @@ public class DisassociateIPAddrCmd extends BaseAsyncCmd { @Override public String getEventType() { - return EventTypes.EVENT_NET_IP_RELEASE; + if (!isPortable(id)) { + return EventTypes.EVENT_NET_IP_RELEASE; + } else { + return EventTypes.EVENT_PORTABLE_IP_RELEASE; + } } @Override diff --git a/api/src/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java b/api/src/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java index ea4a010ab93..f80e17626fd 100644 --- a/api/src/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java @@ -30,7 +30,6 @@ import org.apache.log4j.Logger; import com.cloud.async.AsyncJob; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.ResourceInUseException; import com.cloud.user.Account; import com.cloud.user.UserContext; @@ -123,17 +122,12 @@ public class DeleteAffinityGroupCmd extends BaseAsyncCmd { @Override public void execute(){ - try{ - boolean result = _affinityGroupService.deleteAffinityGroup(id, accountName, domainId, name); - if (result) { - SuccessResponse response = new SuccessResponse(getCommandName()); - this.setResponseObject(response); - } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete affinity group"); - } - } catch (ResourceInUseException ex) { - s_logger.warn("Exception: ", ex); - throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex.getMessage()); + boolean result = _affinityGroupService.deleteAffinityGroup(id, accountName, domainId, name); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete affinity group"); } } diff --git a/api/src/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java b/api/src/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java index 9310fb91016..d966a4c28b2 100644 --- a/api/src/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.api.command.user.affinitygroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.BaseListAccountResourcesCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserVmResponse; @@ -28,7 +28,7 @@ import org.apache.log4j.Logger; import com.cloud.async.AsyncJob; @APICommand(name = "listAffinityGroups", description = "Lists affinity groups", responseObject = AffinityGroupResponse.class) -public class ListAffinityGroupsCmd extends BaseListCmd { +public class ListAffinityGroupsCmd extends BaseListAccountResourcesCmd { public static final Logger s_logger = Logger.getLogger(ListAffinityGroupsCmd.class.getName()); private static final String s_name = "listaffinitygroupsresponse"; @@ -77,7 +77,8 @@ public class ListAffinityGroupsCmd extends BaseListCmd { public void execute(){ ListResponse response = _queryService.listAffinityGroups(id, affinityGroupName, - affinityGroupType, virtualMachineId, this.getStartIndex(), this.getPageSizeVal()); + affinityGroupType, virtualMachineId, this.getAccountName(), this.getDomainId(), this.isRecursive(), + this.listAll(), this.getStartIndex(), this.getPageSizeVal()); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java index 1c949232403..2b15d2b7e7d 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java @@ -17,9 +17,7 @@ package org.apache.cloudstack.api.command.user.autoscale; -import java.util.ArrayList; -import java.util.List; - +import com.cloud.network.as.Condition; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListAccountResourcesCmd; @@ -30,9 +28,10 @@ import org.apache.cloudstack.api.response.CounterResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.log4j.Logger; -import com.cloud.network.as.Condition; +import java.util.ArrayList; +import java.util.List; -@APICommand(name = "listConditions", description = "List Conditions for the specific user", responseObject = CounterResponse.class) +@APICommand(name = "listConditions", description = "List Conditions for the specific user", responseObject = ConditionResponse.class) public class ListConditionsCmd extends BaseListAccountResourcesCmd { public static final Logger s_logger = Logger.getLogger(ListConditionsCmd.class.getName()); private static final String s_name = "listconditionsresponse"; diff --git a/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java index 40128526ce0..549de76f2c6 100644 --- a/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java @@ -198,7 +198,11 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P _firewallService.revokeRelatedFirewallRule(getEntityId(), true); } - _rulesService.revokePortForwardingRule(getEntityId(), true); + try { + _rulesService.revokePortForwardingRule(getEntityId(), true); + } catch (Exception ex){ + //Ignore e.g. 
failed to apply rules to device error + } throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to apply port forwarding rule"); } diff --git a/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java index 275fa1866b6..3a56fe4642a 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.NetworkACLItemResponse; import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import com.cloud.async.AsyncJob; @@ -98,7 +99,15 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { // /////////////////////////////////////////////////// public String getProtocol() { - return protocol.trim(); + String p = protocol.trim(); + // Deal with ICMP(protocol number 1) specially because it need to be paired with icmp type and code + if(StringUtils.isNumeric(p)){ + int protoNumber = Integer.parseInt(p); + if (protoNumber == 1) { + p = "icmp"; + } + } + return p; } public List getSourceCidrList() { diff --git a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java index fe5decdf5fc..77ec4e7609f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java @@ -87,13 +87,19 @@ public class DeleteGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { @Override public void execute(){ - _gslbService.deleteGlobalLoadBalancerRule(this); - UserContext.current().setEventDetails("Deleting global Load balancer Id: " + getGlobalLoadBalancerId()); + UserContext.current().setEventDetails("Deleting global Load balancer rule Id: " + getGlobalLoadBalancerId()); + boolean result = _gslbService.deleteGlobalLoadBalancerRule(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete Global Load Balancer rule."); + } } @Override public String getSyncObjType() { - return BaseAsyncCmd.networkSyncObject; + return BaseAsyncCmd.gslbSyncObject; } @Override diff --git a/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java b/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java index 5a9ea2a073d..14c2ee11c80 100644 --- a/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java @@ -112,7 +112,7 @@ public class CreateSnapshotPolicyCmd extends BaseCmd { Project project = _projectService.findByProjectAccountId(volume.getAccountId()); if (project.getState() != Project.State.Active) { PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the specified project id in state=" + project.getState() + " as it's no longer active"); - ex.addProxyObject(project, project.getId(), "projectId"); + 
ex.addProxyObject(project.getUuid(), "projectId"); throw ex; } } else if (account.getState() == Account.State.disabled) { diff --git a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java index ba1f924fe02..6aa60aca1fc 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java @@ -16,24 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.template; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.ApiErrorCode; -import org.apache.cloudstack.api.BaseAsyncCreateCmd; -import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.GuestOSResponse; -import org.apache.cloudstack.api.response.SnapshotResponse; -import org.apache.cloudstack.api.response.StoragePoolResponse; -import org.apache.cloudstack.api.response.TemplateResponse; -import org.apache.cloudstack.api.response.UserVmResponse; -import org.apache.cloudstack.api.response.VolumeResponse; -import org.apache.log4j.Logger; - import com.cloud.async.AsyncJob; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -45,8 +27,24 @@ import com.cloud.storage.Volume; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.user.UserContext; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.GuestOSResponse; +import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.TemplateResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.api.response.VolumeResponse; +import org.apache.log4j.Logger; -@APICommand(name = "createTemplate", responseObject = StoragePoolResponse.class, description = "Creates a template of a virtual machine. " + "The virtual machine must be in a STOPPED state. " +import java.util.Collection; +import java.util.List; +import java.util.Map; + +@APICommand(name = "createTemplate", responseObject = TemplateResponse.class, description = "Creates a template of a virtual machine. " + "The virtual machine must be in a STOPPED state. 
" + "A template created from this command is automatically designated as a private template visible to the account that created it.") public class CreateTemplateCmd extends BaseAsyncCreateCmd { public static final Logger s_logger = Logger.getLogger(CreateTemplateCmd.class.getName()); @@ -209,7 +207,7 @@ import com.cloud.user.UserContext; Project project = _projectService.findByProjectAccountId(accountId); if (project.getState() != Project.State.Active) { PermissionDeniedException ex = new PermissionDeniedException("Can't add resources to the specified project id in state=" + project.getState() + " as it's no longer active"); - ex.addProxyObject(project, project.getId(), "projectId"); + ex.addProxyObject(project.getUuid(), "projectId"); } } else if (account.getState() == Account.State.disabled) { throw new PermissionDeniedException("The owner of template is disabled: " + account); diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java index 9c33f97c317..2f7d8e10a77 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java @@ -35,7 +35,7 @@ import com.cloud.user.Account; import com.cloud.user.UserContext; import com.cloud.uservm.UserVm; -@APICommand(name = "restoreVirtualMachine", description="Restore a VM to original template or new template", responseObject=UserVmResponse.class, since="3.0.0") +@APICommand(name = "restoreVirtualMachine", description="Restore a VM to original template/ISO or new template/ISO", responseObject=UserVmResponse.class, since="3.0.0") public class RestoreVMCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(RestoreVMCmd.class); private static final String s_name = "restorevmresponse"; @@ -44,9 +44,10 @@ public class RestoreVMCmd extends BaseAsyncCmd { required=true, description="Virtual Machine ID") private Long vmId; - @Parameter(name=ApiConstants.TEMPLATE_ID, type=CommandType.UUID, entityType = TemplateResponse.class, description="an optional template Id to restore vm from the new template") + @Parameter(name=ApiConstants.TEMPLATE_ID, type=CommandType.UUID, entityType = TemplateResponse.class, description="an optional template Id to restore vm from the new template. 
This can be an ISO id in case of restore vm deployed using ISO") private Long templateId; + @Override public String getEventType() { return EventTypes.EVENT_VM_RESTORE; diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java index 3b00ba0d4bb..fea3e04d249 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java @@ -22,7 +22,9 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; @@ -72,6 +74,10 @@ public class UploadVolumeCmd extends BaseAsyncCmd { description="Image store uuid") private String imageStoreUuid; + @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, + description="Upload volume for the project") + private Long projectId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -135,7 +141,7 @@ public class UploadVolumeCmd extends BaseAsyncCmd { @Override public long getEntityOwnerId() { - Long accountId = finalyzeAccountId(accountName, domainId, null, true); + Long accountId = finalyzeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return UserContext.current().getCaller().getId(); } diff --git a/api/src/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java index bd56c744a2f..714e9e79926 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java @@ -42,7 +42,7 @@ public class RestartVPCCmd extends BaseAsyncCmd{ //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=VpcResponse.class, + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=VpcResponse.class, required=true, description="the id of the VPC") private Long id; diff --git a/api/src/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java index 2cc3c98b087..a6410214cc3 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java @@ -38,11 +38,11 @@ public class UpdateVPCCmd extends BaseAsyncCmd{ //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=VpcResponse.class, + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=VpcResponse.class, required=true, description="the id of the VPC") private Long id; - @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="the name of the VPC") + @Parameter(name=ApiConstants.NAME, 
type=CommandType.STRING, description="the name of the VPC", required=true) private String vpcName; @Parameter(name=ApiConstants.DISPLAY_TEXT, type=CommandType.STRING, description="the display text of the VPC") diff --git a/api/src/org/apache/cloudstack/api/response/ExceptionResponse.java b/api/src/org/apache/cloudstack/api/response/ExceptionResponse.java index 3afd516e075..830cf007cd0 100644 --- a/api/src/org/apache/cloudstack/api/response/ExceptionResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ExceptionResponse.java @@ -17,16 +17,18 @@ package org.apache.cloudstack.api.response; import java.util.ArrayList; +import java.util.List; import org.apache.cloudstack.api.BaseResponse; import com.cloud.serializer.Param; +import com.cloud.utils.exception.ExceptionProxyObject; import com.google.gson.annotations.SerializedName; public class ExceptionResponse extends BaseResponse { @SerializedName("uuidList") @Param(description="List of uuids associated with this error") - private ArrayList idList; + private List idList; @SerializedName("errorcode") @Param(description="numeric code associated with this error") private Integer errorCode; @@ -37,6 +39,10 @@ public class ExceptionResponse extends BaseResponse { @SerializedName("errortext") @Param(description="the text associated with this error") private String errorText = "Command failed due to Internal Server Error"; + public ExceptionResponse(){ + idList = new ArrayList(); + } + public Integer getErrorCode() { return errorCode; } @@ -53,12 +59,12 @@ public class ExceptionResponse extends BaseResponse { this.errorText = errorText; } - public void addProxyObject(String id) { + public void addProxyObject(ExceptionProxyObject id) { idList.add(id); return; } - public ArrayList getIdProxyList() { + public List getIdProxyList() { return idList; } diff --git a/api/src/org/apache/cloudstack/api/response/HypervisorCapabilitiesResponse.java b/api/src/org/apache/cloudstack/api/response/HypervisorCapabilitiesResponse.java index 36021876184..2d37add5eb4 100644 --- a/api/src/org/apache/cloudstack/api/response/HypervisorCapabilitiesResponse.java +++ b/api/src/org/apache/cloudstack/api/response/HypervisorCapabilitiesResponse.java @@ -42,6 +42,14 @@ public class HypervisorCapabilitiesResponse extends BaseResponse { @SerializedName(ApiConstants.SECURITY_GROUP_EANBLED) @Param(description="true if security group is supported") private boolean isSecurityGroupEnabled; + @SerializedName(ApiConstants.MAX_DATA_VOLUMES_LIMIT) @Param(description="the maximum number of Data Volumes that can be attached for this hypervisor") + private Integer maxDataVolumesLimit; + + @SerializedName(ApiConstants.MAX_HOSTS_PER_CLUSTER) @Param(description="the maximum number of Hosts per cluster for this hypervisor") + private Integer maxHostsPerCluster; + + @SerializedName(ApiConstants.STORAGE_MOTION_ENABLED) @Param(description="true if storage motion is supported") + private boolean isStorageMotionSupported; public String getId() { return id; @@ -83,4 +91,28 @@ public class HypervisorCapabilitiesResponse extends BaseResponse { public void setIsSecurityGroupEnabled(Boolean sgEnabled) { this.isSecurityGroupEnabled = sgEnabled; } + + public Boolean getIsStorageMotionSupported() { + return this.isStorageMotionSupported; + } + + public void setIsStorageMotionSupported(Boolean smSupported) { + this.isStorageMotionSupported = smSupported; + } + + public Integer getMaxDataVolumesLimit() { + return maxDataVolumesLimit; + } + + public void setMaxDataVolumesLimit(Integer maxDataVolumesLimit) { 
+ this.maxDataVolumesLimit = maxDataVolumesLimit; + } + + public Integer getMaxHostsPerCluster() { + return maxHostsPerCluster; + } + + public void setMaxHostsPerCluster(Integer maxHostsPerCluster) { + this.maxHostsPerCluster = maxHostsPerCluster; + } } diff --git a/api/src/org/apache/cloudstack/api/response/NetworkResponse.java b/api/src/org/apache/cloudstack/api/response/NetworkResponse.java index d6847d55846..70c3d79c4c0 100644 --- a/api/src/org/apache/cloudstack/api/response/NetworkResponse.java +++ b/api/src/org/apache/cloudstack/api/response/NetworkResponse.java @@ -166,6 +166,10 @@ public class NetworkResponse extends BaseResponse implements ControlledEntityRes @SerializedName(ApiConstants.DISPLAY_NETWORK) @Param(description="an optional field, whether to the display the network to the end user or not.") private Boolean displayNetwork; + @SerializedName(ApiConstants.ACL_ID) @Param(description="ACL Id associated with the VPC network") + private String aclId; + + public Boolean getDisplayNetwork() { return displayNetwork; } @@ -352,4 +356,12 @@ public class NetworkResponse extends BaseResponse implements ControlledEntityRes public void setIp6Cidr(String ip6Cidr) { this.ip6Cidr = ip6Cidr; } + + public String getAclId() { + return aclId; + } + + public void setAclId(String aclId) { + this.aclId = aclId; + } } diff --git a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index 08ebbb05887..31533f87728 100644 --- a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -60,6 +60,9 @@ public class ServiceOfferingResponse extends BaseResponse { @SerializedName("limitcpuuse") @Param(description="restrict the CPU usage to committed service offering") private Boolean limitCpuUse; + @SerializedName("isvolatile") @Param(description="true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk") + private Boolean isVolatile; + @SerializedName("tags") @Param(description="the tags for the service offering") private String tags; @@ -237,4 +240,12 @@ public class ServiceOfferingResponse extends BaseResponse { public void setDeploymentPlanner(String deploymentPlanner) { this.deploymentPlanner = deploymentPlanner; } + + public boolean getVolatileVm() { + return isVolatile; + } + + public void setVolatileVm(boolean isVolatile) { + this.isVolatile = isVolatile; + } } diff --git a/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java b/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java deleted file mode 100644 index f0bbcb19136..00000000000 --- a/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java +++ /dev/null @@ -1,248 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.api.response; - -import java.util.Date; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; -import org.apache.cloudstack.api.EntityReference; - -import com.cloud.serializer.Param; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolStatus; -import com.google.gson.annotations.SerializedName; - -@EntityReference(value=StoragePool.class) -public class StoragePoolForMigrationResponse extends BaseResponse { - @SerializedName("id") @Param(description="the ID of the storage pool") - private String id; - - @SerializedName("zoneid") @Param(description="the Zone ID of the storage pool") - private String zoneId; - - @SerializedName(ApiConstants.ZONE_NAME) @Param(description="the Zone name of the storage pool") - private String zoneName; - - @SerializedName("podid") @Param(description="the Pod ID of the storage pool") - private String podId; - - @SerializedName("podname") @Param(description="the Pod name of the storage pool") - private String podName; - - @SerializedName("name") @Param(description="the name of the storage pool") - private String name; - - @SerializedName("ipaddress") @Param(description="the IP address of the storage pool") - private String ipAddress; - - @SerializedName("path") @Param(description="the storage pool path") - private String path; - - @SerializedName("created") @Param(description="the date and time the storage pool was created") - private Date created; - - @SerializedName("type") @Param(description="the storage pool type") - private String type; - - @SerializedName("clusterid") @Param(description="the ID of the cluster for the storage pool") - private String clusterId; - - @SerializedName("clustername") @Param(description="the name of the cluster for the storage pool") - private String clusterName; - - @SerializedName("disksizetotal") @Param(description="the total disk size of the storage pool") - private Long diskSizeTotal; - - @SerializedName("disksizeallocated") @Param(description="the host's currently allocated disk size") - private Long diskSizeAllocated; - - @SerializedName("disksizeused") @Param(description="the host's currently used disk size") - private Long diskSizeUsed; - - @SerializedName("tags") @Param(description="the tags for the storage pool") - private String tags; - - @SerializedName(ApiConstants.STATE) @Param(description="the state of the storage pool") - private StoragePoolStatus state; - - @SerializedName(ApiConstants.SCOPE) @Param(description="the scope of the storage pool") - private String scope; - - @SerializedName("suitableformigration") @Param(description="true if this pool is suitable to migrate a volume," + - " false otherwise") - private Boolean suitableForMigration; - - /** - * @return the scope - */ - public String getScope() { - return scope; - } - - /** - * @param scope the scope to set - */ - public void setScope(String scope) { - this.scope = scope; - } - - @Override - public String getObjectId() { - return this.getId(); - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; 
- } - - public String getZoneId() { - return zoneId; - } - - public void setZoneId(String zoneId) { - this.zoneId = zoneId; - } - - public String getZoneName() { - return zoneName; - } - - public void setZoneName(String zoneName) { - this.zoneName = zoneName; - } - - public String getPodId() { - return podId; - } - - public void setPodId(String podId) { - this.podId = podId; - } - - public String getPodName() { - return podName; - } - - public void setPodName(String podName) { - this.podName = podName; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getIpAddress() { - return ipAddress; - } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public String getPath() { - return path; - } - - public void setPath(String path) { - this.path = path; - } - - public Date getCreated() { - return created; - } - - public void setCreated(Date created) { - this.created = created; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String getClusterId() { - return clusterId; - } - - public void setClusterId(String clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public Long getDiskSizeTotal() { - return diskSizeTotal; - } - - public void setDiskSizeTotal(Long diskSizeTotal) { - this.diskSizeTotal = diskSizeTotal; - } - - public Long getDiskSizeAllocated() { - return diskSizeAllocated; - } - - public void setDiskSizeAllocated(Long diskSizeAllocated) { - this.diskSizeAllocated = diskSizeAllocated; - } - - public Long getDiskSizeUsed() { - return diskSizeUsed; - } - - public void setDiskSizeUsed(Long diskSizeUsed) { - this.diskSizeUsed = diskSizeUsed; - } - - public String getTags() { - return tags; - } - - public void setTags(String tags) { - this.tags = tags; - } - - public StoragePoolStatus getState() { - return state; - } - - public void setState(StoragePoolStatus state) { - this.state = state; - } - - public void setSuitableForMigration(Boolean suitableForMigration) { - this.suitableForMigration = suitableForMigration; - } -} diff --git a/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java index 4411ddcb112..965407d9952 100644 --- a/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java +++ b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java @@ -16,16 +16,15 @@ // under the License. 
package org.apache.cloudstack.api.response; -import java.util.Date; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; -import org.apache.cloudstack.api.EntityReference; - import com.cloud.serializer.Param; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolStatus; import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import java.util.Date; @EntityReference(value=StoragePool.class) public class StoragePoolResponse extends BaseResponse { @@ -40,13 +39,13 @@ public class StoragePoolResponse extends BaseResponse { @SerializedName(ApiConstants.ZONE_TYPE) @Param(description = "network type of the availability zone") private String zoneType; - + @SerializedName("podid") @Param(description="the Pod ID of the storage pool") private String podId; @SerializedName("podname") @Param(description="the Pod name of the storage pool") private String podName; - + @SerializedName("name") @Param(description="the name of the storage pool") private String name; @@ -82,10 +81,17 @@ public class StoragePoolResponse extends BaseResponse { @SerializedName(ApiConstants.STATE) @Param(description="the state of the storage pool") private StoragePoolStatus state; - + @SerializedName(ApiConstants.SCOPE) @Param(description="the scope of the storage pool") private String scope; + @SerializedName(ApiConstants.HYPERVISOR) @Param(description="the hypervisor type of the storage pool") + private String hypervisor; + + @SerializedName("suitableformigration") @Param(description="true if this pool is suitable to migrate a volume," + + " false otherwise") + private Boolean suitableForMigration; + /** * @return the scope */ @@ -100,6 +106,14 @@ public class StoragePoolResponse extends BaseResponse { this.scope = scope; } + public String getHypervisor() { + return hypervisor; + } + + public void setHypervisor(String hypervisor) { + this.hypervisor = hypervisor; + } + @Override public String getObjectId() { return this.getId(); @@ -132,11 +146,11 @@ public class StoragePoolResponse extends BaseResponse { public String getZoneType() { return zoneType; } - + public void setZoneType(String zoneType) { this.zoneType = zoneType; } - + public String getPodId() { return podId; } @@ -248,4 +262,8 @@ public class StoragePoolResponse extends BaseResponse { public void setState(StoragePoolStatus state) { this.state = state; } + + public void setSuitableForMigration(Boolean suitableForMigration) { + this.suitableForMigration = suitableForMigration; + } } diff --git a/api/src/org/apache/cloudstack/api/response/UserVmResponse.java b/api/src/org/apache/cloudstack/api/response/UserVmResponse.java index c3bbf8db382..1f9eb1ac63f 100644 --- a/api/src/org/apache/cloudstack/api/response/UserVmResponse.java +++ b/api/src/org/apache/cloudstack/api/response/UserVmResponse.java @@ -137,6 +137,18 @@ public class UserVmResponse extends BaseResponse implements ControlledEntityResp @SerializedName("networkkbswrite") @Param(description="the outgoing network traffic on the host") private Long networkKbsWrite; + @SerializedName("diskkbsread") @Param(description="the read (bytes) of disk on the vm") + private Long diskKbsRead; + + @SerializedName("diskkbswrite") @Param(description="the write (bytes) of disk on the vm") + private Long diskKbsWrite; + + @SerializedName("diskioread") @Param(description="the read (io) of disk on the vm") + private Long 
diskIORead; + + @SerializedName("diskiowrite") @Param(description="the write (io) of disk on the vm") + private Long diskIOWrite; + @SerializedName("guestosid") @Param(description="Os type ID of the virtual machine") private String guestOsId; @@ -300,6 +312,22 @@ public class UserVmResponse extends BaseResponse implements ControlledEntityResp public void setIsoDisplayText(String isoDisplayText) { this.isoDisplayText = isoDisplayText; } + + public void setDiskKbsRead(Long diskKbsRead) { + this.diskKbsRead = diskKbsRead; + } + + public void setDiskKbsWrite(Long diskKbsWrite) { + this.diskKbsWrite = diskKbsWrite; + } + + public void setDiskIORead(Long diskIORead) { + this.diskIORead = diskIORead; + } + + public void setDiskIOWrite(Long diskIOWrite) { + this.diskIOWrite = diskIOWrite; + } public void setServiceOfferingId(String serviceOfferingId) { this.serviceOfferingId = serviceOfferingId; diff --git a/api/src/org/apache/cloudstack/query/QueryService.java b/api/src/org/apache/cloudstack/query/QueryService.java index 1a9e36eade6..28dba3db5e1 100644 --- a/api/src/org/apache/cloudstack/query/QueryService.java +++ b/api/src/org/apache/cloudstack/query/QueryService.java @@ -115,7 +115,8 @@ public interface QueryService { public ListResponse listIsos(ListIsosCmd cmd); public ListResponse listAffinityGroups(Long affinityGroupId, String affinityGroupName, - String affinityGroupType, Long vmId, Long startIndex, Long pageSize); + String affinityGroupType, Long vmId, String accountName, Long domainId, boolean isRecursive, + boolean listAll, Long startIndex, Long pageSize); public List listResource(ListResourceDetailsCmd cmd); diff --git a/api/src/org/apache/cloudstack/usage/UsageTypes.java b/api/src/org/apache/cloudstack/usage/UsageTypes.java index 2baa1d20057..ddf10979cb7 100644 --- a/api/src/org/apache/cloudstack/usage/UsageTypes.java +++ b/api/src/org/apache/cloudstack/usage/UsageTypes.java @@ -36,6 +36,10 @@ public class UsageTypes { public static final int PORT_FORWARDING_RULE = 12; public static final int NETWORK_OFFERING = 13; public static final int VPN_USERS = 14; + public static final int VM_DISK_IO_READ = 21; + public static final int VM_DISK_IO_WRITE = 22; + public static final int VM_DISK_BYTES_READ = 23; + public static final int VM_DISK_BYTES_WRITE = 24; public static List listUsageTypes(){ List responseList = new ArrayList(); @@ -53,6 +57,10 @@ public class UsageTypes { responseList.add(new UsageTypeResponse(PORT_FORWARDING_RULE, "Port Forwarding Usage")); responseList.add(new UsageTypeResponse(NETWORK_OFFERING, "Network Offering Usage")); responseList.add(new UsageTypeResponse(VPN_USERS, "VPN users usage")); + responseList.add(new UsageTypeResponse(VM_DISK_IO_READ, "VM Disk usage(I/O Read)")); + responseList.add(new UsageTypeResponse(VM_DISK_IO_WRITE, "VM Disk usage(I/O Write)")); + responseList.add(new UsageTypeResponse(VM_DISK_BYTES_READ, "VM Disk usage(Bytes Read)")); + responseList.add(new UsageTypeResponse(VM_DISK_BYTES_WRITE, "VM Disk usage(Bytes Write)")); return responseList; } } diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index 1638be19e49..2b173596b73 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -14,6 +14,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
+label.view.secondary.ips=View secondary IPs
+message.acquire.ip.nic=Please confirm that you would like to acquire a new secondary IP for this NIC.
NOTE: You need to manually configure the newly-acquired secondary IP inside the virtual machine. message.select.affinity.groups=Please select any affinity groups you want this VM to belong to: message.no.affinity.groups=You do not have any affinity groups. Please continue to the next step. label.action.delete.nic=Remove NIC @@ -53,6 +55,7 @@ label.account.specific=Account-Specific label.account=Account label.accounts=Accounts label.acquire.new.ip=Acquire New IP +label.acquire.new.secondary.ip=Acquire new secondary IP label.action.attach.disk.processing=Attaching Disk.... label.action.attach.disk=Attach Disk label.action.attach.iso.processing=Attaching ISO.... @@ -464,10 +467,14 @@ label.disabled=Disabled label.disabling.vpn.access=Disabling VPN Access label.disk.allocated=Disk Allocated label.disk.offering=Disk Offering +label.disk.read.bytes=Disk Read (Bytes) +label.disk.read.io=Disk Read (IO) label.disk.size.gb=Disk Size (in GB) label.disk.size=Disk Size label.disk.total=Disk Total label.disk.volume=Disk Volume +label.disk.write.bytes=Disk Write (Bytes) +label.disk.write.io=Disk Write (IO) label.display.name=Display name label.display.text=Display Text label.dns.1=DNS 1 diff --git a/client/WEB-INF/classes/resources/messages_ar.properties b/client/WEB-INF/classes/resources/messages_ar.properties index 4d3011b5a6c..5b3afea929d 100644 --- a/client/WEB-INF/classes/resources/messages_ar.properties +++ b/client/WEB-INF/classes/resources/messages_ar.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - changed.item.properties=\u062a\u063a\u064a\u0631 \u062e\u0635\u0627\u0626\u0635 \u0627\u0644\u0639\u0646\u0635\u0631 confirm.enable.s3=\u0641\u0636\u0644\u0627 \u0642\u0645 \u0628\u062a\u0639\u0628\u0626\u0629 \u0627\u0644\u0628\u064a\u0627\u0646\u0627\u062a \u0627\u0644\u0642\u0627\u062f\u0645\u0629 \u0644\u062a\u0645\u0643\u064a\u0646 \u0627\u0644\u062a\u062e\u0632\u064a\u0646 S3 \u0644\u0644\u0630\u0627\u0643\u0631\u0629 \u0627\u0644\u062b\u0627\u0646\u0648\u064a\u0629. instances.actions.reboot.label=\u0625\u0639\u0627\u062f\u0629 \u062a\u0634\u063a\u064a\u0644 \u0627\u0644\u0646\u0645\u0648\u0630\u062c @@ -217,6 +216,7 @@ label.zone.step.3.title=\u0627\u0644\u062e\u0637\u0648\u0629 3 \\\: \u0639\u0644 label.zone.step.4.title=\u0627\u0644\u062e\u0637\u0648\u0629 4 \\\: <\u0642\u0648\u064a> \u0625\u0636\u0627\u0641\u0629 \u0645\u062c\u0645\u0648\u0639\u0629 IP <\\\u0642\u0648\u064a> label.zone.wide=\u0645\u0646\u0637\u0642\u0629 \u0648\u0627\u0633\u0639\u0629 label.zoneWizard.trafficType.guest=\u0627\u0644\u0636\u064a\u0641 \\\: \u0627\u0644\u062d\u0631\u0643\u0629 \u0628\u064a\u0646 \u0627\u0644\u0623\u062c\u0647\u0632\u0629 \u0627\u0644\u0625\u0641\u062a\u0631\u0627\u0636\u064a\u0629 \u0644\u0644\u0645\u0633\u062a\u062e\u062f\u0645 \u0627\u0644\u0646\u0647\u0627\u0626\u064a. 
+label.zoneWizard.trafficType.management=\u0625\u062f\u0627\u0631\u0629\\\: \u0627\u0644\u062d\u0631\u0643\u0629 \u0628\u064a\u0646 \u0627\u0644\u0645\u0648\u0627\u0631\u062f \u0627\u0644\u062f\u0627\u062e\u0644\u064a\u0629 \u0644 \u0643\u0644\u0627\u0648\u062f \u0633\u062a\u0627\u0643 \u060c \u0645\u062a\u0636\u0645\u0646\u0629 \u0623\u064a \u062c\u0632\u0621 \u064a\u062a\u0635\u0644 \u0628\u062e\u0627\u062f\u0645\\\u0633\u064a\u0631\u0641\u0631 \u0627\u0644\u0625\u062f\u0627\u0631\u0629 \u060c \u0645\u062b\u0644 \u0627\u0644\u0645\u0636\u064a\u0641\u0627\u062a \u0648 \u0623\u0646\u0638\u0645\u0629 \u0643\u0644\u0627\u0648\u062f \u0633\u062a\u0627\u0643 \u0627\u0644\u0625\u0641\u062a\u0631\u0627\u0636\u064a\u0629. label.zoneWizard.trafficType.public=\u0627\u0644\u0639\u0627\u0645\u0629 \\\: \u0627\u0644\u0645\u0631\u0648\u0631 \u0628\u064a\u0646 \u0627\u0644\u0625\u0646\u062a\u0631\u0646\u062a \u0648\u0627\u0644\u0623\u062c\u0647\u0632\u0629 \u0627\u0644\u0638\u0627\u0647\u0631\u064a\u0629 \u0641\u064a \u0627\u0644\u0633\u062d\u0627\u0628\u0629. label.zoneWizard.trafficType.storage=\u0627\u0644\u062a\u062e\u0632\u064a\u0646 \\\: \u0627\u0644\u0645\u0631\u0648\u0631 \u0628\u064a\u0646 \u0645\u0644\u0642\u0645\u0627\u062a \u0627\u0644\u062a\u062e\u0632\u064a\u0646 \u0627\u0644\u0627\u0628\u062a\u062f\u0627\u0626\u064a\u0629 \u0648\u0627\u0644\u062b\u0627\u0646\u0648\u064a\u0629\u060c \u0645\u062b\u0644 \u0642\u0648\u0627\u0644\u0628 VM \u0648\u0627\u0644\u0644\u0642\u0637\u0627\u062a message.acquire.new.ip.vpc=\u064a\u0631\u062c\u0649 \u0627\u0644\u062a\u0623\u0643\u064a\u062f \u0628\u0623\u0646\u0643 \u062a\u0631\u063a\u0628 \u0641\u064a \u0627\u0644\u062d\u0635\u0648\u0644 \u0639\u0644\u0649 \u0628\u0648\u0631\u062a\u0648\u0643\u0648\u0644 \u0625\u0646\u062a\u0631\u0646\u062a \u062c\u062f\u064a\u062f \u0644\u0647\u0630\u0627 \u0627\u0644\u062d\u0627\u0633\u0648\u0628 \u0627\u0644\u0625\u0641\u062a\u0631\u0627\u0636\u064a. diff --git a/client/WEB-INF/classes/resources/messages_ca.properties b/client/WEB-INF/classes/resources/messages_ca.properties index 4e66083dbd5..2d8e953419f 100644 --- a/client/WEB-INF/classes/resources/messages_ca.properties +++ b/client/WEB-INF/classes/resources/messages_ca.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - confirm.enable.swift=Si us plau ompliu la seg\u00fcent informaci\u00f3 per habilitar el suport per a Swift error.installWizard.message=Quelcom ha fallat, vost\u00e8 pot tornar enrere i corregir els errors detalls suggerime error.password.not.match=Els camps de contrasenya no coincideixen diff --git a/client/WEB-INF/classes/resources/messages_de_DE.properties b/client/WEB-INF/classes/resources/messages_de_DE.properties index ca87323cc77..3c0c8deaabd 100644 --- a/client/WEB-INF/classes/resources/messages_de_DE.properties +++ b/client/WEB-INF/classes/resources/messages_de_DE.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - error.installWizard.message=Ein Fehler ist aufgetreten; Sie k\u00f6nnen zur\u00fcckgehen und den Fehler korregieren error.login=Ihr Benutzername / Passwort stimmt nicht mit uneren unseren Aufzeichnungen \u00fcberein. error.session.expired=Ihre Sitzung ist abgelaufen. 
diff --git a/client/WEB-INF/classes/resources/messages_es.properties b/client/WEB-INF/classes/resources/messages_es.properties index 16cfc1cda49..86eb596689c 100644 --- a/client/WEB-INF/classes/resources/messages_es.properties +++ b/client/WEB-INF/classes/resources/messages_es.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - error.installWizard.message=Algo salio mal, debes ir para atr\u00e1s y corregir los error. error.login=Su nombre de usuario / contrase\u00c3\u00b1a no coincide con nuestros registros. error.mgmt.server.inaccessible=El Servidor de Gesti\u00c3\u00b3n es inaccesible. Por favor, int\u00c3\u00a9ntelo de nuevo m\u00c3\u00a1s tarde. diff --git a/client/WEB-INF/classes/resources/messages_fr_FR.properties b/client/WEB-INF/classes/resources/messages_fr_FR.properties index 8be438a11c1..33ffcfc4714 100644 --- a/client/WEB-INF/classes/resources/messages_fr_FR.properties +++ b/client/WEB-INF/classes/resources/messages_fr_FR.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - changed.item.properties=Propri\u00e9t\u00e9s de l\\'\u00e9l\u00e9ment modifi\u00e9es confirm.enable.s3=Remplir les informations suivantes pour activer le support de stockage secondaire S3 confirm.enable.swift=Remplir les informations suivantes pour activer Swift @@ -93,6 +92,7 @@ label.action.delete.load.balancer=Supprimer la r\u00e8gle de r\u00e9partition de label.action.delete.network.processing=Suppression du r\u00e9seau... label.action.delete.network=Supprimer le r\u00e9seau label.action.delete.nexusVswitch=Supprimer le Nexus 1000v +label.action.delete.nic=Supprimer carte NIC label.action.delete.physical.network=Supprimer le r\u00e9seau physique label.action.delete.pod.processing=Suppression du pod... label.action.delete.pod=Supprimer le Pod @@ -1200,6 +1200,7 @@ message.action.delete.ISO.for.all.zones=L\\'ISO est utilis\u00e9 par toutes les message.action.delete.ISO=\u00cates-vous s\u00fbr que vous souhaitez supprimer cette ISO. message.action.delete.network=\u00cates-vous s\u00fbr que vous voulez supprimer ce r\u00e9seau. message.action.delete.nexusVswitch=Confirmer la suppession de ce Nexus 1000v +message.action.delete.nic=Veuillez confirmer que vous souhaitez supprimer cette carte NIC, ce qui supprimera \u00e9galement le r\u00e9seau associ\u00e9 sur la machine virtuelle. message.action.delete.physical.network=Confirmer la suppression du r\u00e9seau physique message.action.delete.pod=\u00cates-vous s\u00fbr que vous souhaitez supprimer ce pod. message.action.delete.primary.storage=\u00cates-vous s\u00fbr que vous voulez supprimer ce stockage principal. @@ -1412,6 +1413,7 @@ message.migrate.router.confirm=Confirmer la migration du routeur vers \: message.migrate.systemvm.confirm=Confirmer la migration de la VM syst\u00e8me vers \: message.migrate.volume=Confirmer la migration du volume vers un autre stockage principal. message.new.user=Renseigner les informations suivantes pour ajouter un nouveau compte utilisateur +message.no.affinity.groups=Vous n\\'avez pas de groupes d\\'affinit\u00e9. Continuer vers la prochaine \u00e9tape. message.no.network.support.configuration.not.true=Il n\\'y a pas de zone avec la fonction groupe de s\u00e9curit\u00e9 active. D\u00e8s lors, pas de fonction r\u00e9seau suppl\u00e9mentaires disponibles. Continuer \u00e0 l\\'\u00e9tape 5. message.no.network.support=S\u00e9lectionnez l\\'hyperviseur. 
vSphere, n\\'a pas de fonctionnalit\u00e9s suppl\u00e9mentaires pour le r\u00e9seau. Continuez \u00e0 l\\'\u00e9tape 5.
message.no.projects.adminOnly=Vous n\\'avez pas de projet.
Contacter votre administrateur pour ajouter un projet. @@ -1444,6 +1446,7 @@ message.restart.mgmt.usage.server=Red\u00e9marrer le ou les serveur(s) de gestio message.restart.network=Tous les services fournit par ce routeur virtuel vont \u00eatre interrompus. Confirmer le red\u00e9marrage de ce routeur. message.restart.vpc=Confirmer le red\u00e9marrage du VPC message.security.group.usage=(Utilisez Ctrl-clic pour s\u00e9lectionner les groupes de s\u00e9curit\u00e9 vis\u00e9s) +message.select.affinity.groups=S\u00e9lectionner les groupes d\\'affinit\u00e9 qui appartiendront \u00e0 cette machine virtuelle \: message.select.a.zone=Une zone correspond typiquement \u00e0 un seul centre de donn\u00e9es. Des zones multiples peuvent permettre de rendre votre cloud plus fiable en apportant une isolation physique et de la redondance. message.select.instance=S\u00e9lectionner une instance. message.select.iso=S\u00e9lectionner un ISO pour votre nouvelle instance virtuelle. diff --git a/client/WEB-INF/classes/resources/messages_it_IT.properties b/client/WEB-INF/classes/resources/messages_it_IT.properties index 78323b02578..6ae5bd85376 100644 --- a/client/WEB-INF/classes/resources/messages_it_IT.properties +++ b/client/WEB-INF/classes/resources/messages_it_IT.properties @@ -15,32 +15,207 @@ # specific language governing permissions and limitations # under the License. - changed.item.properties=Elementi delle propriet\u00e0 modificati confirm.enable.s3=Si prega di inserire i valori richiesti per abilitare il supporto per il Secondary Storage di tipo S3 confirm.enable.swift=Si prega di inserire i valori richiesti per abilitare il supporto per Swift error.could.not.enable.zone=Impossibile abilitare la zona error.installWizard.message=E\\' stato rilevato un errore\: tornare agli step precedenti e correggere gli errori error.invalid.username.password=Username o Password non valida +error.login=Le credenziali fornite per username/password non corrispondono a quelle nei nostri sistemi. +error.menu.select=Impossibile effettuare operazioni senza aver selezionato alcun elemento. +error.mgmt.server.inaccessible=Impossibile accedere al Management Server. Si prega di riprovare pi\u00f9 tardi. error.password.not.match=I campi password non corrispondono error.please.specify.physical.network.tags=Le offerte di rete non sono disponibili se non si specificano tag per questa rete fisica. +error.session.expired=La sessione \u00e8 scaduta. error.something.went.wrong.please.correct.the.following=E\\' stato rilevato un errore; si prega di correggere quanto indicato di seguito error.unable.to.reach.management.server=Impossibile raggiungere il Management Server +error.unresolved.internet.name=Il tuo nome internet non pu\u00f2 essere risolto. +extractable=Estraibile +force.delete.domain.warning=Attenzione\: La scelta di questa opzione provocher\u00e0 la rimozione di tutti i sotto domini e agli account associati e alle loro risorse. +force.remove.host.warning=Attenzione\: La scelta di questa opzione provocher\u00e0 l\\'arresto forzato di tutte le virtual machine da parte di CloudStack prima di rimuovere questo host dal cluster. +force.stop.instance.warning=Attenzione\: Forzare un arresto su questa instanza dovrebbe essere l\\'ultima opzione. C\\'\u00e8 il rischio di perdita di dati e di un comportamento inconsistente dello stato della virtual machine. 
+ICMP.code=Codice ICMP +ICMP.type=Tipo ICMP +image.directory=Directory Immagine instances.actions.reboot.label=Riavviare una instanza label.accept.project.invitation=Accettare un invito ad un progetto +label.account=Account label.account.and.security.group=Account, Security group +label.account.id=ID dell\\'Account +label.account.name=Nome Account +label.account.specific=Specifico dell\\'Account +label.accounts=Utenti +label.acquire.new.ip=Acquisizione nuovo indirizzo IP +label.action.attach.disk=Collegamento di un Disco +label.action.attach.disk.processing=Collegamento Disco in corso... +label.action.attach.iso=Collegamento di una immagine ISO +label.action.attach.iso.processing=Collegamento immagine ISO in corso... +label.action.cancel.maintenance.mode=Annullamento dello stato di Maintenance Mode +label.action.cancel.maintenance.mode.processing=Cancellazione dello stato Maintenance Mode in corso... +label.action.change.password=Modifica della Password +label.action.change.service=Modificare Servizio +label.action.change.service.processing=Modifica del Servizio in corso... +label.action.copy.ISO=Copia della immagine ISO +label.action.copy.ISO.processing=Copia immagine ISO in corso... +label.action.copy.template=Copia di un Template +label.action.copy.template.processing=Copia Template in corso... +label.action.create.template=Creazione Template +label.action.create.template.from.vm=Creazione Template da una VM +label.action.create.template.from.volume=Creazione Template da un Volume +label.action.create.template.processing=Creazione Template in corso... +label.action.create.vm=Creazione VM +label.action.create.vm.processing=Creazione VM in corso... +label.action.create.volume=Creazione Volume +label.action.create.volume.processing=Creazione Volume in corso... +label.action.delete.account=Cancellazione account +label.action.delete.account.processing=Cancellazione account in corso.... +label.action.delete.cluster=Cancellazione Cluster +label.action.delete.cluster.processing=Cancellazione Cluster in corso.... +label.action.delete.disk.offering=Cancellazione Offerta Disco +label.action.delete.disk.offering.processing=Cancellazione Offerta Disco in corso.... +label.action.delete.domain=Cancellazione Dominio +label.action.delete.domain.processing=Cancellazione Dominio in corso.... +label.action.delete.firewall=Cancellazione regola firewall +label.action.delete.firewall.processing=Cancellazione Firewall in corso.... +label.action.delete.IP.range=Cancellazione intervallo indirizzi IP +label.action.delete.IP.range.processing=Cancellazione intervallo indirizzi IP in corso.... +label.action.delete.ISO=Cancellazione immagine ISO +label.action.delete.ISO.processing=Cancellazione immagine ISO in corso.... +label.action.delete.load.balancer=Cancellazione regola load balancer +label.action.delete.load.balancer.processing=Cancellazione Load Balancer in corso.... +label.action.delete.network=Cancellazione Rete +label.action.delete.network.processing=Cancellazione Rete in corso.... label.action.delete.nexusVswitch=Cancellare Nexus 1000v label.action.delete.physical.network=Cancellazione di una rete fisica +label.action.delete.pod=Cancellazione Pod +label.action.delete.pod.processing=Cancellazione Pod in corso.... +label.action.delete.primary.storage=Cancellazione Storage Primario +label.action.delete.primary.storage.processing=Cancellazione Storage Primario in corso.... 
+label.action.delete.secondary.storage=Cancellazione Storage Secondario +label.action.delete.secondary.storage.processing=Cancellazione Storage Secondario in corso.... +label.action.delete.security.group=Cancellazione Security Group +label.action.delete.security.group.processing=Cancellazione Security Group in corso.... +label.action.delete.service.offering=Cancellazione Offerta di Servizio +label.action.delete.service.offering.processing=Cancellazione Offerta di Servizio in corso.... +label.action.delete.snapshot=Cancellazione Snapshot +label.action.delete.snapshot.processing=Cancellazione Snapshot in corso.... label.action.delete.system.service.offering=Cancellare Offerta di Servizio di Sistema +label.action.delete.template=Cancellazione Template +label.action.delete.template.processing=Cancellazione Template in corso.... +label.action.delete.user=Cancellazione Utente +label.action.delete.user.processing=Cancellazione Utente in corso.... +label.action.delete.volume=Cancellazione Volume +label.action.delete.volume.processing=Cancellazione Volume in corso.... +label.action.delete.zone=Cancellazione Zona +label.action.delete.zone.processing=Cancellazione Zona in corso.... +label.action.destroy.instance.processing=Rimozione Instanza in corso.... +label.action.destroy.instance=Rimozione instanza +label.action.destroy.systemvm.processing=Rimozione VM di Sistema in corso.... +label.action.destroy.systemvm=Rimozione VM di sistema +label.action.detach.disk.processing=Scollegamento Disco in corso.... +label.action.detach.disk=Scollegamento di un Disco +label.action.detach.iso.processing=Scollegamento immagine ISO in corso.... +label.action.detach.iso=Scollegamento immagine ISO +label.action.disable.account=Disabilitazione account +label.action.disable.account.processing=Disabilitazione account in corso.... +label.action.disable.cluster=Disabilitazione Cluster +label.action.disable.cluster.processing=Disabilitazione Cluster in corso.... label.action.disable.nexusVswitch=Disabilitare Nexus 1000v label.action.disable.physical.network=Disabilitare la rete fisica +label.action.disable.pod=Disabilitazione Pod +label.action.disable.pod.processing=Disabilitazione Pod in corso.... +label.action.disable.static.NAT=Disabilitazione NAT Statico +label.action.disable.static.NAT.processing=Disabilitazione NAT Statico in corso.... +label.action.disable.user=Disabilitazione Utente +label.action.disable.user.processing=Disabilitazione Utente in corso.... +label.action.disable.zone=Disabilitazione Zona +label.action.disable.zone.processing=Disabilitazione Zona in corso.... +label.action.download.ISO=Download immagine ISO +label.action.download.template=Download Template +label.action.download.volume=Download Volume +label.action.download.volume.processing=Download Volume in corso.... +label.action.edit.account=Modifica account +label.action.edit.disk.offering=Modifica Offerta Disco +label.action.edit.domain=Modifica Dominio +label.action.edit.global.setting=Modifica Impostazioni Globali +label.action.edit.host=Modifica Host +label.action.edit.instance=Modifica Instanza +label.action.edit.ISO=Modifica immagine ISO +label.action.edit.network=Modifica Rete +label.action.edit.network.offering=Modifica Offerta di Rete +label.action.edit.network.processing=Modifica Rete in corso.... 
+label.action.edit.pod=Modifica Pod +label.action.edit.primary.storage=Modifica Storage Primario +label.action.edit.resource.limits=Modifica Limiti delle Risorse +label.action.edit.service.offering=Modifica Offerta di Servizio +label.action.edit.template=Modifica Template +label.action.edit.user=Modifica Utente +label.action.edit.zone=Modifica Zona +label.action.enable.account=Abilitazione account +label.action.enable.account.processing=Abilitazione account in corso.... +label.action.enable.cluster=Abilitazione Cluster +label.action.enable.cluster.processing=Abilitazione Cluster in corso.... +label.action.enable.maintenance.mode=Abilitazione dello stato Maintenance Mode +label.action.enable.maintenance.mode.processing=Abilitazione dello stato Maintenance Mode in corso.... label.action.enable.nexusVswitch=Abilitare Nexus 1000v label.action.enable.physical.network=Abilitare la rete fisica +label.action.enable.pod=Abilitazione Pod +label.action.enable.pod.processing=Abilitazione Pod in corso.... +label.action.enable.static.NAT=Abilitazione NAT Statico +label.action.enable.static.NAT.processing=Abilitazione NAT Statico in corso.... +label.action.enable.user=Abilitazione Utente +label.action.enable.user.processing=Abilitazione Utente in corso.... +label.action.enable.zone=Abilitazione Zona +label.action.enable.zone.processing=Abilitazione Zona in corso.... +label.action.force.reconnect.processing=Riconnessione in corso.... +label.action.generate.keys=Generazione Chiavi +label.action.generate.keys.processing=Generazione Chiavi in corso.... label.action.list.nexusVswitch=Elencare Nexus 1000v +label.action.lock.account=Blocco di un account +label.action.lock.account.processing=Blocco account in corso.... +label.action.manage.cluster=Gestione Cluster +label.action.manage.cluster.processing=Gestione Cluster in corso.... +label.action.migrate.instance=Migrazione Instanza +label.action.migrate.instance.processing=Migrazione Instanza in corso.... +label.action.migrate.router=Migrazione Router label.action.migrate.router.processing=Migrazione Router... +label.action.migrate.systemvm=Migrazione VM di Systema +label.action.migrate.systemvm.processing=Migrazione VM di Sistema in corso.... +label.action.reboot.instance.processing=Riavvio Instanza in corso.... +label.action.reboot.instance=Riavvio Instanza +label.action.reboot.router.processing=Riavvio Router in corso.... +label.action.reboot.router=Riavvio Router +label.action.reboot.systemvm.processing=Riavvio VM di Sistema in corso.... +label.action.reboot.systemvm=Riavvio VM di Sistema label.action.register.iso=Registrare una ISO label.action.register.template=Registrare un template +label.action.release.ip.processing=Rilascio indirizzo IP in corso.... +label.action.release.ip=Rilascio indirizzo IP +label.action.remove.host.processing=Rimozione Host in corso.... +label.action.remove.host=Rimozione Host +label.action.reset.password.processing=Reset della Password in corso.... +label.action.reset.password=Reset Password +label.action.resize.volume.processing=Ridimensionamento Volume in corso.... +label.action.resize.volume=Ridimensionamento Volume +label.action.restore.instance.processing=Restore dell\\'Instanza in corso.... +label.action.restore.instance=Restore Instanza +label.actions=Azioni +label.action.start.instance=Avvio Instanza +label.action.start.instance.processing=Avvio Instanza in corso.... +label.action.start.router=Avvio Router +label.action.start.router.processing=Avvio Router in corso.... 
+label.action.start.systemvm=Avvio VM di Sistema +label.action.start.systemvm.processing=Avvio VM di Sistema in corso.... +label.action.stop.instance=Arresto Instanza +label.action.stop.instance.processing=Arresto Instanza in corso.... +label.action.stop.router=Arresto Router +label.action.stop.router.processing=Arresto Router in corso.... +label.action.stop.systemvm=Arresto VM di Sistema +label.action.stop.systemvm.processing=Arresto VM di Sistema in corso.... +label.action.update.OS.preference=Aggiornamento Preferenze OS +label.action.update.OS.preference.processing=Aggiornamento preferenze OS in corso.... label.activate.project=Attivare il Progetto +label.active.sessions=Sessioni Attive label.add.accounts=Aggiungere utenti label.add.accounts.to=Aggiungere utenti a label.add.account.to.project=Aggiungere account al progetto @@ -49,6 +224,7 @@ label.add.compute.offering=Aggiungere una offerta computazionale label.add.egress.rule=Aggiungere una regola d\\'uscita label.add.F5.device=Aggiungere device F5 label.add.guest.network=Aggiungere una rete guest +label.additional.networks=Network Aggiuntivi label.add.netScaler.device=Aggiungere device Netscaler label.add.network.ACL=Aggiungere le ACL di rete label.add.network.offering=Aggiungere offerta di rete @@ -76,22 +252,32 @@ label.add.vpn.customer.gateway=Aggiungere Gateway VPN del Cliente label.add.VPN.gateway=Aggiungere un Gateway VPN label.add.vpn.user=Aggiungere utente VPN label.advanced=Avanzato +label.advanced.search=Ricerca Avanzata label.agent.password=Password per l\\'Agent label.agent.username=Username per l\\'Agent label.allocated=Allocato label.apply=Applicare label.associated.network=Rete Associata +label.available.public.ips=Indirizzi IP Pubblici Disponibili label.bandwidth=Capacit\u00e0 della banda (Bandwidth) label.basic=Basic label.broadcast.uri=URI di Broadcast +label.cancel=Annulla +label.certificate=Certificato label.change.service.offering=Modificare offerta di servizio +label.character=Carattere label.checksum=Checksum MD5 +label.cidr=CIDR label.CIDR.list=Lista CIDR label.CIDR.of.destination.network=Valore CIDR della rete di destinazione label.clear.list=Pulizia dell\\'elenco +label.cloud.console=Console di Gestione Cloud label.cluster=Cluster label.cluster.name=Nome del Cluster label.clusters=Cluster +label.cluster.type=Tipo di Cluster +label.clvm=CLVM +label.code=Codice label.community=Community label.compute.and.storage=Computazione e Storage label.compute=Compute @@ -107,6 +293,7 @@ label.console.proxy=Proxy di Console label.continue.basic.install=Proseguire con l\\'installazione di base label.continue=Continuare label.corrections.saved=Salvataggio correzioni effettuato +label.cpu=CPU label.cpu.mhz=CPU (in MHz) label.created.by.system=Creato dal sistema label.create.project=Creare un progetto @@ -234,6 +421,7 @@ label.max.volumes=Numero max di volumi label.max.vpcs=Numero max di VPC label.may.continue=E\\' ora possibile continuare. 
label.memory.mb=Memoria (in MB) +label.menu.accounts=Utenti label.menu.configuration=Configurazione label.menu.infrastructure=Infrastruttura label.menu.system.service.offerings=Offerte di Sistema @@ -433,6 +621,7 @@ label.zone.name=Nome Zona label.zones=Zone label.zone.type=Tipo di Zona label.zoneWizard.trafficType.guest=Guest\: Traffico di rete tra le virtual machine dell\\'utente finale +label.zoneWizard.trafficType.management=Management\: Traffico di rete tra le risorse interne di CloudStack, incluso qualsiasi componente che comunichi con il Management Server, come ad esempio gli host e le VM di Sistema di CloudStack label.zoneWizard.trafficType.public=Public\: Traffico di rete tra la rete internet e le virtual machine nell\\'infrastruttura cloud. label.zoneWizard.trafficType.storage=Storage\: Traffico di rete tra i server di primary e secondary storage, come ad esempio i template delle VM e le operazioni di snapshot message.acquire.new.ip=Si prega di confermare di voler acquisire un nuovo indirizzo IP per questa rete. diff --git a/client/WEB-INF/classes/resources/messages_ja.properties b/client/WEB-INF/classes/resources/messages_ja.properties index e483a97804b..a6075958d6a 100644 --- a/client/WEB-INF/classes/resources/messages_ja.properties +++ b/client/WEB-INF/classes/resources/messages_ja.properties @@ -23,7 +23,7 @@ error.could.not.enable.zone=\u30be\u30fc\u30f3\u3092\u6709\u52b9\u306b\u3067\u30 error.installWizard.message=\u554f\u984c\u304c\u767a\u751f\u3057\u307e\u3057\u305f\u3002\u623b\u3063\u3066\u30a8\u30e9\u30fc\u3092\u4fee\u6b63\u3067\u304d\u307e\u3059\u3002 error.invalid.username.password=\u7121\u52b9\u306a\u30e6\u30fc\u30b6\u30fc\u540d\u307e\u305f\u306f\u30d1\u30b9\u30ef\u30fc\u30c9 error.login=\u30e6\u30fc\u30b6\u30fc\u540d/\u30d1\u30b9\u30ef\u30fc\u30c9\u304c\u8a18\u9332\u3068\u4e00\u81f4\u3057\u307e\u305b\u3093\u3002 -error.menu.select=\u00e3\u0082\u00a2\u00e3\u0082\u00a4\u00e3\u0083\u0086\u00e3\u0083\u00a0\u00e3\u0081\u008c\u00e9\u0081\u00b8\u00e6\u008a\u009e\u00e3\u0081\u0095\u00e3\u0082\u008c\u00e3\u0081\u00a6\u00e3\u0081\u0084\u00e3\u0081\u00aa\u00e3\u0081\u0084\u00e3\u0081\u009f\u00e3\u0082\u0081\u00e3\u0082\u00a2\u00e3\u0082\u00af\u00e3\u0082\u00b7\u00e3\u0083\u00a7\u00e3\u0083\u00b3\u00e3\u0082\u0092\u00e5\u00ae\u009f\u00e8\u00a1\u008c\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u0093\u00e3\u0081\u00a8\u00e3\u0081\u008c\u00e3\u0081\u00a7\u00e3\u0081\u008d\u00e3\u0081\u00be\u00e3\u0081\u009b\u00e3\u0082\u0093 +error.menu.select=\u9805\u76ee\u304c\u9078\u629e\u3055\u308c\u3066\u3044\u306a\u3044\u305f\u3081\u64cd\u4f5c\u3092\u5b9f\u884c\u3067\u304d\u307e\u305b\u3093\u3002 error.mgmt.server.inaccessible=\u7ba1\u7406\u30b5\u30fc\u30d0\u30fc\u306b\u30a2\u30af\u30bb\u30b9\u3067\u304d\u307e\u305b\u3093\u3002\u5f8c\u3067\u518d\u5b9f\u884c\u3057\u3066\u304f\u3060\u3055\u3044\u3002 error.password.not.match=\u30d1\u30b9\u30ef\u30fc\u30c9\u304c\u4e00\u81f4\u3057\u307e\u305b\u3093 error.please.specify.physical.network.tags=\u3053\u306e\u7269\u7406\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u306e\u30bf\u30b0\u3092\u6307\u5b9a\u3057\u306a\u3051\u308c\u3070\u3001\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u306f\u4f7f\u7528\u3067\u304d\u307e\u305b\u3093\u3002 @@ -32,11 +32,11 @@ error.something.went.wrong.please.correct.the.following=\u554f\u984c\u304c\u767a error.unable.to.reach.management.server=\u7ba1\u7406\u30b5\u30fc\u30d0\u30fc\u3068\u901a\u4fe1\u3067\u304d\u307e\u305b\u3093 
error.unresolved.internet.name=\u3042\u306a\u305f\u306e\u30a4\u30f3\u30bf\u30fc\u30cd\u30c3\u30c8\u540d\u306f\u89e3\u6c7a\u3055\u308c\u307e\u305b\u3093\u3067\u3057\u305f\u3002 extractable=\u62bd\u51fa\u53ef\u80fd -force.delete.domain.warning=\u8b66\u544a\: \u3053\u306e\u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u9078\u629e\u3059\u308b\u3068\u3001\u3059\u3079\u3066\u306e\u5b50\u30c9\u30e1\u30a4\u30f3\u304a\u3088\u3073\u95a2\u9023\u3059\u308b\u3059\u3079\u3066\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u3068\u305d\u306e\u30ea\u30bd\u30fc\u30b9\u304c\u524a\u9664\u3055\u308c\u307e\u3059\u3002 +force.delete.domain.warning=\u8b66\u544a: \u3053\u306e\u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u9078\u629e\u3059\u308b\u3068\u3001\u3059\u3079\u3066\u306e\u5b50\u30c9\u30e1\u30a4\u30f3\u304a\u3088\u3073\u95a2\u9023\u3059\u308b\u3059\u3079\u3066\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u3068\u305d\u306e\u30ea\u30bd\u30fc\u30b9\u304c\u524a\u9664\u3055\u308c\u307e\u3059\u3002 force.delete=\u5f37\u5236\u524a\u9664 -force.remove.host.warning=\u8b66\u544a\: \u3053\u306e\u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u9078\u629e\u3059\u308b\u3068\u3001\u5b9f\u884c\u4e2d\u306e\u3059\u3079\u3066\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u304c\u5f37\u5236\u7684\u306b\u505c\u6b62\u3055\u308c\u3001\u30af\u30e9\u30b9\u30bf\u30fc\u304b\u3089\u3053\u306e\u30db\u30b9\u30c8\u304c\u5f37\u5236\u7684\u306b\u89e3\u9664\u3055\u308c\u307e\u3059\u3002 +force.remove.host.warning=\u8b66\u544a: \u3053\u306e\u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u9078\u629e\u3059\u308b\u3068\u3001\u5b9f\u884c\u4e2d\u306e\u3059\u3079\u3066\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u304c\u5f37\u5236\u7684\u306b\u505c\u6b62\u3055\u308c\u3001\u30af\u30e9\u30b9\u30bf\u30fc\u304b\u3089\u3053\u306e\u30db\u30b9\u30c8\u304c\u5f37\u5236\u7684\u306b\u89e3\u9664\u3055\u308c\u307e\u3059\u3002 force.remove=\u5f37\u5236\u89e3\u9664 -force.stop.instance.warning=\u8b66\u544a\: \u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u5f37\u5236\u505c\u6b62\u306f\u3001\u6700\u7d42\u624b\u6bb5\u306b\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u30c7\u30fc\u30bf\u3092\u640d\u5931\u3059\u308b\u3060\u3051\u3067\u306a\u304f\u3001\u4eee\u60f3\u30de\u30b7\u30f3\u306e\u52d5\u4f5c\u304c\u4e00\u8cab\u3057\u306a\u304f\u306a\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002 +force.stop.instance.warning=\u8b66\u544a: \u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u5f37\u5236\u505c\u6b62\u306f\u3001\u6700\u7d42\u624b\u6bb5\u306b\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u30c7\u30fc\u30bf\u3092\u640d\u5931\u3059\u308b\u3060\u3051\u3067\u306a\u304f\u3001\u4eee\u60f3\u30de\u30b7\u30f3\u306e\u52d5\u4f5c\u304c\u4e00\u8cab\u3057\u306a\u304f\u306a\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002 force.stop=\u5f37\u5236\u505c\u6b62 ICMP.code=ICMP \u30b3\u30fc\u30c9 ICMP.type=ICMP \u306e\u7a2e\u985e @@ -230,7 +230,7 @@ label.action.update.resource.count.processing=\u30ea\u30bd\u30fc\u30b9\u6570\u30 label.action.update.resource.count=\u30ea\u30bd\u30fc\u30b9\u6570\u306e\u66f4\u65b0 label.activate.project=\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u306e\u30a2\u30af\u30c6\u30a3\u30d6\u5316 label.active.sessions=\u30a2\u30af\u30c6\u30a3\u30d6\u306a\u30bb\u30c3\u30b7\u30e7\u30f3 -label.add.accounts.to=\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u8ffd\u52a0\u5148\: +label.add.accounts.to=\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u8ffd\u52a0\u5148: label.add.accounts=\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u8ffd\u52a0 label.add.account.to.project=\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u3078\u306e\u8ffd\u52a0 
label.add.account=\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u8ffd\u52a0 @@ -456,7 +456,7 @@ label.domain.admin=\u30c9\u30e1\u30a4\u30f3\u7ba1\u7406\u8005 label.domain.id=\u30c9\u30e1\u30a4\u30f3 ID label.domain.name=\u30c9\u30e1\u30a4\u30f3\u540d label.domain.router=\u30c9\u30e1\u30a4\u30f3 \u30eb\u30fc\u30bf\u30fc -label.domain.suffix=DNS \u30c9\u30e1\u30a4\u30f3 \u30b5\u30d5\u30a3\u30c3\u30af\u30b9 (\u4f8b\: xyz.com) +label.domain.suffix=DNS \u30c9\u30e1\u30a4\u30f3 \u30b5\u30d5\u30a3\u30c3\u30af\u30b9 (\u4f8b: xyz.com) label.domain=\u30c9\u30e1\u30a4\u30f3 label.done=\u5b8c\u4e86 label.double.quotes.are.not.allowed=\u4e8c\u91cd\u5f15\u7528\u7b26\u306f\u4f7f\u7528\u3067\u304d\u307e\u305b\u3093 @@ -534,7 +534,7 @@ label.host.alerts=\u30db\u30b9\u30c8 \u30a2\u30e9\u30fc\u30c8 label.host.MAC=\u30db\u30b9\u30c8\u306e MAC label.host.name=\u30db\u30b9\u30c8\u540d label.hosts=\u30db\u30b9\u30c8 -label.host.tags=\u00e3\u0083\u009b\u00e3\u0082\u00b9\u00e3\u0083\u0088\u00e3\u0082\u00bf\u00e3\u0082\u00b0 +label.host.tags=\u30db\u30b9\u30c8 \u30bf\u30b0 label.host=\u30db\u30b9\u30c8 label.hourly=\u6bce\u6642 label.hypervisor.capabilities=\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u306e\u6a5f\u80fd @@ -563,8 +563,8 @@ label.installWizard.addZoneIntro.subtitle=\u30be\u30fc\u30f3\u306b\u3064\u3044\u label.installWizard.addZoneIntro.title=\u30be\u30fc\u30f3\u3092\u8ffd\u52a0\u3057\u307e\u3057\u3087\u3046 label.installWizard.addZone.title=\u30be\u30fc\u30f3\u306e\u8ffd\u52a0 label.installWizard.click.launch=[\u8d77\u52d5] \u3092\u30af\u30ea\u30c3\u30af\u3057\u3066\u304f\u3060\u3055\u3044\u3002 -label.installWizard.subtitle=\u3053\u306e\u30ac\u30a4\u30c9 \u30c4\u30a2\u30fc\u306f CloudStack&\#8482; \u74b0\u5883\u306e\u30bb\u30c3\u30c8\u30a2\u30c3\u30d7\u306b\u5f79\u7acb\u3061\u307e\u3059 -label.installWizard.title=CloudStack&\#8482; \u3078\u3088\u3046\u3053\u305d +label.installWizard.subtitle=\u3053\u306e\u30ac\u30a4\u30c9 \u30c4\u30a2\u30fc\u306f CloudStack™ \u74b0\u5883\u306e\u30bb\u30c3\u30c8\u30a2\u30c3\u30d7\u306b\u5f79\u7acb\u3061\u307e\u3059 +label.installWizard.title=CloudStack™ \u3078\u3088\u3046\u3053\u305d label.instance.limits=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u5236\u9650 label.instance.name=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u540d label.instances=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9 @@ -573,12 +573,12 @@ label.internal.dns.1=\u5185\u90e8 DNS 1 label.internal.dns.2=\u5185\u90e8 DNS 2 label.internal.name=\u5185\u90e8\u540d label.interval.type=\u9593\u9694\u306e\u7a2e\u985e -label.introduction.to.cloudstack=CloudStack&\#8482; \u306e\u7d39\u4ecb +label.introduction.to.cloudstack=CloudStack™ \u306e\u7d39\u4ecb label.invalid.integer=\u7121\u52b9\u306a\u6574\u6570 label.invalid.number=\u7121\u52b9\u306a\u6570 label.invitations=\u62db\u5f85\u72b6 label.invited.accounts=\u62db\u5f85\u6e08\u307f\u30a2\u30ab\u30a6\u30f3\u30c8 -label.invite.to=\u62db\u5f85\u3059\u308b\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\: +label.invite.to=\u62db\u5f85\u3059\u308b\u30d7\u30ed\u30b8\u30a7\u30af\u30c8: label.invite=\u62db\u5f85 label.ip.address=IP \u30a2\u30c9\u30ec\u30b9 label.ipaddress=IP \u30a2\u30c9\u30ec\u30b9 @@ -695,9 +695,9 @@ label.menu.virtual.resources=\u4eee\u60f3\u30ea\u30bd\u30fc\u30b9 label.menu.volumes=\u30dc\u30ea\u30e5\u30fc\u30e0 label.migrate.instance.to.host=\u5225\u306e\u30db\u30b9\u30c8\u3078\u306e\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u79fb\u884c label.migrate.instance.to.ps=\u5225\u306e\u30d7\u30e9\u30a4\u30de\u30ea 
\u30b9\u30c8\u30ec\u30fc\u30b8\u3078\u306e\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u79fb\u884c -label.migrate.instance.to=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u79fb\u884c\u5148\: -label.migrate.router.to=\u30eb\u30fc\u30bf\u30fc\u306e\u79fb\u884c\u5148\: -label.migrate.systemvm.to=\u30b7\u30b9\u30c6\u30e0 VM \u306e\u79fb\u884c\u5148\: +label.migrate.instance.to=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u79fb\u884c\u5148: +label.migrate.router.to=\u30eb\u30fc\u30bf\u30fc\u306e\u79fb\u884c\u5148: +label.migrate.systemvm.to=\u30b7\u30b9\u30c6\u30e0 VM \u306e\u79fb\u884c\u5148: label.migrate.to.host=\u30db\u30b9\u30c8\u3078\u79fb\u884c label.migrate.to.storage=\u30b9\u30c8\u30ec\u30fc\u30b8\u3078\u79fb\u884c label.migrate.volume=\u5225\u306e\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3078\u306e\u30dc\u30ea\u30e5\u30fc\u30e0\u306e\u79fb\u884c @@ -785,7 +785,7 @@ label.os.preference=OS \u57fa\u672c\u8a2d\u5b9a label.os.type=OS \u306e\u7a2e\u985e label.owned.public.ips=\u6240\u6709\u3059\u308b\u30d1\u30d6\u30ea\u30c3\u30af IP \u30a2\u30c9\u30ec\u30b9 label.owner.account=\u6240\u6709\u8005\u30a2\u30ab\u30a6\u30f3\u30c8 -label.owner.domain=\u00e6\u0089\u0080\u00e6\u009c\u0089\u00e8\u0080 +label.owner.domain=\u6240\u6709\u8005\u30c9\u30e1\u30a4\u30f3 label.parent.domain=\u89aa\u30c9\u30e1\u30a4\u30f3 label.password.enabled=\u30d1\u30b9\u30ef\u30fc\u30c9\u7ba1\u7406\u6709\u52b9 label.password=\u30d1\u30b9\u30ef\u30fc\u30c9 @@ -817,7 +817,7 @@ label.private.interface=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 \u30a4\u30f3\u30bf\ label.private.ip.range=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2 label.private.ips=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 IP \u30a2\u30c9\u30ec\u30b9 label.private.ip=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 IP \u30a2\u30c9\u30ec\u30b9 -label.privatekey=PKC\#8 \u79d8\u5bc6\u30ad\u30fc +label.privatekey=PKC#8 \u79d8\u5bc6\u30ad\u30fc label.private.network=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 \u30cd\u30c3\u30c8\u30ef\u30fc\u30af label.private.port=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 \u30dd\u30fc\u30c8 label.private.zone=\u30d7\u30e9\u30a4\u30d9\u30fc\u30c8 \u30be\u30fc\u30f3 @@ -951,7 +951,7 @@ label.start.reserved.system.IP=\u4e88\u7d04\u6e08\u307f\u958b\u59cb\u30b7\u30b9\ label.start.vlan=\u958b\u59cb VLAN label.state=\u72b6\u614b label.static.nat.enabled=\u9759\u7684 NAT \u6709\u52b9 -label.static.nat.to=\u9759\u7684 NAT \u306e\u8a2d\u5b9a\u5148\: +label.static.nat.to=\u9759\u7684 NAT \u306e\u8a2d\u5b9a\u5148: label.static.nat=\u9759\u7684 NAT label.static.nat.vm.details=\u9759\u7684 NAT VM \u306e\u8a73\u7d30 label.statistics=\u7d71\u8a08 @@ -960,7 +960,7 @@ label.step.1.title=\u624b\u9806 1. \u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\ label.step.1=\u624b\u9806 1 label.step.2.title=\u624b\u9806 2. \u30b5\u30fc\u30d3\u30b9 \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0 label.step.2=\u624b\u9806 2 -label.step.3.title=\u624b\u9806 3. \u30c7\u30a3\u30b9\u30af \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u306e\u9078\u629e +label.step.3.title=\u624b\u9806 3. \u30c7\u30a3\u30b9\u30af \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u306e\u9078\u629e label.step.3=\u624b\u9806 3 label.step.4.title=\u624b\u9806 4. 
\u30cd\u30c3\u30c8\u30ef\u30fc\u30af label.step.4=\u624b\u9806 4 @@ -986,7 +986,7 @@ label.storage.traffic=\u30b9\u30c8\u30ec\u30fc\u30b8 \u30c8\u30e9\u30d5\u30a3\u3 label.storage.type=\u30b9\u30c8\u30ec\u30fc\u30b8\u306e\u7a2e\u985e label.storage=\u30b9\u30c8\u30ec\u30fc\u30b8 label.subdomain.access=\u30b5\u30d6\u30c9\u30e1\u30a4\u30f3 \u30a2\u30af\u30bb\u30b9 -label.submitted.by=[\u9001\u4fe1\u30e6\u30fc\u30b6\u30fc\: ] +label.submitted.by=[\u9001\u4fe1\u30e6\u30fc\u30b6\u30fc: ] label.submit=\u9001\u4fe1 label.succeeded=\u6210\u529f label.sunday=\u65e5\u66dc\u65e5 @@ -1061,7 +1061,7 @@ label.vcipaddress=vCenter IP \u30a2\u30c9\u30ec\u30b9 label.version=\u30d0\u30fc\u30b8\u30e7\u30f3 label.view.all=\u3059\u3079\u3066\u8868\u793a label.view.console=\u30b3\u30f3\u30bd\u30fc\u30eb\u306e\u8868\u793a -label.viewing=\u8868\u793a\u9805\u76ee\: +label.viewing=\u8868\u793a\u9805\u76ee: label.view.more=\u8a73\u7d30\u8868\u793a label.view=\u8868\u793a - label.virtual.appliances=\u4eee\u60f3\u30a2\u30d7\u30e9\u30a4\u30a2\u30f3\u30b9 @@ -1110,7 +1110,7 @@ label.wednesday=\u6c34\u66dc\u65e5 label.weekly=\u6bce\u9031 label.welcome.cloud.console=\u7ba1\u7406\u30b3\u30f3\u30bd\u30fc\u30eb\u3078\u3088\u3046\u3053\u305d label.welcome=\u3088\u3046\u3053\u305d -label.what.is.cloudstack=CloudStack&\#8482; \u306b\u3064\u3044\u3066 +label.what.is.cloudstack=CloudStack™ \u306b\u3064\u3044\u3066 label.xen.traffic.label=XenServer \u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u306e\u30e9\u30d9\u30eb label.yes=\u306f\u3044 label.zone.details=\u30be\u30fc\u30f3\u306e\u8a73\u7d30 @@ -1124,9 +1124,9 @@ label.zones=\u30be\u30fc\u30f3 label.zone.type=\u30be\u30fc\u30f3\u306e\u7a2e\u985e label.zone=\u30be\u30fc\u30f3 label.zone.wide=\u30be\u30fc\u30f3\u5168\u4f53 -label.zoneWizard.trafficType.guest=\u30b2\u30b9\u30c8\: \u30a8\u30f3\u30c9\u30e6\u30fc\u30b6\u30fc\u4eee\u60f3\u30de\u30b7\u30f3\u9593\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af -label.zoneWizard.trafficType.public=\u30d1\u30d6\u30ea\u30c3\u30af\: \u30a4\u30f3\u30bf\u30fc\u30cd\u30c3\u30c8\u3068\u30af\u30e9\u30a6\u30c9\u5185\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u306e\u9593\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af -label.zoneWizard.trafficType.storage=\u30b9\u30c8\u30ec\u30fc\u30b8\: VM\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3068\u30b9\u30ca\u30c3\u30d7\u30b7\u30e7\u30c3\u30c8\u306e\u3088\u3046\u306a\u3001\u30d7\u30e9\u30a4\u30de\u30ea\u3068\u30bb\u30ab\u30f3\u30c0\u30ea\u306e\u30b9\u30c8\u30ec\u30fc\u30b8\u30b5\u30fc\u30d0\u30fc\u9593\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u3002 +label.zoneWizard.trafficType.guest=\u30b2\u30b9\u30c8: \u30a8\u30f3\u30c9\u30e6\u30fc\u30b6\u30fc\u4eee\u60f3\u30de\u30b7\u30f3\u9593\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af +label.zoneWizard.trafficType.public=\u30d1\u30d6\u30ea\u30c3\u30af: \u30a4\u30f3\u30bf\u30fc\u30cd\u30c3\u30c8\u3068\u30af\u30e9\u30a6\u30c9\u5185\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u306e\u9593\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af +label.zoneWizard.trafficType.storage=\u30b9\u30c8\u30ec\u30fc\u30b8: VM\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3068\u30b9\u30ca\u30c3\u30d7\u30b7\u30e7\u30c3\u30c8\u306e\u3088\u3046\u306a\u3001\u30d7\u30e9\u30a4\u30de\u30ea\u3068\u30bb\u30ab\u30f3\u30c0\u30ea\u306e\u30b9\u30c8\u30ec\u30fc\u30b8\u30b5\u30fc\u30d0\u30fc\u9593\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u3002 managed.state=\u7ba1\u7406\u5bfe\u8c61\u72b6\u614b message.acquire.new.ip=\u3053\u306e\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u306e\u65b0\u3057\u3044 IP 
\u30a2\u30c9\u30ec\u30b9\u3092\u53d6\u5f97\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.acquire.new.ip.vpc=VPC\u306e\u65b0\u3057\u3044IP\u3092\u53d6\u5f97\u3059\u308b\u3053\u3068\u3092\u78ba\u8a8d\u3057\u3066\u304f\u3060\u3055\u3044\u3002 @@ -1138,8 +1138,8 @@ message.action.change.service.warning.for.router=\u73fe\u5728\u306e\u30b5\u30fc\ message.action.delete.cluster=\u3053\u306e\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.delete.disk.offering=\u3053\u306e\u30c7\u30a3\u30b9\u30af \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.delete.domain=\u3053\u306e\u30c9\u30e1\u30a4\u30f3\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.action.delete.external.firewall=\u3053\u306e\u5916\u90e8\u30d5\u30a1\u30a4\u30a2\u30a6\u30a9\u30fc\u30eb\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? \u8b66\u544a\: \u540c\u3058\u5916\u90e8\u30d5\u30a1\u30a4\u30a2\u30a6\u30a9\u30fc\u30eb\u3092\u518d\u5ea6\u8ffd\u52a0\u3059\u308b\u4e88\u5b9a\u3067\u3042\u308b\u5834\u5408\u306f\u3001\u30c7\u30d0\u30a4\u30b9\u306e\u4f7f\u7528\u72b6\u6cc1\u30c7\u30fc\u30bf\u3092\u30ea\u30bb\u30c3\u30c8\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002 -message.action.delete.external.load.balancer=\u3053\u306e\u5916\u90e8\u8ca0\u8377\u5206\u6563\u88c5\u7f6e\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? \u8b66\u544a\: \u540c\u3058\u5916\u90e8\u8ca0\u8377\u5206\u6563\u88c5\u7f6e\u3092\u518d\u5ea6\u8ffd\u52a0\u3059\u308b\u4e88\u5b9a\u3067\u3042\u308b\u5834\u5408\u306f\u3001\u30c7\u30d0\u30a4\u30b9\u306e\u4f7f\u7528\u72b6\u6cc1\u30c7\u30fc\u30bf\u3092\u30ea\u30bb\u30c3\u30c8\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002 +message.action.delete.external.firewall=\u3053\u306e\u5916\u90e8\u30d5\u30a1\u30a4\u30a2\u30a6\u30a9\u30fc\u30eb\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? \u8b66\u544a: \u540c\u3058\u5916\u90e8\u30d5\u30a1\u30a4\u30a2\u30a6\u30a9\u30fc\u30eb\u3092\u518d\u5ea6\u8ffd\u52a0\u3059\u308b\u4e88\u5b9a\u3067\u3042\u308b\u5834\u5408\u306f\u3001\u30c7\u30d0\u30a4\u30b9\u306e\u4f7f\u7528\u72b6\u6cc1\u30c7\u30fc\u30bf\u3092\u30ea\u30bb\u30c3\u30c8\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002 +message.action.delete.external.load.balancer=\u3053\u306e\u5916\u90e8\u8ca0\u8377\u5206\u6563\u88c5\u7f6e\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? \u8b66\u544a: \u540c\u3058\u5916\u90e8\u8ca0\u8377\u5206\u6563\u88c5\u7f6e\u3092\u518d\u5ea6\u8ffd\u52a0\u3059\u308b\u4e88\u5b9a\u3067\u3042\u308b\u5834\u5408\u306f\u3001\u30c7\u30d0\u30a4\u30b9\u306e\u4f7f\u7528\u72b6\u6cc1\u30c7\u30fc\u30bf\u3092\u30ea\u30bb\u30c3\u30c8\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002 message.action.delete.ingress.rule=\u3053\u306e\u53d7\u4fe1\u898f\u5247\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.delete.ISO.for.all.zones=\u305d\u306e ISO \u306f\u3059\u3079\u3066\u306e\u30be\u30fc\u30f3\u3067\u4f7f\u7528\u3055\u308c\u3066\u3044\u307e\u3059\u3002\u3059\u3079\u3066\u306e\u30be\u30fc\u30f3\u304b\u3089\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.delete.ISO=\u3053\u306e ISO \u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
@@ -1175,9 +1175,9 @@ message.action.enable.pod=\u3053\u306e\u30dd\u30c3\u30c9\u3092\u6709\u52b9\u306b message.action.enable.zone=\u3053\u306e\u30be\u30fc\u30f3\u3092\u6709\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.force.reconnect=\u30db\u30b9\u30c8\u306f\u5f37\u5236\u7684\u306b\u518d\u63a5\u7d9a\u3057\u307e\u3057\u305f\u3002\u3053\u306e\u51e6\u7406\u306b\u306f\u6570\u5206\u304b\u304b\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002 message.action.host.enable.maintenance.mode=\u4fdd\u5b88\u30e2\u30fc\u30c9\u3092\u6709\u52b9\u306b\u3059\u308b\u3068\u3001\u3053\u306e\u30db\u30b9\u30c8\u3067\u5b9f\u884c\u4e2d\u306e\u3059\u3079\u3066\u306e\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u304c\u307b\u304b\u306e\u4f7f\u7528\u3067\u304d\u308b\u30db\u30b9\u30c8\u306b\u30e9\u30a4\u30d6 \u30de\u30a4\u30b0\u30ec\u30fc\u30b7\u30e7\u30f3\u3055\u308c\u307e\u3059\u3002 -message.action.instance.reset.password=\u00e3\u0081\u0093\u00e3\u0081\u00ae\u00e4\u00bb\u00ae\u00e6\u0083\u00b3\u00e3\u0083\u009e\u00e3\u0082\u00b7\u00e3\u0083\u00b3\u00e3\u0081\u00ae\u00e3\u0083\u00ab\u00e3\u0083\u00bc\u00e3\u0083\u0088\u00e3\u0083\u0091\u00e3\u0082\u00b9\u00e3\u0083\u00af\u00e3\u0083\u00bc\u00e3\u0083\u0089\u00e3\u0082\u0092\u00e5\u00a4\u0089\u00e6\u009b\u00b4\u00e3\u0081\u0097\u00e3\u0081\u00a6\u00e3\u0082\u0082\u00e3\u0082\u0088\u00e3\u0082\u008d\u00e3\u0081\u0097\u00e3\u0081\u0084\u00e3\u0081\u00a7\u00e3\u0081\u0099\u00e3\u0081\u008b? +message.action.instance.reset.password=\u3053\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u306e\u30eb\u30fc\u30c8 \u30d1\u30b9\u30ef\u30fc\u30c9\u3092\u5909\u66f4\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.manage.cluster=\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u7ba1\u7406\u5bfe\u8c61\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.action.primarystorage.enable.maintenance.mode=\u8b66\u544a\: \u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u4fdd\u5b88\u30e2\u30fc\u30c9\u306b\u3059\u308b\u3068\u3001\u305d\u306e\u30b9\u30c8\u30ec\u30fc\u30b8\u4e0a\u306e\u30dc\u30ea\u30e5\u30fc\u30e0\u3092\u4f7f\u7528\u3059\u308b\u3059\u3079\u3066\u306e VM \u304c\u505c\u6b62\u3057\u307e\u3059\u3002\u7d9a\u884c\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? +message.action.primarystorage.enable.maintenance.mode=\u8b66\u544a: \u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u4fdd\u5b88\u30e2\u30fc\u30c9\u306b\u3059\u308b\u3068\u3001\u305d\u306e\u30b9\u30c8\u30ec\u30fc\u30b8\u4e0a\u306e\u30dc\u30ea\u30e5\u30fc\u30e0\u3092\u4f7f\u7528\u3059\u308b\u3059\u3079\u3066\u306e VM \u304c\u505c\u6b62\u3057\u307e\u3059\u3002\u7d9a\u884c\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.reboot.instance=\u3053\u306e\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u3092\u518d\u8d77\u52d5\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.reboot.router=\u3053\u306e\u4eee\u60f3\u30eb\u30fc\u30bf\u30fc\u3067\u63d0\u4f9b\u3059\u308b\u3059\u3079\u3066\u306e\u30b5\u30fc\u30d3\u30b9\u304c\u4e2d\u65ad\u3055\u308c\u307e\u3059\u3002\u3053\u306e\u30eb\u30fc\u30bf\u30fc\u3092\u518d\u8d77\u52d5\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.reboot.systemvm=\u3053\u306e\u30b7\u30b9\u30c6\u30e0 VM \u3092\u518d\u8d77\u52d5\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
@@ -1195,8 +1195,8 @@ message.action.stop.systemvm=\u3053\u306e\u30b7\u30b9\u30c6\u30e0 VM \u3092\u505 message.action.take.snapshot=\u3053\u306e\u30dc\u30ea\u30e5\u30fc\u30e0\u306e\u30b9\u30ca\u30c3\u30d7\u30b7\u30e7\u30c3\u30c8\u3092\u4f5c\u6210\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.action.unmanage.cluster=\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u975e\u7ba1\u7406\u5bfe\u8c61\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.activate.project=\u3053\u306e\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u3092\u30a2\u30af\u30c6\u30a3\u30d6\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.add.cluster=\u30be\u30fc\u30f3 \u306e\u30dd\u30c3\u30c9 \u306b\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u3067\u7ba1\u7406\u3055\u308c\u308b\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u8ffd\u52a0\u3057\u307e\u3059 -message.add.cluster.zone=\u30be\u30fc\u30f3 \u306b\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u3067\u7ba1\u7406\u3055\u308c\u308b\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.cluster=\u30be\u30fc\u30f3 \u306e\u30dd\u30c3\u30c9 \u306b\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u3067\u7ba1\u7406\u3055\u308c\u308b\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.cluster.zone=\u30be\u30fc\u30f3 \u306b\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u3067\u7ba1\u7406\u3055\u308c\u308b\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u8ffd\u52a0\u3057\u307e\u3059 message.add.disk.offering=\u65b0\u3057\u3044\u30c7\u30a3\u30b9\u30af \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.add.domain=\u3053\u306e\u30c9\u30e1\u30a4\u30f3\u306b\u4f5c\u6210\u3059\u308b\u30b5\u30d6\u30c9\u30e1\u30a4\u30f3\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.add.firewall=\u30be\u30fc\u30f3\u306b\u30d5\u30a1\u30a4\u30a2\u30a6\u30a9\u30fc\u30eb\u3092\u8ffd\u52a0\u3057\u307e\u3059 @@ -1205,18 +1205,18 @@ message.add.host=\u65b0\u3057\u3044\u30db\u30b9\u30c8\u3092\u8ffd\u52a0\u3059\u3 message.adding.host=\u30db\u30b9\u30c8\u3092\u8ffd\u52a0\u3057\u3066\u3044\u307e\u3059 message.adding.Netscaler.device=Netscaler \u30c7\u30d0\u30a4\u30b9\u3092\u8ffd\u52a0\u3057\u3066\u3044\u307e\u3059 message.adding.Netscaler.provider=Netscaler \u30d7\u30ed\u30d0\u30a4\u30c0\u30fc\u3092\u8ffd\u52a0\u3057\u3066\u3044\u307e\u3059 -message.add.ip.range.direct.network=\u30be\u30fc\u30f3 \u306e\u76f4\u63a5\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u306b IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u8ffd\u52a0\u3057\u307e\u3059 -message.add.ip.range.to.pod=

\u30dd\u30c3\u30c9 \u306b IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u8ffd\u52a0\u3057\u307e\u3059

+message.add.ip.range.direct.network=\u30be\u30fc\u30f3 \u306e\u76f4\u63a5\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u306b IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.ip.range.to.pod=

\u30dd\u30c3\u30c9 \u306b IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u8ffd\u52a0\u3057\u307e\u3059

message.add.ip.range=\u30be\u30fc\u30f3\u306e\u30d1\u30d6\u30ea\u30c3\u30af \u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u306b IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u8ffd\u52a0\u3057\u307e\u3059 message.additional.networks.desc=\u4eee\u60f3\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u304c\u63a5\u7d9a\u3059\u308b\u8ffd\u52a0\u306e\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3092\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.add.load.balancer=\u30be\u30fc\u30f3\u306b\u8ca0\u8377\u5206\u6563\u88c5\u7f6e\u3092\u8ffd\u52a0\u3057\u307e\u3059 -message.add.load.balancer.under.ip=\u8ca0\u8377\u5206\u6563\u898f\u5247\u304c\u6b21\u306e IP \u30a2\u30c9\u30ec\u30b9\u306b\u5bfe\u3057\u3066\u8ffd\u52a0\u3055\u308c\u307e\u3057\u305f\: -message.add.network=\u30be\u30fc\u30f3 \u306b\u65b0\u3057\u3044\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.load.balancer.under.ip=\u8ca0\u8377\u5206\u6563\u898f\u5247\u304c\u6b21\u306e IP \u30a2\u30c9\u30ec\u30b9\u306b\u5bfe\u3057\u3066\u8ffd\u52a0\u3055\u308c\u307e\u3057\u305f: +message.add.network=\u30be\u30fc\u30f3 \u306b\u65b0\u3057\u3044\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3092\u8ffd\u52a0\u3057\u307e\u3059 message.add.new.gateway.to.vpc=\u3053\u306e VPC \u306b\u65b0\u3057\u3044\u30b2\u30fc\u30c8\u30a6\u30a7\u30a4\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306e\u60c5\u5831\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 -message.add.pod=\u30be\u30fc\u30f3 \u306b\u65b0\u3057\u3044\u30dd\u30c3\u30c9\u3092\u8ffd\u52a0\u3057\u307e\u3059 -message.add.primary.storage=\u30be\u30fc\u30f3 \u306e\u30dd\u30c3\u30c9 \u306b\u65b0\u3057\u3044\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.pod=\u30be\u30fc\u30f3 \u306b\u65b0\u3057\u3044\u30dd\u30c3\u30c9\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.primary.storage=\u30be\u30fc\u30f3 \u306e\u30dd\u30c3\u30c9 \u306b\u65b0\u3057\u3044\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u8ffd\u52a0\u3057\u307e\u3059 message.add.primary=\u65b0\u3057\u3044\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u30d1\u30e9\u30e1\u30fc\u30bf\u30fc\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 -message.add.secondary.storage=\u30be\u30fc\u30f3 \u306b\u65b0\u3057\u3044\u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u8ffd\u52a0\u3057\u307e\u3059 +message.add.secondary.storage=\u30be\u30fc\u30f3 \u306b\u65b0\u3057\u3044\u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u8ffd\u52a0\u3057\u307e\u3059 message.add.service.offering=\u65b0\u3057\u3044\u30b3\u30f3\u30d4\u30e5\u30fc\u30c6\u30a3\u30f3\u30b0 \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u30c7\u30fc\u30bf\u3092\u5165\u529b\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.add.system.service.offering=\u65b0\u3057\u3044\u30b7\u30b9\u30c6\u30e0 \u30b5\u30fc\u30d3\u30b9 \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u30c7\u30fc\u30bf\u3092\u5165\u529b\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.add.template=\u65b0\u3057\u3044\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3092\u4f5c\u6210\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u30c7\u30fc\u30bf\u3092\u5165\u529b\u3057\u3066\u304f\u3060\u3055\u3044\u3002 @@ -1225,8 +1225,8 @@ message.add.VPN.gateway=VPN \u30b2\u30fc\u30c8\u30a6\u30a7\u30a4\u3092\u8ffd\u52 message.advanced.mode.desc=VLAN 
\u30b5\u30dd\u30fc\u30c8\u3092\u6709\u52b9\u306b\u3059\u308b\u5834\u5408\u306f\u3001\u3053\u306e\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30e2\u30c7\u30eb\u3092\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u3053\u306e\u30e2\u30c7\u30eb\u3067\u306f\u6700\u3082\u67d4\u8edf\u306b\u30ab\u30b9\u30bf\u30e0 \u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30aa\u30d5\u30a1\u30ea\u30f3\u30b0\u3092\u63d0\u4f9b\u3067\u304d\u3001\u30d5\u30a1\u30a4\u30a2\u30a6\u30a9\u30fc\u30eb\u3001VPN\u3001\u8ca0\u8377\u5206\u6563\u88c5\u7f6e\u306e\u30b5\u30dd\u30fc\u30c8\u306e\u307b\u304b\u306b\u3001\u76f4\u63a5\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3068\u4eee\u60f3\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3082\u6709\u52b9\u306b\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002 message.advanced.security.group=\u30b2\u30b9\u30c8 VM \u3092\u5206\u96e2\u3059\u308b\u305f\u3081\u306b\u30bb\u30ad\u30e5\u30ea\u30c6\u30a3 \u30b0\u30eb\u30fc\u30d7\u3092\u4f7f\u7528\u3059\u308b\u5834\u5408\u306f\u3001\u3053\u306e\u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.advanced.virtual=\u30b2\u30b9\u30c8 VM \u3092\u5206\u96e2\u3059\u308b\u305f\u3081\u306b\u30be\u30fc\u30f3\u5168\u4f53\u306e VLAN \u3092\u4f7f\u7528\u3059\u308b\u5834\u5408\u306f\u3001\u3053\u306e\u30aa\u30d7\u30b7\u30e7\u30f3\u3092\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002 -message.after.enable.s3=S3\u57fa\u76e4\u30bb\u30ab\u30f3\u30c0\u30ea\u30b9\u30c8\u30ec\u30fc\u30b8\u304c\u8a2d\u5b9a\u3055\u308c\u307e\u3057\u305f\u3002 \u30ce\u30fc\u30c8\:\u3053\u306e\u30da\u30fc\u30b8\u3092\u9589\u3058\u308b\u3068S3\u3092\u518d\u8a2d\u5b9a\u3067\u304d\u307e\u305b\u3093\u3002 -message.after.enable.swift=Swift \u304c\u69cb\u6210\u3055\u308c\u307e\u3057\u305f\u3002\u6ce8\: \u3053\u306e\u30da\u30fc\u30b8\u3092\u9589\u3058\u308b\u3068\u3001Swift \u3092\u518d\u69cb\u6210\u3059\u308b\u3053\u3068\u306f\u3067\u304d\u307e\u305b\u3093\u3002 +message.after.enable.s3=S3\u57fa\u76e4\u30bb\u30ab\u30f3\u30c0\u30ea\u30b9\u30c8\u30ec\u30fc\u30b8\u304c\u8a2d\u5b9a\u3055\u308c\u307e\u3057\u305f\u3002 \u30ce\u30fc\u30c8:\u3053\u306e\u30da\u30fc\u30b8\u3092\u9589\u3058\u308b\u3068S3\u3092\u518d\u8a2d\u5b9a\u3067\u304d\u307e\u305b\u3093\u3002 +message.after.enable.swift=Swift \u304c\u69cb\u6210\u3055\u308c\u307e\u3057\u305f\u3002\u6ce8: \u3053\u306e\u30da\u30fc\u30b8\u3092\u9589\u3058\u308b\u3068\u3001Swift \u3092\u518d\u69cb\u6210\u3059\u308b\u3053\u3068\u306f\u3067\u304d\u307e\u305b\u3093\u3002 message.alert.state.detected=\u30a2\u30e9\u30fc\u30c8\u72b6\u614b\u304c\u691c\u51fa\u3055\u308c\u307e\u3057\u305f message.allow.vpn.access=VPN \u30a2\u30af\u30bb\u30b9\u3092\u8a31\u53ef\u3059\u308b\u30e6\u30fc\u30b6\u30fc\u306e\u30e6\u30fc\u30b6\u30fc\u540d\u3068\u30d1\u30b9\u30ef\u30fc\u30c9\u3092\u5165\u529b\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.apply.snapshot.policy=\u73fe\u5728\u306e\u30b9\u30ca\u30c3\u30d7\u30b7\u30e7\u30c3\u30c8 \u30dd\u30ea\u30b7\u30fc\u3092\u66f4\u65b0\u3057\u307e\u3057\u305f\u3002 @@ -1251,10 +1251,10 @@ message.confirm.join.project=\u3053\u306e\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u3 message.confirm.remove.IP.range=\u3053\u306e IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u524a\u9664\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.confirm.shutdown.provider=\u3053\u306e\u30d7\u30ed\u30d0\u30a4\u30c0\u30fc\u3092\u30b7\u30e3\u30c3\u30c8\u30c0\u30a6\u30f3\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
message.copy.iso.confirm=ISO \u3092\u6b21\u306e\u5834\u6240\u306b\u30b3\u30d4\u30fc\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.copy.template=\u30be\u30fc\u30f3 \u304b\u3089\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8 XXX \u3092\u6b21\u306e\u5834\u6240\u306b\u30b3\u30d4\u30fc\u3057\u307e\u3059\: +message.copy.template=\u30be\u30fc\u30f3 \u304b\u3089\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8 XXX \u3092\u6b21\u306e\u5834\u6240\u306b\u30b3\u30d4\u30fc\u3057\u307e\u3059: message.create.template=\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3092\u4f5c\u6210\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.create.template.vm=\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8 \u304b\u3089 VM \u3092\u4f5c\u6210\u3057\u307e\u3059 -message.create.template.volume=\u30c7\u30a3\u30b9\u30af \u30dc\u30ea\u30e5\u30fc\u30e0 \u306e\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3092\u4f5c\u6210\u3059\u308b\u524d\u306b\u3001\u6b21\u306e\u60c5\u5831\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u30dc\u30ea\u30e5\u30fc\u30e0 \u30b5\u30a4\u30ba\u306b\u3088\u3063\u3066\u306f\u3001\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u306e\u4f5c\u6210\u306b\u306f\u6570\u5206\u4ee5\u4e0a\u304b\u304b\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002 +message.create.template.vm=\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8 \u304b\u3089 VM \u3092\u4f5c\u6210\u3057\u307e\u3059 +message.create.template.volume=\u30c7\u30a3\u30b9\u30af \u30dc\u30ea\u30e5\u30fc\u30e0 \u306e\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3092\u4f5c\u6210\u3059\u308b\u524d\u306b\u3001\u6b21\u306e\u60c5\u5831\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u30dc\u30ea\u30e5\u30fc\u30e0 \u30b5\u30a4\u30ba\u306b\u3088\u3063\u3066\u306f\u3001\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u306e\u4f5c\u6210\u306b\u306f\u6570\u5206\u4ee5\u4e0a\u304b\u304b\u308b\u53ef\u80fd\u6027\u304c\u3042\u308a\u307e\u3059\u3002 message.creating.cluster=\u30af\u30e9\u30b9\u30bf\u30fc\u3092\u4f5c\u6210\u3057\u3066\u3044\u307e\u3059 message.creating.guest.network=\u30b2\u30b9\u30c8 \u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3092\u4f5c\u6210\u3057\u3066\u3044\u307e\u3059 message.creating.physical.networks=\u7269\u7406\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3092\u4f5c\u6210\u3057\u3066\u3044\u307e\u3059 @@ -1278,21 +1278,21 @@ message.desc.secondary.storage=\u5404\u30be\u30fc\u30f3\u306b\u306f\u5c11\u306a\ message.desc.zone=\u30be\u30fc\u30f3\u306f CloudStack \u74b0\u5883\u5185\u306e\u6700\u5927\u306e\u7d44\u7e54\u5358\u4f4d\u3067\u3001\u901a\u5e38\u3001\u5358\u4e00\u306e\u30c7\u30fc\u30bf\u30bb\u30f3\u30bf\u30fc\u306b\u76f8\u5f53\u3057\u307e\u3059\u3002\u30be\u30fc\u30f3\u306b\u3088\u3063\u3066\u7269\u7406\u7684\u306a\u5206\u96e2\u3068\u5197\u9577\u6027\u304c\u63d0\u4f9b\u3055\u308c\u307e\u3059\u3002\u30be\u30fc\u30f3\u306f 1 \u3064\u4ee5\u4e0a\u306e\u30dd\u30c3\u30c9 (\u5404\u30dd\u30c3\u30c9\u306f\u30db\u30b9\u30c8\u3068\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8 \u30b5\u30fc\u30d0\u30fc\u304b\u3089\u69cb\u6210\u3055\u308c\u307e\u3059) \u3068\u3001\u30be\u30fc\u30f3\u5185\u306e\u3059\u3079\u3066\u306e\u30dd\u30c3\u30c9\u3067\u5171\u6709\u3055\u308c\u308b\u30bb\u30ab\u30f3\u30c0\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8 \u30b5\u30fc\u30d0\u30fc\u304b\u3089\u69cb\u6210\u3055\u308c\u307e\u3059\u3002 message.detach.disk=\u3053\u306e\u30c7\u30a3\u30b9\u30af\u3092\u30c7\u30bf\u30c3\u30c1\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
message.detach.iso.confirm=\u3053\u306e\u4eee\u60f3\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u304b\u3089 ISO \u30d5\u30a1\u30a4\u30eb\u3092\u30c7\u30bf\u30c3\u30c1\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.disable.account=\u00e3\u0081\u0093\u00e3\u0081\u00ae\u00e3\u0082\u00a2\u00e3\u0082\u00ab\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0083\u0088\u00e3\u0082\u0092\u00e7\u0084\u00a1\u00e5\u008a\u00b9\u00e3\u0081\u00ab\u00e3\u0081\u0097\u00e3\u0081\u00a6\u00e3\u0082\u0082\u00e3\u0082\u0088\u00e3\u0082\u008d\u00e3\u0081\u0097\u00e3\u0081\u0084\u00e3\u0081\u00a7\u00e3\u0081\u0099\u00e3\u0081\u008b? \u00e3\u0082\u00a2\u00e3\u0082\u00ab\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0083\u0088\u00e3\u0082\u0092\u00e7\u0084\u00a1\u00e5\u008a\u00b9\u00e3\u0081\u00ab\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u0093\u00e3\u0081\u00a8\u00e3\u0081\u00ab\u00e3\u0082\u0088\u00e3\u0082\u008a\u00e3\u0080\u0081\u00e3\u0081\u0093\u00e3\u0081\u00ae\u00e3\u0082\u00a2\u00e3\u0082\u00ab\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0083\u0088\u00e3\u0081\u00ae\u00e3\u0081\u0099\u00e3\u0081\u00b9\u00e3\u0081\u00a6\u00e3\u0081\u00ae\u00e3\u0083\u00a6\u00e3\u0083\u00bc\u00e3\u0082\u00b6\u00e3\u0083\u00bc\u00e3\u0081\u00af\u00e3\u0082\u00af\u00e3\u0083\u00a9\u00e3\u0082\u00a6\u00e3\u0083\u0089\u00e3\u0083\u00aa\u00e3\u0082\u00bd\u00e3\u0083\u00bc\u00e3\u0082\u00b9\u00e3\u0081\u00ab\u00e3\u0082\u00a2\u00e3\u0082\u00af\u00e3\u0082\u00bb\u00e3\u0082\u00b9\u00e3\u0081\u00a7\u00e3\u0081\u008d\u00e3\u0081\u00aa\u00e3\u0081\u008f\u00e3\u0081\u00aa\u00e3\u0082\u008a\u00e3\u0081\u00be\u00e3\u0081\u0099\u00e3\u0080\u0082\u00e5\u00ae\u009f\u00e8\u00a1\u008c\u00e4\u00b8\u00ad\u00e3\u0081\u00ae\u00e3\u0081\u0099\u00e3\u0081\u00b9\u00e3\u0081\u00a6\u00e3\u0081\u00ae\u00e4\u00bb\u00ae\u00e6\u0083\u00b3\u00e3\u0083\u009e\u00e3\u0082\u00b7\u00e3\u0083\u00b3\u00e3\u0081\u00af\u00e3\u0081\u0099\u00e3\u0081\u0090\u00e3\u0081\u00ab\u00e3\u0082\u00b7\u00e3\u0083\u00a3\u00e3\u0083\u0083\u00e3\u0083\u0088\u00e3\u0083\u0080\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0081\u0095\u00e3\u0082\u008c\u00e3\u0081\u00be\u00e3\u0081\u0099\u00e3\u0080\u0082 +message.disable.account=\u3053\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u3092\u7121\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? \u3053\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u3059\u3079\u3066\u306e\u30e6\u30fc\u30b6\u30fc\u304c\u30af\u30e9\u30a6\u30c9 \u30ea\u30bd\u30fc\u30b9\u306b\u30a2\u30af\u30bb\u30b9\u3067\u304d\u306a\u304f\u306a\u308a\u307e\u3059\u3002\u5b9f\u884c\u4e2d\u306e\u3059\u3079\u3066\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u306f\u4eca\u3059\u3050\u306b\u30b7\u30e3\u30c3\u30c8\u30c0\u30a6\u30f3\u3055\u308c\u307e\u3059\u3002 message.disable.snapshot.policy=\u73fe\u5728\u306e\u30b9\u30ca\u30c3\u30d7\u30b7\u30e7\u30c3\u30c8 \u30dd\u30ea\u30b7\u30fc\u3092\u7121\u52b9\u306b\u3057\u307e\u3057\u305f\u3002 message.disable.user=\u3053\u306e\u30e6\u30fc\u30b6\u30fc\u3092\u7121\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.disable.vpn.access=VPN \u30a2\u30af\u30bb\u30b9\u3092\u7121\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.disable.vpn=VPN \u3092\u7121\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
-message.download.ISO=ISO\u00e3\u0082\u0092\u00e3\u0083\u0080\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0083\u00ad\u00e3\u0083\u00bc\u00e3\u0083\u0089\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u009f\u00e3\u0082\u0081\u00e3\u0081\u00ab00000\u00e3\u0082\u0092\u00e3\u0082\u00af\u00e3\u0083\u00aa\u00e3\u0083\u0083\u00e3\u0082\u00af\u00e3\u0081\u0097\u00e3\u0081\u00a6\u00e3\u0081\u008f\u00e3\u0081\u00a0\u00e3\u0081\u0095\u00e3\u0081\u0084\u00e3\u0080\u0082 -message.download.template=\u00e3\u0083\u0086\u00e3\u0083\u00b3\u00e3\u0083\u0097\u00e3\u0083\u00ac\u00e3\u0083\u00bc\u00e3\u0083\u0088\u00e3\u0082\u0092\u00e3\u0083\u0080\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0083\u00ad\u00e3\u0083\u00bc\u00e3\u0083\u0089\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u009f\u00e3\u0082\u0081\u00e3\u0081\u00ab00000\u00e3\u0082\u0092\u00e3\u0082\u00af\u00e3\u0083\u00aa\u00e3\u0083\u0083\u00e3\u0082\u00af\u00e3\u0081\u0097\u00e3\u0081\u00a6\u00e3\u0081\u008f\u00e3\u0081\u00a0\u00e3\u0081\u0095\u00e3\u0081\u0084\u00e3\u0080\u0082 +message.download.ISO=ISO \u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3059\u308b\u306b\u306f 00000 \u3092\u30af\u30ea\u30c3\u30af\u3057\u307e\u3059 +message.download.template=\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3059\u308b\u306b\u306f 00000 \u3092\u30af\u30ea\u30c3\u30af\u3057\u307e\u3059 message.download.volume.confirm=\u3053\u306e\u30dc\u30ea\u30e5\u30fc\u30e0\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.download.volume=\u30dc\u30ea\u30e5\u30fc\u30e0\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3059\u308b\u306b\u306f 00000 \u3092\u30af\u30ea\u30c3\u30af\u3057\u307e\u3059 +message.download.volume=\u30dc\u30ea\u30e5\u30fc\u30e0\u3092\u30c0\u30a6\u30f3\u30ed\u30fc\u30c9\u3059\u308b\u306b\u306f 00000 \u3092\u30af\u30ea\u30c3\u30af\u3057\u307e\u3059 message.edit.account=\u7de8\u96c6 ("-1" \u306f\u3001\u30ea\u30bd\u30fc\u30b9\u4f5c\u6210\u306e\u91cf\u306b\u5236\u9650\u304c\u306a\u3044\u3053\u3068\u3092\u793a\u3057\u307e\u3059) message.edit.confirm=[\u4fdd\u5b58] \u3092\u30af\u30ea\u30c3\u30af\u3059\u308b\u524d\u306b\u5909\u66f4\u5185\u5bb9\u3092\u78ba\u8a8d\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.edit.limits=\u6b21\u306e\u30ea\u30bd\u30fc\u30b9\u306b\u5236\u9650\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u300c-1\u300d\u306f\u3001\u30ea\u30bd\u30fc\u30b9\u4f5c\u6210\u306b\u5236\u9650\u304c\u306a\u3044\u3053\u3068\u3092\u793a\u3057\u307e\u3059\u3002 message.edit.traffic.type=\u3053\u306e\u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u306e\u7a2e\u985e\u306b\u95a2\u9023\u4ed8\u3051\u308b\u30c8\u30e9\u30d5\u30a3\u30c3\u30af \u30e9\u30d9\u30eb\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.enable.account=\u3053\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u3092\u6709\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.enabled.vpn.ip.sec=IPSec \u4e8b\u524d\u5171\u6709\u30ad\u30fc\: +message.enabled.vpn.ip.sec=IPSec \u4e8b\u524d\u5171\u6709\u30ad\u30fc: message.enabled.vpn=\u73fe\u5728\u3001VPN \u30a2\u30af\u30bb\u30b9\u304c\u6709\u52b9\u306b\u306a\u3063\u3066\u3044\u307e\u3059\u3002\u6b21\u306e IP \u30a2\u30c9\u30ec\u30b9\u7d4c\u7531\u3067\u30a2\u30af\u30bb\u30b9\u3067\u304d\u307e\u3059\u3002 message.enable.user=\u3053\u306e\u30e6\u30fc\u30b6\u30fc\u3092\u6709\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
message.enable.vpn.access=\u73fe\u5728\u3053\u306e IP \u30a2\u30c9\u30ec\u30b9\u306b\u5bfe\u3059\u308b VPN \u306f\u7121\u52b9\u3067\u3059\u3002VPN \u30a2\u30af\u30bb\u30b9\u3092\u6709\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? @@ -1304,8 +1304,8 @@ message.generate.keys=\u3053\u306e\u30e6\u30fc\u30b6\u30fc\u306b\u65b0\u3057\u30 message.guest.traffic.in.advanced.zone=\u30b2\u30b9\u30c8 \u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u306f\u3001\u30a8\u30f3\u30c9 \u30e6\u30fc\u30b6\u30fc\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u9593\u306e\u901a\u4fe1\u3067\u3059\u3002\u5404\u7269\u7406\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u306e\u30b2\u30b9\u30c8 \u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u3092\u901a\u4fe1\u3059\u308b\u305f\u3081\u306e VLAN ID \u306e\u7bc4\u56f2\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.guest.traffic.in.basic.zone=\u30b2\u30b9\u30c8 \u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30c8\u30e9\u30d5\u30a3\u30c3\u30af\u306f\u3001\u30a8\u30f3\u30c9 \u30e6\u30fc\u30b6\u30fc\u306e\u4eee\u60f3\u30de\u30b7\u30f3\u9593\u306e\u901a\u4fe1\u3067\u3059\u3002CloudStack \u3067\u30b2\u30b9\u30c8 VM \u306b\u5272\u308a\u5f53\u3066\u3089\u308c\u308b IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u3053\u306e\u7bc4\u56f2\u304c\u4e88\u7d04\u6e08\u307f\u306e\u30b7\u30b9\u30c6\u30e0 IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u3068\u91cd\u8907\u3057\u306a\u3044\u3088\u3046\u306b\u6ce8\u610f\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.installWizard.click.retry=\u8d77\u52d5\u3092\u518d\u8a66\u884c\u3059\u308b\u306b\u306f\u30dc\u30bf\u30f3\u3092\u30af\u30ea\u30c3\u30af\u3057\u3066\u304f\u3060\u3055\u3044\u3002 -message.installWizard.copy.whatIsAPod=\u901a\u5e38\u30011 \u3064\u306e\u30dd\u30c3\u30c9\u306f\u5358\u4e00\u306e\u30e9\u30c3\u30af\u3092\u8868\u3057\u307e\u3059\u3002\u540c\u3058\u30dd\u30c3\u30c9\u5185\u306e\u30db\u30b9\u30c8\u306f\u540c\u3058\u30b5\u30d6\u30cd\u30c3\u30c8\u306b\u542b\u307e\u308c\u307e\u3059\u3002

\u30dd\u30c3\u30c9\u306f CloudStack&\#8482; \u74b0\u5883\u5185\u306e 2 \u756a\u76ee\u306b\u5927\u304d\u306a\u7d44\u7e54\u5358\u4f4d\u3067\u3059\u3002\u30dd\u30c3\u30c9\u306f\u30be\u30fc\u30f3\u306b\u542b\u307e\u308c\u307e\u3059\u3002\u5404\u30be\u30fc\u30f3\u306f 1 \u3064\u4ee5\u4e0a\u306e\u30dd\u30c3\u30c9\u3092\u542b\u3080\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002\u57fa\u672c\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3067\u306f\u3001\u30be\u30fc\u30f3\u5185\u306e\u30dd\u30c3\u30c9\u306f 1 \u3064\u3067\u3059\u3002 -message.installWizard.copy.whatIsAZone=\u30be\u30fc\u30f3\u306f CloudStack&\#8482; \u74b0\u5883\u5185\u306e\u6700\u5927\u306e\u7d44\u7e54\u5358\u4f4d\u3067\u3059\u30021 \u3064\u306e\u30c7\u30fc\u30bf\u30bb\u30f3\u30bf\u30fc\u5185\u306b\u8907\u6570\u306e\u30be\u30fc\u30f3\u3092\u8a2d\u5b9a\u3067\u304d\u307e\u3059\u304c\u3001\u901a\u5e38\u3001\u30be\u30fc\u30f3\u306f\u5358\u4e00\u306e\u30c7\u30fc\u30bf\u30bb\u30f3\u30bf\u30fc\u306b\u76f8\u5f53\u3057\u307e\u3059\u3002\u30a4\u30f3\u30d5\u30e9\u30b9\u30c8\u30e9\u30af\u30c1\u30e3\u3092\u30be\u30fc\u30f3\u306b\u7d44\u7e54\u5316\u3059\u308b\u3068\u3001\u30be\u30fc\u30f3\u3092\u7269\u7406\u7684\u306b\u5206\u96e2\u3057\u3066\u5197\u9577\u5316\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002\u305f\u3068\u3048\u3070\u3001\u5404\u30be\u30fc\u30f3\u306b\u96fb\u6e90\u3068\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30a2\u30c3\u30d7\u30ea\u30f3\u30af\u3092\u914d\u5099\u3057\u307e\u3059\u3002\u5fc5\u9808\u3067\u306f\u3042\u308a\u307e\u305b\u3093\u304c\u3001\u30be\u30fc\u30f3\u306f\u9060\u9694\u5730\u306b\u5206\u6563\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002 +message.installWizard.copy.whatIsAPod=\u901a\u5e38\u30011 \u3064\u306e\u30dd\u30c3\u30c9\u306f\u5358\u4e00\u306e\u30e9\u30c3\u30af\u3092\u8868\u3057\u307e\u3059\u3002\u540c\u3058\u30dd\u30c3\u30c9\u5185\u306e\u30db\u30b9\u30c8\u306f\u540c\u3058\u30b5\u30d6\u30cd\u30c3\u30c8\u306b\u542b\u307e\u308c\u307e\u3059\u3002

\u30dd\u30c3\u30c9\u306f CloudStack™ \u74b0\u5883\u5185\u306e 2 \u756a\u76ee\u306b\u5927\u304d\u306a\u7d44\u7e54\u5358\u4f4d\u3067\u3059\u3002\u30dd\u30c3\u30c9\u306f\u30be\u30fc\u30f3\u306b\u542b\u307e\u308c\u307e\u3059\u3002\u5404\u30be\u30fc\u30f3\u306f 1 \u3064\u4ee5\u4e0a\u306e\u30dd\u30c3\u30c9\u3092\u542b\u3080\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002\u57fa\u672c\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3067\u306f\u3001\u30be\u30fc\u30f3\u5185\u306e\u30dd\u30c3\u30c9\u306f 1 \u3064\u3067\u3059\u3002 +message.installWizard.copy.whatIsAZone=\u30be\u30fc\u30f3\u306f CloudStack™ \u74b0\u5883\u5185\u306e\u6700\u5927\u306e\u7d44\u7e54\u5358\u4f4d\u3067\u3059\u30021 \u3064\u306e\u30c7\u30fc\u30bf\u30bb\u30f3\u30bf\u30fc\u5185\u306b\u8907\u6570\u306e\u30be\u30fc\u30f3\u3092\u8a2d\u5b9a\u3067\u304d\u307e\u3059\u304c\u3001\u901a\u5e38\u3001\u30be\u30fc\u30f3\u306f\u5358\u4e00\u306e\u30c7\u30fc\u30bf\u30bb\u30f3\u30bf\u30fc\u306b\u76f8\u5f53\u3057\u307e\u3059\u3002\u30a4\u30f3\u30d5\u30e9\u30b9\u30c8\u30e9\u30af\u30c1\u30e3\u3092\u30be\u30fc\u30f3\u306b\u7d44\u7e54\u5316\u3059\u308b\u3068\u3001\u30be\u30fc\u30f3\u3092\u7269\u7406\u7684\u306b\u5206\u96e2\u3057\u3066\u5197\u9577\u5316\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002\u305f\u3068\u3048\u3070\u3001\u5404\u30be\u30fc\u30f3\u306b\u96fb\u6e90\u3068\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30a2\u30c3\u30d7\u30ea\u30f3\u30af\u3092\u914d\u5099\u3057\u307e\u3059\u3002\u5fc5\u9808\u3067\u306f\u3042\u308a\u307e\u305b\u3093\u304c\u3001\u30be\u30fc\u30f3\u306f\u9060\u9694\u5730\u306b\u5206\u6563\u3059\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002 message.installWizard.copy.whatIsSecondaryStorage=\u30bb\u30ab\u30f3\u30c0\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u306f\u30be\u30fc\u30f3\u3068\u95a2\u9023\u4ed8\u3051\u3089\u308c\u3001\u6b21\u306e\u9805\u76ee\u3092\u683c\u7d0d\u3057\u307e\u3059\u3002
  • \u30c6\u30f3\u30d7\u30ec\u30fc\u30c8 - VM \u306e\u8d77\u52d5\u306b\u4f7f\u7528\u3067\u304d\u308b OS \u30a4\u30e1\u30fc\u30b8\u3067\u3001\u30a2\u30d7\u30ea\u30b1\u30fc\u30b7\u30e7\u30f3\u306e\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u306a\u3069\u8ffd\u52a0\u306e\u69cb\u6210\u3092\u542b\u3081\u308b\u3053\u3068\u304c\u3067\u304d\u307e\u3059\u3002
  • ISO \u30a4\u30e1\u30fc\u30b8 - \u8d77\u52d5\u53ef\u80fd\u307e\u305f\u306f\u8d77\u52d5\u4e0d\u53ef\u306e OS \u30a4\u30e1\u30fc\u30b8\u3067\u3059\u3002
  • \u30c7\u30a3\u30b9\u30af \u30dc\u30ea\u30e5\u30fc\u30e0\u306e\u30b9\u30ca\u30c3\u30d7\u30b7\u30e7\u30c3\u30c8 - VM \u30c7\u30fc\u30bf\u306e\u4fdd\u5b58\u30b3\u30d4\u30fc\u3067\u3059\u3002\u30c7\u30fc\u30bf\u306e\u5fa9\u5143\u307e\u305f\u306f\u65b0\u3057\u3044\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u306e\u4f5c\u6210\u306b\u4f7f\u7528\u3067\u304d\u307e\u3059\u3002
message.installWizard.tooltip.addCluster.name=\u30af\u30e9\u30b9\u30bf\u30fc\u306e\u540d\u524d\u3067\u3059\u3002CloudStack \u3067\u4f7f\u7528\u3055\u308c\u3066\u3044\u306a\u3044\u3001\u4efb\u610f\u306e\u30c6\u30ad\u30b9\u30c8\u3092\u6307\u5b9a\u3067\u304d\u307e\u3059\u3002 message.installWizard.tooltip.addHost.hostname=\u30db\u30b9\u30c8\u306e DNS \u540d\u307e\u305f\u306f IP \u30a2\u30c9\u30ec\u30b9\u3067\u3059\u3002 @@ -1344,7 +1344,7 @@ message.migrate.instance.to.ps=\u5225\u306e\u30d7\u30e9\u30a4\u30de\u30ea \u30b9 message.migrate.router.confirm=\u30eb\u30fc\u30bf\u30fc\u306e\u79fb\u884c\u5148\u306f\u6b21\u306e\u30db\u30b9\u30c8\u3067\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.migrate.systemvm.confirm=\u30b7\u30b9\u30c6\u30e0 VM \u306e\u79fb\u884c\u5148\u306f\u6b21\u306e\u30db\u30b9\u30c8\u3067\u3088\u308d\u3057\u3044\u3067\u3059\u304b? message.migrate.volume=\u5225\u306e\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u306b\u30dc\u30ea\u30e5\u30fc\u30e0\u3092\u79fb\u884c\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.new.user=\u00e3\u0082\u00a2\u00e3\u0082\u00ab\u00e3\u0082\u00a6\u00e3\u0083\u00b3\u00e3\u0083\u0088\u00e3\u0081\u00ab\u00e6\u0096\u00b0\u00e3\u0081\u0097\u00e3\u0081\u0084\u00e3\u0083\u00a6\u00e3\u0083\u00bc\u00e3\u0082\u00b6\u00e3\u0083\u00bc\u00e3\u0082\u0092\u00e8\u00bf\u00bd\u00e5\u008a\u00a0\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u009f\u00e3\u0082\u0081\u00e3\u0081\u00ab\u00e3\u0080\u0081\u00e6\u00ac\u00a1\u00e3\u0081\u00ae\u00e6\u0083 +message.new.user=\u30a2\u30ab\u30a6\u30f3\u30c8\u306b\u65b0\u3057\u3044\u30e6\u30fc\u30b6\u30fc\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u60c5\u5831\u3092\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002 message.no.network.support.configuration.not.true=\u30bb\u30ad\u30e5\u30ea\u30c6\u30a3 \u30b0\u30eb\u30fc\u30d7\u304c\u6709\u52b9\u306a\u30be\u30fc\u30f3\u304c\u7121\u3044\u305f\u3081\u3001\u8ffd\u52a0\u306e\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u6a5f\u80fd\u306f\u3042\u308a\u307e\u305b\u3093\u3002\u624b\u9806 5. \u306b\u9032\u3093\u3067\u304f\u3060\u3055\u3044\u3002 message.no.network.support=\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u3068\u3057\u3066 vSphere \u3092\u9078\u629e\u3057\u307e\u3057\u305f\u304c\u3001\u3053\u306e\u30cf\u30a4\u30d1\u30fc\u30d0\u30a4\u30b6\u30fc\u306b\u8ffd\u52a0\u306e\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u6a5f\u80fd\u306f\u3042\u308a\u307e\u305b\u3093\u3002\u624b\u9806 5. \u306b\u9032\u3093\u3067\u304f\u3060\u3055\u3044\u3002 message.no.projects.adminOnly=\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u304c\u3042\u308a\u307e\u305b\u3093\u3002
\u7ba1\u7406\u8005\u306b\u65b0\u3057\u3044\u30d7\u30ed\u30b8\u30a7\u30af\u30c8\u306e\u4f5c\u6210\u3092\u4f9d\u983c\u3057\u3066\u304f\u3060\u3055\u3044\u3002 @@ -1407,7 +1407,7 @@ message.tooltip.reserved.system.netmask=\u30dd\u30c3\u30c9\u306e\u30b5\u30d6\u30 message.tooltip.zone.name=\u30be\u30fc\u30f3\u306e\u540d\u524d\u3067\u3059\u3002 message.update.os.preference=\u3053\u306e\u30db\u30b9\u30c8\u306e OS \u57fa\u672c\u8a2d\u5b9a\u3092\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002\u540c\u69d8\u306e\u57fa\u672c\u8a2d\u5b9a\u3092\u6301\u3064\u3059\u3079\u3066\u306e\u4eee\u60f3\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306f\u3001\u5225\u306e\u30db\u30b9\u30c8\u3092\u9078\u629e\u3059\u308b\u524d\u306b\u307e\u305a\u3053\u306e\u30db\u30b9\u30c8\u306b\u5272\u308a\u5f53\u3066\u3089\u308c\u307e\u3059\u3002 message.update.resource.count=\u3053\u306e\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u30ea\u30bd\u30fc\u30b9\u6570\u3092\u66f4\u65b0\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? -message.update.ssl=\u5404\u30b3\u30f3\u30bd\u30fc\u30eb \u30d7\u30ed\u30ad\u30b7\u306e\u4eee\u60f3\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u3067\u66f4\u65b0\u3059\u308b\u3001X.509 \u6e96\u62e0\u306e\u65b0\u3057\u3044 SSL \u8a3c\u660e\u66f8\u3092\u9001\u4fe1\u3057\u3066\u304f\u3060\u3055\u3044\: +message.update.ssl=\u5404\u30b3\u30f3\u30bd\u30fc\u30eb \u30d7\u30ed\u30ad\u30b7\u306e\u4eee\u60f3\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u3067\u66f4\u65b0\u3059\u308b\u3001X.509 \u6e96\u62e0\u306e\u65b0\u3057\u3044 SSL \u8a3c\u660e\u66f8\u3092\u9001\u4fe1\u3057\u3066\u304f\u3060\u3055\u3044: message.validate.instance.name=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u540d\u306f 63 \u6587\u5b57\u4ee5\u5185\u3067\u6307\u5b9a\u3057\u3066\u304f\u3060\u3055\u3044\u3002ASCII \u6587\u5b57\u306e a\uff5ez\u3001A\uff5eZ\u3001\u6570\u5b57\u306e 0\uff5e9\u3001\u304a\u3088\u3073\u30cf\u30a4\u30d5\u30f3\u306e\u307f\u3092\u4f7f\u7528\u3067\u304d\u307e\u3059\u3002\u6587\u5b57\u3067\u59cb\u307e\u308a\u3001\u6587\u5b57\u307e\u305f\u306f\u6570\u5b57\u3067\u7d42\u308f\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002 message.virtual.network.desc=\u30a2\u30ab\u30a6\u30f3\u30c8\u306e\u5c02\u7528\u4eee\u60f3\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3067\u3059\u3002\u30d6\u30ed\u30fc\u30c9\u30ad\u30e3\u30b9\u30c8 \u30c9\u30e1\u30a4\u30f3\u306f VLAN \u5185\u306b\u914d\u7f6e\u3055\u308c\u3001\u30d1\u30d6\u30ea\u30c3\u30af \u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3078\u306e\u30a2\u30af\u30bb\u30b9\u306f\u3059\u3079\u3066\u4eee\u60f3\u30eb\u30fc\u30bf\u30fc\u306b\u3088\u3063\u3066\u30eb\u30fc\u30c6\u30a3\u30f3\u30b0\u3055\u308c\u307e\u3059\u3002 message.vm.create.template.confirm=\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u3092\u4f5c\u6210\u3059\u308b\u3068 VM \u304c\u81ea\u52d5\u7684\u306b\u518d\u8d77\u52d5\u3055\u308c\u307e\u3059\u3002 @@ -1418,9 +1418,9 @@ message.Zone.creation.complete=\u30be\u30fc\u30f3\u304c\u4f5c\u6210\u3055\u308c\ message.zone.creation.complete.would.you.like.to.enable.this.zone=\u30be\u30fc\u30f3\u304c\u4f5c\u6210\u3055\u308c\u307e\u3057\u305f\u3002\u3053\u306e\u30be\u30fc\u30f3\u3092\u6709\u52b9\u306b\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? 
message.zone.no.network.selection=\u9078\u629e\u3057\u305f\u30be\u30fc\u30f3\u3067\u306f\u3001\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u3092\u9078\u629e\u3067\u304d\u307e\u305b\u3093\u3002 message.zone.step.1.desc=\u30be\u30fc\u30f3\u306e\u30cd\u30c3\u30c8\u30ef\u30fc\u30af \u30e2\u30c7\u30eb\u3092\u9078\u629e\u3057\u3066\u304f\u3060\u3055\u3044\u3002 -message.zone.step.2.desc=\u00e6\u0096\u00b0\u00e3\u0081\u0097\u00e3\u0081\u0084Zone\u00e3\u0082\u0092\u00e8\u00bf\u00bd\u00e5\u008a\u00a0\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u009f\u00e3\u0082\u0081\u00e3\u0081\u00ab\u00e3\u0080\u0081\u00e6\u00ac\u00a1\u00e3\u0081\u00ae\u00e6\u0083 -message.zone.step.3.desc=\u00e6\u0096\u00b0\u00e3\u0081\u0097\u00e3\u0081\u0084Pod\u00e3\u0082\u0092\u00e8\u00bf\u00bd\u00e5\u008a\u00a0\u00e3\u0081\u0099\u00e3\u0082\u008b\u00e3\u0081\u009f\u00e3\u0082\u0081\u00e3\u0081\u00ab\u00e3\u0080\u0081\u00e6\u00ac\u00a1\u00e3\u0081\u00ae\u00e6\u0083 -message.zoneWizard.enable.local.storage=\u8b66\u544a\: \u3053\u306e\u30be\u30fc\u30f3\u306e\u30ed\u30fc\u30ab\u30eb \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u6709\u52b9\u306b\u3059\u308b\u5834\u5408\u306f\u3001\u30b7\u30b9\u30c6\u30e0 VM \u306e\u8d77\u52d5\u5834\u6240\u306b\u5fdc\u3058\u3066\u6b21\u306e\u64cd\u4f5c\u304c\u5fc5\u8981\u3067\u3059\u3002

1. \u30b7\u30b9\u30c6\u30e0 VM \u3092\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3067\u8d77\u52d5\u3059\u308b\u5fc5\u8981\u304c\u3042\u308b\u5834\u5408\u306f\u3001\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u4f5c\u6210\u3057\u305f\u5f8c\u3067\u30be\u30fc\u30f3\u306b\u8ffd\u52a0\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\u307e\u305f\u3001\u7121\u52b9\u72b6\u614b\u306e\u30be\u30fc\u30f3\u3092\u8d77\u52d5\u3059\u308b\u5fc5\u8981\u3082\u3042\u308a\u307e\u3059\u3002

2. \u30b7\u30b9\u30c6\u30e0 VM \u3092\u30ed\u30fc\u30ab\u30eb \u30b9\u30c8\u30ec\u30fc\u30b8\u3067\u8d77\u52d5\u3059\u308b\u5fc5\u8981\u304c\u3042\u308b\u5834\u5408\u306f\u3001system.vm.use.local.storage \u3092 true \u306b\u8a2d\u5b9a\u3057\u3066\u304b\u3089\u30be\u30fc\u30f3\u3092\u6709\u52b9\u306b\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002


\u7d9a\u884c\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? +message.zone.step.2.desc=\u65b0\u3057\u3044\u30be\u30fc\u30f3\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u60c5\u5831\u3092\u5165\u529b\u3057\u3066\u304f\u3060\u3055\u3044\u3002 +message.zone.step.3.desc=\u65b0\u3057\u3044\u30dd\u30c3\u30c9\u3092\u8ffd\u52a0\u3059\u308b\u305f\u3081\u306b\u3001\u6b21\u306e\u60c5\u5831\u3092\u5165\u529b\u3057\u3066\u304f\u3060\u3055\u3044\u3002 +message.zoneWizard.enable.local.storage=\u8b66\u544a: \u3053\u306e\u30be\u30fc\u30f3\u306e\u30ed\u30fc\u30ab\u30eb \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u6709\u52b9\u306b\u3059\u308b\u5834\u5408\u306f\u3001\u30b7\u30b9\u30c6\u30e0 VM \u306e\u8d77\u52d5\u5834\u6240\u306b\u5fdc\u3058\u3066\u6b21\u306e\u64cd\u4f5c\u304c\u5fc5\u8981\u3067\u3059\u3002

1. \u30b7\u30b9\u30c6\u30e0 VM \u3092\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3067\u8d77\u52d5\u3059\u308b\u5fc5\u8981\u304c\u3042\u308b\u5834\u5408\u306f\u3001\u30d7\u30e9\u30a4\u30de\u30ea \u30b9\u30c8\u30ec\u30fc\u30b8\u3092\u4f5c\u6210\u3057\u305f\u5f8c\u3067\u30be\u30fc\u30f3\u306b\u8ffd\u52a0\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\u307e\u305f\u3001\u7121\u52b9\u72b6\u614b\u306e\u30be\u30fc\u30f3\u3092\u8d77\u52d5\u3059\u308b\u5fc5\u8981\u3082\u3042\u308a\u307e\u3059\u3002

2. \u30b7\u30b9\u30c6\u30e0 VM \u3092\u30ed\u30fc\u30ab\u30eb \u30b9\u30c8\u30ec\u30fc\u30b8\u3067\u8d77\u52d5\u3059\u308b\u5fc5\u8981\u304c\u3042\u308b\u5834\u5408\u306f\u3001system.vm.use.local.storage \u3092 true \u306b\u8a2d\u5b9a\u3057\u3066\u304b\u3089\u30be\u30fc\u30f3\u3092\u6709\u52b9\u306b\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002


\u7d9a\u884c\u3057\u3066\u3082\u3088\u308d\u3057\u3044\u3067\u3059\u304b? mode=\u30e2\u30fc\u30c9 network.rate=\u30cd\u30c3\u30c8\u30ef\u30fc\u30af\u901f\u5ea6 notification.reboot.instance=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u518d\u8d77\u52d5 diff --git a/client/WEB-INF/classes/resources/messages_ko_KR.properties b/client/WEB-INF/classes/resources/messages_ko_KR.properties index 766fc607648..757871acde0 100644 --- a/client/WEB-INF/classes/resources/messages_ko_KR.properties +++ b/client/WEB-INF/classes/resources/messages_ko_KR.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - changed.item.properties=\ud56d\ubaa9 \uc18d\uc131 \ubcc0\uacbd confirm.enable.swift=Swift \uae30\uc220 \uc9c0\uc6d0\ub97c \uc0ac\uc6a9 \ud558\ub824\uba74 \ub2e4\uc74c \uc815\ubcf4\ub97c \uc785\ub825\ud574 \uc8fc\uc2ed\uc2dc\uc624. error.could.not.enable.zone=Zone\uc744 \uc0ac\uc6a9 \ud560 \uc218 \uc5c6\uc2b5\ub2c8\ub2e4. @@ -1194,6 +1193,7 @@ message.add.load.balancer.under.ip=\ub2e4\uc74c IP \uc8fc\uc18c\uc5d0 \ub300\ud5 message.add.load.balancer=Zone\uc5d0 \ub124\ud2b8\uc6cc\ud06c \ub85c\ub4dc \uacf5\uc720 \uc7a5\uce58\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. message.add.network=Zone \uc5d0 \uc0c8\ub85c\uc6b4 \ub124\ud2b8\uc6cc\ud06c\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. message.add.new.gateway.to.vpc=\ud604\uc7ac VPC\uc5d0 \uc0c8\ub85c\uc6b4 \uac8c\uc774\ud2b8\uc6e8\uc774\ub97c \ucd94\uac00\ud558\uae30 \uc704\ud55c \uc815\ubcf4\ub97c \uc9c0\uc815\ud574 \uc8fc\uc2ed\uc2dc\uc624. +message.add.pod.during.zone.creation=\uac01 Zone\uc5d0\ub294 \ud55c \uac1c \uc774\uc0c1 Pod\uac00 \ud544\uc694\ud569\ub2c8\ub2e4. \uc9c0\uae08 \uc5ec\uae30\uc11c \uccab\ubc88\uc9f8 Pod\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. Pod\ub294 \ud638\uc2a4\ud2b8\uc640 \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0 \uc11c\ubc84\uc5d0\uc11c \uad6c\uc131\ud569\ub2c8\ub2e4\ub9cc \uc774\ub294 \ub2e4\uc74c \uc21c\uc11c\ub85c \ucd94\uac00\ud569\ub2c8\ub2e4. \ub9e8 \ucc98\uc74c CloudStack \ub0b4\ubd80 \uad00\ub9ac \ud2b8\ub798\ud53d\uc744 \uc704\ud574\uc11c IP \uc8fc\uc18c \ubc94\uc704\ub97c \uc608\uc57d\ud569\ub2c8\ub2e4. IP \uc8fc\uc18c \ubc94\uc704\ub294 \ud074\ub77c\uc6b0\ub4dc \ub0b4\ubd80 \uac01 Zone\uc5d0\uc11c \uc911\ubcf5 \ud558\uc9c0 \uc54a\uac8c \uc608\uc57d\ud560 \ud544\uc694\uac00 \uc788\uc2b5\ub2c8\ub2e4. message.add.pod=Zone \uc5d0 \uc0c8\ub85c\uc6b4 Pod\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. message.add.primary.storage=Zone Pod \uc5d0 \uc0c8\ub85c\uc6b4 \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. message.add.primary=\uc0c8\ub85c\uc6b4 \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0\ub97c \ucd94\uac00\ud558\uae30 \uc704\ud574 \uc544\ub798 \ud30c\ub77c\ubbf8\ud130\ub97c \uc9c0\uc815\ud574 \uc8fc\uc2ed\uc2dc\uc624. @@ -1253,6 +1253,7 @@ message.delete.VPN.gateway=\ud604\uc7ac VPN \uac8c\uc774\ud2b8\uc6e8\uc774\ub97c message.desc.advanced.zone=\ubcf4\ub2e4 \uc138\ub828\ub41c \ub124\ud2b8\uc6cc\ud06c \uae30\uc220\uc744 \uc9c0\uc6d0\ud569\ub2c8\ub2e4. \uc774 \ub124\ud2b8\uc6cc\ud06c \ubaa8\ub378\uc744 \uc120\ud0dd\ud558\uba74, \ubcf4\ub2e4 \uc720\uc5f0\ud558\uac8c \uac8c\uc2a4\ud2b8 \ub124\ud2b8\uc6cc\ud06c\ub97c \uc815\ud558\uace0 \ubc29\ud654\ubcbd(fire wall), VPN, \ub124\ud2b8\uc6cc\ud06c \ub85c\ub4dc \uacf5\uc720 \uc7a5\uce58 \uae30\uc220 \uc9c0\uc6d0\uc640 \uac19\uc740 \uc0ac\uc6a9\uc790 \uc9c0\uc815 \ud55c \ub124\ud2b8\uc6cc\ud06c \uc81c\uacf5\uc744 \uc81c\uacf5\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4. 
message.desc.basic.zone=\uac01 VM \uc778\uc2a4\ud134\uc2a4\uc5d0 IP \uc8fc\uc18c\uac00 \ub124\ud2b8\uc6cc\ud06c\uc5d0\uc11c \uc9c1\uc811 \ud560\ub2f9\ud560 \uc218 \uc788\ub294 \ub2e8\uc77c \ub124\ud2b8\uc6cc\ud06c\ub97c \uc81c\uacf5\ud569\ub2c8\ub2e4. \ubcf4\uc548 \uadf8\ub8f9 (\uc804\uc1a1\uc6d0 IP \uc8fc\uc18c \ud544\ud130)\uacfc \uac19\uc740 \uce35 \uc138 \uac00\uc9c0 \ub808\ubca8 \ubc29\ubc95\uc73c\ub85c \uac8c\uc2a4\ud2b8\ub97c \ubd84\ub9ac\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4. message.desc.cluster=\uac01 Pod\uc5d0\ub294 \ud55c \uac1c \uc774\uc0c1 \ud074\ub7ec\uc2a4\ud130\uac00 \ud544\uc694\ud569\ub2c8\ub2e4. \uc9c0\uae08 \uc5ec\uae30\uc11c \ucd5c\ucd08 \ud074\ub7ec\uc2a4\ud130\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. \ud074\ub7ec\uc2a4\ud130\ub294 \ud638\uc2a4\ud2b8\ub97c \uadf8\ub8f9\ud654 \ud558\ub294 \ubc29\ubc95\uc785\ub2c8\ub2e4. \ud55c \ud074\ub7ec\uc2a4\ud130 \ub0b4\ubd80 \ud638\uc2a4\ud2b8\ub294 \ubaa8\ub450 \ub3d9\uc77c\ud55c \ud558\ub4dc\uc6e8\uc5b4\uc5d0\uc11c \uad6c\uc131\ub418\uc5b4 \uac19\uc740 \ud558\uc774\ud37c \ubc14\uc774\uc800\ub97c \uc2e4\ud589\ud558\uace0 \uac19\uc740 \uc11c\ube0c \ub124\ud2b8\uc6cc\ud06c\uc0c1\uc5d0 \uc788\uc5b4 \uac19\uc740 \uacf5\uc720 \uc2a4\ud1a0\ub9ac\uc9c0\uc5d0 \uc811\uadfc \ud569\ub2c8\ub2e4. \uac01 \ud074\ub7ec\uc2a4\ud130\ub294 \ud55c \uac1c \uc774\uc0c1 \ud638\uc2a4\ud2b8\uc640 \ud55c \uac1c \uc774\uc0c1 \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0 \uc11c\ubc84\uc5d0\uc11c \uad6c\uc131\ub429\ub2c8\ub2e4. +message.desc.host=\uac01 \ud074\ub7ec\uc2a4\ud130\uc5d0\ub294 \uc801\uc5b4\ub3c4 \ud55c \uac1c \uc774\uc0c1 \uac8c\uc2a4\ud2b8 VM\ub97c \uc2e4\ud589\ud558\uae30 \uc704\ud55c \ud638\uc2a4\ud2b8 (\ucef4\ud4e8\ud130)\uac00 \ud544\uc694\ud569\ub2c8\ub2e4. \uc9c0\uae08 \uc5ec\uae30\uc11c \uccab\ubc88\uc9f8 \ud638\uc2a4\ud2b8\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. CloudStack\uc73c\ub85c \ud638\uc2a4\ud2b8\ub97c \ub3d9\uc791\ud558\ub824\uba74 \ud638\uc2a4\ud2b8\uc5d0\uac8c \ud558\uc774\ud37c \ubc14\uc774\uc800\ub97c \uc124\uce58\ud558\uace0 IP \uc8fc\uc18c\ub97c \ud560\ub2f9\ud574 \ud638\uc2a4\ud2b8\uac00 CloudStack \uad00\ub9ac \uc11c\ubc84\uc5d0 \uc811\uc18d\ud558\ub3c4\ub85d \ud569\ub2c8\ub2e4.

\ud638\uc2a4\ud2b8 DNS \uba85 \ub610\ub294 IP \uc8fc\uc18c, \uc0ac\uc6a9\uc790\uba85(\uc6d0\ub798 root)\uacfc \uc554\ud638 \ubc0f \ud638\uc2a4\ud2b8 \ubd84\ub958\uc5d0 \uc0ac\uc6a9\ud558\ub294 \ub77c\ubca8\uc744 \uc785\ub825\ud574 \uc8fc\uc2ed\uc2dc\uc624. message.desc.primary.storage=\uac01 \ud074\ub7ec\uc2a4\ud130\uc5d0\ub294 \uc801\uc5b4\ub3c4 \ud55c \uac1c \uc774\uc0c1\uc758 \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0 \uc11c\ubc84\uac00 \ud544\uc694\ud569\ub2c8\ub2e4. \uc9c0\uae08 \uc5ec\uae30\uc11c \uccab\ubc88\uc9f8 \uc11c\ubc84\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0\ub294 \ud074\ub7ec\uc2a4\ud130 \ub0b4 \ubd80 \ud638\uc2a4\ud2b8\uc0c1\uc5d0\uc11c \ub3d9\uc791\ud558\ub294 \ubaa8\ub4e0 VM \ub514\uc2a4\ud06c \ubcfc\ub968\uc744 \ud3ec\ud568\ud569\ub2c8\ub2e4. \uae30\ubcf8\uc801\uc73c\ub85c \ud558\uc774\ud37c \ubc14\uc774\uc800\uc5d0\uc11c \uae30\uc220 \uc9c0\uc6d0\ub418\ub294 \ud45c\uc900\uc5d0 \uc900\uac70\ud55c \ud504\ub85c\ud1a0\ucf5c\uc744 \uc0ac\uc6a9\ud574 \uc8fc\uc2ed\uc2dc\uc624. message.desc.secondary.storage=\uac01 Zone\uc5d0\ub294 \uc801\uc5b4\ub3c4 \ud55c \uac1c \uc774\uc0c1\uc758 NFS \uc989 2\ucc28 \uc2a4\ud1a0\ub9ac\uc9c0 \uc11c\ubc84\uac00 \ud544\uc694\ud569\ub2c8\ub2e4. \uc9c0\uae08 \uc5ec\uae30\uc11c \uccab\ubc88\uc9f8 \uc11c\ubc84\ub97c \ucd94\uac00\ud569\ub2c8\ub2e4. 2\ucc28 \uc2a4\ud1a0\ub9ac\uc9c0\ub294 VM \ud15c\ud50c\ub9bf, ISO \uc774\ubbf8\uc9c0 \ubc0f VM \ub514\uc2a4\ud06c \ubcfc\ub968 \uc2a4\ub0c5\uc0f7\uc744 \ud3ec\ud568\ud569\ub2c8\ub2e4. \uc774 \uc11c\ubc84\ub294 Zone\ub0b4 \ubaa8\ub4e0 \ud638\uc2a4\ud2b8\uc5d0\uc11c \uc0ac\uc6a9\ud560 \uc218 \uc788\uc5b4\uc57c \ud569\ub2c8\ub2e4.

IP \uc8fc\uc18c\uc640 \ub0b4\ubcf4\ub0b4\ub0bc \uacbd\ub85c\ub97c \uc785\ub825\ud574 \uc8fc\uc2ed\uc2dc\uc624. message.desc.zone=Zone\uc740 CloudStack \ud658\uacbd\ub0b4 \ucd5c\ub300 \uc870\uc9c1 \ub2e8\uc704\ub85c \uc6d0\ub798 \ub2e8\uc77c \ub370\uc774\ud130 \uc13c\ud130\uc5d0 \ud574\ub2f9\ud569\ub2c8\ub2e4. Zone\uc5d0 \ud574\uc11c \ubb3c\ub9ac\uc801\uc778 \ubd84\ub9ac\uc640 \uc911\ubcf5\uc131\uc774 \uc81c\uacf5\ub429\ub2c8\ub2e4. Zone\uc740 \ud55c \uac1c \uc774\uc0c1 Pod( \uac01 Pod\ub294 \ud638\uc2a4\ud2b8\uc640 \uae30\ubcf8 \uc2a4\ud1a0\ub9ac\uc9c0 \uc11c\ubc84\uc5d0\uc11c \uad6c\uc131)\uc640 Zone\ub0b4 \ubaa8\ub4e0 Pod\ub85c \uacf5\uc720\ub418\ub294 2\ucc28 \uc2a4\ud1a0\ub9ac\uc9c0 \uc11c\ubc84\ub85c \uad6c\uc131\ub429\ub2c8\ub2e4. @@ -1380,6 +1381,7 @@ message.step.3.continue=\uc2e4\ud589\ud558\ub824\uba74 \ub514\uc2a4\ud06c\uc81c\ message.step.3.desc= message.step.4.continue=\uc2e4\ud589\ud558\ub824\uba74 \ub124\ud2b8\uc6cc\ud06c\ub97c \uc801\uc5b4\ub3c4 \ud55c \uac1c \uc774\uc0c1 \uc120\ud0dd\ud574 \uc8fc\uc2ed\uc2dc\uc624. message.step.4.desc=\uac00\uc0c1 \uc778\uc2a4\ud134\uc2a4\uac00 \uc811\uc18d\ud558\ub294 \uae30\ubcf8 \ub124\ud2b8\uc6cc\ud06c\ub97c \uc120\ud0dd\ud574 \uc8fc\uc2ed\uc2dc\uc624. +message.storage.traffic=\ud638\uc2a4\ud2b8\ub098 CloudStack \uc2dc\uc2a4\ud15c VM \ub4f1 \uad00\ub9ac \uc11c\ubc84\uc640 \ud1b5\uc2e0\ud558\ub294 CloudStack \ub0b4\ubd80 \uc790\uc6d0\uac04 \ud2b8\ub798\ud53d\uc785\ub2c8\ub2e4. \uc5ec\uae30\uc11c \uc2a4\ud1a0\ub9ac\uc9c0 \ud2b8\ub798\ud53d\uc744 \uad6c\uc131\ud574 \uc8fc\uc2ed\uc2dc\uc624. message.suspend.project=\ud604\uc7ac \ud504\ub85c\uc81d\ud2b8\ub97c \uc77c\uc2dc\uc815\uc9c0\ud558\uc2dc\uaca0\uc2b5\ub2c8\uae4c? message.template.desc=VM\uc758 \uc2dc\uc791\uc5d0 \uc0ac\uc6a9\ud560 \uc218 \uc788\ub294 OS \uc774\ubbf8\uc9c0 message.tooltip.dns.1=Zone\ub0b4 VM \ub85c \uc0ac\uc6a9\ud558\ub294 DNS \uc11c\ubc84 \uc774\ub984\uc785\ub2c8\ub2e4. Zone \uacf5\uac1c IP \uc8fc\uc18c\uc5d0\uc11c \uc774 \uc11c\ubc84\uc5d0 \ud1b5\uc2e0\ud560 \uc218 \uc788\uc5b4\uc57c \ud569\ub2c8\ub2e4. diff --git a/client/WEB-INF/classes/resources/messages_nb_NO.properties b/client/WEB-INF/classes/resources/messages_nb_NO.properties index 8fba48ca9c4..be412449398 100644 --- a/client/WEB-INF/classes/resources/messages_nb_NO.properties +++ b/client/WEB-INF/classes/resources/messages_nb_NO.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - changed.item.properties=Endrede egenskaper error.could.not.enable.zone=Kunne ikke aktivere sonen error.installWizard.message=Noe gikk galt. G\u00e5 tilbake og korriger feilene. diff --git a/client/WEB-INF/classes/resources/messages_pt_BR.properties b/client/WEB-INF/classes/resources/messages_pt_BR.properties index 23123c16764..fd24f542e8d 100644 --- a/client/WEB-INF/classes/resources/messages_pt_BR.properties +++ b/client/WEB-INF/classes/resources/messages_pt_BR.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. 
- changed.item.properties=Alteradas propriedades do item confirm.enable.s3=Por favor preencha as informa\u00e7\u00f5es abaixo para habilitar suporte a storage secund\u00e1ria fornecida por S3 confirm.enable.swift=Por favor preencha as informa\u00e7\u00f5es abaixo para habilitar suporte ao Swift @@ -1113,6 +1112,7 @@ label.zones=Zonas label.zone.type=Tipo de Zona label.zone.wide=Zone-Wide label.zoneWizard.trafficType.guest=H\u00f3spede\: tr\u00e1fego entre m\u00e1quinas virtuais de usu\u00e1rios finais +label.zoneWizard.trafficType.management=Ger\u00eancia\: tr\u00e1fego entre recursos internos do CloudStack, incluindo quaisquer componentes que se comunicam com o servidor de gerenciamento, tais como hosts e m\u00e1quinas virtuais de sistema do CloudStack label.zoneWizard.trafficType.public=P\u00fablico\: tr\u00e1fego entre a internet e m\u00e1quinas virtuais na nuvem. label.zoneWizard.trafficType.storage=Storage\: tr\u00e1fego entre servidores de storage prim\u00e1ria e secund\u00e1ria, tais como templates de m\u00e1quinas virtuais e snapshots label.zone=Zona diff --git a/client/WEB-INF/classes/resources/messages_ru_RU.properties b/client/WEB-INF/classes/resources/messages_ru_RU.properties index 5818abc9199..b28f6b69e6f 100644 --- a/client/WEB-INF/classes/resources/messages_ru_RU.properties +++ b/client/WEB-INF/classes/resources/messages_ru_RU.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. - changed.item.properties=\u041f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u044b \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u0430 \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u044b confirm.enable.swift=\u0417\u0430\u043f\u043e\u043b\u043d\u0438\u0442\u0435 \u043d\u0438\u0436\u0435\u0441\u043b\u0435\u0434\u0443\u044e\u0449\u0443\u044e \u0438\u043d\u0444\u043e\u0440\u043c\u0430\u0446\u0438\u044e \u0434\u043b\u044f \u0432\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u044f \u043f\u043e\u0434\u0434\u0435\u0440\u0436\u0438 Swift error.could.not.enable.zone=\u041d\u0435 \u0443\u0434\u0430\u043b\u043e\u0441\u044c \u0432\u043a\u043b\u044e\u0447\u0438\u0442\u044c \u0437\u043e\u043d\u0443 @@ -1131,6 +1130,7 @@ message.additional.networks.desc=\u041f\u043e\u0436\u0430\u043b\u0443\u0439\u044 message.add.load.balancer=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0431\u0430\u043b\u0430\u043d\u0441\u0438\u0440\u043e\u0432\u043a\u0443 \u043d\u0430\u0433\u0440\u0443\u0437\u043a\u0438 \u0432 \u0437\u043e\u043d\u0443 message.add.load.balancer.under.ip=\u041f\u0440\u0430\u0432\u0438\u043b\u043e \u0431\u0430\u043b\u0430\u043d\u0441\u0438\u0440\u043e\u0432\u043a\u0438 \u043d\u0430\u0433\u0440\u0443\u0437\u043a\u0438 \u0431\u044b\u043b \u0434\u043e\u0431\u0430\u0432\u043b\u0435\u043d \u0432 IP\: message.add.network=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u043d\u043e\u0432\u0443\u044e \u0441\u0435\u0442\u044c \u0434\u043b\u044f \u0437\u043e\u043d\u044b\: +message.add.pod.during.zone.creation=\u041a\u0430\u0436\u0434\u0430\u044f \u0437\u043e\u043d\u0430 \u0441\u043e\u0434\u0435\u0440\u0436\u0438\u0442 \u043e\u0434\u0438\u043d \u0438\u043b\u0438 \u0431\u043e\u043b\u0435\u0435 \u0441\u0442\u0435\u043d\u0434\u043e\u0432, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u0432\u044b \u0441\u0435\u0439\u0447\u0430\u0441 \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u0435 \u043f\u0435\u0440\u0432\u044b\u043c. 
\u0421\u0442\u0435\u043d\u0434 \u0441\u043e\u0434\u0435\u0440\u0436\u0438\u0442 \u0443\u0437\u043b\u044b \u0438 \u0441\u0435\u0440\u0432\u0435\u0440\u044b \u043e\u0441\u043d\u043e\u0432\u043d\u043e\u0433\u043e \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449\u0430, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u0431\u0443\u0434\u0443\u0442 \u0434\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u044b \u0432 \u043f\u043e\u0441\u043b\u0435\u0434\u043d\u0435\u043c \u0448\u0430\u0433\u0435. \u0414\u043b\u044f \u043d\u0430\u0447\u0430\u043b\u0430 \u043d\u0435\u043e\u0431\u0445\u043e\u0434\u0438\u043c\u043e \u043d\u0430\u0441\u0442\u0440\u043e\u0438\u0442\u044c \u0434\u0438\u0430\u043f\u0430\u0437\u043e\u043d \u0440\u0435\u0437\u0435\u0440\u0432\u043d\u044b\u0445 \u0430\u0434\u0440\u0435\u0441\u043e\u0432 IP \u0434\u043b\u044f \u0432\u043d\u0443\u0442\u0440\u0435\u043d\u043d\u0435\u0439 \u0441\u0435\u0442\u0438 \u0443\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u044f. \u0414\u0438\u0430\u043f\u0430\u0437\u043e\u043d \u0440\u0435\u0437\u0435\u0440\u0432\u043d\u044b\u0445 IP \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u0443\u043d\u0438\u043a\u0430\u043b\u044c\u043d\u044b\u043c \u0434\u043b\u044f \u043a\u0430\u0436\u0434\u043e\u0439 \u0437\u043e\u043d\u044b \u043e\u0431\u043b\u0430\u043a\u0430. message.add.pod=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u043d\u043e\u0432\u044b\u0439 \u0441\u0442\u0435\u043d\u0434 \u0434\u043b\u044f \u0437\u043e\u043d\u044b message.add.primary.storage=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u043e\u0441\u043d\u043e\u0432\u043d\u043e\u0435 \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449\u0435 \u0434\u043b\u044f \u0437\u043e\u043d\u044b , \u0441\u0442\u0435\u043d\u0434\u0430 message.add.primary=\u0423\u043a\u0430\u0436\u0438\u0442\u0435 \u0441\u043b\u0435\u0434\u0443\u044e\u0449\u0438\u0435 \u043f\u0430\u0440\u0430\u043c\u0435\u0442\u0440\u044b \u0434\u043b\u044f \u0434\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u044f \u043d\u043e\u0432\u043e\u0433\u043e \u043e\u0441\u043d\u043e\u0432\u043d\u043e\u0433\u043e \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449\u0430 @@ -1185,6 +1185,7 @@ message.delete.user=\u041f\u043e\u0434\u0442\u0432\u0435\u0440\u0434\u0438\u0442 message.desc.advanced.zone=\u0414\u043b\u044f \u0431\u043e\u043b\u0435\u0435 \u0441\u043b\u043e\u0436\u043d\u044b\u0445 \u0441\u0435\u0442\u0435\u0432\u044b\u0445 \u0442\u043e\u043f\u043e\u043b\u043e\u0433\u0438\u0439. \u042d\u0442\u0430 \u0441\u0435\u0442\u0435\u0432\u0430\u044f \u043c\u043e\u0434\u0435\u043b\u044c \u043e\u0431\u0435\u0441\u043f\u0435\u0447\u0438\u0432\u0430\u0435\u0442 \u043c\u0430\u043a\u0441\u0438\u043c\u0430\u043b\u044c\u043d\u0443\u044e \u0433\u0438\u0431\u043a\u043e\u0441\u0442\u044c \u0432 \u043e\u043f\u0440\u0435\u0434\u0435\u043b\u0435\u043d\u0438\u0438 \u0433\u043e\u0441\u0442\u0435\u0432\u043e\u0439 \u0441\u0435\u0442\u0438 \u0438 \u043f\u0440\u0435\u0434\u043e\u0441\u0442\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u0443\u0441\u043b\u0443\u0433, \u0442\u0430\u043a\u0438\u0445 \u043a\u0430\u043a \u043c\u0435\u0436\u0441\u0435\u0442\u0435\u0432\u043e\u0439 \u044d\u043a\u0440\u0430\u043d, VPN, \u0438\u043b\u0438 \u043f\u043e\u0434\u0434\u0435\u0440\u0436\u043a\u0430 \u0431\u0430\u043b\u0430\u043d\u0441\u0438\u0440\u043e\u0432\u043a\u0438 \u043d\u0430\u0433\u0440\u0443\u0437\u043a\u0438. 
message.desc.basic.zone=\u041f\u0440\u0435\u0434\u043e\u0441\u0442\u0430\u0432\u043b\u044f\u0435\u0442 \u0435\u0434\u0438\u0441\u0442\u0432\u0435\u043d\u043d\u0443\u044e \u0441\u0435\u0442\u044c, \u0433\u0434\u0435 \u043a\u0430\u0436\u0434\u0430\u044f \u0412\u041c \u0438\u043c\u0435\u0435\u0442 \u00ab\u0431\u0435\u043b\u044b\u0439\u00bb IP-\u0430\u0434\u0440\u0435\u0441 \u0441\u0435\u0442\u0438. \u0418\u0437\u043e\u043b\u044f\u0446\u0438\u0438 \u0433\u043e\u0441\u0442\u0435\u0439 \u043c\u043e\u0436\u043d\u043e \u0434\u043e\u0431\u0438\u0442\u044c\u0441\u044f \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u043d\u0438\u0435\u043c \u0441\u0435\u0442\u0438 3-\u0433\u043e \u0443\u0440\u043e\u0432\u043d\u044f, \u043d\u0430\u043f\u0440\u0438\u043c\u0435\u0440, \u0433\u0440\u0443\u043f\u043f\u044b \u0431\u0435\u0437\u043e\u043f\u0430\u0441\u043d\u043e\u0441\u0442\u0438 (\u0444\u0438\u043b\u044c\u0442\u0440\u0430\u0446\u0438\u044f IP-\u0432\u0434\u0440\u0435\u0441\u043e\u0432) message.desc.cluster=\u041a\u0430\u0436\u0434\u044b\u0439 \u0441\u0442\u0435\u043d\u0434 \u0434\u043e\u043b\u0436\u0435\u043d \u0438\u043c\u0435\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u043b\u0438 \u0431\u043e\u043b\u0435\u0435 \u043a\u043b\u0430\u0441\u0442\u0435\u0440\u043e\u0432, \u043f\u0435\u0440\u0432\u044b\u0439 \u0438\u0437 \u043a\u043e\u0442\u043e\u0440\u044b\u0445 \u0432\u044b \u0441\u0435\u0439\u0447\u0430\u0441 \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u0435. \u041a\u043b\u0430\u0441\u0442\u0435\u0440 \u043f\u0440\u0435\u0434\u043e\u0441\u0442\u0430\u0432\u043b\u044f\u0435\u0442 \u0433\u0440\u0443\u043f\u043f\u0443 \u0443\u0437\u043b\u043e\u0432. \u0423\u0437\u043b\u044b \u0432 \u043a\u043b\u0430\u0441\u0442\u0435\u0440\u0435 \u0438\u043c\u0435\u044e\u0442 \u043e\u0434\u0438\u043d\u0430\u043a\u043e\u0432\u043e\u0435 \u043e\u0431\u043e\u0440\u0443\u0434\u043e\u0432\u0430\u043d\u0438\u0435, \u0437\u0430\u043f\u0443\u0441\u043a\u0430\u0435\u0442\u0441\u044f \u0447\u0435\u0440\u0435\u0437 \u043e\u0434\u0438\u043d \u0433\u0438\u043f\u0435\u0440\u0432\u0438\u0437\u043e\u0440, \u043d\u0430\u0445\u043e\u0434\u044f\u0442\u0441\u044f \u0432 \u043e\u0434\u043d\u043e\u0439 \u0441\u0435\u0442\u0438 \u0438 \u0438\u043c\u0435\u044e\u0442 \u0434\u043e\u0441\u0442\u0443\u043f \u043a \u043e\u0434\u043d\u043e\u043c\u0443 \u0438 \u0442\u043e\u043c\u0443 \u0436\u0435 \u043e\u0442\u043a\u0440\u044b\u0442\u043e\u043c\u0443 \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449\u0443. \u041a\u0430\u0436\u0434\u044b\u0439 \u043a\u043b\u0430\u0441\u0442\u0435\u0440 \u0441\u043e\u0434\u0435\u0440\u0436\u0438\u0442 \u043e\u0434\u0438\u043d \u0438\u043b\u0438 \u0431\u043e\u043b\u0435\u0435 \u0443\u0437\u043b\u043e\u0432, \u0430 \u0442\u0430\u043a\u0436\u0435 \u0438\u0435\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u043b\u0438 \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u043e \u043e\u0441\u043d\u043e\u0432\u043d\u044b\u0445 \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449. 
+message.desc.host=\u041a\u0430\u0436\u0434\u044b\u0439 \u043a\u043b\u0430\u0441\u0442\u0435\u0440 \u0434\u043e\u043b\u0436\u0435\u043d \u0441\u043e\u0434\u0435\u0440\u0436\u0430\u0442\u044c \u043a\u0430\u043a \u043c\u0438\u043d\u0438\u043c\u0443\u043c \u043e\u0434\u0438\u043d \u0443\u0437\u0435\u043b (\u043a\u043e\u043c\u043f\u044c\u044e\u0442\u0435\u0440) \u0434\u043b\u044f \u0437\u0430\u043f\u0443\u0441\u043a\u0430 \u0412\u041c, \u043f\u0435\u0440\u0432\u044b\u0439 \u0438\u0437 \u043a\u043b\u0430\u0441\u0442\u0435\u0440 \u0432\u044b \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u0435 \u0441\u0435\u0439\u0447\u0430\u0441. \u0414\u043b\u044f \u0440\u0430\u0431\u043e\u0442\u044b \u0443\u0437\u043b\u0430 \u0432 CloudStack \u0432\u0430\u0436\u043d\u0430 \u0443\u0441\u0442\u0430\u043d\u043e\u0432\u043a\u0430 \u0433\u0438\u043f\u0435\u0440\u0432\u0438\u0437\u043e\u0440\u0430 \u043d\u0430 \u0443\u0437\u0435\u043b, \u043f\u0440\u0438\u0432\u044f\u0437\u043a\u0430 IP \u043a \u0443\u0437\u043b\u0443 \u0438 \u0441\u043e\u0435\u0434\u0438\u043d\u0435\u043d\u0438\u0435 \u0443\u0437\u043b\u0430 \u0441 \u0441\u0435\u0440\u0432\u0435\u0440\u043e\u043c \u0443\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u044f CloudStack.

\u0423\u043a\u0430\u0436\u0438\u0442\u0435 \u0438\u043c\u044f DNS \u0438\u043b\u0438 \u0430\u0434\u0440\u0435\u0441 IP, \u0438\u043c\u044f \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f \u0438 \u043f\u0430\u0440\u043e\u043b\u044c \u043a \u041e\u0421 (\u043e\u0431\u044b\u0447\u043d\u043e root), \u0430 \u0442\u0430\u043a\u0436\u0435 \u043c\u0435\u0442\u043a\u0438 \u0434\u043b\u044f \u0433\u0440\u0443\u043f\u043f\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f \u0443\u0437\u043b\u043e\u0432. message.desc.primary.storage=\u041a\u0430\u0436\u0434\u0430\u044f \u0433\u0440\u0443\u043f\u043f\u0430 \u0434\u043e\u043b\u0436\u043d\u0430 \u0441\u043e\u0434\u0435\u0440\u0436\u0430\u0442\u044c \u043e\u0434\u0438\u043d \u0438\u043b\u0438 \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u043e \u043f\u0435\u0440\u0432\u0438\u0447\u043d\u044b\u0445 \u0441\u0435\u0440\u0432\u0435\u0440\u043e\u0432 \u0445\u0440\u0430\u043d\u0435\u043d\u0438\u044f \u0434\u0430\u043d\u043d\u044b\u0445, \u0438 \u043c\u044b \u0434\u043e\u0431\u0430\u0432\u0438\u043c \u043f\u0435\u0440\u0432\u044b\u0439 \u0441\u0435\u0439\u0447\u0430\u0441. \u041f\u0435\u0440\u0432\u0438\u0447\u043d\u0430\u044f \u0445\u0440\u0430\u043d\u0435\u043d\u0438\u044f \u0441\u043e\u0434\u0435\u0440\u0436\u0438\u0442 \u043b\u043e\u0433\u0438\u0447\u0435\u0441\u043a\u0438\u0435 \u0440\u0430\u0437\u0434\u0435\u043b\u044b \u0436\u0435\u0441\u0442\u043a\u043e\u0433\u043e \u0434\u0438\u0441\u043a\u0430 \u0434\u043b\u044f \u0432\u0441\u0435\u0445 \u0432\u0438\u0440\u0442\u0443\u0430\u043b\u044c\u043d\u044b\u0445 \u043c\u0430\u0448\u0438\u043d, \u0440\u0430\u0431\u043e\u0442\u0430\u044e\u0449\u0438\u0445 \u043d\u0430 \u0443\u0437\u043b\u0430\u0445 \u043a\u043b\u0430\u0441\u0442\u0435\u0440\u0430. \u0418\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0439\u0442\u0435 \u043b\u044e\u0431\u043e\u0439 \u0441\u043e\u0432\u043c\u0435\u0441\u0442\u0438\u043c\u044b\u0439 \u043f\u0440\u043e\u0442\u043e\u043a\u043e\u043b, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u043f\u043e\u0434\u0434\u0435\u0440\u0436\u0438\u0432\u0430\u0435\u0442 \u043e\u0441\u043d\u043e\u0432\u043d\u044b\u0435 \u0433\u0438\u043f\u0435\u0440\u0432\u0438\u0437\u043e\u0440\u0430. message.desc.secondary.storage=\u041a\u0430\u0436\u0434\u0430\u044f \u0437\u043e\u043d\u0430 \u0434\u043e\u043b\u0436\u043d\u0430 \u043e\u0431\u043b\u0430\u0434\u0430\u0442\u044c \u0445\u043e\u0442\u044f \u0431\u044b \u043e\u0434\u043d\u0438\u043c \u0441\u0435\u0440\u0432\u0435\u0440\u043e\u043c NFS \u0438\u043b\u0438 \u0434\u043e\u043f\u043e\u043b\u043d\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u043c \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449\u0435\u043c \u0438 \u0438\u0445 \u043d\u0430\u0434\u043e \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0432 \u043f\u0435\u0440\u0432\u0443\u044e \u043e\u0447\u0435\u0440\u0435\u0434\u044c. \u0414\u043e\u043f\u043e\u043b\u043d\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u0445\u0440\u0430\u043d\u0438\u043b\u0438\u0449\u0435 \u043f\u0440\u0435\u0434\u043d\u0430\u0437\u043d\u0430\u0447\u0435\u043d\u043e \u0434\u043b\u044f \u0445\u0440\u0430\u043d\u0435\u043d\u0438\u044f \u0448\u0430\u0431\u043b\u043e\u043d\u043e\u0432 \u0412\u041c, \u043e\u0431\u0440\u0430\u0437\u043e\u0432 ISO \u0438 \u0441\u043d\u0438\u043c\u043a\u043e\u0432 \u0412\u041c. 
\u042d\u0442\u043e\u0442 \u0441\u0435\u0440\u0432\u0435\u0440 \u0434\u043e\u043b\u0436\u0435\u043d \u0431\u044b\u0442\u044c \u0434\u043e\u0441\u0442\u0443\u043f\u0435\u043d \u0434\u043b\u044f \u0432\u0441\u0435\u0445 \u0443\u0437\u043b\u043e\u0432 \u0437\u043e\u043d\u044b.

\u041f\u0440\u0435\u0434\u043e\u0441\u0442\u0430\u0432\u0438\u0442\u044c IP-\u0430\u0434\u0440\u0435\u0441 \u0438 \u043f\u0443\u0442\u044c. message.desc.zone=layer 3 @@ -1309,6 +1310,7 @@ message.step.3.continue=\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0434\ message.step.3.desc= message.step.4.continue=\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u0445\u043e\u0442\u044f \u0431\u044b \u043e\u0434\u043d\u0443 \u0441\u0435\u0442\u044c \u0434\u043b\u044f \u043f\u0440\u043e\u0434\u043e\u043b\u0436\u0435\u043d\u0438\u044f. message.step.4.desc=\u0412\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u043e\u0441\u043d\u043e\u0432\u043d\u0443\u044e \u0441\u0435\u0442\u044c, \u043a \u043a\u043e\u0442\u043e\u0440\u043e\u0439 \u0431\u0443\u0434\u0435\u0442 \u043f\u043e\u0434\u043a\u043b\u044e\u0447\u0435\u043d\u0430 \u043c\u0430\u0448\u0438\u043d\u0430. +message.storage.traffic=\u0422\u0440\u0430\u0444\u0438\u043a \u043c\u0435\u0436\u0434\u0443 \u0432\u043d\u0443\u0442\u0440\u0435\u043d\u043d\u0438\u043c\u0438 \u0440\u0435\u0441\u0443\u0440\u0441\u0430\u043c\u0438 CloudStack, \u0432\u043a\u043b\u044e\u0447\u0430\u044f \u0432\u0441\u0435 \u043a\u043e\u043c\u043f\u043e\u043d\u0435\u043d\u0442\u044b, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u0432\u0437\u0430\u0438\u043c\u043e\u0434\u0435\u0439\u0441\u0442\u0432\u0443\u044e\u0442 \u0441 \u0441\u0435\u0440\u0432\u0435\u0440\u043e\u043c \u0443\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u0438\u044f, \u0442\u0430\u043a\u0438\u0435 \u043a\u0430\u043a \u0432\u0438\u0440\u0442\u0443\u0430\u043b\u044c\u043d\u044b\u0435 \u0445\u043e\u0441\u0442\u044b \u0438 CloudStack \u0441\u0438\u0441\u0442\u0435\u043c\u044b. \u041d\u0430\u0441\u0442\u0440\u043e\u0439\u0442\u0435 \u0442\u0440\u0430\u0444\u0438\u043a \u0445\u0440\u0430\u043d\u0435\u043d\u0438\u044f \u0437\u0434\u0435\u0441\u044c. message.suspend.project=\u0412\u044b \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0442\u0435\u043b\u044c\u043d\u043e \u0445\u043e\u0442\u0438\u0442\u0435 \u043f\u0440\u0438\u043e\u0441\u0442\u0430\u043d\u043e\u0432\u0438\u0442\u044c \u043f\u0440\u043e\u0435\u043a\u0442? message.template.desc=\u041e\u0431\u0440\u0430\u0437 \u041e\u0421, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u043c\u043e\u0436\u043d\u043e \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u044c \u0432 \u043a\u0430\u0447\u0435\u0441\u0442\u0432\u0435 \u0437\u0430\u0433\u0440\u0443\u0437\u043e\u0447\u043d\u043e\u0439 \u0432 \u0412\u041c message.tooltip.dns.1=\u0418\u043c\u044f \u0441\u0435\u0440\u0432\u0435\u0440\u0430 DNS \u0434\u043b\u044f \u0412\u041c \u044d\u0442\u043e\u0439 \u0437\u043e\u043d\u044b. \u041f\u0443\u0431\u043b\u0438\u0447\u043d\u044b\u0435 IP-\u0430\u0434\u0440\u0435\u0441\u0430 \u044d\u0442\u043e\u0439 \u0437\u043e\u043d\u044b \u0434\u043e\u043b\u0436\u043d\u044b \u0438\u043c\u0435\u0442\u044c \u043c\u0430\u0440\u0448\u0440\u0443\u0442 \u0434\u043e \u044d\u0442\u043e\u0433\u043e \u0441\u0435\u0440\u0432\u0435\u0440\u0430. diff --git a/client/WEB-INF/classes/resources/messages_zh_CN.properties b/client/WEB-INF/classes/resources/messages_zh_CN.properties index 687ef60b3c1..30daacc3627 100644 --- a/client/WEB-INF/classes/resources/messages_zh_CN.properties +++ b/client/WEB-INF/classes/resources/messages_zh_CN.properties @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. 
- changed.item.properties=\u66f4\u6539\u9879\u76ee\u5c5e\u6027 confirm.enable.s3=\u8bf7\u586b\u5199\u4e0b\u5217\u4fe1\u606f\u4ee5\u542f\u7528\u652f\u6301S3\u7684\u4e8c\u7ea7\u5b58\u50a8 confirm.enable.swift=\u8bf7\u586b\u5199\u4ee5\u4e0b\u4fe1\u606f\u4ee5\u542f\u7528\u5bf9 SWIFT \u7684\u652f\u6301 @@ -622,12 +621,17 @@ label.keyboard.type=\u952e\u76d8\u7c7b\u578b label.key=\u5bc6\u94a5 label.kvm.traffic.label=KVM \u6d41\u91cf\u6807\u7b7e label.label=\u6807\u7b7e +label.lang.arabic=\u963f\u62c9\u4f2f\u8bed label.lang.brportugese=\u5df4\u897f\u8461\u8404\u7259\u8bed +label.lang.catalan=\u52a0\u6cf0\u7f57\u5c3c\u4e9a\u8bed label.lang.chinese=\u7b80\u4f53\u4e2d\u6587 label.lang.english=\u82f1\u8bed label.lang.french=\u6cd5\u8bed +label.lang.german=\u5fb7\u8bed +label.lang.italian=\u610f\u5927\u5229\u8bed label.lang.japanese=\u65e5\u8bed label.lang.korean=\u97e9\u56fd\u8bed +label.lang.norwegian=\u632a\u5a01\u8bed label.lang.russian=\u4fc4\u8bed label.lang.spanish=\u897f\u73ed\u7259\u8bed label.last.disconnected=\u4e0a\u6b21\u65ad\u5f00\u8fde\u63a5\u65f6\u95f4 @@ -1165,6 +1169,7 @@ label.zone.type=\u533a\u57df\u7c7b\u578b label.zone=\u533a\u57df label.zone.wide=\u6574\u4e2a\u533a\u57df label.zoneWizard.trafficType.guest=\u6765\u5bbe\u7f51\u7edc\: \u5ba2\u6237\u865a\u62df\u673a\u4e4b\u95f4\u7684\u7f51\u7edc\u6d41\u91cf +label.zoneWizard.trafficType.management=\u7ba1\u7406\u7f51\: CloudStack\u5185\u90e8\u8d44\u6e90\u4e4b\u95f4\u7684\u7f51\u7edc\u6d41\u91cf, \u5305\u62ec\u4e0e\u7ba1\u7406\u670d\u52a1\u5668\u4ea4\u4e92\u7684\u4efb\u4f55\u7ec4\u4ef6, \u6bd4\u5982\u4e3b\u673a\u548cCloudStack\u7cfb\u7edf\u865a\u62df\u673a label.zoneWizard.trafficType.public=\u516c\u5171\u7f51\u7edc\: \u4e91\u73af\u5883\u4e2d\u865a\u62df\u673a\u4e0e\u56e0\u7279\u7f51\u4e4b\u95f4\u7684\u7f51\u7edc\u6d41\u91cf. 
label.zoneWizard.trafficType.storage=\u5b58\u50a8\u7f51\: \u4e3b\u5b58\u50a8\u4e0e\u4e8c\u7ea7\u5b58\u50a8\u670d\u52a1\u5668\u4e4b\u95f4\u7684\u6d41\u91cf, \u6bd4\u5982\u865a\u673a\u6a21\u677f\u548c\u5feb\u7167 managed.state=\u6258\u7ba1\u72b6\u6001 @@ -1255,6 +1260,7 @@ message.add.load.balancer=\u5411\u533a\u57df\u4e2d\u6dfb\u52a0\u4e00\u4e2a\u8d1f message.add.load.balancer.under.ip=\u5df2\u5728\u4ee5\u4e0b IP \u4e0b\u6dfb\u52a0\u8d1f\u8f7d\u5e73\u8861\u5668\u89c4\u5219\: message.add.network=\u4e3a\u533a\u57df\u6dfb\u52a0\u4e00\u4e2a\u65b0\u7f51\u7edc\: message.add.new.gateway.to.vpc=\u8bf7\u6307\u5b9a\u5c06\u65b0\u7f51\u5173\u6dfb\u52a0\u5230\u6b64 VPC \u6240\u9700\u7684\u4fe1\u606f\u3002 +message.add.pod.during.zone.creation=\u6bcf\u4e2a\u533a\u57df\u4e2d\u5fc5\u987b\u5305\u542b\u4e00\u4e2a\u6216\u591a\u4e2a\u63d0\u4f9b\u70b9\uff0c\u73b0\u5728\u6211\u4eec\u5c06\u6dfb\u52a0\u7b2c\u4e00\u4e2a\u63d0\u4f9b\u70b9\u3002\u63d0\u4f9b\u70b9\u4e2d\u5305\u542b\u4e3b\u673a\u548c\u4e3b\u5b58\u50a8\u670d\u52a1\u5668\uff0c\u60a8\u5c06\u5728\u968f\u540e\u7684\u67d0\u4e2a\u6b65\u9aa4\u4e2d\u6dfb\u52a0\u8fd9\u4e9b\u4e3b\u673a\u548c\u670d\u52a1\u5668\u3002\u9996\u5148\uff0c\u8bf7\u4e3a CloudStack \u7684\u5185\u90e8\u7ba1\u7406\u6d41\u91cf\u914d\u7f6e\u4e00\u4e2a\u9884\u7559 IP \u5730\u5740\u8303\u56f4\u3002\u9884\u7559\u7684 IP \u8303\u56f4\u5bf9\u4e91\u4e2d\u7684\u6bcf\u4e2a\u533a\u57df\u6765\u8bf4\u5fc5\u987b\u552f\u4e00\u3002 message.add.pod=\u4e3a\u533a\u57df \u6dfb\u52a0\u4e00\u4e2a\u65b0\u63d0\u4f9b\u70b9 message.add.primary.storage=\u4e3a\u533a\u57df \u3001\u63d0\u4f9b\u70b9 \u6dfb\u52a0\u4e00\u4e2a\u65b0\u7684\u4e3b\u5b58\u50a8 message.add.primary=\u8bf7\u6307\u5b9a\u4ee5\u4e0b\u53c2\u6570\u4ee5\u6dfb\u52a0\u4e00\u4e2a\u65b0\u4e3b\u5b58\u50a8 @@ -1316,6 +1322,7 @@ message.delete.VPN.gateway=\u8bf7\u786e\u8ba4\u60a8\u786e\u5b9e\u8981\u5220\u966 message.desc.advanced.zone=\u9002\u7528\u4e8e\u66f4\u52a0\u590d\u6742\u7684\u7f51\u7edc\u62d3\u6251\u3002\u6b64\u7f51\u7edc\u6a21\u5f0f\u5728\u5b9a\u4e49\u6765\u5bbe\u7f51\u7edc\u5e76\u63d0\u4f9b\u9632\u706b\u5899\u3001VPN \u6216\u8d1f\u8f7d\u5e73\u8861\u5668\u652f\u6301\u7b49\u81ea\u5b9a\u4e49\u7f51\u7edc\u65b9\u6848\u65b9\u9762\u63d0\u4f9b\u4e86\u6700\u5927\u7684\u7075\u6d3b\u6027\u3002 message.desc.basic.zone=\u63d0\u4f9b\u4e00\u4e2a\u7f51\u7edc\uff0c\u5c06\u76f4\u63a5\u4ece\u6b64\u7f51\u7edc\u4e2d\u4e3a\u6bcf\u4e2a VM \u5b9e\u4f8b\u5206\u914d\u4e00\u4e2a IP\u3002\u53ef\u4ee5\u901a\u8fc7\u5b89\u5168\u7ec4\u7b49\u7b2c 3 \u5c42\u65b9\u5f0f\u63d0\u4f9b\u6765\u5bbe\u9694\u79bb(IP \u5730\u5740\u6e90\u8fc7\u6ee4)\u3002 message.desc.cluster=\u6bcf\u4e2a\u63d0\u4f9b\u70b9\u4e2d\u5fc5\u987b\u5305\u542b\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u96c6\uff0c\u73b0\u5728\u6211\u4eec\u5c06\u6dfb\u52a0\u7b2c\u4e00\u4e2a\u7fa4\u96c6\u3002\u7fa4\u96c6\u63d0\u4f9b\u4e86\u4e00\u79cd\u7f16\u7ec4\u4e3b\u673a\u7684\u65b9\u6cd5\u3002\u7fa4\u96c6\u4e2d\u7684\u6240\u6709\u4e3b\u673a\u90fd\u5177\u6709\u76f8\u540c\u7684\u786c\u4ef6\uff0c\u8fd0\u884c\u76f8\u540c\u7684\u865a\u62df\u673a\u7ba1\u7406\u7a0b\u5e8f\uff0c\u4f4d\u4e8e\u76f8\u540c\u7684\u5b50\u7f51\u4e2d\uff0c\u5e76\u8bbf\u95ee\u76f8\u540c\u7684\u5171\u4eab\u5b58\u50a8\u3002\u6bcf\u4e2a\u7fa4\u96c6\u7531\u4e00\u4e2a\u6216\u591a\u4e2a\u4e3b\u673a\u4ee5\u53ca\u4e00\u4e2a\u6216\u591a\u4e2a\u4e3b\u5b58\u50a8\u670d\u52a1\u5668\u7ec4\u6210\u3002 +message.desc.host=\u6bcf\u4e2a\u7fa4\u96c6\u4e2d\u5fc5\u987b\u81f3\u5c11\u5305\u542b\u4e00\u4e2a\u4e3b\u673a\u4ee5\u4f9b\u6765\u5bbe VM 
\u5728\u4e0a\u9762\u8fd0\u884c\uff0c\u73b0\u5728\u6211\u4eec\u5c06\u6dfb\u52a0\u7b2c\u4e00\u4e2a\u4e3b\u673a\u3002\u8981\u4f7f\u4e3b\u673a\u5728 CloudStack \u4e2d\u8fd0\u884c\uff0c\u5fc5\u987b\u5728\u6b64\u4e3b\u673a\u4e0a\u5b89\u88c5\u865a\u62df\u673a\u7ba1\u7406\u7a0b\u5e8f\u8f6f\u4ef6\uff0c\u4e3a\u5176\u5206\u914d\u4e00\u4e2a IP \u5730\u5740\uff0c\u5e76\u786e\u4fdd\u5c06\u5176\u8fde\u63a5\u5230 CloudStack \u7ba1\u7406\u670d\u52a1\u5668\u3002

\u8bf7\u63d0\u4f9b\u4e3b\u673a\u7684 DNS \u6216 IP \u5730\u5740\u3001\u7528\u6237\u540d(\u901a\u5e38\u4e3a root)\u548c\u5bc6\u7801\uff0c\u4ee5\u53ca\u7528\u4e8e\u5bf9\u4e3b\u673a\u8fdb\u884c\u5206\u7c7b\u7684\u4efb\u4f55\u6807\u7b7e\u3002 message.desc.primary.storage=\u6bcf\u4e2a\u7fa4\u96c6\u4e2d\u5fc5\u987b\u5305\u542b\u4e00\u4e2a\u6216\u591a\u4e2a\u4e3b\u5b58\u50a8\u670d\u52a1\u5668\uff0c\u73b0\u5728\u6211\u4eec\u5c06\u6dfb\u52a0\u7b2c\u4e00\u4e2a\u4e3b\u5b58\u50a8\u670d\u52a1\u5668\u3002\u4e3b\u5b58\u50a8\u4e2d\u5305\u542b\u5728\u7fa4\u96c6\u4e2d\u7684\u4e3b\u673a\u4e0a\u8fd0\u884c\u7684\u6240\u6709 VM \u7684\u78c1\u76d8\u5377\u3002\u8bf7\u4f7f\u7528\u5e95\u5c42\u865a\u62df\u673a\u7ba1\u7406\u7a0b\u5e8f\u652f\u6301\u7684\u7b26\u5408\u6807\u51c6\u7684\u534f\u8bae\u3002 message.desc.secondary.storage=\u6bcf\u4e2a\u533a\u57df\u4e2d\u5fc5\u987b\u81f3\u5c11\u5305\u542b\u4e00\u4e2a NFS \u6216\u8f85\u52a9\u5b58\u50a8\u670d\u52a1\u5668\uff0c\u73b0\u5728\u6211\u4eec\u5c06\u6dfb\u52a0\u7b2c\u4e00\u4e2a NFS \u6216\u8f85\u52a9\u5b58\u50a8\u670d\u52a1\u5668\u3002\u8f85\u52a9\u5b58\u50a8\u7528\u4e8e\u5b58\u50a8 VM \u6a21\u677f\u3001ISO \u6620\u50cf\u548c VM \u78c1\u76d8\u5377\u5feb\u7167\u3002\u6b64\u670d\u52a1\u5668\u5fc5\u987b\u5bf9\u533a\u57df\u4e2d\u7684\u6240\u6709\u670d\u52a1\u5668\u53ef\u7528\u3002

\u8bf7\u63d0\u4f9b IP \u5730\u5740\u548c\u5bfc\u51fa\u8def\u5f84\u3002 message.desc.zone=\u533a\u57df\u662f CloudStack \u4e2d\u6700\u5927\u7684\u7ec4\u7ec7\u5355\u4f4d\uff0c\u4e00\u4e2a\u533a\u57df\u901a\u5e38\u4e0e\u4e00\u4e2a\u6570\u636e\u4e2d\u5fc3\u76f8\u5bf9\u5e94\u3002\u533a\u57df\u53ef\u63d0\u4f9b\u7269\u7406\u9694\u79bb\u548c\u5197\u4f59\u3002\u4e00\u4e2a\u533a\u57df\u7531\u4e00\u4e2a\u6216\u591a\u4e2a\u63d0\u4f9b\u70b9\u4ee5\u53ca\u7531\u533a\u57df\u4e2d\u7684\u6240\u6709\u63d0\u4f9b\u70b9\u5171\u4eab\u7684\u4e00\u4e2a\u8f85\u52a9\u5b58\u50a8\u670d\u52a1\u5668\u7ec4\u6210\uff0c\u5176\u4e2d\u6bcf\u4e2a\u63d0\u4f9b\u70b9\u4e2d\u5305\u542b\u591a\u4e2a\u4e3b\u673a\u548c\u4e3b\u5b58\u50a8\u670d\u52a1\u5668\u3002 @@ -1445,6 +1452,7 @@ message.step.3.continue=\u8bf7\u9009\u62e9\u4e00\u79cd\u78c1\u76d8\u65b9\u6848\u message.step.3.desc= message.step.4.continue=\u8bf7\u81f3\u5c11\u9009\u62e9\u4e00\u4e2a\u7f51\u7edc\u4ee5\u7ee7\u7eed message.step.4.desc=\u8bf7\u9009\u62e9\u865a\u62df\u5b9e\u4f8b\u8981\u8fde\u63a5\u5230\u7684\u4e3b\u7f51\u7edc\u3002 +message.storage.traffic=CloudStack \u5185\u90e8\u8d44\u6e90(\u5305\u62ec\u4e0e\u7ba1\u7406\u670d\u52a1\u5668\u901a\u4fe1\u7684\u4efb\u4f55\u7ec4\u4ef6\uff0c\u4f8b\u5982\u4e3b\u673a\u548c CloudStack \u7cfb\u7edf VM)\u4e4b\u95f4\u7684\u6d41\u91cf\u3002\u8bf7\u5728\u6b64\u5904\u914d\u7f6e\u5b58\u50a8\u6d41\u91cf\u3002 message.suspend.project=\u662f\u5426\u786e\u5b9e\u8981\u6682\u505c\u6b64\u9879\u76ee? message.template.desc=\u53ef\u7528\u4e8e\u542f\u52a8 VM \u7684\u64cd\u4f5c\u7cfb\u7edf\u6620\u50cf message.tooltip.dns.1=\u4f9b\u533a\u57df\u4e2d\u7684 VM \u4f7f\u7528\u7684 DNS \u670d\u52a1\u5668\u540d\u79f0\u3002\u533a\u57df\u7684\u516c\u7528 IP \u5730\u5740\u5fc5\u987b\u8def\u7531\u5230\u6b64\u670d\u52a1\u5668\u3002 diff --git a/client/pom.xml b/client/pom.xml index ca92c7efd12..672f4ebfa85 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -30,6 +30,11 @@ cloud-plugin-acl-static-role-based ${project.version} + + org.apache.cloudstack + cloud-plugin-dedicated-resources + ${project.version} + org.apache.cloudstack cloud-plugin-api-limit-account-based @@ -136,6 +141,11 @@ cloud-plugin-planner-implicit-dedication ${project.version} + + org.apache.cloudstack + cloud-plugin-explicit-dedication + ${project.version} + org.apache.cloudstack cloud-plugin-host-allocator-random diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in index 92fdf4f1ce3..01936421d50 100644 --- a/client/tomcatconf/applicationContext.xml.in +++ b/client/tomcatconf/applicationContext.xml.in @@ -158,6 +158,14 @@ + + + + + + + + - @@ -171,7 +170,7 @@ - + @@ -260,13 +259,22 @@ - + + - + + + + + + + diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in index 6fa9d38baa4..16fd88337fb 100644 --- a/client/tomcatconf/nonossComponentContext.xml.in +++ b/client/tomcatconf/nonossComponentContext.xml.in @@ -77,6 +77,10 @@ + + + + + + + + + + + diff --git a/client/tomcatconf/simulatorComponentContext.xml.in b/client/tomcatconf/simulatorComponentContext.xml.in index 652c4c824ff..d71cf162569 100644 --- a/client/tomcatconf/simulatorComponentContext.xml.in +++ b/client/tomcatconf/simulatorComponentContext.xml.in @@ -234,4 +234,10 @@ + + + + + + diff --git a/core/src/com/cloud/agent/api/GetVmDiskStatsAnswer.java b/core/src/com/cloud/agent/api/GetVmDiskStatsAnswer.java new file mode 100644 index 00000000000..18cb7948a38 --- /dev/null +++ b/core/src/com/cloud/agent/api/GetVmDiskStatsAnswer.java @@ 
-0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api; + +import java.util.HashMap; +import java.util.List; + +import com.cloud.agent.api.LogLevel.Log4jLevel; + +@LogLevel(Log4jLevel.Trace) +public class GetVmDiskStatsAnswer extends Answer { + + String hostName; + HashMap> vmDiskStatsMap; + + public GetVmDiskStatsAnswer(GetVmDiskStatsCommand cmd, String details, String hostName, HashMap> vmDiskStatsMap) { + super(cmd, true, details); + this.hostName = hostName; + this.vmDiskStatsMap = vmDiskStatsMap; + } + + public String getHostName() { + return hostName; + } + + public HashMap> getVmDiskStatsMap() { + return vmDiskStatsMap; + } + + protected GetVmDiskStatsAnswer() { + //no-args constructor for json serialization-deserialization + } +} diff --git a/core/src/com/cloud/agent/api/GetVmDiskStatsCommand.java b/core/src/com/cloud/agent/api/GetVmDiskStatsCommand.java new file mode 100644 index 00000000000..2b690020cb2 --- /dev/null +++ b/core/src/com/cloud/agent/api/GetVmDiskStatsCommand.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +import java.util.List; + +import com.cloud.agent.api.LogLevel.Log4jLevel; + +@LogLevel(Log4jLevel.Trace) +public class GetVmDiskStatsCommand extends Command { + List vmNames; + String hostGuid; + String hostName; + + protected GetVmDiskStatsCommand() { + } + + public GetVmDiskStatsCommand(List vmNames, String hostGuid, String hostName) { + this.vmNames = vmNames; + this.hostGuid = hostGuid; + this.hostName = hostName; + } + + public List getVmNames() { + return vmNames; + } + + public String getHostGuid(){ + return this.hostGuid; + } + + public String getHostName(){ + return this.hostName; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/com/cloud/agent/api/MigrateWithStorageCommand.java b/core/src/com/cloud/agent/api/MigrateWithStorageCommand.java index 058aa15338e..a108a2a7bed 100644 --- a/core/src/com/cloud/agent/api/MigrateWithStorageCommand.java +++ b/core/src/com/cloud/agent/api/MigrateWithStorageCommand.java @@ -24,10 +24,18 @@ import com.cloud.agent.api.to.StorageFilerTO; public class MigrateWithStorageCommand extends Command { VirtualMachineTO vm; Map volumeToFiler; + String tgtHost; public MigrateWithStorageCommand(VirtualMachineTO vm, Map volumeToFiler) { this.vm = vm; this.volumeToFiler = volumeToFiler; + this.tgtHost = null; + } + + public MigrateWithStorageCommand(VirtualMachineTO vm, Map volumeToFiler, String tgtHost) { + this.vm = vm; + this.volumeToFiler = volumeToFiler; + this.tgtHost = tgtHost; } public VirtualMachineTO getVirtualMachine() { @@ -38,6 +46,10 @@ public class MigrateWithStorageCommand extends Command { return volumeToFiler; } + public String getTargetHost() { + return tgtHost; + } + @Override public boolean executeInSequence() { return true; diff --git a/core/src/com/cloud/agent/api/UnPlugNicCommand.java b/core/src/com/cloud/agent/api/UnPlugNicCommand.java index b6cab8872f5..b964292f1a2 100644 --- a/core/src/com/cloud/agent/api/UnPlugNicCommand.java +++ b/core/src/com/cloud/agent/api/UnPlugNicCommand.java @@ -39,7 +39,7 @@ public class UnPlugNicCommand extends Command{ this.instanceName = instanceName; } - public String getInstanceName() { + public String getVmName() { return instanceName; } } diff --git a/core/src/com/cloud/agent/api/VmDiskStatsEntry.java b/core/src/com/cloud/agent/api/VmDiskStatsEntry.java new file mode 100644 index 00000000000..9bec031c50d --- /dev/null +++ b/core/src/com/cloud/agent/api/VmDiskStatsEntry.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +import com.cloud.vm.VmDiskStats; + +public class VmDiskStatsEntry implements VmDiskStats { + + String vmName; + String path; + Long ioRead = 0L; + Long ioWrite = 0L; + Long bytesWrite = 0L; + Long bytesRead = 0L; + + public VmDiskStatsEntry() { + } + + public VmDiskStatsEntry(String vmName, String path, Long ioWrite, Long ioRead, Long bytesWrite, Long bytesRead) { + this.ioRead = ioRead; + this.ioWrite = ioWrite; + this.bytesRead = bytesRead; + this.bytesWrite = bytesWrite; + this.vmName = vmName; + this.path = path; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getVmName() { + return vmName; + } + + public void setPath(String path) { + this.path = path; + } + + public String getPath() { + return path; + } + + public void setBytesRead(Long bytesRead) { + this.bytesRead = bytesRead; + } + + public Long getBytesRead() { + return bytesRead; + } + + public void setBytesWrite(Long bytesWrite) { + this.bytesWrite = bytesWrite; + } + + public Long getBytesWrite() { + return bytesWrite; + } + + public void setIORead(Long ioRead) { + this.ioRead = ioRead; + } + + public Long getIORead() { + return ioRead; + } + + public void setIOWrite(Long ioWrite) { + this.ioWrite = ioWrite; + } + + public Long getIOWrite() { + return ioWrite; + } + +} diff --git a/core/src/com/cloud/agent/api/VmStatsEntry.java b/core/src/com/cloud/agent/api/VmStatsEntry.java index 8828e9114f4..9c6df1a09f8 100755 --- a/core/src/com/cloud/agent/api/VmStatsEntry.java +++ b/core/src/com/cloud/agent/api/VmStatsEntry.java @@ -23,6 +23,10 @@ public class VmStatsEntry implements VmStats { double cpuUtilization; double networkReadKBs; double networkWriteKBs; + double diskReadIOs; + double diskWriteIOs; + double diskReadKBs; + double diskWriteKBs; int numCPUs; String entityType; @@ -37,6 +41,18 @@ public class VmStatsEntry implements VmStats { this.numCPUs = numCPUs; this.entityType = entityType; } + + public VmStatsEntry(double cpuUtilization, double networkReadKBs, double networkWriteKBs, + double diskReadKBs, double diskWriteKBs, int numCPUs, String entityType) + { + this.cpuUtilization = cpuUtilization; + this.networkReadKBs = networkReadKBs; + this.networkWriteKBs = networkWriteKBs; + this.diskReadKBs = diskReadKBs; + this.diskWriteKBs = diskWriteKBs; + this.numCPUs = numCPUs; + this.entityType = entityType; + } public double getCPUUtilization() { return cpuUtilization; @@ -62,6 +78,38 @@ public class VmStatsEntry implements VmStats { this.networkWriteKBs = networkWriteKBs; } + public double getDiskReadIOs() { + return diskReadIOs; + } + + public void setDiskReadIOs(double diskReadIOs) { + this.diskReadIOs = diskReadIOs; + } + + public double getDiskWriteIOs() { + return diskWriteIOs; + } + + public void setDiskWriteIOs(double diskWriteIOs) { + this.diskWriteIOs = diskWriteIOs; + } + + public double getDiskReadKBs() { + return diskReadKBs; + } + + public void setDiskReadKBs(double diskReadKBs) { + this.diskReadKBs = diskReadKBs; + } + + public double getDiskWriteKBs() { + return diskWriteKBs; + } + + public void setDiskWriteKBs(double diskWriteKBs) { + this.diskWriteKBs = diskWriteKBs; + } + public int getNumCPUs() { return numCPUs; } diff --git a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 8b996d1bfed..9e40eefc11a 100755 --- a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ 
b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -642,7 +642,7 @@ public class VirtualRoutingResource implements Manager { for (IpAliasTO ipAliasTO : revokedIpAliasTOs) { args = args + ipAliasTO.getAlias_count()+":"+ipAliasTO.getRouterip()+":"+ipAliasTO.getNetmask()+"-"; } - args = args + " " ; + args = args + "- " ; List activeIpAliasTOs = cmd.getCreateIpAliasTos(); for (IpAliasTO ipAliasTO : activeIpAliasTOs) { args = args + ipAliasTO.getAlias_count()+":"+ipAliasTO.getRouterip()+":"+ipAliasTO.getNetmask()+"-"; diff --git a/core/src/com/cloud/network/DnsMasqConfigurator.java b/core/src/com/cloud/network/DnsMasqConfigurator.java index ee8e5fc2e13..dd349263c0c 100644 --- a/core/src/com/cloud/network/DnsMasqConfigurator.java +++ b/core/src/com/cloud/network/DnsMasqConfigurator.java @@ -71,7 +71,7 @@ import java.util.List; "conf-dir=/etc/dnsmasq.d\n", "dhcp-option=tag:net1,3,ipaddress\n", "dhcp-option=tag:net1,1,netmask\n", - "dhcp-option=6,10.147.28.149,8.8.8.8\n", + "dhcp-option=6,router_ip,external_dns\n", "dhcp-optsfile=/etc/dhcpopts.txt\n", @@ -85,11 +85,21 @@ import java.util.List; String netmask=""; String domain= dnsMasqconfigcmd.getDomain(); String dnsServers=""; + String dns_external=""; + if (dnsMasqconfigcmd.getDns1()!= null) { + dns_external = dnsMasqconfigcmd.getDns1()+","; + } + if (dnsMasqconfigcmd.getDns2() != null) { + dns_external = dns_external+dnsMasqconfigcmd.getDns2()+","; + } + dns_external = dns_external + "*"; + dns_external = dns_external.replace(",*",""); int i=0; for (; i< dnsmasqTOs.size(); i++) { - range=range + "dhcp-range=set:range"+i+","+dnsmasqTOs.get(i).getRouterIp()+",static\n"; + range=range + "dhcp-range=set:range"+i+","+dnsmasqTOs.get(i).getStartIpOfSubnet()+",static\n"; gateway=gateway +"dhcp-option=tag:range"+i+",3,"+dnsmasqTOs.get(i).getGateway()+"\n"; netmask=netmask +"dhcp-option=tag:range"+i+",1,"+dnsmasqTOs.get(i).getNetmask()+"\n"; + dnsServers=dnsServers+"dhcp-option=tag:range"+i+",6,"+dnsmasqTOs.get(i).getRouterIp()+","+dns_external+"\n"; } dnsMasqconf.set(12, "domain="+domain+"\n"); dnsMasqconf.set(14, "domain="+domain+"\n"); @@ -97,21 +107,7 @@ import java.util.List; dnsMasqconf.set(18, range); dnsMasqconf.set(22, gateway); dnsMasqconf.set(23, netmask); - if (dnsMasqconfigcmd.getInternal_dns1() != null) { - dnsServers = dnsServers+dnsMasqconfigcmd.getInternal_dns1()+","; - } - if (dnsMasqconfigcmd.getInternal_dns2() != null) { - dnsServers = dnsServers+dnsMasqconfigcmd.getInternal_dns2()+","; - } - if (dnsMasqconfigcmd.getDns1() != null) { - dnsServers = dnsServers+dnsMasqconfigcmd.getDns1()+","; - } - if (dnsMasqconfigcmd.getDns2() != null) { - dnsServers = dnsServers+dnsMasqconfigcmd.getDns2()+","; - } - dnsServers = dnsServers +"*"; - dnsServers = dnsServers.replace(",*", ""); - dnsMasqconf.set(24,"dhcp-option=6,"+dnsServers); + dnsMasqconf.set(24,dnsServers); return dnsMasqconf.toArray( new String[dnsMasqconf.size()]); } diff --git a/core/src/com/cloud/storage/template/HttpTemplateDownloader.java b/core/src/com/cloud/storage/template/HttpTemplateDownloader.java index 4c0e14530bd..d87dd68bb81 100644 --- a/core/src/com/cloud/storage/template/HttpTemplateDownloader.java +++ b/core/src/com/cloud/storage/template/HttpTemplateDownloader.java @@ -212,6 +212,13 @@ public class HttpTemplateDownloader implements TemplateDownloader { } } else { remoteSize2 = Long.parseLong(contentLengthHeader.getValue()); + if ( remoteSize2 == 0 ) { + status = TemplateDownloader.Status.DOWNLOAD_FINISHED; + String downloaded = 
"(download complete remote=" + remoteSize + "bytes)"; + errorString = "Downloaded " + totalBytes + " bytes " + downloaded; + downloadTime = 0; + return 0; + } } if (remoteSize == 0) { diff --git a/debian/cloudstack-management.install b/debian/cloudstack-management.install index 5a682d45862..a1325cdb2b5 100644 --- a/debian/cloudstack-management.install +++ b/debian/cloudstack-management.install @@ -15,7 +15,28 @@ # specific language governing permissions and limitations # under the License. -/etc/cloudstack/management/* +/etc/cloudstack/management/catalina.policy +/etc/cloudstack/management/catalina.properties +/etc/cloudstack/management/cloudmanagementserver.keystore +/etc/cloudstack/management/logging.properties +/etc/cloudstack/management/commands.properties +/etc/cloudstack/management/ehcache.xml +/etc/cloudstack/management/componentContext.xml +/etc/cloudstack/management/applicationContext.xml +/etc/cloudstack/management/server-ssl.xml +/etc/cloudstack/management/server-nonssl.xml +/etc/cloudstack/management/server.xml +/etc/cloudstack/management/classpath.conf +/etc/cloudstack/management/db.properties +/etc/cloudstack/management/tomcat6-ssl.conf +/etc/cloudstack/management/tomcat6-nonssl.conf +/etc/cloudstack/management/tomcat6.conf +/etc/cloudstack/management/web.xml +/etc/cloudstack/management/environment.properties +/etc/cloudstack/management/nonossComponentContext.xml +/etc/cloudstack/management/log4j-cloud.xml +/etc/cloudstack/management/tomcat-users.xml +/etc/cloudstack/management/context.xml /etc/init.d/cloudstack-management /etc/security/limits.d/cloudstack-limits.conf /etc/sudoers.d/cloudstack diff --git a/debian/control b/debian/control index eec9ca25c7b..46dd50536b0 100644 --- a/debian/control +++ b/debian/control @@ -9,7 +9,7 @@ Homepage: http://www.cloudstack.org/ Package: cloudstack-common Architecture: all -Depends: bash, genisoimage +Depends: bash, genisoimage, nfs-common Conflicts: cloud-scripts, cloud-utils, cloud-system-iso, cloud-console-proxy, cloud-daemonize, cloud-deps, cloud-python, cloud-setup Description: A common package which contains files which are shared by several CloudStack packages diff --git a/debian/rules b/debian/rules index 48485bb9d9b..e5ff5484fe5 100755 --- a/debian/rules +++ b/debian/rules @@ -69,8 +69,8 @@ install: install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent - install -D agent/bindir/cloud-setup-agent.in $(DESTDIR)/usr/bin/cloudstack-setup-agent - install -D agent/bindir/cloud-ssh.in $(DESTDIR)/usr/bin/cloudstack-ssh + install -D agent/target/transformed/cloud-setup-agent $(DESTDIR)/usr/bin/cloudstack-setup-agent + install -D agent/target/transformed/cloud-ssh $(DESTDIR)/usr/bin/cloudstack-ssh install -D agent/target/transformed/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent # cloudstack-management @@ -102,7 +102,7 @@ install: ln -s tomcat6-nonssl.conf $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/tomcat6.conf ln -s server-nonssl.xml $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/server.xml install -D packaging/debian/init/cloud-management $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-management - install -D client/bindir/cloud-update-xenserver-licenses.in $(DESTDIR)/usr/bin/cloudstack-update-xenserver-licenses + install -D 
client/target/utilities/bin/cloud-update-xenserver-licenses $(DESTDIR)/usr/bin/cloudstack-update-xenserver-licenses ln -s /usr/share/tomcat6/bin $(DESTDIR)/usr/share/$(PACKAGE)-management/bin # Remove configuration in /ur/share/cloudstack-management/webapps/client/WEB-INF # This should all be in /etc/cloudstack/management diff --git a/developer/pom.xml b/developer/pom.xml index c02ef1691cc..a680b8aa98e 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -13,7 +13,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 cloud-developer - Apache CloudStack Developer Tools + Apache CloudStack Developer Mode pom org.apache.cloudstack diff --git a/docs/en-US/CloudStack_GSoC_Guide.ent b/docs/en-US/CloudStack_GSoC_Guide.ent new file mode 100644 index 00000000000..17415873334 --- /dev/null +++ b/docs/en-US/CloudStack_GSoC_Guide.ent @@ -0,0 +1,22 @@ + + + + + + diff --git a/docs/en-US/CloudStack_GSoC_Guide.xml b/docs/en-US/CloudStack_GSoC_Guide.xml new file mode 100644 index 00000000000..243a0ca361b --- /dev/null +++ b/docs/en-US/CloudStack_GSoC_Guide.xml @@ -0,0 +1,54 @@ + + +%BOOK_ENTITIES; + +%xinclude; +]> + + + + + + &PRODUCT; Guide for the 2013 Google Summer of Code + Apache CloudStack + 4.3.0 + 1 + + + + Guide for 2013 Google Summer of Code Projects. + + + + + + + + + + + + + + + + + + diff --git a/docs/en-US/Release_Notes.xml b/docs/en-US/Release_Notes.xml index 25e1175b148..2ae87320e40 100644 --- a/docs/en-US/Release_Notes.xml +++ b/docs/en-US/Release_Notes.xml @@ -36,8 +36,8 @@ under the License. will find instruction in the &PRODUCT; API Developer's Guide - If you find any errors or problems in this guide, please see . We hope you enjoy - working with &PRODUCT;! + If you find any errors or problems in this guide, please see . + We hope you enjoy working with &PRODUCT;! Version 4.1.0 @@ -4396,6 +4396,13 @@ under the License. + + CLOUDSTACK-2709 + + Egress rules are are not supported on shared networks. + + CLOUDSTACK-1747 @@ -4648,9 +4655,12 @@ under the License. components.xml to be on the safe side. - After upgrading to 4.1, API clients are expected to send plain text passwords for login and user creation, instead of MD5 hash. Incase, api client changes are not acceptable, following changes are to be made for backward compatibility: - Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default authenticator (1st entry in the userAuthenticators adapter list is default) - + After upgrading to 4.1, API clients are expected to send plain text passwords for + login and user creation, instead of MD5 hash. Incase, api client changes are not + acceptable, following changes are to be made for backward compatibility: + Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default + authenticator (1st entry in the userAuthenticators adapter list is default) + <!-- Security adapters --> <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList"> <property name="Adapters"> @@ -4662,7 +4672,8 @@ under the License. </property> </bean> - PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to 4.1. + PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to + 4.1. If you are using Ubuntu, follow this procedure to upgrade your packages. If not, @@ -5128,9 +5139,12 @@ service cloudstack-agent start - After upgrading to 4.1, API clients are expected to send plain text passwords for login and user creation, instead of MD5 hash. 
Incase, api client changes are not acceptable, following changes are to be made for backward compatibility: - Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default authenticator (1st entry in the userAuthenticators adapter list is default) - + After upgrading to 4.1, API clients are expected to send plain text passwords for + login and user creation, instead of MD5 hash. Incase, api client changes are not + acceptable, following changes are to be made for backward compatibility: + Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default + authenticator (1st entry in the userAuthenticators adapter list is default) + <!-- Security adapters --> <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList"> <property name="Adapters"> @@ -5142,7 +5156,8 @@ service cloudstack-agent start </property> </bean> - PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to 4.1. + PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to + 4.1. Start the first Management Server. Do not start any other Management Server nodes @@ -5723,9 +5738,12 @@ service cloudstack-agent start - After upgrading to 4.1, API clients are expected to send plain text passwords for login and user creation, instead of MD5 hash. Incase, api client changes are not acceptable, following changes are to be made for backward compatibility: - Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default authenticator (1st entry in the userAuthenticators adapter list is default) - + After upgrading to 4.1, API clients are expected to send plain text passwords for + login and user creation, instead of MD5 hash. Incase, api client changes are not + acceptable, following changes are to be made for backward compatibility: + Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default + authenticator (1st entry in the userAuthenticators adapter list is default) + <!-- Security adapters --> <bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList"> <property name="Adapters"> @@ -5737,7 +5755,8 @@ service cloudstack-agent start </property> </bean> - PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to 4.1. + PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to + 4.1. If you have made changes to your existing copy of the diff --git a/docs/en-US/acquire-new-ip-for-vpc.xml b/docs/en-US/acquire-new-ip-for-vpc.xml index 785e80bb874..c0cb876d483 100644 --- a/docs/en-US/acquire-new-ip-for-vpc.xml +++ b/docs/en-US/acquire-new-ip-for-vpc.xml @@ -39,28 +39,43 @@ Click the Configure button of the VPC to which you want to deploy the VMs. The VPC page is displayed where all the tiers you created are listed in a diagram. - - - Click the Settings icon. The following options are displayed. - IP Addresses + Internal LB - Gateways + Public LB IP - Site-to-Site VPN + Static NAT - Network ACLs + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists Select IP Addresses. - The IP Addresses page is displayed. + The Public IP Addresses page is displayed. Click Acquire New IP, and click Yes in the confirmation dialog. @@ -70,4 +85,4 @@ rules. 
- \ No newline at end of file + diff --git a/docs/en-US/add-gateway-vpc.xml b/docs/en-US/add-gateway-vpc.xml index 616794a51d1..a081faf7768 100644 --- a/docs/en-US/add-gateway-vpc.xml +++ b/docs/en-US/add-gateway-vpc.xml @@ -21,8 +21,8 @@
Adding a Private Gateway to a VPC A private gateway can be added by the root admin only. The VPC private network has 1:1 - relationship with the NIC of the physical network. No gateways with duplicated VLAN and IP are - allowed in the same data center. + relationship with the NIC of the physical network. You can configure multiple private gateways + to a single VPC. No gateways with duplicated VLAN and IP are allowed in the same data center. Log in to the &PRODUCT; UI as an administrator or end user. @@ -45,16 +45,34 @@ The following options are displayed. - IP Addresses + Internal LB + + Public LB IP + + + Static NAT + + + Virtual Machines + + + CIDR + + + The following router information is displayed: + Private Gateways - Site-to-Site VPN + Public IP Addresses - Network ACLs + Site-to-Site VPNs + + + Network ACL Lists @@ -96,9 +114,38 @@ VLAN: The VLAN associated with the VPC gateway. + + Source NAT: Select this option to enable the source + NAT service on the VPC private gateway. + See . + + + ACL: Controls both ingress and egress traffic on a + VPC private gateway. By default, all the traffic is blocked. + See . + The new gateway appears in the list. You can repeat these steps to add more gateway for this VPC. +
+ Source NAT on Private Gateway You might want to deploy multiple VPCs with the same super CIDR and guest tier CIDR. Therefore, multiple guest VMs from different VPCs can have the same IPs to reach an enterprise data center through the private gateway. In such cases, a NAT service needs to be configured on the private gateway. If Source NAT is enabled, the guest VMs in the VPC reach the enterprise network via the private gateway IP address by using the NAT service. The Source NAT service on a private gateway can be enabled while adding the private gateway. On deletion of a private gateway, the source NAT rules specific to that private gateway are deleted.
+
+ ACL on Private Gateway + The traffic on the VPC private gateway is controlled by creating both ingress and egress network ACL rules. The ACLs contain both allow and deny rules. By default, all ingress traffic to the private gateway interface and all egress traffic out of the private gateway interface are blocked. You can change this default behaviour while creating a private gateway. +
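If you prefer to script the gateway creation, the following is a minimal sketch of what the equivalent API call might look like. It assumes the management server's unauthenticated integration port (global setting integration.api.port, commonly 8096) is enabled; in production you would instead sign requests with your API keys. All UUIDs and addresses are placeholders, and the sourcenatsupported and aclid parameter names are assumptions derived from the options described above.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    print(requests.get(API, params={
        "command": "createPrivateGateway",
        "response": "json",
        "vpcid": "VPC_UUID",              # placeholder VPC id
        "gateway": "10.147.30.1",         # gateway of the private network
        "netmask": "255.255.255.0",
        "ipaddress": "10.147.30.200",     # IP used by the VPC router on the private gateway
        "vlan": "1001",
        "sourcenatsupported": "true",     # assumed parameter: enable source NAT on the gateway
        "aclid": "ACL_LIST_UUID",         # assumed parameter: attach a network ACL list
    }).json())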
diff --git a/docs/en-US/add-loadbalancer-rule-vpc.xml b/docs/en-US/add-loadbalancer-rule-vpc.xml index bba3e5ad134..b7b9e3e7613 100644 --- a/docs/en-US/add-loadbalancer-rule-vpc.xml +++ b/docs/en-US/add-loadbalancer-rule-vpc.xml @@ -21,103 +21,310 @@ -->
Adding Load Balancing Rules on a VPC - A &PRODUCT; user or administrator may create load balancing rules that balance traffic - received at a public IP to one or more VMs that belong to a network tier that provides load - balancing service in a VPC. A user creates a rule, specifies an algorithm, and assigns the rule - to a set of VMs within a VPC. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to configure load balancing - rules. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - - - Click the Settings icon. - The following options are displayed. - + In a VPC, you can configure two types of load balancing: external LB and internal LB. External LB is an LB rule created to redirect traffic received at a public IP of the VPC virtual router; the traffic is load balanced within a tier based on your configuration. Citrix NetScaler and the VPC virtual router are supported for external LB. When you use the internal LB service, traffic received at a tier is load balanced across different tiers within the VPC. For example, traffic that reaches the Web tier is redirected to the Application tier. External load balancing devices are not supported for internal LB. The service is provided by an internal LB VM configured on the target tier. +
+ Load Balancing Within a Tier (External LB) + A &PRODUCT; user or administrator may create load balancing rules that balance traffic + received at a public IP to one or more VMs that belong to a network tier that provides load + balancing service in a VPC. A user creates a rule, specifies an algorithm, and assigns the + rule to a set of VMs within a VPC. + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + In the Select view, select VPC. + All the VPCs that you have created for the account is listed in the page. + + + Click the Configure button of the VPC, for which you want to configure load balancing + rules. + The VPC page is displayed where all the tiers you created listed in a diagram. + For each tier, the following options are displayed: + + + Internal LB + + + Public LB IP + + + Static NAT + + + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists + + + + + In the Router node, select Public IP Addresses. + The IP Addresses page is displayed. + + + Click the IP address for which you want to create the rule, then click the + Configuration tab. + + + In the Load Balancing node of the diagram, click View All. + + + Select the tier to which you want to apply the rule. + + + Specify the following: + + + Name: A name for the load balancer rule. + + + Public Port: The port that receives the incoming + traffic to be balanced. + + + Private Port: The port that the VMs will use to + receive the traffic. + + + Algorithm. Choose the load balancing algorithm + you want &PRODUCT; to use. &PRODUCT; supports the following well-known + algorithms: + + + Round-robin + + + Least connections + + + Source + + + + + Stickiness. (Optional) Click Configure and choose + the algorithm for the stickiness policy. See Sticky Session Policies for Load Balancer + Rules. + + + Add VMs: Click Add VMs, then select two or more + VMs that will divide the load of incoming traffic, and click Apply. + + + + + The new load balancing rule appears in the list. You can repeat these steps to add more + load balancing rules for this IP address. +
+
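The same rule can also be created through the API. The sketch below is illustrative only: it reuses the integration-port assumption from the earlier example, uses placeholder UUIDs, and relies on the standard createLoadBalancerRule and assignToLoadBalancerRule calls that sit behind the UI steps above.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    # Create the rule on a public IP acquired for the VPC (placeholder UUIDs).
    created = requests.get(API, params={
        "command": "createLoadBalancerRule",
        "response": "json",
        "publicipid": "PUBLIC_IP_UUID",
        "networkid": "WEB_TIER_NETWORK_UUID",
        "name": "web-lb",
        "algorithm": "roundrobin",        # or leastconn, source
        "publicport": "80",
        "privateport": "80",
    })
    print(created.json())

    # Once the asynchronous job completes, assign two or more tier VMs to the rule.
    requests.get(API, params={
        "command": "assignToLoadBalancerRule",
        "response": "json",
        "id": "LB_RULE_UUID",             # id of the rule created above
        "virtualmachineids": "VM1_UUID,VM2_UUID",
    })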
+ Load Balancing Across Tiers + &PRODUCT; supports sharing workload across different tiers within your VPC. Assume that + multiple tiers are set up in your environment, such as Web tier and Application tier. Traffic + to each tier is balanced on the VPC virtual router on the public side, as explained in . If you want the traffic coming from the Web tier to + the Application tier to be balanced, use the internal load balancing feature offered by + &PRODUCT;. +
+ How Does Internal LB Work in VPC? + In this figure, a public LB rule is created for the public IP 72.52.125.10 with public + port 80 and private port 81. The LB rule, created on the VPC virtual router, is applied on + the traffic coming from the Internet to the VMs on the Web tier. On the Application tier two + internal load balancing rules are created. An internal LB rule for the guest IP 10.10.10.4 + with load balancer port 23 and instance port 25 is configured on the VM, InternalLBVM1. + Another internal LB rule for the guest IP 10.10.10.4 with load balancer port 45 and instance + port 46 is configured on the VM, InternalLBVM1. Another internal LB rule for the guest IP + 10.10.10.6, with load balancer port 23 and instance port 25 is configured on the VM, + InternalLBVM2. + + + + + + vpc-lb.png: Configuring internal LB for VPC + + +
+
+ Enabling Internal LB on a VPC Tier + - IP Addresses + Create a network offering, as given in . - Gateways + Create an internal load balancing rule and apply, as given in . + + +
+
+ Creating a Network Offering for Internal LB + To have internal LB support on VPC, create a network offering as follows: + + + Log in to the &PRODUCT; UI as a user or admin. - Site-to-Site VPN + From the Select Offering drop-down, choose Network Offering. - Network ACLs - - - - - Select IP Addresses. - The IP Addresses page is displayed. - - - Click the IP address for which you want to create the rule, then click the Configuration - tab. - - - In the Load Balancing node of the diagram, click View All. - - - Select the tier to which you want to apply the rule. - - In a VPC, the load balancing service is supported only on a single tier. - - - - Specify the following: - - - Name: A name for the load balancer rule. + Click Add Network Offering. - Public Port: The port that receives the incoming - traffic to be balanced. - - - Private Port: The port that the VMs will use to - receive the traffic. - - - Algorithm. Choose the load balancing algorithm you - want &PRODUCT; to use. &PRODUCT; supports the following well-known algorithms: + In the dialog, make the following choices: - Round-robin + Name: Any desired name for the network + offering. - Least connections + Description: A short description of the + offering that can be displayed to users. - Source + Network Rate: Allowed data transfer rate in MB + per second. + + + Traffic Type: The type of network traffic that + will be carried on the network. + + + Guest Type: Choose whether the guest network is + isolated or shared. + + + Persistent: Indicate whether the guest network + is persistent or not. The network that you can provision without having to deploy a + VM on it is termed persistent network. + + + VPC: This option indicate whether the guest + network is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a + private, isolated part of &PRODUCT;. A VPC can have its own virtual network topology + that resembles a traditional physical network. For more information on VPCs, see + . + + + Specify VLAN: (Isolated guest networks only) + Indicate whether a VLAN should be specified when this offering is used. + + + Supported Services: Select Load Balancer. + Select InternalLbVM from the provider list. + + + Load Balancer Type: Select Internal LB from the + drop-down. + + + System Offering: Choose the system service + offering that you want virtual routers to use in this network. + + + Conserve mode: Indicate whether to use conserve + mode. In this mode, network resources are allocated only when the first virtual + machine starts in the network. - Stickiness. (Optional) Click Configure and choose - the algorithm for the stickiness policy. See Sticky Session Policies for Load Balancer - Rules. + Click OK and the network offering is created. + + +
+
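As a rough scripted equivalent of the dialog above, a network offering for internal LB might be created as sketched below. Treat the parameter set as an assumption reconstructed from the UI fields: createNetworkOffering is the underlying call, but the exact service and capability encoding for the Internal LB load balancer type can differ between releases, so verify against the API reference before using it.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    params = {
        "command": "createNetworkOffering",
        "response": "json",
        "name": "VPC-Tier-InternalLB",
        "displaytext": "VPC tier offering with internal LB (InternalLbVm)",
        "guestiptype": "Isolated",
        "traffictype": "Guest",
        "forvpc": "true",                 # assumed flag marking the offering as VPC-enabled
        "conservemode": "false",
        "supportedservices": "Dhcp,Dns,SourceNat,Lb",
        # Map parameters below use CloudStack's indexed key encoding (assumed here):
        "serviceproviderlist[0].service": "Dhcp",
        "serviceproviderlist[0].provider": "VpcVirtualRouter",
        "serviceproviderlist[1].service": "Dns",
        "serviceproviderlist[1].provider": "VpcVirtualRouter",
        "serviceproviderlist[2].service": "SourceNat",
        "serviceproviderlist[2].provider": "VpcVirtualRouter",
        "serviceproviderlist[3].service": "Lb",
        "serviceproviderlist[3].provider": "InternalLbVm",
    }
    print(requests.get(API, params=params).json())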
+ Creating an Internal LB Rule + + + Log in to the &PRODUCT; UI as an administrator or end user. - Add VMs: Click Add VMs, then select two or more VMs - that will divide the load of incoming traffic, and click Apply. + In the left navigation, choose Network. - - - - The new load balancing rule appears in the list. You can repeat these steps to add more load - balancing rules for this IP address. -
\ No newline at end of file + + In the Select view, select VPC. + All the VPCs that you have created for the account is listed in the page. + + + Locate the VPC for which you want to configure internal LB, then click + Configure. + The VPC page is displayed where all the tiers you created listed in a + diagram. + + + Locate the Tier for which you want to configure an internal LB rule, click Internal + LB. + In the Internal LB page, click Add Internal LB. + + + In the dialog, specify the following: + + + Name: A name for the load balancer rule. + + + Description: A short description of the rule + that can be displayed to users. + + + Source IP Address: The source IP from which + traffic originates. Typically, this is the IP of an instance on another tier within + your VPC. + + + Source Port: The port associated with the + source IP. Traffic on this port is load balanced. + + + Instance Port: The port of the internal LB + VM. + + + Algorithm. Choose the load balancing algorithm + you want &PRODUCT; to use. &PRODUCT; supports the following well-known + algorithms: + + + Round-robin + + + Least connections + + + Source + + + + + + +
+
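The dialog above corresponds to the application load balancer API introduced alongside internal LB. The sketch below assumes a createLoadBalancer call with scheme=Internal (distinct from the public createLoadBalancerRule shown earlier) and placeholder identifiers; double-check the parameter names against the API reference for your release.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    print(requests.get(API, params={
        "command": "createLoadBalancer",
        "response": "json",
        "name": "app-tier-internal-lb",
        "description": "Balance web-tier traffic across app-tier VMs",
        "scheme": "Internal",                        # assumed value selecting internal LB
        "networkid": "APP_TIER_NETWORK_UUID",        # tier that hosts the internal LB VM
        "sourceipaddressnetworkid": "APP_TIER_NETWORK_UUID",
        "sourceipaddress": "10.10.10.4",             # optional guest IP from the tier
        "sourceport": "23",
        "instanceport": "25",
        "algorithm": "roundrobin",
    }).json())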
+ diff --git a/docs/en-US/add-portforward-rule-vpc.xml b/docs/en-US/add-portforward-rule-vpc.xml index c3dbc39bb19..5b1bb49a0a3 100644 --- a/docs/en-US/add-portforward-rule-vpc.xml +++ b/docs/en-US/add-portforward-rule-vpc.xml @@ -35,28 +35,42 @@ Click the Configure button of the VPC to which you want to deploy the VMs. The VPC page is displayed where all the tiers you created are listed in a diagram. - - - Click the Settings icon. - The following options are displayed. + For each tier, the following options are displayed: - IP Addresses + Internal LB - Gateways + Public LB IP - Site-to-Site VPN + Static NAT - Network ACLs + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists - Choose an existing IP address or acquire a new IP address. Click the name of the IP - address in the list. + In the Router node, select Public IP Addresses. The IP Addresses page is displayed. @@ -95,7 +109,7 @@ Add VM: Click Add VM. Select the name of the instance to which this rule applies, and click Apply. - You can test the rule by opening an ssh session to the instance. + You can test the rule by opening an SSH session to the instance. diff --git a/docs/en-US/add-tier.xml b/docs/en-US/add-tier.xml index 6beaab2a151..e5334d39ca6 100644 --- a/docs/en-US/add-tier.xml +++ b/docs/en-US/add-tier.xml @@ -41,6 +41,9 @@ Click the Configure button of the VPC for which you want to set up tiers. + + + Click Create network. The Add new tier dialog is displayed, as follows: @@ -62,7 +65,7 @@ Network Offering: The following default network - offerings are listed: DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, + offerings are listed: Internal LB, DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, DefaultIsolatedNetworkOfferingForVpcNetworks In a VPC, only one tier can be created by using LB-enabled network offering. diff --git a/docs/en-US/add-vm-to-tier.xml b/docs/en-US/add-vm-to-tier.xml index e401eed2656..c7d769d9d11 100644 --- a/docs/en-US/add-vm-to-tier.xml +++ b/docs/en-US/add-vm-to-tier.xml @@ -33,13 +33,21 @@ Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed. + The VPC page is displayed where all the tiers you have created are listed. - Click the Add VM button of the tier for which you want to add a VM. + Click Virtual Machines tab of the tier to which you want to add a VM. + + + + + + add-vm-vpc.png: adding a VM to a vpc. + + The Add Instance page is displayed. Follow the on-screen instruction to add an instance. For information on adding an - instance, see Adding Instances section in the Installation Guide. + instance, see the Installation Guide. diff --git a/docs/en-US/configure-acl.xml b/docs/en-US/configure-acl.xml index 299196c5502..e7459e68dbf 100644 --- a/docs/en-US/configure-acl.xml +++ b/docs/en-US/configure-acl.xml @@ -37,31 +37,66 @@ All the VPCs that you have created for the account is listed in the page. - Click the Settings icon. - The following options are displayed. + Click the Configure button of the VPC, for which you want to configure load balancing + rules. 
+ For each tier, the following options are displayed: - IP Addresses + Internal LB - Gateways + Public LB IP - Site-to-Site VPN + Static NAT - Network ACLs + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists - Select Network ACLs. - The Network ACLs page is displayed. + Select Network ACL Lists. + The following default rules are displayed in the Network ACLs page: default_allow, + default_deny. - Click Add Network ACLs. + Click Add ACL Lists, and specify the following: + + + ACL List Name: A name for the ACL list. + + + Description: A short description of the ACL list + that can be displayed to users. + + + + + Select the ACL list. + + + Select the ACL List Rules tab. To add an ACL rule, fill in the following fields to specify what kind of network traffic - is allowed in this tier. + is allowed in the VPC. CIDR: The CIDR acts as the Source CIDR for the @@ -74,7 +109,8 @@ Protocol: The networking protocol that sources use to send traffic to the tier. The TCP and UDP protocols are typically used for data exchange and end-user communications. The ICMP protocol is typically used to send error - messages or network monitoring data. + messages or network monitoring data. All supports all the traffic. Other option is + Protocol Number. Start Port, End @@ -83,8 +119,10 @@ fields. - Select Tier: Select the tier for which you want to - add this ACL rule. + Protocol Number: The protocol number associated + with IPv4 or IPv6. For more information, see Protocol + Numbers. ICMP Type, ICMP @@ -92,48 +130,14 @@ sent. - Traffic Type: Select the traffic type you want to - apply. - - - Egress: To add an egress rule, select Egress - from the Traffic type drop-down box and click Add. This specifies what type of - traffic is allowed to be sent out of VM instances in this tier. If no egress rules - are specified, all traffic from the tier is allowed out at the VPC virtual router. - Once egress rules are specified, only the traffic specified in egress rules and the - responses to any traffic that has been allowed in through an ingress rule are - allowed out. No egress rule is required for the VMs in a tier to communicate with - each other. - - - Ingress: To add an ingress rule, select Ingress - from the Traffic type drop-down box and click Add. This specifies what network - traffic is allowed into the VM instances in this tier. If no ingress rules are - specified, then no traffic will be allowed in, except for responses to any traffic - that has been allowed out through an egress rule. - - - - By default, all incoming and outgoing traffic to the guest networks is blocked. To - open the ports, create a new network ACL. - + Action: What action to be taken. Click Add. The ACL rule is added. - To view the list of ACL rules you have added, click the desired tier from the Network - ACLs page, then select the Network ACL tab. - - - - - - network-acl.png: adding, editing, deleting an ACL rule. - - You can edit the tags assigned to the ACL rules and delete the ACL rules you have - created. Click the appropriate button in the Actions column. + created. Click the appropriate button in the Details tab. diff --git a/docs/en-US/egress-firewall-rule.xml b/docs/en-US/egress-firewall-rule.xml index ef0e25efd03..ab16517b458 100644 --- a/docs/en-US/egress-firewall-rule.xml +++ b/docs/en-US/egress-firewall-rule.xml @@ -23,13 +23,15 @@ The egress firewall rules are supported only on virtual routers. 
- The egress traffic originates from a private network to a public network, such as the Internet. By default, the egress traffic is blocked, so no outgoing traffic is allowed from a guest network to the Internet. However, you can control the egress traffic in an Advanced zone by creating egress firewall rules. When an egress firewall rule is applied, the traffic specific to the rule is allowed and the remaining traffic is blocked. When all the firewall rules are removed the default policy, Block, is applied. + + The egress firewall rules are not supported on shared networks. + Consider the following scenarios to apply egress firewall rules: diff --git a/docs/en-US/enable-disable-static-nat-vpc.xml b/docs/en-US/enable-disable-static-nat-vpc.xml index 17f0c10540f..467a304915d 100644 --- a/docs/en-US/enable-disable-static-nat-vpc.xml +++ b/docs/en-US/enable-disable-static-nat-vpc.xml @@ -42,27 +42,42 @@ Click the Configure button of the VPC to which you want to deploy the VMs. The VPC page is displayed where all the tiers you created are listed in a diagram. - - - Click the Settings icon. - The following options are displayed. + For each tier, the following options are displayed. - IP Addresses + Internal LB - Gateways + Public LB IP - Site-to-Site VPN + Static NAT - Network ACLs + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists - Select IP Addresses. + In the Router node, select Public IP Addresses. The IP Addresses page is displayed. @@ -74,7 +89,7 @@ - enable-disable.png: button to enable Statid NAT. + enable-disable.png: button to enable Static NAT. The button toggles between Enable and Disable, depending on whether static NAT is currently enabled for the IP address. diff --git a/docs/en-US/gsoc-dharmesh.xml b/docs/en-US/gsoc-dharmesh.xml new file mode 100644 index 00000000000..5e2bf734d7f --- /dev/null +++ b/docs/en-US/gsoc-dharmesh.xml @@ -0,0 +1,149 @@ + + +%BOOK_ENTITIES; +]> + + + + + Dharmesh's 2013 GSoC Proposal + This chapter describes Dharmrsh's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. +
Abstract + The project aims to bring a CloudFormation-like service to CloudStack. One of the prime use cases is cluster computing frameworks on CloudStack. A CloudFormation service will give users and administrators of CloudStack the ability to manage and control a set of resources easily. The CloudFormation service will allow booting and configuring a set of VMs that form a cluster. A simple example would be a LAMP stack; more complex clusters such as a Mesos or Hadoop cluster require somewhat more advanced configuration. There is already some work done by Chiradeep Vittal on this front [5]. In this project, I will implement a server-side CloudFormation service for CloudStack and demonstrate how to run a Mesos cluster using it. +
+ +
+ Mesos + Mesos is a resource management platform for clusters. It aims to increase the resource utilization of clusters by sharing cluster resources among multiple processing frameworks (such as MapReduce, MPI and graph processing) or multiple instances of the same framework. It provides efficient resource isolation through the use of containers, and uses ZooKeeper for state maintenance and fault tolerance. +
+ +
+ What can run on Mesos? + Spark: A cluster computing framework based on the Resilient Distributed Dataset (RDD) abstraction. RDDs are more general than MapReduce and can support iterative and interactive computation while retaining fault tolerance, scalability, data locality, etc. + Hadoop: A fault-tolerant and scalable distributed computing framework based on the MapReduce abstraction. + Bagel: A graph processing framework based on Pregel. + Other frameworks such as MPI and Hypertable can also run on Mesos.
+ +
+ How to deploy Mesos? + Mesos provides cluster installation scripts for cluster deployment. There are also scripts available to deploy a cluster on Amazon EC2. It would be interesting to see if these scripts can be leveraged in any way.
+ +
+ Deliverables + + + Deploy CloudStack and understand instance configuration/contextualization + + + Test and deploy Mesos on a set of CloudStack-based VMs, manually. Design/propose an automation framework + + + Test StackMate and engage Chiradeep (report bugs, make suggestions, make pull requests) + + + Create a CloudFormation template to provision a Mesos cluster + + + Compare with Apache Whirr or other cluster provisioning tools for the server-side implementation of the CloudFormation service. + +
+ +
+ Architecture and Tools + The high-level architecture is as follows: + + + + + It includes the following components: + + + + CloudFormation Query API server: + This acts as the point of contact and exposes the CloudFormation functionality as a Query API. It can be accessed directly or through existing tools from Amazon AWS for their CloudFormation service. It will be easy to start as a module which resides outside CloudStack at first, and I plan to use dropwizard [3] to start with. Later, the API server may be merged into the CloudStack core. I plan to use MySQL for storing details of clusters. + + + + Provisioning: + + The provisioning module is responsible for handling the booting process of the VMs through CloudStack. It uses the CloudStack APIs for launching VMs. I plan to use preconfigured templates/images with the required dependencies installed, which will make the cluster creation process much faster, even for large clusters. Error handling is a very important part of this module. For example, what do you do if a few VMs fail to boot in the cluster? + + + + Configuration: + + This module deals with configuring the VMs to form a cluster. This can be done via manual scripts/code or via configuration management tools like Chef/Ironfan/Knife. Potentially, workflow automation tools like Rundeck [4] can also be used. Apache Whirr and Provisionr are also options. I plan to explore these tools and select suitable ones. + + + +
+ +
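To make the provisioning module concrete, here is a minimal sketch of how it might walk a simplified template and boot the cluster VMs through the CloudStack API. It assumes the unauthenticated integration port used in the earlier sketches and placeholder offering/template/zone IDs, and it reduces the error handling the proposal calls out to a simple check.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    # A drastically simplified "template": role name -> number of VMs, using a
    # preconfigured image with the Mesos dependencies already installed.
    cluster_template = {"mesos-master": 1, "mesos-slave": 3}

    SERVICE_OFFERING = "SERVICE_OFFERING_UUID"   # placeholder IDs that a real module
    VM_TEMPLATE = "MESOS_IMAGE_TEMPLATE_UUID"    # would resolve from the template
    ZONE = "ZONE_UUID"

    failed = []
    for role, count in cluster_template.items():
        for i in range(count):
            resp = requests.get(API, params={
                "command": "deployVirtualMachine",
                "response": "json",
                "serviceofferingid": SERVICE_OFFERING,
                "templateid": VM_TEMPLATE,
                "zoneid": ZONE,
                "name": "%s-%d" % (role, i),
            })
            if resp.status_code != 200:
                failed.append("%s-%d" % (role, i))   # a real module must retry or roll back

    print("failed to submit:", failed)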
+ API + The Query API will be based on the Amazon AWS CloudFormation service. This will allow leveraging existing tools built for AWS. +
+ +
+ Timeline + 1-1.5 weeks: project design. Architecture, tool selection, API design + 1-1.5 weeks: getting familiar with the CloudStack and StackMate codebases and architecture details + 1-1.5 weeks: getting familiar with Mesos internals + 1-1.5 weeks: setting up the dev environment and creating Mesos templates + 2-3 weeks: building the provisioning and configuration modules + Midterm evaluation: provisioning module, configuration module + 2-3 weeks: developing the CloudFormation server-side implementation + 2-3 weeks: testing and integration +
+ +
+ Future Work + + + Auto Scaling: + Automatically adding or removing VMs from the Mesos cluster based on various conditions, such as utilization going above/below a static threshold. There can be more sophisticated strategies based on prediction or fine-grained metric collection with tight integration with the Mesos framework. + + + Cluster Simulator: + Integrating with the existing simulator to simulate Mesos clusters. This can be useful in various scenarios, for example while developing a new scheduling algorithm or testing autoscaling. + +
+
diff --git a/docs/en-US/gsoc-imduffy15.xml b/docs/en-US/gsoc-imduffy15.xml new file mode 100644 index 00000000000..652152fcc4b --- /dev/null +++ b/docs/en-US/gsoc-imduffy15.xml @@ -0,0 +1,395 @@ + + +%BOOK_ENTITIES; +]> + + + + + Ians's 2013 GSoC Proposal + This chapter describes Ians 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. +
+ LDAP user provisioning + + "Need to automate the way the LDAP users are provisioned into cloud stack. This will mean better + integration with a LDAP server, ability to import users and a way to define how the LDAP user + maps to the cloudstack users." + +
+
+ Abstract + The aim of this project is to provide a more effective mechanism to provision users from LDAP into CloudStack. Currently CloudStack enables LDAP authentication. With this authentication, users must first be set up in CloudStack; once the user is set up, they can authenticate using their LDAP username and password. This project will improve CloudStack LDAP integration by enabling users to be set up automatically using their LDAP credentials. +
+
+ Deliverables + + + A service that retrieves a list of LDAP users from a configured group + + + Extension of the CloudStack UI "Add User" screen to offer the user list from LDAP + + + A service for saving new users with their details from LDAP + + + BDD unit and acceptance automated testing + + + Documentation of the change details + +
+
+ Quantifiable Results + + + + + Given + An administrator wants to add new user to cloudstack and LDAP is setup in cloudstack + + + When + The administrator opens the "Add User" screen + + + Then + A table of users appears for the current list of users (not already created on cloudstack) from the LDAP group displaying their usernames, given name and email address. The timezone dropdown will still be available beside each user + + + + + + + + + + Given + An administrator wants to add new user to cloudstack and LDAP is not setup in cloudstack + + + When + The administrator opens the "Add User" screen + + + Then + The current add user screen and functionality is provided + + + + + + + + + + Given + An administrator wants to add new user to cloudstack and LDAP is setup in cloudstack + + + When + The administrator opens the "Add User" screen and mandatory information is missing + + + Then + These fields will be editable to enable you to populate the name or email address + + + + + + + + + + Given + An administrator wants to add new user to cloudstack, LDAP is setup and the user being created is in the LDAP query group + + + When + The administrator opens the "Add User" screen + + + Then + There is a list of LDAP users displayed but the user is present in the list + + + + + + + + + + Given + An administrator wants to add a new user to cloudstack, LDAP is setup and the user is not in the query group + + + When + The administrator opens the "Add User" screen + + + Then + There is a list of LDAP users displayed but the user is not in the list + + + + + + + + + + Given + An administrator wants to add a group of new users to cloudstack + + + When + The administrator opens the "Add User" screen, selects the users and hits save + + + Then + The list of new users are saved to the database + + + + + + + + + + Given + An administrator has created a new LDAP user on cloudstack + + + When + The user authenticates against cloudstack with the right credentials + + + Then + They are authorised in cloudstack + + + + + + + + + + Given + A user wants to edit an LDAP user + + + When + They open the "Edit User" screen + + + Then + The password fields are disabled and cannot be changed + + + + + +
+
+ The Design Document + + + LDAP user list service + + + + name: ldapUserList + + + responseObject: LDAPUserResponse {username,email,name} + + + parameter: listType:enum {NEW, EXISTING,ALL} (Default to ALL if no option provided) + + + Create a new API service call for retreiving the list of users from LDAP. This will call a new + ConfigurationService which will retrieve the list of users using the configured search base and the query + filter. The list may be filtered in the ConfigurationService based on listType parameter + + + + LDAP Available Service + + + + name: ldapAvailable + + + responseObject LDAPAvailableResponse {available:boolean} + + + Create a new API service call veriying LDAP is setup correctly verifying the following configuration elements are all set: + + + ldap.hostname + + + ldap.port + + + ldap.usessl + + + ldap.queryfilter + + + ldap.searchbase + + + ldap.dn + + + ldap.password + + + + + + LDAP Save Users Service + + + + name: ldapSaveUsers + + + responseObject: LDAPSaveUsersRssponse {list]]>} + + + parameter: list of users + + + Saves the list of objects instead. Following the functionality in CreateUserCmd it will + + + Create the user via the account service + + + Handle the response + + + It will be decided whether a transation should remain over whole save or only over individual users. A list of UserResponse will be returned. + + + + Extension of cloudstack UI "Add User" screen + + + + Extend account.js enable the adding of a list of users with editable fields where required. The new "add user" screen for LDAP setup will: + + + Make an ajax call to the ldapAvailable, ldapuserList and ldapSaveUsers services + + + Validate on username, email, firstname and lastname + + + + + + Extension of cloudstack UI "Edit User" screen + + + + Extend account.js to disable the password fields on the edit user screen if LDAP available, specifically: + + + Make an ajax call to the ldapAvailable, ldapuserList and ldapSaveUsers services + + + Validate on username, email, firstname and lastname. Additional server validation will nsure the password has not changed + + + +
+
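As a rough illustration of what the ldapUserList service does server-side (the actual implementation lives in Java inside the management server), the following Python sketch retrieves users from the configured search base with the configured query filter using python-ldap. Hostnames, DNs, the filter and the attribute names are placeholders standing in for the ldap.* configuration keys listed above.

    import ldap

    LDAP_URL = "ldap://ldap.example.com:389"       # ldap.hostname / ldap.port
    BIND_DN = "cn=admin,dc=example,dc=com"         # ldap.dn
    BIND_PW = "secret"                             # ldap.password
    SEARCH_BASE = "ou=people,dc=example,dc=com"    # ldap.searchbase
    QUERY_FILTER = "(objectClass=inetOrgPerson)"   # ldap.queryfilter

    conn = ldap.initialize(LDAP_URL)
    conn.simple_bind_s(BIND_DN, BIND_PW)
    entries = conn.search_s(SEARCH_BASE, ldap.SCOPE_SUBTREE, QUERY_FILTER,
                            ["uid", "givenName", "sn", "mail"])

    # Shape the result like the proposed LDAPUserResponse {username, email, name}.
    users = []
    for _dn, attrs in entries:
        get = lambda a: attrs.get(a, [b""])[0].decode()
        users.append({"username": get("uid"),
                      "email": get("mail"),
                      "name": ("%s %s" % (get("givenName"), get("sn"))).strip()})
    print(users)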
+ Approach + To get started, a development CloudStack environment will be created, with DevCloud used to verify changes. Once the schedule is agreed with the mentor, the deliverables will be broken into small user stories with expected delivery dates set. The development cycle will focus on BDD, ensuring all unit and acceptance tests are written first. + + A build pipeline for a continuous delivery environment around CloudStack will be implemented; the following stages will be adopted: + + + + + + Stage + Action + + + + + Commit + Run unit tests + + + Sonar + Runs code quality metrics + + + Acceptance + Deploys DevCloud and runs all acceptance tests + + + Deployment + Deploy a new management server using Chef + + + + +
+
+ About me + I am a Computer Science student at Dublin City University in Ireland. I have interests in virtualization, automation, information systems, networking and web development. + I was involved with a project in a K-12 (educational) environment to move their server systems over to a virtualized environment on ESXi. I have good knowledge of programming in Java, PHP and scripting languages. During the configuration of an automation system for OS deployment I gained some exposure to scripting in PowerShell, batch, VBS and bash, and to the configuration of PXE images based on WinPE and Debian. Additionally, I am a mentor in an open-source teaching movement called CoderDojo, where we teach kids from the age of 8 everything from web pages to HTML5 games and Raspberry Pi development. It's really cool. + I'm excited at the opportunity and learning experience that CloudStack is offering with this project. +
+
diff --git a/docs/en-US/gsoc-tuna.xml b/docs/en-US/gsoc-tuna.xml new file mode 100644 index 00000000000..0988734a465 --- /dev/null +++ b/docs/en-US/gsoc-tuna.xml @@ -0,0 +1,231 @@ + + +%BOOK_ENTITIES; +]> + + + + + Nguyen's 2013 GSoC Proposal + This chapter describes Nguyen 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. +
+ Add Xen/XCP support for GRE SDN controller + + "This project aims to enhance the current native SDN controller in supporting Xen/XCP and integrate successfully the open source SDN controller (FloodLight) driving Open vSwitch through its interfaces." + +
+
+ Abstract + SDN, standing for Software-Defined Networking, is an approach to building data network equipment and software. It was invented by the ONRC at Stanford University. SDN basically decouples the control plane from physical networking boxes and hands it to a software application called a controller. SDN has three parts: controller, protocols and switch, in which OpenFlow is an open standard for deploying innovative protocols. Nowadays, more and more datacenters use SDN instead of traditional physical networking boxes. For example, Google announced that it completely built its own switches and SDN controllers for use in its internal backbone network. + Open vSwitch, an open-source software switch, is widely used as a virtual switch in virtualized server environments. It can currently run on any Linux-based virtualization platform, such as KVM, Xen (XenServer, XCP, the Xen hypervisor) and VirtualBox. It has also been ported to a number of different operating systems and hardware platforms: Linux, FreeBSD, Windows and even non-POSIX embedded systems. In IaaS cloud computing, using Open vSwitch instead of the Linux bridge on compute nodes is becoming an inevitable trend because of its powerful features and its ability to integrate with OpenFlow. + In CloudStack, we already have a native SDN controller. With the KVM hypervisor, developers can easily install the Open vSwitch module, whereas Xen even has a built-in one. The combination of an SDN controller and Open vSwitch enables many advanced capabilities. For example, creating GRE tunnels as an isolation method instead of VLAN is a promising approach. In this project, we are planning to support GRE tunnels on the Xen/XCP hypervisor with the native SDN controller. Once that is done, substituting open-source SDN controllers (FloodLight, Beacon, POX, NOX) for the current one is a natural next step. +
+
+ Design description + CloudStack currently has a native SDN controller that is used to build meshes of GRE tunnels between Xen hosts. It consists of 4 parts: the OVS tunnel manager, OVS Dao/VO, Command/Answer and the OVS tunnel plugin. The details are as follows: + OVS tunnel manager: Consists of OvsElement and OvsTunnelManager. + + OvsElement is used for controlling the OVS tunnel lifecycle (prepare, release) + + + prepare(network, nic, vm, dest): create a tunnel for vm on network to dest + + release(network, nic, vm): destroy the tunnel for vm on network + + + OvsTunnelManager drives bridge configuration and tunnel creation by sending the respective commands to the Agent. + + + destroyTunnel(vm, network): call OvsDestroyTunnelCommand to destroy the tunnel for vm on network + + createTunnel(vm, network, dest): call OvsCreateTunnelCommand to create a tunnel for vm on network to dest + + + OVS tunnel plugin: These are the ovstunnel and ovs-vif-flows.py scripts, written as XAPI plugins. The OVS tunnel manager calls them via XML-RPC. + + The ovstunnel plugin calls the corresponding vsctl commands for setting up the OVS bridge and creating or destroying GRE tunnels. + + + setup_ovs_bridge() + + destroy_ovs_bridge() + + create_tunnel() + + destroy_tunnel() + + + Ovs-vif-flows.py clears or applies rules for VIFs every time one is plugged into or unplugged from an OVS bridge. + + + clear_flow() + + apply_flow() + + + OVS command/answer: These are designed as request/answer pairs between the Manager and the Plugin, and they correspond exactly to the operations described above. + + + OvsSetupBridgeCommand + + OvsSetupBridgeAnswer + + OvsDestroyBridgeCommand + + OvsDestroyBridgeAnswer + + OvsCreateTunnelCommand + + OvsCreateTunnelAnswer + + OvsDestroyTunnelCommand + + OvsDestroyTunnelAnswer + + OvsFetchInterfaceCommand + + OvsFetchInterfaceAnswer + + + OVS Dao/VO + + + OvsTunnelInterfaceDao + + OvsTunnelInterfaceVO + + OvsTunnelNetworkDao + + OvsTunnelNetworkVO + + +
+
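To illustrate what the ovstunnel plugin's create_tunnel() ultimately does on a host, here is a standalone Python sketch of the underlying Open vSwitch commands. The bridge name, port name, remote IP and GRE key are placeholders, and the real plugin runs inside the XAPI plugin framework rather than as a separate script.

    import subprocess

    def create_gre_tunnel(bridge, port, remote_ip, key):
        """Add a GRE port to an OVS bridge, keyed so that traffic from different
        isolated networks stays separate (mirrors ovstunnel's create_tunnel)."""
        subprocess.check_call(["ovs-vsctl", "--may-exist", "add-br", bridge])
        subprocess.check_call([
            "ovs-vsctl", "add-port", bridge, port, "--",
            "set", "interface", port, "type=gre",
            "options:remote_ip=%s" % remote_ip,
            "options:key=%s" % key,
        ])

    def destroy_gre_tunnel(bridge, port):
        """Remove the GRE port again (mirrors destroy_tunnel)."""
        subprocess.check_call(["ovs-vsctl", "--if-exists", "del-port", bridge, port])

    # Example with placeholder values: a tunnel to host 192.168.10.12 for network key 1001.
    # create_gre_tunnel("OVSTunnel1001", "t1001-host2", "192.168.10.12", 1001)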
+ Integrate FloodLight as SDN controller + I think we may deploy the FloodLight server as a new SystemVM. This VM would act like the current SystemVMs, with one FloodLight SystemVM per zone so that it can manage the virtual switches in that zone. +
+
+ Deliverables + GRE is used as the isolation method in CloudStack when deployed with Xen/XCP hosts. + + + The user sets the sdn.ovs.controller parameter in Global Settings to true, deploys Advanced Networking, and chooses GRE as the isolation method + + + Make use of FloodLight instead of the native SDN controller. + +
+
+ About me + My name is Nguyen Anh Tu, a young and enthusiastic researcher at the Cloud Computing Center - Viettel Research and Development Institute, Vietnam. Since last year, we have built a cloud platform based on CloudStack, starting with version 3.0.2. As a result, some advanced modules were successfully developed, consisting of: + + + Encrypted data volumes for VMs. + + + Dynamic memory allocation for VMs by changing the policy on the Squeeze daemon. + + + AutoScale without using NetScaler. + + + A new SystemVM type for an Intrusion Detection System. + + + Given this working experience and recent research, I have obtained the specific knowledge needed to carry out this project, as detailed below: + + + Java source code in CloudStack: design patterns, the Spring framework. + + + Bash and Python programming. + + + XAPI plugins. + + + XML-RPC. + + + Open vSwitch on Xen. + + + Other knowledge: + + + XAPI RRD, XenStore. + + + OCaml programming (XAPI functions). +
+
diff --git a/docs/en-US/guest-ip-ranges.xml b/docs/en-US/guest-ip-ranges.xml index b3ebd761394..c49dc6a76f8 100644 --- a/docs/en-US/guest-ip-ranges.xml +++ b/docs/en-US/guest-ip-ranges.xml @@ -28,5 +28,5 @@ their guest network and their clients. In shared networks in Basic zone and Security Group-enabled Advanced networks, you will have the flexibility to add multiple guest IP ranges from different subnets. You can add or remove - one IP range at a time. + one IP range at a time. For more information, see . diff --git a/docs/en-US/images/add-new-gateway-vpc.png b/docs/en-US/images/add-new-gateway-vpc.png index f15b778e0f2..5145622a2f4 100644 Binary files a/docs/en-US/images/add-new-gateway-vpc.png and b/docs/en-US/images/add-new-gateway-vpc.png differ diff --git a/docs/en-US/images/add-vm-vpc.png b/docs/en-US/images/add-vm-vpc.png new file mode 100644 index 00000000000..b2821a69156 Binary files /dev/null and b/docs/en-US/images/add-vm-vpc.png differ diff --git a/docs/en-US/images/del-tier.png b/docs/en-US/images/del-tier.png new file mode 100644 index 00000000000..aa9846cfd9b Binary files /dev/null and b/docs/en-US/images/del-tier.png differ diff --git a/docs/en-US/images/mesos-integration-arch.jpg b/docs/en-US/images/mesos-integration-arch.jpg new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/en-US/images/vpc-lb.png b/docs/en-US/images/vpc-lb.png new file mode 100644 index 00000000000..4269e8b9f9e Binary files /dev/null and b/docs/en-US/images/vpc-lb.png differ diff --git a/docs/en-US/ip-vlan-tenant.xml b/docs/en-US/ip-vlan-tenant.xml new file mode 100644 index 00000000000..42124f0f446 --- /dev/null +++ b/docs/en-US/ip-vlan-tenant.xml @@ -0,0 +1,205 @@ + + +%BOOK_ENTITIES; +]> + +
Dedicated Resources: Public IP Addresses and VLANs Per Account + &PRODUCT; provides you the ability to reserve a set of public IP addresses and VLANs exclusively for an account. During zone creation, you can continue to define a set of VLANs and multiple public IP ranges. This feature extends that functionality to enable you to dedicate a fixed set of VLANs and guest IP addresses for a tenant. + This feature provides you the following capabilities: + + + Reserve a VLAN range and public IP address range from an Advanced zone and assign it to a domain or account + + + Disassociate a VLAN and public IP address range from a domain or account + + + View the number of public IP addresses allocated to an account + + + Check whether the required range is available and conforms to account limits. The maximum IPs-per-account limit cannot be exceeded. + +
+ Dedicating IP Address Ranges to an Account + + + Log in to the &PRODUCT; UI as administrator. + + + In the left navigation bar, click Infrastructure. + + + In Zones, click View All. + + + Choose the zone you want to work with. + + + Click the Physical Network tab. + + + In the Public node of the diagram, click Configure. + + + Click the IP Ranges tab. + You can either assign an existing IP range to an account, or create a new IP range and + assign to an account. + + + To assign an existing IP range to an account, perform the following: + + + Locate the IP range you want to work with. + + + Click Add Account + + + + + addAccount-icon.png: button to assign an IP range to an account. + + button. + The Add Account dialog is displayed. + + + Specify the following: + + + Account: The account to which you want to + assign the IP address range. + + + Domain: The domain associated with the + account. + + + To create a new IP range and assign an account, perform the following: + + + Specify the following: + + + Gateway + + + Netmask + + + VLAN + + + Start IP + + + End IP + + + Account: Perform the following: + + + Click Account. + The Add Account page is displayed. + + + Specify the following: + + + Account: The account to which you want to + assign an IP address range. + + + Domain: The domain associated with the + account. + + + + + Click OK. + + + + + + + Click Add. + + + + + + +
+
+ Dedicating VLAN Ranges to an Account + + + After the &PRODUCT; Management Server is installed, log in to the &PRODUCT; UI as + administrator. + + + In the left navigation bar, click Infrastructure. + + + In Zones, click View All. + + + Choose the zone you want to work with. + + + Click the Physical Network tab. + + + In the Guest node of the diagram, click Configure. + + + Select the Dedicated VLAN Ranges tab. + + + Click Dedicate VLAN Range. + The Dedicate VLAN Range dialog is displayed. + + + Specify the following: + + + VLAN Range: The + VLAN range that you want to assign to an account. + + + Account: The + account to which you want to assign the selected VLAN range. + + + Domain: The + domain associated with the account. + + + + +
+
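Both dedications can also be scripted. The sketch below assumes the dedicatePublicIpRange and dedicateGuestVlanRange API calls that back the dialogs described above, along with the unauthenticated integration port used in the earlier examples; every identifier is a placeholder.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    # Dedicate an existing public IP range (identified by its VLAN IP range id) to an account.
    requests.get(API, params={
        "command": "dedicatePublicIpRange",
        "response": "json",
        "id": "VLAN_IP_RANGE_UUID",
        "account": "tenant-a",
        "domainid": "DOMAIN_UUID",
    })

    # Dedicate a guest VLAN range on a physical network to the same account.
    requests.get(API, params={
        "command": "dedicateGuestVlanRange",
        "response": "json",
        "vlanrange": "100-110",
        "physicalnetworkid": "PHYSICAL_NETWORK_UUID",
        "account": "tenant-a",
        "domainid": "DOMAIN_UUID",
    })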
diff --git a/docs/en-US/multiple-ip-nic.xml b/docs/en-US/multiple-ip-nic.xml index 561ba0757b5..926235c3518 100644 --- a/docs/en-US/multiple-ip-nic.xml +++ b/docs/en-US/multiple-ip-nic.xml @@ -24,8 +24,7 @@ &PRODUCT; now provides you the ability to associate multiple private IP addresses per guest VM NIC. This feature is supported on all the network configurations—Basic, Advanced, and VPC. Security Groups, Static NAT and Port forwarding services are supported on these additional - IPs. In addition to the primary IP, you can assign additional IPs to the guest VM NIC. Up to 256 - IP addresses are allowed per NIC. + IPs. In addition to the primary IP, you can assign additional IPs to the guest VM NIC. As always, you can specify an IP from the guest subnet; if not specified, an IP is automatically picked up from the guest VM subnet. You can view the IPs associated with for each guest VM NICs on the UI. You can apply NAT on these additional guest IPs by using firewall diff --git a/docs/en-US/multiple-ip-range.xml b/docs/en-US/multiple-ip-range.xml new file mode 100644 index 00000000000..42e0c2a9555 --- /dev/null +++ b/docs/en-US/multiple-ip-range.xml @@ -0,0 +1,42 @@ + + +%BOOK_ENTITIES; +]> + +
About Multiple IP Ranges + The feature can only be implemented on IPv4 addresses. + &PRODUCT; provides you with the flexibility to add guest IP ranges from different subnets in Basic zones and security group-enabled Advanced zones. For security group-enabled Advanced zones, this implies that multiple subnets can be added to the same VLAN. With the addition of this feature, you will be able to add IP address ranges from the same subnet or from a different one when IP addresses are exhausted. This in turn allows you to employ a higher number of subnets and thus reduce the address management overhead. To support this feature, the createVlanIpRange API is extended to add IP ranges also from a different subnet. + Ensure that you manually configure the gateway of the new subnet before adding the IP range. Note that &PRODUCT; supports only one gateway for a subnet; overlapping subnets are not currently supported. + Use the deleteVlanIpRange API to delete IP ranges. This operation fails if an IP from the removed range is in use. If the removed range contains the IP address on which the DHCP server is running, &PRODUCT; acquires a new IP from the same subnet. If no IP is available in the subnet, the remove operation fails. + This feature is supported on KVM, XenServer, and VMware hypervisors.
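For example, adding a second guest IP range from a new subnet to an existing shared network might look like the sketch below, again assuming the integration port and placeholder IDs; remember to configure the new subnet's gateway beforehand, as noted above.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    print(requests.get(API, params={
        "command": "createVlanIpRange",
        "response": "json",
        "zoneid": "ZONE_UUID",
        "networkid": "SHARED_NETWORK_UUID",   # existing guest network (same VLAN, new subnet)
        "forvirtualnetwork": "false",
        "gateway": "10.1.2.1",                # gateway of the new subnet, configured beforehand
        "netmask": "255.255.255.0",
        "startip": "10.1.2.10",
        "endip": "10.1.2.100",
    }).json())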
diff --git a/docs/en-US/networks.xml b/docs/en-US/networks.xml index 8a7405a63ac..b557088273f 100644 --- a/docs/en-US/networks.xml +++ b/docs/en-US/networks.xml @@ -33,6 +33,7 @@ + diff --git a/docs/en-US/pvlan.xml b/docs/en-US/pvlan.xml new file mode 100644 index 00000000000..96c1a78a85d --- /dev/null +++ b/docs/en-US/pvlan.xml @@ -0,0 +1,57 @@ + + +%BOOK_ENTITIES; +]> + + +
+ Isolation in Advanced Zone Using Private VLAN + +
About Private VLAN + In an Ethernet switch, a VLAN is a broadcast domain in which hosts can establish direct communication with one another at Layer 2. Private VLAN is designed as an extension of the VLAN standard to add further segmentation of the logical broadcast domain. A regular VLAN is a single broadcast domain, whereas a private VLAN partitions a larger VLAN broadcast domain into smaller sub-domains. A sub-domain is represented by a pair of VLANs: a Primary VLAN and a Secondary VLAN. The original VLAN that is being divided into smaller groups is called the Primary, which implies that all VLAN pairs in a private VLAN share the same Primary VLAN. All the Secondary VLANs exist only inside the Primary. Each Secondary VLAN has a specific VLAN ID associated with it, which differentiates one sub-domain from another. + For further reading: + + + Understanding Private VLANs + + + Cisco Systems' Private VLANs: Scalable Security in a Multi-Client Environment + + + Private VLAN (PVLAN) on vNetwork Distributed Switch - Concept Overview (1010691) + + +
+
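Once the physical switches are prepared (see the prerequisites below), a shared guest network carrying a primary/secondary VLAN pair might be created as sketched here. The isolatedpvlan parameter name, like all the IDs and addresses, is an assumption used to illustrate the concept; verify it against the API reference for your release.

    import requests

    API = "http://mgmt-server:8096/client/api"   # integration port; placeholder host

    print(requests.get(API, params={
        "command": "createNetwork",
        "response": "json",
        "zoneid": "ZONE_UUID",
        "networkofferingid": "SHARED_OFFERING_UUID",
        "name": "pvlan-guest-net",
        "displaytext": "Guest network using a private VLAN pair",
        "vlan": "100",              # primary VLAN
        "isolatedpvlan": "101",     # secondary (isolated) VLAN; assumed parameter name
        "gateway": "10.1.3.1",
        "netmask": "255.255.255.0",
        "startip": "10.1.3.10",
        "endip": "10.1.3.100",
    }).json())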
+ Prerequisites + Ensure that you configure private VLAN on your physical switches out-of-band. +
+
diff --git a/docs/en-US/release-ip-for-vpc.xml b/docs/en-US/release-ip-for-vpc.xml index 466ec663a17..f827b671c03 100644 --- a/docs/en-US/release-ip-for-vpc.xml +++ b/docs/en-US/release-ip-for-vpc.xml @@ -40,27 +40,42 @@ Click the Configure button of the VPC whose IP you want to release. The VPC page is displayed where all the tiers you created are listed in a diagram. - - - Click the Settings icon. The following options are displayed. - IP Addresses + Internal LB - Gateways + Public LB IP - Site-to-Site VPN + Static NAT - Network ACLs + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists - Select IP Addresses. + Select Public IP Addresses. The IP Addresses page is displayed. diff --git a/docs/en-US/remove-tier.xml b/docs/en-US/remove-tier.xml index b5996eb2de3..701645cc4ed 100644 --- a/docs/en-US/remove-tier.xml +++ b/docs/en-US/remove-tier.xml @@ -40,16 +40,18 @@ The Configure VPC page is displayed. Locate the tier you want to work with. - Click the Remove VPC button: - - - - - - remove-tier.png: removing a tier from a vpc. - - - Wait for some time for the tier to be removed. + Select the tier you want to remove. + + + In the Network Details tab, click the Delete Network button. + + + + + del-tier.png: button to remove a tier + + + Click Yes to confirm. Wait for some time for the tier to be removed. diff --git a/docs/en-US/remove-vpc.xml b/docs/en-US/remove-vpc.xml index c5eff850fd3..b373f1a52c3 100644 --- a/docs/en-US/remove-vpc.xml +++ b/docs/en-US/remove-vpc.xml @@ -38,14 +38,15 @@ Select the VPC you want to work with. - To remove, click the Remove VPC button + In the Details tab, click the Remove VPC button remove-vpc.png: button to remove a VPC - + + You can remove the VPC by also using the remove button in the Quick View. You can edit the name and description of a VPC. To do that, select the VPC, then click the Edit button. diff --git a/docs/en-US/vpc.xml b/docs/en-US/vpc.xml index 0665d372b4e..7c94f0d6dd1 100644 --- a/docs/en-US/vpc.xml +++ b/docs/en-US/vpc.xml @@ -151,8 +151,8 @@ cannot be used for StaticNAT or port forwarding. - The instances only have a private IP address that you provision. To communicate with the - Internet, enable NAT to an instance that you launch in your VPC. + The instances can only have a private IP address that you provision. To communicate with + the Internet, enable NAT to an instance that you launch in your VPC. Only new networks can be added to a VPC. The maximum number of networks per VPC is diff --git a/docs/publican-gsoc-2013.cfg b/docs/publican-gsoc-2013.cfg new file mode 100644 index 00000000000..35dc517be12 --- /dev/null +++ b/docs/publican-gsoc-2013.cfg @@ -0,0 +1,27 @@ +# Publican configuration file for CloudStack Complete Documentation Set +# Contains all technical docs except release notes +# Config::Simple 4.58 +# Tue May 29 00:57:27 2012 +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information# +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +xml_lang: en-US +type: Book +docname: CloudStack_GSoC_Guide +brand: cloudstack +chunk_first: 1 +chunk_section_depth: 1 diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java index d714df33df3..bd8c6e0060c 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java @@ -21,6 +21,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.Map; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.hypervisor.Hypervisor.HypervisorType; public interface DataStoreLifeCycle { public DataStore initialize(Map dsInfos); @@ -29,7 +30,7 @@ public interface DataStoreLifeCycle { public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo); - boolean attachZone(DataStore dataStore, ZoneScope scope); + boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType); public boolean dettach(); diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index 5458de58725..99b7b9c92b4 100644 --- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -16,10 +16,10 @@ // under the License. package org.apache.cloudstack.storage.datastore.db; -import java.util.ArrayList; import java.util.List; import java.util.Map; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; import com.cloud.storage.StoragePoolStatus; import com.cloud.utils.db.GenericDao; @@ -43,7 +43,7 @@ public interface PrimaryDataStoreDao extends GenericDao { /** * Set capacity of storage pool in bytes - * + * * @param id * pool id. * @param capacity @@ -53,7 +53,7 @@ public interface PrimaryDataStoreDao extends GenericDao { /** * Set available bytes of storage pool in bytes - * + * * @param id * pool id. * @param available @@ -65,7 +65,7 @@ public interface PrimaryDataStoreDao extends GenericDao { /** * Find pool by name. - * + * * @param name * name of pool. * @return the single StoragePoolVO @@ -74,7 +74,7 @@ public interface PrimaryDataStoreDao extends GenericDao { /** * Find pools by the pod that matches the details. - * + * * @param podId * pod id to find the pools in. * @param details @@ -88,7 +88,7 @@ public interface PrimaryDataStoreDao extends GenericDao { /** * Find pool by UUID. - * + * * @param uuid * uuid of pool. 
* @return the single StoragePoolVO @@ -120,4 +120,6 @@ public interface PrimaryDataStoreDao extends GenericDao { List findLocalStoragePoolsByTags(long dcId, long podId, Long clusterId, String[] tags); List findZoneWideStoragePoolsByTags(long dcId, String[] tags); + + List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType); } diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 5d6e2cf9b72..08082870268 100644 --- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -31,6 +31,7 @@ import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; import com.cloud.storage.StoragePoolStatus; @@ -423,4 +424,14 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return listBy(sc); } + + @Override + public List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType) { + SearchCriteriaService sc = SearchCriteria2.create(StoragePoolVO.class); + sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dataCenterId); + sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); + sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE); + sc.addAnd(sc.getEntity().getHypervisor(), Op.EQ, hypervisorType); + return sc.list(); + } } diff --git a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 78a5779f167..a9919245b14 100644 --- a/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -29,6 +29,7 @@ import javax.persistence.TableGenerator; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.ScopeType; import com.cloud.storage.StoragePool; @@ -102,6 +103,10 @@ public class StoragePoolVO implements StoragePool { @Enumerated(value = EnumType.STRING) private ScopeType scope; + @Column(name = "hypervisor") + @Enumerated(value = EnumType.STRING) + private HypervisorType hypervisor; + public long getId() { return id; } @@ -276,6 +281,14 @@ public class StoragePoolVO implements StoragePool { return this.scope; } + public HypervisorType getHypervisor() { + return hypervisor; + } + + public void setHypervisor(HypervisorType hypervisor) { + this.hypervisor = hypervisor; + } + @Override public boolean equals(Object obj) { if (!(obj instanceof StoragePoolVO) || obj == null) { diff --git a/engine/orchestration/src/org/apache/cloudstack/platform/orchestration/CloudOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/platform/orchestration/CloudOrchestrator.java index 963e4d7d967..ca299ea45bc 100755 --- a/engine/orchestration/src/org/apache/cloudstack/platform/orchestration/CloudOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/platform/orchestration/CloudOrchestrator.java @@ -89,10 +89,9 @@ public class CloudOrchestrator implements OrchestrationService { public CloudOrchestrator() { } - + public VirtualMachineEntity 
createFromScratch(String uuid, String iso, String os, String hypervisor, String hostName, int cpu, int speed, long memory, List networks, List computeTags, Map details, String owner) { - // TODO Auto-generated method stub return null; } diff --git a/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java b/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java index 01a560a129a..18115a5e499 100755 --- a/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java +++ b/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java @@ -156,7 +156,7 @@ public class AlertDaoImpl extends GenericDaoBase implements Alert public List listOlderAlerts(Date oldTime) { if (oldTime == null) return null; SearchCriteria sc = createSearchCriteria(); - sc.addAnd("createDate", SearchCriteria.Op.LT, oldTime); + sc.addAnd("createdDate", SearchCriteria.Op.LT, oldTime); sc.addAnd("archived", SearchCriteria.Op.EQ, false); return listIncludingRemovedBy(sc, null); } diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/com/cloud/dc/dao/ClusterDao.java index 3ce0798a8a2..673888bc2ab 100644 --- a/engine/schema/src/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/com/cloud/dc/dao/ClusterDao.java @@ -34,4 +34,5 @@ public interface ClusterDao extends GenericDao { Map> getPodClusterIdMap(List clusterIds); List listDisabledClusters(long zoneId, Long podId); List listClustersWithDisabledPods(long zoneId); + List listClustersByDcId(long zoneId); } diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java index 86dc65e05bd..ba2686a4004 100644 --- a/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java @@ -52,6 +52,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder AvailHyperSearch; protected final SearchBuilder ZoneSearch; protected final SearchBuilder ZoneHyTypeSearch; + protected final SearchBuilder ZoneClusterSearch; private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )"; @@ -85,12 +86,16 @@ public class ClusterDaoImpl extends GenericDaoBase implements C AvailHyperSearch.and("zoneId", AvailHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); AvailHyperSearch.select(null, Func.DISTINCT, AvailHyperSearch.entity().getHypervisorType()); AvailHyperSearch.done(); + + ZoneClusterSearch = createSearchBuilder(); + ZoneClusterSearch.and("dataCenterId", ZoneClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneClusterSearch.done(); } @Override public List listByZoneId(long zoneId) { SearchCriteria sc = ZoneSearch.create(); - sc.setParameters("dataCenterId", zoneId); + sc.setParameters("dataCenterId", zoneId); return listBy(sc); } @@ -223,6 +228,13 @@ public class ClusterDaoImpl extends GenericDaoBase implements C return customSearch(sc, null); } + @Override + public List listClustersByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + return listBy(sc); + } + @Override public boolean remove(Long id) { Transaction txn = Transaction.currentTxn(); diff --git a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java index 9460a73dc57..c84aa60897c 100644 --- a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java +++ 
b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java @@ -288,5 +288,5 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom return parentDomains; } - + } diff --git a/engine/schema/src/com/cloud/host/dao/HostDao.java b/engine/schema/src/com/cloud/host/dao/HostDao.java index 98bdcb470e1..8ceb8f23132 100755 --- a/engine/schema/src/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/com/cloud/host/dao/HostDao.java @@ -80,4 +80,10 @@ public interface HostDao extends GenericDao, StateDao listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag); + + List findByPodId(Long podId); + + List findByClusterId(Long clusterId); + + List listByDataCenterId(long id); } diff --git a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java index 07a42322ce3..810b973e296 100755 --- a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java @@ -37,6 +37,7 @@ import com.cloud.cluster.agentlb.HostTransferMapVO; import com.cloud.cluster.agentlb.dao.HostTransferMapDao; import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl; import com.cloud.dc.ClusterVO; +import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.ClusterDaoImpl; import com.cloud.host.Host; @@ -86,6 +87,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder GuidSearch; protected SearchBuilder DcSearch; protected SearchBuilder PodSearch; + protected SearchBuilder ClusterSearch; protected SearchBuilder TypeSearch; protected SearchBuilder StatusSearch; protected SearchBuilder ResourceStateSearch; @@ -201,6 +203,9 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao DcSearch = createSearchBuilder(); DcSearch.and("dc", DcSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + DcSearch.and("type", DcSearch.entity().getType(), Op.EQ); + DcSearch.and("status", DcSearch.entity().getStatus(), Op.EQ); + DcSearch.and("resourceState", DcSearch.entity().getResourceState(), Op.EQ); DcSearch.done(); ClusterStatusSearch = createSearchBuilder(); @@ -215,9 +220,13 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao TypeNameZoneSearch.done(); PodSearch = createSearchBuilder(); - PodSearch.and("pod", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); + PodSearch.and("podId", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); PodSearch.done(); + ClusterSearch = createSearchBuilder(); + ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + ClusterSearch.done(); + TypeSearch = createSearchBuilder(); TypeSearch.and("type", TypeSearch.entity().getType(), SearchCriteria.Op.EQ); TypeSearch.done(); @@ -373,7 +382,17 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao List hosts = listBy(sc); return hosts.size(); } - + + @Override + public List listByDataCenterId(long id) { + SearchCriteria sc = DcSearch.create(); + sc.setParameters("dc", id); + sc.setParameters("status", Status.Up); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("resourceState", ResourceState.Enabled); + + return listBy(sc); + } @Override public HostVO findByGuid(String guid) { @@ -906,6 +925,20 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return findOneBy(sc); } + @Override + public List findByPodId(Long podId) { + SearchCriteria sc = PodSearch.create(); + sc.setParameters("podId", podId); + return listBy(sc); + } + 
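+    // Usage sketch (hypothetical caller, not part of this patch): the new pod/cluster/zone
+    // finders added here can be called directly by allocators or capacity checks, e.g.
+    //   List<HostVO> podHosts = hostDao.findByPodId(podId);
+    //   List<HostVO> zoneHosts = hostDao.listByDataCenterId(zoneId); // only Up, Enabled, Routing hosts
+    // where hostDao is an injected HostDao instance.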
+ @Override + public List findByClusterId(Long clusterId) { + SearchCriteria sc = ClusterSearch.create(); + sc.setParameters("clusterId", clusterId); + return listBy(sc); + } + @Override public List findHypervisorHostInCluster(long clusterId) { SearchCriteria sc = TypeClusterStatusSearch.create(); diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index 9bc0ba599c2..e23815b7d28 100755 --- a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -261,7 +261,10 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { } txn.commit(); - } finally { + } catch (CloudRuntimeException e){ + s_logger.error("Unable to upgrade the database", e); + throw new CloudRuntimeException("Unable to upgrade the database", e); + }finally { txn.close(); } } diff --git a/engine/schema/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java b/engine/schema/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java index bad32536955..b035c10f13c 100755 --- a/engine/schema/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java +++ b/engine/schema/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java @@ -18,9 +18,6 @@ package com.cloud.upgrade; import javax.ejb.Local; -import org.springframework.context.annotation.Primary; -import org.springframework.stereotype.Component; - import com.cloud.upgrade.dao.DbUpgrade; import com.cloud.upgrade.dao.Upgrade217to218; import com.cloud.upgrade.dao.Upgrade218to224DomainVlans; @@ -43,7 +40,6 @@ import com.cloud.upgrade.dao.Upgrade30to301; import com.cloud.upgrade.dao.Upgrade40to41; import com.cloud.upgrade.dao.UpgradeSnapshot217to224; import com.cloud.upgrade.dao.UpgradeSnapshot223to224; -import com.cloud.upgrade.dao.VersionDaoImpl; import com.cloud.utils.component.SystemIntegrityChecker; diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade302to40.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade302to40.java index ecda872dfa4..6f31fdd2b8e 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade302to40.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade302to40.java @@ -18,6 +18,7 @@ package com.cloud.upgrade.dao; import java.io.File; +import java.io.UnsupportedEncodingException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -72,6 +73,7 @@ public class Upgrade302to40 extends Upgrade30xBase implements DbUpgrade { fixForeignKeys(conn); setupExternalNetworkDevices(conn); fixZoneUsingExternalDevices(conn); + encryptConfig(conn); } @Override @@ -1079,4 +1081,43 @@ public class Upgrade302to40 extends Upgrade30xBase implements DbUpgrade { s_logger.info("Successfully upgraded networks using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); } } + + private void encryptConfig(Connection conn){ + //Encrypt config params and change category to Hidden + s_logger.debug("Encrypting Config values"); + PreparedStatement pstmt = null; + ResultSet rs = null; + try { + pstmt = conn.prepareStatement("select name, value from `cloud`.`configuration` where name in ('router.ram.size', 'secondary.storage.vm', 'security.hash.key') and category <> 'Hidden'"); + rs = pstmt.executeQuery(); + while (rs.next()) { + String name = rs.getString(1); + String value = rs.getString(2); + if (value == null) { + continue; + } + String encryptedValue = DBEncryptionUtil.encrypt(value); + pstmt = 
conn.prepareStatement("update `cloud`.`configuration` set value=?, category = 'Hidden' where name=?"); + pstmt.setBytes(1, encryptedValue.getBytes("UTF-8")); + pstmt.setString(2, name); + pstmt.executeUpdate(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable encrypt configuration values ", e); + } catch (UnsupportedEncodingException e) { + throw new CloudRuntimeException("Unable encrypt configuration values ", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + s_logger.debug("Done encrypting Config values"); + } } diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java index 2e9f47fb364..19194861817 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -18,6 +18,7 @@ package com.cloud.upgrade.dao; import com.cloud.deploy.DeploymentPlanner; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import org.apache.log4j.Logger; @@ -28,6 +29,8 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; +import java.util.ArrayList; +import java.util.List; import java.util.UUID; import com.cloud.network.vpc.NetworkACL; @@ -62,6 +65,7 @@ public class Upgrade410to420 implements DbUpgrade { @Override public void performDataMigration(Connection conn) { upgradeVmwareLabels(conn); + persistLegacyZones(conn); createPlaceHolderNics(conn); updateRemoteAccessVpn(conn); updateSystemVmTemplates(conn); @@ -75,26 +79,254 @@ public class Upgrade410to420 implements DbUpgrade { updateNetworkACLs(conn); addHostDetailsIndex(conn); updateNetworksForPrivateGateways(conn); + correctExternalNetworkDevicesSetup(conn); removeFirewallServiceFromSharedNetworkOfferingWithSGService(conn); + fix22xKVMSnapshots(conn); + addIndexForAlert(conn); + } + + private void addIndexForAlert(Connection conn) { + + //First drop if it exists. (Due to patches shipped to customers some will have the index and some wont.) + List indexList = new ArrayList(); + s_logger.debug("Dropping index i_alert__last_sent if it exists"); + indexList.add("i_alert__last_sent"); + DbUpgradeUtils.dropKeysIfExist(conn, "alert", indexList, false); + + //Now add index. + PreparedStatement pstmt = null; + try { + pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)"); + pstmt.executeUpdate(); + s_logger.debug("Added index i_alert__last_sent for table alert"); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to add index i_alert__last_sent to alert table for the column last_sent", e); + } finally { + try { + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } private void updateSystemVmTemplates(Connection conn) { - /* TODO: where should be system vm templates located? 
- PreparedStatement sql = null; + // TODO: system vm template migration after storage refactoring + PreparedStatement pstmt = null; + ResultSet rs = null; + boolean xenserver = false; + boolean kvm = false; + boolean VMware = false; + boolean Hyperv = false; + boolean LXC = false; + s_logger.debug("Updating System Vm template IDs"); + try{ + //Get all hypervisors in use try { - sql = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'"); - sql.executeUpdate(); + pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); + rs = pstmt.executeQuery(); + while(rs.next()){ + if("XenServer".equals(rs.getString(1))){ + xenserver = true; + } else if("KVM".equals(rs.getString(1))){ + kvm = true; + } else if("VMware".equals(rs.getString(1))){ + VMware = true; + } else if("Hyperv".equals(rs.getString(1))) { + Hyperv = true; + } else if("LXC".equals(rs.getString(1))) { + LXC = true; + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Error while listing hypervisors in use", e); + } + + s_logger.debug("Updating XenSever System Vms"); + //XenServer + try { + //Get 4.2.0 xenserer system Vm template Id + pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name like 'systemvm-xenserver-4.2' and removed is null order by id desc limit 1"); + rs = pstmt.executeQuery(); + if(rs.next()){ + long templateId = rs.getLong(1); + rs.close(); + pstmt.close(); + // change template type to SYSTEM + pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + // update templete ID of system Vms + pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'XenServer'"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + } else { + if (xenserver){ + throw new CloudRuntimeException("4.2.0 XenServer SystemVm template not found. Cannot upgrade system Vms"); + } else { + s_logger.warn("4.2.0 XenServer SystemVm template not found. XenServer hypervisor is not used, so not failing upgrade"); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Error while updating XenServer systemVm template", e); + } + + //KVM + s_logger.debug("Updating KVM System Vms"); + try { + //Get 4.2.0 KVM system Vm template Id + pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-kvm-4.2' and removed is null order by id desc limit 1"); + rs = pstmt.executeQuery(); + if(rs.next()){ + long templateId = rs.getLong(1); + rs.close(); + pstmt.close(); + // change template type to SYSTEM + pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + // update templete ID of system Vms + pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'KVM'"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + } else { + if (kvm){ + throw new CloudRuntimeException("4.2.0 KVM SystemVm template not found. Cannot upgrade system Vms"); + } else { + s_logger.warn("4.2.0 KVM SystemVm template not found. 
KVM hypervisor is not used, so not failing upgrade"); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Error while updating KVM systemVm template", e); + } + + //VMware + s_logger.debug("Updating VMware System Vms"); + try { + //Get 4.2.0 VMware system Vm template Id + pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-vmware-4.2' and removed is null order by id desc limit 1"); + rs = pstmt.executeQuery(); + if(rs.next()){ + long templateId = rs.getLong(1); + rs.close(); + pstmt.close(); + // change template type to SYSTEM + pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + // update templete ID of system Vms + pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'VMware'"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + } else { + if (VMware){ + throw new CloudRuntimeException("4.2.0 VMware SystemVm template not found. Cannot upgrade system Vms"); + } else { + s_logger.warn("4.2.0 VMware SystemVm template not found. VMware hypervisor is not used, so not failing upgrade"); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Error while updating VMware systemVm template", e); + } + + //Hyperv + s_logger.debug("Updating Hyperv System Vms"); + try { + //Get 4.2.0 Hyperv system Vm template Id + pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-hyperv-4.2' and removed is null order by id desc limit 1"); + rs = pstmt.executeQuery(); + if(rs.next()){ + long templateId = rs.getLong(1); + rs.close(); + pstmt.close(); + // change template type to SYSTEM + pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + // update templete ID of system Vms + pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'Hyperv'"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + } else { + if (Hyperv){ + throw new CloudRuntimeException("4.2.0 HyperV SystemVm template not found. Cannot upgrade system Vms"); + } else { + s_logger.warn("4.2.0 Hyperv SystemVm template not found. Hyperv hypervisor is not used, so not failing upgrade"); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Error while updating Hyperv systemVm template", e); + } + + //LXC + s_logger.debug("Updating LXC System Vms"); + try { + //Get 4.2.0 LXC system Vm template Id + pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-lxc-4.2' and removed is null order by id desc limit 1"); + rs = pstmt.executeQuery(); + if(rs.next()){ + long templateId = rs.getLong(1); + rs.close(); + pstmt.close(); + // change template type to SYSTEM + pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + // update templete ID of system Vms + pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'LXC'"); + pstmt.setLong(1, templateId); + pstmt.executeUpdate(); + pstmt.close(); + } else { + if (LXC){ + throw new CloudRuntimeException("4.2.0 LXC SystemVm template not found. 
Cannot upgrade system Vms"); + } else { + s_logger.warn("4.2.0 LXC SystemVm template not found. LXC hypervisor is not used, so not failing upgrade"); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Error while updating LXC systemVm template", e); + } + s_logger.debug("Updating System Vm Template IDs Complete"); + } + finally { + try { + if (rs != null) { + rs.close(); + } + + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + pstmt = null; + try { + pstmt = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'"); + pstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString()); } finally { - if (sql != null) { + if (pstmt != null) { try { - sql.close(); + pstmt.close(); } catch (SQLException e) { } } } - */ + } private void updatePrimaryStore(Connection conn) { @@ -248,6 +480,180 @@ public class Upgrade410to420 implements DbUpgrade { } } + private void persistLegacyZones(Connection conn) { + List listOfLegacyZones = new ArrayList(); + PreparedStatement pstmt = null; + PreparedStatement clustersQuery = null; + PreparedStatement clusterDetailsQuery = null; + ResultSet rs = null; + ResultSet clusters = null; + ResultSet clusterDetails = null; + ResultSet dcInfo = null; + Long vmwareDcId = 1L; + Long zoneId; + Long clusterId; + String clusterHypervisorType; + boolean legacyZone; + boolean ignoreZone; + Long count; + String dcOfPreviousCluster = null; + String dcOfCurrentCluster = null; + String[] tokens; + String url; + String user = ""; + String password = ""; + String vc = ""; + String dcName = ""; + String guid; + String key; + String value; + + try { + clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL"); + pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL"); + rs = pstmt.executeQuery(); + + while (rs.next()) { + zoneId = rs.getLong("id"); + legacyZone = false; + ignoreZone = true; + count = 0L; + // Legacy zone term is meant only for VMware + // Legacy zone is a zone with atleast 2 clusters & with multiple DCs or VCs + clusters = clustersQuery.executeQuery(); + if (!clusters.next()) { + continue; // Ignore the zone without any clusters + } else { + dcOfPreviousCluster = null; + dcOfCurrentCluster = null; + do { + clusterHypervisorType = clusters.getString("hypervisor_type"); + clusterId = clusters.getLong("id"); + if (clusterHypervisorType.equalsIgnoreCase("VMware")) { + ignoreZone = false; + clusterDetailsQuery = conn.prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?"); + clusterDetailsQuery.setLong(1, clusterId); + clusterDetails = clusterDetailsQuery.executeQuery(); + clusterDetails.next(); + url = clusterDetails.getString("value"); + tokens = url.split("/"); // url format - http://vcenter/dc/cluster + vc = tokens[2]; + dcName = tokens[3]; + if (count > 0) { + dcOfPreviousCluster = dcOfCurrentCluster; + dcOfCurrentCluster = dcName + "@" + vc; + if (!dcOfPreviousCluster.equals(dcOfCurrentCluster)) { + legacyZone = true; + s_logger.debug("Marking the zone " + zoneId + " as legacy zone."); + } + } + } else { + s_logger.debug("Ignoring zone " + zoneId + " with hypervisor type " + clusterHypervisorType); + break; + } + count++; + } while (clusters.next()); + if (ignoreZone) { + continue; // Ignore the zone with hypervisors other than VMware + } + } + if 
(legacyZone) { + listOfLegacyZones.add(zoneId); + } else { + assert(clusterDetails != null) : "Couldn't retrieve details of cluster!"; + s_logger.debug("Discovered non-legacy zone " + zoneId + ". Processing the zone to associate with VMware datacenter."); + + clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?"); + clusterDetailsQuery.setLong(1, clusterId); + clusterDetails = clusterDetailsQuery.executeQuery(); + while (clusterDetails.next()) { + key = clusterDetails.getString(1); + value = clusterDetails.getString(2); + if (key.equalsIgnoreCase("username")) { + user = value; + } else if (key.equalsIgnoreCase("password")) { + password = value; + } + } + guid = dcName + "@" + vc; + + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)"); + pstmt.setString(1, UUID.randomUUID().toString()); + pstmt.setString(2, dcName); + pstmt.setString(3, guid); + pstmt.setString(4, vc); + pstmt.setString(5, user); + pstmt.setString(6, password); + pstmt.executeUpdate(); + + pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?"); + pstmt.setString(1, guid); + dcInfo = pstmt.executeQuery(); + if(dcInfo.next()) { + vmwareDcId = dcInfo.getLong("id"); + } + + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)"); + pstmt.setLong(1, zoneId); + pstmt.setLong(2, vmwareDcId); + pstmt.executeUpdate(); + } + } + updateLegacyZones(conn, listOfLegacyZones); + } catch (SQLException e) { + String msg = "Unable to discover legacy zones." + e.getMessage(); + s_logger.error(msg); + throw new CloudRuntimeException(msg, e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (pstmt != null) { + pstmt.close(); + } + if (dcInfo != null) { + dcInfo.close(); + } + if (clusters != null) { + clusters.close(); + } + if (clusterDetails != null) { + clusterDetails.close(); + } + if (clustersQuery != null) { + clustersQuery.close(); + } + if (clusterDetailsQuery != null) { + clusterDetailsQuery.close(); + } + } catch (SQLException e) { + } + } + } + + private void updateLegacyZones(Connection conn, List zones) { + PreparedStatement legacyZonesQuery = null; + //Insert legacy zones into table for legacy zones. 
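+        // The loop below issues one executeUpdate() per legacy zone id against cloud.legacy_zones.
+        // A batched variant (hypothetical, not part of this patch) would call addBatch() in the loop
+        // and a single executeBatch() afterwards, but the number of legacy zones is expected to be small.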
+ try { + legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)"); + for(Long zoneId : zones) { + legacyZonesQuery.setLong(1, zoneId); + legacyZonesQuery.executeUpdate(); + s_logger.debug("Inserted zone " + zoneId + " into cloud.legacyzones table"); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e); + } finally { + try { + if (legacyZonesQuery != null) { + legacyZonesQuery.close(); + } + } catch (SQLException e) { + } + } + } + private void createPlaceHolderNics(Connection conn) { PreparedStatement pstmt = null; ResultSet rs = null; @@ -725,8 +1131,8 @@ public class Upgrade410to420 implements DbUpgrade { } } } - - + + private void updateNetworksForPrivateGateways(Connection conn) { PreparedStatement pstmt = null; @@ -744,7 +1150,7 @@ public class Upgrade410to420 implements DbUpgrade { pstmt.setLong(1, vpcId); pstmt.setLong(2, networkId); pstmt.executeUpdate(); - + } } catch (SQLException e) { throw new CloudRuntimeException("Failed to update private networks with VPC id.", e); @@ -761,7 +1167,7 @@ public class Upgrade410to420 implements DbUpgrade { while (rs.next()) { long id = rs.getLong(1); // remove Firewall service for SG shared network offering - pstmt = conn.prepareStatement("DELETE `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'"); + pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'"); pstmt.setLong(1, id); pstmt.executeUpdate(); } @@ -781,4 +1187,402 @@ public class Upgrade410to420 implements DbUpgrade { } } + private void fix22xKVMSnapshots(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + s_logger.debug("Updating KVM snapshots"); + try { + pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null"); + rs = pstmt.executeQuery(); + while (rs.next()) { + long id = rs.getLong(1); + String backUpPath = rs.getString(2); + // Update Backup Path. Remove anything before /snapshots/ + // e.g 22x Path /mnt/0f14da63-7033-3ca5-bdbe-fa62f4e2f38a/snapshots/1/2/6/i-2-6-VM_ROOT-6_20121219072022 + // Above path should change to /snapshots/1/2/6/i-2-6-VM_ROOT-6_20121219072022 + int index = backUpPath.indexOf("snapshots"+File.separator); + if (index > 1){ + String correctedPath = File.separator + backUpPath.substring(index); + s_logger.debug("Updating Snapshot with id: "+id+" original backup path: "+backUpPath+ " updated backup path: "+correctedPath); + pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? 
where id = ?"); + pstmt.setString(1, correctedPath); + pstmt.setLong(2, id); + pstmt.executeUpdate(); + } + } + s_logger.debug("Done updating KVM snapshots"); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update backup id for KVM snapshots", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + + // Corrects upgrade for deployment with F5 and SRX devices (pre 3.0) to network offering & + // network service provider paradigm + private void correctExternalNetworkDevicesSetup(Connection conn) { + PreparedStatement zoneSearchStmt = null, pNetworkStmt = null, f5DevicesStmt = null, srxDevicesStmt = null; + ResultSet zoneResults = null, pNetworksResults = null, f5DevicesResult = null, srxDevicesResult = null; + + try { + zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`"); + zoneResults = zoneSearchStmt.executeQuery(); + while (zoneResults.next()) { + long zoneId = zoneResults.getLong(1); + String networkType = zoneResults.getString(2); + + if (!com.cloud.dc.DataCenter.NetworkType.Advanced.toString().equalsIgnoreCase(networkType)) { + continue; + } + + pNetworkStmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where data_center_id=?"); + pNetworkStmt.setLong(1, zoneId); + pNetworksResults = pNetworkStmt.executeQuery(); + while (pNetworksResults.next()) { + long physicalNetworkId = pNetworksResults.getLong(1); + PreparedStatement fetchF5NspStmt = conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId + + " and provider_name = 'F5BigIp'"); + ResultSet rsF5NSP = fetchF5NspStmt.executeQuery(); + boolean hasF5Nsp = rsF5NSP.next(); + fetchF5NspStmt.close(); + + // if there is no 'F5BigIP' physical network service provider added into physical network then + // add 'F5BigIP' as network service provider and add the entry in 'external_load_balancer_devices' + if (!hasF5Nsp) { + f5DevicesStmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL"); + f5DevicesStmt.setLong(1, zoneId); + f5DevicesResult = f5DevicesStmt.executeQuery(); + // add F5BigIP provider and provider instance to physical network if there are any external load + // balancers added in the zone + while (f5DevicesResult.next()) { + long f5HostId = f5DevicesResult.getLong(1);; + addF5ServiceProvider(conn, physicalNetworkId, zoneId); + addF5LoadBalancer(conn, f5HostId, physicalNetworkId); + } + } + + PreparedStatement fetchSRXNspStmt = conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId + + " and provider_name = 'JuniperSRX'"); + ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery(); + boolean hasSrxNsp = rsSRXNSP.next(); + fetchSRXNspStmt.close(); + + // if there is no 'JuniperSRX' physical network service provider added into physical network then + // add 'JuniperSRX' as network service provider and add the entry in 'external_firewall_devices' + if (!hasSrxNsp) { + srxDevicesStmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? 
AND type = 'ExternalFirewall' AND removed IS NULL"); + srxDevicesStmt.setLong(1, zoneId); + srxDevicesResult = srxDevicesStmt.executeQuery(); + // add JuniperSRX provider and provider instance to physical network if there are any external + // firewall instances added in to the zone + while (srxDevicesResult.next()) { + long srxHostId = srxDevicesResult.getLong(1); + // add SRX provider and provider instance to physical network + addSrxServiceProvider(conn, physicalNetworkId, zoneId); + addSrxFirewall(conn, srxHostId, physicalNetworkId); + } + } + } + } + + // not the network service provider has been provisioned in to physical network, mark all guest network + // to be using network offering 'Isolated with external providers' + fixZoneUsingExternalDevices(conn); + + if (zoneResults != null) { + try { + zoneResults.close(); + } catch (SQLException e) { + } + } + + if (zoneSearchStmt != null) { + try { + zoneSearchStmt.close(); + } catch (SQLException e) { + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Exception while adding PhysicalNetworks", e); + } finally { + + } + } + + private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetworkId){ + PreparedStatement pstmtUpdate = null; + try{ + s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); + String insertF5 = "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " + + "device_name, capacity, is_dedicated, device_state, allocation_state, is_inline, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + pstmtUpdate = conn.prepareStatement(insertF5); + pstmtUpdate.setLong(1, physicalNetworkId); + pstmtUpdate.setLong(2, hostId); + pstmtUpdate.setString(3, "F5BigIp"); + pstmtUpdate.setString(4, "F5BigIpLoadBalancer"); + pstmtUpdate.setLong(5, 0); + pstmtUpdate.setBoolean(6, false); + pstmtUpdate.setString(7, "Enabled"); + pstmtUpdate.setString(8, "Shared"); + pstmtUpdate.setBoolean(9, false); + pstmtUpdate.setBoolean(10, false); + pstmtUpdate.setString(11, UUID.randomUUID().toString()); + pstmtUpdate.executeUpdate(); + }catch (SQLException e) { + throw new CloudRuntimeException("Exception while adding F5 load balancer device" , e); + } finally { + if (pstmtUpdate != null) { + try { + pstmtUpdate.close(); + } catch (SQLException e) { + } + } + } + } + + private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId){ + PreparedStatement pstmtUpdate = null; + try{ + s_logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); + String insertSrx = "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " + + "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + pstmtUpdate = conn.prepareStatement(insertSrx); + pstmtUpdate.setLong(1, physicalNetworkId); + pstmtUpdate.setLong(2, hostId); + pstmtUpdate.setString(3, "JuniperSRX"); + pstmtUpdate.setString(4, "JuniperSRXFirewall"); + pstmtUpdate.setLong(5, 0); + pstmtUpdate.setBoolean(6, false); + pstmtUpdate.setString(7, "Enabled"); + pstmtUpdate.setString(8, "Shared"); + pstmtUpdate.setString(9, UUID.randomUUID().toString()); + pstmtUpdate.executeUpdate(); + }catch (SQLException e) { + throw new CloudRuntimeException("Exception while adding SRX firewall device ", e); + } finally { + if (pstmtUpdate != null) { + try { + pstmtUpdate.close(); + } catch (SQLException e) 
{ + } + } + } + } + + private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long zoneId){ + PreparedStatement pstmtUpdate = null; + try{ + // add physical network service provider - F5BigIp + s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); + String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," + + "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,0,0,0,1,0,0,0,0)"; + + pstmtUpdate = conn.prepareStatement(insertPNSP); + pstmtUpdate.setString(1, UUID.randomUUID().toString()); + pstmtUpdate.setLong(2, physicalNetworkId); + pstmtUpdate.setString(3, "F5BigIp"); + pstmtUpdate.setString(4, "Enabled"); + pstmtUpdate.executeUpdate(); + }catch (SQLException e) { + throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider F5BigIp", e); + } finally { + if (pstmtUpdate != null) { + try { + pstmtUpdate.close(); + } catch (SQLException e) { + } + } + } + } + + private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long zoneId){ + PreparedStatement pstmtUpdate = null; + try{ + // add physical network service provider - JuniperSRX + s_logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); + String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," + + "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,1,1,1,0,1,1,0,0)"; + + pstmtUpdate = conn.prepareStatement(insertPNSP); + pstmtUpdate.setString(1, UUID.randomUUID().toString()); + pstmtUpdate.setLong(2, physicalNetworkId); + pstmtUpdate.setString(3, "JuniperSRX"); + pstmtUpdate.setString(4, "Enabled"); + pstmtUpdate.executeUpdate(); + }catch (SQLException e) { + throw new CloudRuntimeException("Exception while adding PhysicalNetworkServiceProvider JuniperSRX" , e); + } finally { + if (pstmtUpdate != null) { + try { + pstmtUpdate.close(); + } catch (SQLException e) { + } + } + } + } + + // This method does two things + // + // 1) ensure that networks using external load balancer/firewall in deployments prior to release 3.0 + // has entry in network_external_lb_device_map and network_external_firewall_device_map + // + // 2) Some keys of host details for F5 and SRX devices were stored in Camel Case in 2.x releases. From 3.0 + // they are made in lowercase. 
On upgrade change the host details name to lower case + private void fixZoneUsingExternalDevices(Connection conn) { + //Get zones to upgrade + List zoneIds = new ArrayList(); + PreparedStatement pstmt = null; + PreparedStatement pstmtUpdate = null; + ResultSet rs = null; + long networkOfferingId, networkId; + long f5DeviceId, f5HostId; + long srxDevivceId, srxHostId; + + try { + pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'"); + rs = pstmt.executeQuery(); + while (rs.next()) { + zoneIds.add(rs.getLong(1)); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to create network to LB & firewall device mapping for networks that use them", e); + } + + if (zoneIds.size() == 0) { + return; // no zones using F5 and SRX devices so return + } + + // find the default network offering created for external devices during upgrade from 2.2.14 + try { + pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' "); + rs = pstmt.executeQuery(); + if (rs.first()) { + networkOfferingId = rs.getLong(1); + } else { + throw new CloudRuntimeException("Cannot upgrade as there is no 'Isolated with external providers' network offering crearted ."); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to create network to LB & firewalla device mapping for networks that use them", e); + } + + for (Long zoneId : zoneIds) { + try { + // find the F5 device id in the zone + pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL"); + pstmt.setLong(1, zoneId); + rs = pstmt.executeQuery(); + if (rs.first()) { + f5HostId = rs.getLong(1); + } else { + throw new CloudRuntimeException("Cannot upgrade as there is no F5 load balancer device found in data center " + zoneId); + } + pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?"); + pstmt.setLong(1, f5HostId); + rs = pstmt.executeQuery(); + if (rs.first()) { + f5DeviceId = rs.getLong(1); + } else { + throw new CloudRuntimeException("Cannot upgrade as there is no F5 load balancer device with host ID " + f5HostId + " found in external_load_balancer_device"); + } + + // find the SRX device id in the zone + pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL"); + pstmt.setLong(1, zoneId); + rs = pstmt.executeQuery(); + if (rs.first()) { + srxHostId = rs.getLong(1); + } else { + throw new CloudRuntimeException("Cannot upgrade as there is no SRX firewall device found in data center " + zoneId); + } + pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?"); + pstmt.setLong(1, srxHostId); + rs = pstmt.executeQuery(); + if (rs.first()) { + srxDevivceId = rs.getLong(1); + } else { + throw new CloudRuntimeException("Cannot upgrade as there is no SRX firewall device found with host ID " + srxHostId + " found in external_firewall_devices"); + } + + // check if network any uses F5 or SRX devices in the zone + pstmt = conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? 
and removed IS NULL"); + pstmt.setLong(1, zoneId); + pstmt.setLong(2, networkOfferingId); + rs = pstmt.executeQuery(); + while (rs.next()) { + // get the network Id + networkId = rs.getLong(1); + + // add mapping for the network in network_external_lb_device_map + String insertLbMapping = "INSERT INTO `cloud`.`network_external_lb_device_map` (uuid, network_id, external_load_balancer_device_id, created) VALUES ( ?, ?, ?, now())"; + pstmtUpdate = conn.prepareStatement(insertLbMapping); + pstmtUpdate.setString(1, UUID.randomUUID().toString()); + pstmtUpdate.setLong(2, networkId); + pstmtUpdate.setLong(3, f5DeviceId); + pstmtUpdate.executeUpdate(); + s_logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); + + // add mapping for the network in network_external_firewall_device_map + String insertFwMapping = "INSERT INTO `cloud`.`network_external_firewall_device_map` (uuid, network_id, external_firewall_device_id, created) VALUES ( ?, ?, ?, now())"; + pstmtUpdate = conn.prepareStatement(insertFwMapping); + pstmtUpdate.setString(1, UUID.randomUUID().toString()); + pstmtUpdate.setLong(2, networkId); + pstmtUpdate.setLong(3, srxDevivceId); + pstmtUpdate.executeUpdate(); + s_logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); + } + + // update host details for F5 and SRX devices + s_logger.debug("Updating the host details for F5 and SRX devices"); + pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?"); + pstmt.setLong(1, f5HostId); + pstmt.setLong(2, srxHostId); + rs = pstmt.executeQuery(); + while (rs.next()) { + long hostId = rs.getLong(1); + String camlCaseName = rs.getString(2); + if (!(camlCaseName.equalsIgnoreCase("numRetries") || + camlCaseName.equalsIgnoreCase("publicZone") || + camlCaseName.equalsIgnoreCase("privateZone") || + camlCaseName.equalsIgnoreCase("publicInterface") || + camlCaseName.equalsIgnoreCase("privateInterface") || + camlCaseName.equalsIgnoreCase("usageInterface") )) { + continue; + } + String lowerCaseName = camlCaseName.toLowerCase(); + pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?"); + pstmt.setString(1, lowerCaseName); + pstmt.setLong(2, hostId); + pstmt.setString(3, camlCaseName); + pstmt.executeUpdate(); + } + s_logger.debug("Successfully updated host details for F5 and SRX devices"); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + s_logger.info("Successfully upgraded network using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); + } + } } diff --git a/engine/schema/src/com/cloud/usage/UsageVmDiskVO.java b/engine/schema/src/com/cloud/usage/UsageVmDiskVO.java new file mode 100644 index 00000000000..6c3ca6940b1 --- /dev/null +++ b/engine/schema/src/com/cloud/usage/UsageVmDiskVO.java @@ -0,0 +1,180 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.usage; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.Id; +import javax.persistence.Table; + +@Entity +@Table(name="usage_vm_disk") +public class UsageVmDiskVO { + @Id + @Column(name="account_id") + private long accountId; + + @Column(name="zone_id") + private long zoneId; + + @Column(name="vm_id") + private Long vmId; + + @Column(name="volume_id") + private Long volumeId; + + @Column(name="io_read") + private long ioRead; + + @Column(name="io_write") + private long ioWrite; + + @Column(name="agg_io_write") + private long aggIOWrite; + + @Column(name="agg_io_read") + private long aggIORead; + + @Column(name="bytes_read") + private long bytesRead; + + @Column(name="bytes_write") + private long bytesWrite; + + @Column(name="agg_bytes_write") + private long aggBytesWrite; + + @Column(name="agg_bytes_read") + private long aggBytesRead; + + @Column(name="event_time_millis") + private long eventTimeMillis = 0; + + protected UsageVmDiskVO() { + } + + public UsageVmDiskVO(Long accountId, long zoneId, Long vmId, Long volumeId, long ioRead, long ioWrite, long aggIORead, long aggIOWrite, + long bytesRead, long bytesWrite, long aggBytesRead, long aggBytesWrite, long eventTimeMillis) { + this.accountId = accountId; + this.zoneId = zoneId; + this.vmId = vmId; + this.volumeId = volumeId; + this.ioRead = ioRead; + this.ioWrite = ioWrite; + this.aggIOWrite = aggIOWrite; + this.aggIORead = aggIORead; + this.bytesRead = bytesRead; + this.bytesWrite = bytesWrite; + this.aggBytesWrite = aggBytesWrite; + this.aggBytesRead = aggBytesRead; + this.eventTimeMillis = eventTimeMillis; + } + + public long getAccountId() { + return accountId; + } + + public void setAccountId(long accountId) { + this.accountId = accountId; + } + + public long getZoneId() { + return zoneId; + } + public void setZoneId(long zoneId) { + this.zoneId = zoneId; + } + + public Long getIORead() { + return ioRead; + } + + public void setIORead(Long ioRead) { + this.ioRead = ioRead; + } + + public Long getIOWrite() { + return ioWrite; + } + + public void setIOWrite(Long ioWrite) { + this.ioWrite = ioWrite; + } + + public Long getBytesRead() { + return bytesRead; + } + + public void setBytesRead(Long bytesRead) { + this.bytesRead = bytesRead; + } + + public Long getBytesWrite() { + return bytesWrite; + } + + public void setBytesWrite(Long bytesWrite) { + this.bytesWrite = bytesWrite; + } + + public long getEventTimeMillis() { + return eventTimeMillis; + } + public void setEventTimeMillis(long eventTimeMillis) { + this.eventTimeMillis = eventTimeMillis; + } + + public Long getVmId() { + return vmId; + } + + public Long getVolumeId() { + return volumeId; + } + + public long getAggIOWrite() { + return aggIOWrite; + } + + public void setAggIOWrite(long aggIOWrite) { + this.aggIOWrite = aggIOWrite; + } + + public long getAggIORead() { + return aggIORead; + } + + public void setAggIORead(long aggIORead) { + 
this.aggIORead = aggIORead; + } + + public long getAggBytesWrite() { + return aggBytesWrite; + } + + public void setAggBytesWrite(long aggBytesWrite) { + this.aggBytesWrite = aggBytesWrite; + } + + public long getAggBytesRead() { + return aggBytesRead; + } + + public void setAggBytesRead(long aggBytesRead) { + this.aggBytesRead = aggBytesRead; + } +} diff --git a/engine/schema/src/com/cloud/usage/dao/UsageDao.java b/engine/schema/src/com/cloud/usage/dao/UsageDao.java index 6d0c162b52b..8a806553112 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageDao.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageDao.java @@ -21,6 +21,7 @@ import java.util.List; import com.cloud.usage.UsageVO; import com.cloud.user.AccountVO; import com.cloud.user.UserStatisticsVO; +import com.cloud.user.VmDiskStatisticsVO; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.SearchCriteria; @@ -36,4 +37,7 @@ public interface UsageDao extends GenericDao { Long getLastAccountId(); Long getLastUserStatsId(); List listPublicTemplatesByAccount(long accountId); + Long getLastVmDiskStatsId(); + void updateVmDiskStats(List vmNetStats); + void saveVmDiskStats(List vmNetStats); } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java index a5867f0656e..f7d5069eef9 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java @@ -32,6 +32,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageVO; import com.cloud.user.AccountVO; import com.cloud.user.UserStatisticsVO; +import com.cloud.user.VmDiskStatisticsVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; @@ -56,6 +57,13 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage private static final String GET_LAST_USER_STATS = "SELECT id FROM cloud_usage.user_statistics ORDER BY id DESC LIMIT 1"; private static final String GET_PUBLIC_TEMPLATES_BY_ACCOUNTID = "SELECT id FROM cloud.vm_template WHERE account_id = ? AND public = '1' AND removed IS NULL"; + private static final String GET_LAST_VM_DISK_STATS = "SELECT id FROM cloud_usage.vm_disk_statistics ORDER BY id DESC LIMIT 1"; + private static final String INSERT_VM_DISK_STATS = "INSERT INTO cloud_usage.vm_disk_statistics (id, data_center_id, account_id, vm_id, volume_id, net_io_read, net_io_write, current_io_read, " + + "current_io_write, agg_io_read, agg_io_write, net_bytes_read, net_bytes_write, current_bytes_read, current_bytes_write, agg_bytes_read, agg_bytes_write) " + + " VALUES (?,?,?,?,?,?,?,?,?,?, ?, ?, ?, ?,?, ?, ?)"; + private static final String UPDATE_VM_DISK_STATS = "UPDATE cloud_usage.vm_disk_statistics SET net_io_read=?, net_io_write=?, current_io_read=?, current_io_write=?, agg_io_read=?, agg_io_write=?, " + + "net_bytes_read=?, net_bytes_write=?, current_bytes_read=?, current_bytes_write=?, agg_bytes_read=?, agg_bytes_write=? 
WHERE id=?"; + protected final static TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT"); public UsageDaoImpl () {} @@ -270,4 +278,101 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage } return templateList; } + + @Override + public Long getLastVmDiskStatsId() { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + String sql = GET_LAST_VM_DISK_STATS; + try { + pstmt = txn.prepareAutoCloseStatement(sql); + ResultSet rs = pstmt.executeQuery(); + if (rs.next()) { + return Long.valueOf(rs.getLong(1)); + } + } catch (Exception ex) { + s_logger.error("error getting last vm disk stats id", ex); + } + return null; + } + + @Override + public void updateVmDiskStats(List vmDiskStats) { + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + String sql = UPDATE_VM_DISK_STATS; + PreparedStatement pstmt = null; + pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection + for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) { + pstmt.setLong(1, vmDiskStat.getNetIORead()); + pstmt.setLong(2, vmDiskStat.getNetIOWrite()); + pstmt.setLong(3, vmDiskStat.getCurrentIORead()); + pstmt.setLong(4, vmDiskStat.getCurrentIOWrite()); + pstmt.setLong(5, vmDiskStat.getAggIORead()); + pstmt.setLong(6, vmDiskStat.getAggIOWrite()); + pstmt.setLong(7, vmDiskStat.getNetBytesRead()); + pstmt.setLong(8, vmDiskStat.getNetBytesWrite()); + pstmt.setLong(9, vmDiskStat.getCurrentBytesRead()); + pstmt.setLong(10, vmDiskStat.getCurrentBytesWrite()); + pstmt.setLong(11, vmDiskStat.getAggBytesRead()); + pstmt.setLong(12, vmDiskStat.getAggBytesWrite()); + pstmt.setLong(13, vmDiskStat.getId()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("error saving vm disk stats to cloud_usage db", ex); + throw new CloudRuntimeException(ex.getMessage()); + } + + } + + @Override + public void saveVmDiskStats(List vmDiskStats) { + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + String sql = INSERT_VM_DISK_STATS; + PreparedStatement pstmt = null; + pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection + for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) { + pstmt.setLong(1, vmDiskStat.getId()); + pstmt.setLong(2, vmDiskStat.getDataCenterId()); + pstmt.setLong(3, vmDiskStat.getAccountId()); + if(vmDiskStat.getVmId() != null){ + pstmt.setLong(4, vmDiskStat.getVmId()); + } else { + pstmt.setNull(4, Types.BIGINT); + } + if(vmDiskStat.getVolumeId() != null){ + pstmt.setLong(5, vmDiskStat.getVolumeId()); + } else { + pstmt.setNull(5, Types.BIGINT); + } + pstmt.setLong(6, vmDiskStat.getNetIORead()); + pstmt.setLong(7, vmDiskStat.getNetIOWrite()); + pstmt.setLong(8, vmDiskStat.getCurrentIORead()); + pstmt.setLong(9, vmDiskStat.getCurrentIOWrite()); + pstmt.setLong(10, vmDiskStat.getAggIORead()); + pstmt.setLong(11, vmDiskStat.getAggIOWrite()); + pstmt.setLong(12, vmDiskStat.getNetBytesRead()); + pstmt.setLong(13, vmDiskStat.getNetBytesWrite()); + pstmt.setLong(14, vmDiskStat.getCurrentBytesRead()); + pstmt.setLong(15, vmDiskStat.getCurrentBytesWrite()); + pstmt.setLong(16, vmDiskStat.getAggBytesRead()); + pstmt.setLong(17, vmDiskStat.getAggBytesWrite()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("error saving vm disk stats to cloud_usage db", ex); + throw new CloudRuntimeException(ex.getMessage()); + } + + } } diff --git 
a/engine/schema/src/com/cloud/usage/dao/UsageNetworkDao.java b/engine/schema/src/com/cloud/usage/dao/UsageNetworkDao.java index 0f7c771b2dc..aa43eab10e9 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageNetworkDao.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageNetworkDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.usage.dao; +import java.util.List; import java.util.Map; import com.cloud.usage.UsageNetworkVO; @@ -24,4 +25,5 @@ import com.cloud.utils.db.GenericDao; public interface UsageNetworkDao extends GenericDao { Map getRecentNetworkStats(); void deleteOldStats(long maxEventTime); + void saveUsageNetworks(List usageNetworks); } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageNetworkDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageNetworkDaoImpl.java index d64fd807890..af8083aa65a 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageNetworkDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageNetworkDaoImpl.java @@ -19,6 +19,7 @@ package com.cloud.usage.dao; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.util.HashMap; +import java.util.List; import java.util.Map; import javax.ejb.Local; @@ -29,6 +30,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageNetworkVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value={UsageNetworkDao.class}) @@ -41,6 +43,8 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im ") joinnet on u.account_id = joinnet.acct_id and u.zone_id = joinnet.z_id and u.event_time_millis = joinnet.max_date"; private static final String DELETE_OLD_STATS = "DELETE FROM cloud_usage.usage_network WHERE event_time_millis < ?"; + private static final String INSERT_USAGE_NETWORK = "INSERT INTO cloud_usage.usage_network (account_id, zone_id, host_id, host_type, network_id, bytes_sent, bytes_received, agg_bytes_received, agg_bytes_sent, event_time_millis) VALUES (?,?,?,?,?,?,?,?,?,?)"; + public UsageNetworkDaoImpl() { } @@ -95,4 +99,34 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im s_logger.error("error deleting old usage network stats", ex); } } + + @Override + public void saveUsageNetworks (List usageNetworks) { + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + String sql = INSERT_USAGE_NETWORK; + PreparedStatement pstmt = null; + pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection + for (UsageNetworkVO usageNetwork : usageNetworks) { + pstmt.setLong(1, usageNetwork.getAccountId()); + pstmt.setLong(2, usageNetwork.getZoneId()); + pstmt.setLong(3, usageNetwork.getHostId()); + pstmt.setString(4, usageNetwork.getHostType()); + pstmt.setLong(5, usageNetwork.getNetworkId()); + pstmt.setLong(6, usageNetwork.getBytesSent()); + pstmt.setLong(7, usageNetwork.getBytesReceived()); + pstmt.setLong(8, usageNetwork.getAggBytesReceived()); + pstmt.setLong(9, usageNetwork.getAggBytesSent()); + pstmt.setLong(10, usageNetwork.getEventTimeMillis()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("error saving usage_network to cloud_usage db", ex); + throw new CloudRuntimeException(ex.getMessage()); + } + } } diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDao.java b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDao.java new file mode 100644 index 00000000000..b72a8d4120f --- /dev/null +++ 
b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDao.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.usage.dao; + +import java.util.List; +import java.util.Map; + +import com.cloud.usage.UsageVmDiskVO; +import com.cloud.utils.db.GenericDao; + +public interface UsageVmDiskDao extends GenericDao { + Map getRecentVmDiskStats(); + void deleteOldStats(long maxEventTime); + void saveUsageVmDisks(List usageVmDisks); +} diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java new file mode 100644 index 00000000000..8436c5955c8 --- /dev/null +++ b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java @@ -0,0 +1,139 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
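+//
+// UsageVmDiskDaoImpl below works against the cloud_usage.usage_vm_disk table:
+// SELECT_LATEST_STATS returns the most recent sample per account and zone, DELETE_OLD_STATS
+// removes samples older than a given event time, and INSERT_USAGE_VM_DISK batch-inserts new
+// UsageVmDiskVO samples.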
+package com.cloud.usage.dao; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.usage.UsageVmDiskVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; + +@Component +@Local(value={UsageVmDiskDao.class}) +public class UsageVmDiskDaoImpl extends GenericDaoBase implements UsageVmDiskDao { + private static final Logger s_logger = Logger.getLogger(UsageVMInstanceDaoImpl.class.getName()); + private static final String SELECT_LATEST_STATS = "SELECT uvd.account_id, uvd.zone_id, uvd.vm_id, uvd.volume_id, uvd.io_read, uvd.io_write, uvd.agg_io_read, uvd.agg_io_write, " + + "uvd.bytes_read, uvd.bytes_write, uvd.agg_bytes_read, uvd.agg_bytes_write, uvd.event_time_millis " + + "FROM cloud_usage.usage_vm_disk uvd INNER JOIN (SELECT vmdiskusage.account_id as acct_id, vmdiskusage.zone_id as z_id, max(vmdiskusage.event_time_millis) as max_date " + + "FROM cloud_usage.usage_vm_disk vmdiskusage " + + "GROUP BY vmdiskusage.account_id, vmdiskusage.zone_id " + + ") joinnet on uvd.account_id = joinnet.acct_id and uvd.zone_id = joinnet.z_id and uvd.event_time_millis = joinnet.max_date"; + private static final String DELETE_OLD_STATS = "DELETE FROM cloud_usage.usage_vm_disk WHERE event_time_millis < ?"; + + private static final String INSERT_USAGE_VM_DISK = "INSERT INTO cloud_usage.usage_vm_disk (account_id, zone_id, vm_id, volume_id, io_read, io_write, agg_io_read, agg_io_write, bytes_read, bytes_write, agg_bytes_read, agg_bytes_write, event_time_millis) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + public UsageVmDiskDaoImpl() { + } + + @Override + public Map getRecentVmDiskStats() { + Transaction txn = Transaction.open(Transaction.USAGE_DB); + String sql = SELECT_LATEST_STATS; + PreparedStatement pstmt = null; + try { + pstmt = txn.prepareAutoCloseStatement(sql); + ResultSet rs = pstmt.executeQuery(); + Map returnMap = new HashMap(); + while (rs.next()) { + long accountId = rs.getLong(1); + long zoneId = rs.getLong(2); + long vmId = rs.getLong(3); + Long volumeId = rs.getLong(4); + long ioRead = rs.getLong(5); + long ioWrite = rs.getLong(6); + long aggIORead = rs.getLong(7); + long aggIOWrite = rs.getLong(8); + long bytesRead = rs.getLong(9); + long bytesWrite = rs.getLong(10); + long aggBytesRead = rs.getLong(11); + long aggBytesWrite = rs.getLong(12); + long eventTimeMillis = rs.getLong(13); + if(vmId != 0){ + returnMap.put(zoneId + "-" + accountId+ "-Vm-" + vmId+ "-Disk-" + volumeId, new UsageVmDiskVO(accountId, zoneId, vmId, volumeId, ioRead, ioWrite, aggIORead, aggIOWrite, bytesRead, bytesWrite, aggBytesRead, aggBytesWrite, eventTimeMillis)); + } else { + returnMap.put(zoneId + "-" + accountId, new UsageVmDiskVO(accountId, zoneId, vmId, volumeId, ioRead, ioWrite, aggIORead, aggIOWrite, bytesRead, bytesWrite, aggBytesRead, aggBytesWrite, eventTimeMillis)); + } + } + return returnMap; + } catch (Exception ex) { + s_logger.error("error getting recent usage disk stats", ex); + } finally { + txn.close(); + } + return null; + } + + @Override + public void deleteOldStats(long maxEventTime) { + Transaction txn = Transaction.currentTxn(); + String sql = DELETE_OLD_STATS; + PreparedStatement pstmt = null; + try { + txn.start(); + pstmt = txn.prepareAutoCloseStatement(sql); + pstmt.setLong(1, maxEventTime); + 
pstmt.executeUpdate(); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("error deleting old usage disk stats", ex); + } + } + + @Override + public void saveUsageVmDisks(List usageVmDisks) { + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + String sql = INSERT_USAGE_VM_DISK; + PreparedStatement pstmt = null; + pstmt = txn.prepareAutoCloseStatement(sql); // in reality I just want CLOUD_USAGE dataSource connection + for (UsageVmDiskVO usageVmDisk : usageVmDisks) { + pstmt.setLong(1, usageVmDisk.getAccountId()); + pstmt.setLong(2, usageVmDisk.getZoneId()); + pstmt.setLong(3, usageVmDisk.getVmId()); + pstmt.setLong(4, usageVmDisk.getVolumeId()); + pstmt.setLong(5, usageVmDisk.getIORead()); + pstmt.setLong(6, usageVmDisk.getIOWrite()); + pstmt.setLong(7, usageVmDisk.getAggIORead()); + pstmt.setLong(8, usageVmDisk.getAggIOWrite()); + pstmt.setLong(9, usageVmDisk.getBytesRead()); + pstmt.setLong(10, usageVmDisk.getBytesWrite()); + pstmt.setLong(11, usageVmDisk.getAggBytesRead()); + pstmt.setLong(12, usageVmDisk.getAggBytesWrite()); + pstmt.setLong(13, usageVmDisk.getEventTimeMillis()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + txn.commit(); + } catch (Exception ex) { + txn.rollback(); + s_logger.error("error saving usage_vm_disk to cloud_usage db", ex); + throw new CloudRuntimeException(ex.getMessage()); + } + } +} diff --git a/engine/schema/src/com/cloud/user/VmDiskStatisticsVO.java b/engine/schema/src/com/cloud/user/VmDiskStatisticsVO.java new file mode 100644 index 00000000000..d1842c3042c --- /dev/null +++ b/engine/schema/src/com/cloud/user/VmDiskStatisticsVO.java @@ -0,0 +1,216 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
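getRecentVmDiskStats() above keys its result map as zoneId-accountId-Vm-vmId-Disk-volumeId (falling back to zoneId-accountId when vm_id is 0). A hypothetical caller-side sketch, not part of the patch, showing how that composite key can be used to look up the previous sample for a VM/volume pair and compute a delta:

    import java.util.Map;

    import com.cloud.usage.UsageVmDiskVO;

    // Illustration only: locate the last recorded sample by the same key format
    // that getRecentVmDiskStats() uses, then compute the bytes read since then.
    public class RecentVmDiskStatsLookupSketch {
        public static long bytesReadSinceLastSample(Map<String, UsageVmDiskVO> recentStats,
                long zoneId, long accountId, long vmId, long volumeId, long currentAggBytesRead) {
            String key = zoneId + "-" + accountId + "-Vm-" + vmId + "-Disk-" + volumeId;
            UsageVmDiskVO previous = recentStats.get(key);
            if (previous == null) {
                return currentAggBytesRead; // no earlier sample recorded for this pair
            }
            return currentAggBytesRead - previous.getAggBytesRead();
        }
    }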
+package com.cloud.user; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +@Entity +@Table(name="vm_disk_statistics") +public class VmDiskStatisticsVO { + @Id + @GeneratedValue(strategy=GenerationType.IDENTITY) + @Column(name="id") + private Long id; + + @Column(name="data_center_id", updatable=false) + private long dataCenterId; + + @Column(name="account_id", updatable=false) + private long accountId; + + @Column(name="vm_id") + private Long vmId; + + @Column(name="volume_id") + private Long volumeId; + + @Column(name="net_io_read") + private long netIORead; + + @Column(name="net_io_write") + private long netIOWrite; + + @Column(name="current_io_read") + private long currentIORead; + + @Column(name="current_io_write") + private long currentIOWrite; + + @Column(name="agg_io_read") + private long aggIORead; + + @Column(name="agg_io_write") + private long aggIOWrite; + + @Column(name="net_bytes_read") + private long netBytesRead; + + @Column(name="net_bytes_write") + private long netBytesWrite; + + @Column(name="current_bytes_read") + private long currentBytesRead; + + @Column(name="current_bytes_write") + private long currentBytesWrite; + + @Column(name="agg_bytes_read") + private long aggBytesRead; + + @Column(name="agg_bytes_write") + private long aggBytesWrite; + + protected VmDiskStatisticsVO() { + } + + public VmDiskStatisticsVO(long accountId, long dcId, Long vmId, Long volumeId) { + this.accountId = accountId; + this.dataCenterId = dcId; + this.vmId = vmId; + this.volumeId = volumeId; + this.netBytesRead = 0; + this.netBytesWrite = 0; + this.currentBytesRead = 0; + this.currentBytesWrite = 0; + this.netBytesRead = 0; + this.netBytesWrite = 0; + this.currentBytesRead = 0; + this.currentBytesWrite = 0; + } + + public long getAccountId() { + return accountId; + } + + public Long getId() { + return id; + } + + public long getDataCenterId() { + return dataCenterId; + } + + public Long getVmId() { + return vmId; + } + + public Long getVolumeId() { + return volumeId; + } + + public long getCurrentIORead() { + return currentIORead; + } + + public void setCurrentIORead(long currentIORead) { + this.currentIORead = currentIORead; + } + + public long getCurrentIOWrite() { + return currentIOWrite; + } + + public void setCurrentIOWrite(long currentIOWrite) { + this.currentIOWrite = currentIOWrite; + } + + public long getNetIORead() { + return netIORead; + } + + public long getNetIOWrite() { + return netIOWrite; + } + + public void setNetIORead(long netIORead) { + this.netIORead = netIORead; + } + + public void setNetIOWrite(long netIOWrite) { + this.netIOWrite = netIOWrite; + } + + public long getAggIORead() { + return aggIORead; + } + + public void setAggIORead(long aggIORead) { + this.aggIORead = aggIORead; + } + + public long getAggIOWrite() { + return aggIOWrite; + } + + public void setAggIOWrite(long aggIOWrite) { + this.aggIOWrite = aggIOWrite; + } + + public long getCurrentBytesRead() { + return currentBytesRead; + } + + public void setCurrentBytesRead(long currentBytesRead) { + this.currentBytesRead = currentBytesRead; + } + + public long getCurrentBytesWrite() { + return currentBytesWrite; + } + + public void setCurrentBytesWrite(long currentBytesWrite) { + this.currentBytesWrite = currentBytesWrite; + } + + public long getNetBytesRead() { + return netBytesRead; + } + + public long getNetBytesWrite() { + return 
netBytesWrite; + } + + public void setNetBytesRead(long netBytesRead) { + this.netBytesRead = netBytesRead; + } + + public void setNetBytesWrite(long netBytesWrite) { + this.netBytesWrite = netBytesWrite; + } + + public long getAggBytesRead() { + return aggBytesRead; + } + + public void setAggBytesRead(long aggBytesRead) { + this.aggBytesRead = aggBytesRead; + } + + public long getAggBytesWrite() { + return aggBytesWrite; + } + + public void setAggBytesWrite(long aggBytesWrite) { + this.aggBytesWrite = aggBytesWrite; + } + +} diff --git a/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDao.java b/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDao.java new file mode 100644 index 00000000000..55206a61935 --- /dev/null +++ b/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDao.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.user.dao; + +import java.util.Date; +import java.util.List; + +import com.cloud.user.VmDiskStatisticsVO; +import com.cloud.utils.db.GenericDao; + +public interface VmDiskStatisticsDao extends GenericDao { + VmDiskStatisticsVO findBy(long accountId, long dcId, long vmId, long volumeId); + + VmDiskStatisticsVO lock(long accountId, long dcId, long vmId, long volumeId); + + List listBy(long accountId); + + List listActiveAndRecentlyDeleted(Date minRemovedDate, int startIndex, int limit); + + List listUpdatedStats(); +} diff --git a/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java b/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java new file mode 100644 index 00000000000..02f3406c497 --- /dev/null +++ b/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
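vm_disk_statistics carries three counter groups per VM/volume pair: current_*, net_* and agg_*. One reading of their roles (an interpretation, not spelled out in the patch): current_* is what the hypervisor currently reports, net_* is the total carried over from earlier periods, and agg_* is what has already been handed off as usage. The listUpdatedStats() query in the DAO implementation below selects exactly the rows where the predicate sketched here is true, i.e. rows with I/O not yet aggregated.

    import com.cloud.user.VmDiskStatisticsVO;

    // A row has unaggregated I/O when the aggregated counters lag behind the
    // carried-over plus current counters, matching the WHERE clause of
    // UPDATED_VM_NETWORK_STATS_SEARCH below.
    public class VmDiskStatsInvariantSketch {
        public static boolean hasUnaggregatedIo(VmDiskStatisticsVO stat) {
            return stat.getAggIORead() < stat.getNetIORead() + stat.getCurrentIORead()
                    || stat.getAggIOWrite() < stat.getNetIOWrite() + stat.getCurrentIOWrite()
                    || stat.getAggBytesRead() < stat.getNetBytesRead() + stat.getCurrentBytesRead()
                    || stat.getAggBytesWrite() < stat.getNetBytesWrite() + stat.getCurrentBytesWrite();
        }
    }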
+package com.cloud.user.dao; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.TimeZone; + +import javax.ejb.Local; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.user.VmDiskStatisticsVO; +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; + +@Component +@Local(value={VmDiskStatisticsDao.class}) +public class VmDiskStatisticsDaoImpl extends GenericDaoBase implements VmDiskStatisticsDao { + private static final Logger s_logger = Logger.getLogger(VmDiskStatisticsDaoImpl.class); + private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH = "SELECT vns.id, vns.data_center_id, vns.account_id, vns.vm_id, vns.volume_id, vns.agg_io_read, vns.agg_io_write, vns.agg_bytes_read, vns.agg_bytes_write " + + "FROM vm_disk_statistics vns, account a " + + "WHERE vns.account_id = a.id AND (a.removed IS NULL OR a.removed >= ?) " + + "ORDER BY vns.id"; + private static final String UPDATED_VM_NETWORK_STATS_SEARCH = "SELECT id, current_io_read, current_io_write, net_io_read, net_io_write, agg_io_read, agg_io_write, " + + "current_bytes_read, current_bytes_write, net_bytes_read, net_bytes_write, agg_bytes_read, agg_bytes_write " + + "from vm_disk_statistics " + + "where (agg_io_read < net_io_read + current_io_read) OR (agg_io_write < net_io_write + current_io_write) OR " + + "(agg_bytes_read < net_bytes_read + current_bytes_read) OR (agg_bytes_write < net_bytes_write + current_bytes_write)"; + private final SearchBuilder AllFieldsSearch; + private final SearchBuilder AccountSearch; + + + public VmDiskStatisticsDaoImpl() { + AccountSearch = createSearchBuilder(); + AccountSearch.and("account", AccountSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AccountSearch.done(); + + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("account", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("dc", AllFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("volume", AllFieldsSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("vm", AllFieldsSearch.entity().getVmId(), SearchCriteria.Op.EQ); + AllFieldsSearch.done(); + } + + @Override + public VmDiskStatisticsVO findBy(long accountId, long dcId, long vmId, long volumeId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("account", accountId); + sc.setParameters("dc", dcId); + sc.setParameters("volume", volumeId); + sc.setParameters("vm", vmId); + return findOneBy(sc); + } + + @Override + public VmDiskStatisticsVO lock(long accountId, long dcId, long vmId, long volumeId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("account", accountId); + sc.setParameters("dc", dcId); + sc.setParameters("volume", volumeId); + sc.setParameters("vm", vmId); + return lockOneRandomRow(sc, true); + } + + @Override + public List listBy(long accountId) { + SearchCriteria sc = AccountSearch.create(); + sc.setParameters("account", accountId); + return search(sc, null); + } + + @Override + public List listActiveAndRecentlyDeleted(Date minRemovedDate, int startIndex, int limit) { + List vmDiskStats = new ArrayList(); + if (minRemovedDate == null) return vmDiskStats; + + Transaction txn = Transaction.currentTxn(); + try { + String sql 
= ACTIVE_AND_RECENTLY_DELETED_SEARCH + " LIMIT " + startIndex + "," + limit; + PreparedStatement pstmt = null; + pstmt = txn.prepareAutoCloseStatement(sql); + pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), minRemovedDate)); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + vmDiskStats.add(toEntityBean(rs, false)); + } + } catch (Exception ex) { + s_logger.error("error listing active and recently deleted vm disk stats", ex); + } + return vmDiskStats; + } + + @Override + public List<VmDiskStatisticsVO> listUpdatedStats() { + List<VmDiskStatisticsVO> vmDiskStats = new ArrayList<VmDiskStatisticsVO>(); + + Transaction txn = Transaction.currentTxn(); + try { + PreparedStatement pstmt = null; + pstmt = txn.prepareAutoCloseStatement(UPDATED_VM_NETWORK_STATS_SEARCH); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + vmDiskStats.add(toEntityBean(rs, false)); + } + } catch (Exception ex) { + s_logger.error("error listing updated vm disk stats", ex); + } + return vmDiskStats; + } + +} diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDao.java b/engine/schema/src/com/cloud/vm/dao/UserVmDao.java index e7cd61bddfe..b4f9991c99b 100755 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDao.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDao.java @@ -54,10 +54,9 @@ public interface UserVmDao extends GenericDao<UserVmVO, Long> { /** * List user vm instances with virtualized networking (i.e. not direct attached networking) for the given account and datacenter * @param accountId will search for vm instances belonging to this account - * @param dcId will search for vm instances in this zone * @return the list of vm instances owned by the account in the given data center that have virtualized networking (not direct attached networking) */ - List<UserVmVO> listVirtualNetworkInstancesByAcctAndZone(long accountId, long dcId, long networkId); + List<UserVmVO> listVirtualNetworkInstancesByAcctAndNetwork(long accountId, long networkId); List<UserVmVO> listByNetworkIdAndStates(long networkId, State...
states); diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java index 5e8be1054a9..1c11563b270 100755 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -283,11 +283,10 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } @Override - public List listVirtualNetworkInstancesByAcctAndZone(long accountId, long dcId, long networkId) { + public List listVirtualNetworkInstancesByAcctAndNetwork(long accountId, long networkId) { SearchCriteria sc = AccountDataCenterVirtualSearch.create(); sc.setParameters("account", accountId); - sc.setParameters("dc", dcId); sc.setJoinParameters("nicSearch", "networkId", networkId); return listBy(sc); diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmData.java b/engine/schema/src/com/cloud/vm/dao/UserVmData.java index 674fc005a85..6622a7dc8e6 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmData.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmData.java @@ -57,6 +57,10 @@ public class UserVmData { private String cpuUsed; private Long networkKbsRead; private Long networkKbsWrite; + private Long diskKbsRead; + private Long diskKbsWrite; + private Long diskIORead; + private Long diskIOWrite; private Long guestOsId; private Long rootDeviceId; private String rootDeviceType; @@ -364,6 +368,38 @@ public class UserVmData { this.networkKbsWrite = networkKbsWrite; } + public Long getDiskKbsRead() { + return diskKbsRead; + } + + public void setDiskKbsRead(Long diskKbsRead) { + this.diskKbsRead = diskKbsRead; + } + + public Long getDiskKbsWrite() { + return diskKbsWrite; + } + + public void setDiskKbsWrite(Long diskKbsWrite) { + this.diskKbsWrite = diskKbsWrite; + } + + public Long getDiskIORead() { + return diskIORead; + } + + public void setDiskIORead(Long diskIORead) { + this.diskIORead = diskIORead; + } + + public Long getDiskIOWrite() { + return diskIOWrite; + } + + public void setDiskIOWrite(Long diskIOWrite) { + this.diskIOWrite = diskIOWrite; + } + public Long getGuestOsId() { return guestOsId; } diff --git a/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDao.java b/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDao.java index c385e62f6ab..47f1d361216 100644 --- a/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDao.java +++ b/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDao.java @@ -31,5 +31,6 @@ public interface ApplicationLoadBalancerRuleDao extends GenericDao listBySourceIpAndNotRevoked(Ip sourceIp, long sourceNetworkId); List listLbIpsBySourceIpNetworkIdAndScheme(long sourceIpNetworkId, Scheme scheme); + long countBySourceIpAndNotRevoked(Ip sourceIp, long sourceIpNetworkId); } diff --git a/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDaoImpl.java b/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDaoImpl.java index 880c67e732c..6036b5a2d60 100644 --- a/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/lb/dao/ApplicationLoadBalancerRuleDaoImpl.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.lb.ApplicationLoadBalancerRuleVO; import org.springframework.stereotype.Component; import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRule.State; import com.cloud.network.rules.LoadBalancerContainer.Scheme; import 
com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -41,8 +42,8 @@ public class ApplicationLoadBalancerRuleDaoImpl extends GenericDaoBase listIps; final GenericSearchBuilder CountBy; protected final SearchBuilder NotRevokedSearch; - - + final GenericSearchBuilder CountNotRevoked; + protected ApplicationLoadBalancerRuleDaoImpl() { AllFieldsSearch = createSearchBuilder(); @@ -69,6 +70,13 @@ public class ApplicationLoadBalancerRuleDaoImpl extends GenericDaoBase sc = CountNotRevoked.create(); + sc.setParameters("sourceIp", sourceIp); + sc.setParameters("sourceIpNetworkId", sourceIpNetworkId); + sc.setParameters("state", State.Revoke); + List results = customSearch(sc, null); + return results.get(0); + } + } diff --git a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerDaoImpl.java b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerDaoImpl.java index 0020f5d4256..ba9dea067ca 100644 --- a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerDaoImpl.java @@ -31,12 +31,17 @@ public class GlobalLoadBalancerDaoImpl extends GenericDaoBase listByDomainSearch; private final SearchBuilder listByRegionIDSearch; + private final SearchBuilder AccountIdSearch; public GlobalLoadBalancerDaoImpl() { listByDomainSearch = createSearchBuilder(); listByDomainSearch.and("gslbDomain", listByDomainSearch.entity().getGslbDomain(), SearchCriteria.Op.EQ); listByDomainSearch.done(); + AccountIdSearch = createSearchBuilder(); + AccountIdSearch.and("account", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AccountIdSearch.done(); + listByRegionIDSearch = createSearchBuilder(); listByRegionIDSearch.and("region", listByRegionIDSearch.entity().getRegion(), SearchCriteria.Op.EQ); listByRegionIDSearch.done(); @@ -49,6 +54,13 @@ public class GlobalLoadBalancerDaoImpl extends GenericDaoBase listByAccount(long accountId) { + SearchCriteria sc = AccountIdSearch.create(); + sc.setParameters("account", accountId); + return listBy(sc, null); + } + @Override public GlobalLoadBalancerRuleVO findByDomainName(String domainName) { SearchCriteria sc = listByDomainSearch.create(); diff --git a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleDao.java b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleDao.java index 1b44caa444a..cfc86256c42 100644 --- a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleDao.java +++ b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleDao.java @@ -24,5 +24,8 @@ import java.util.List; public interface GlobalLoadBalancerRuleDao extends GenericDao { List listByRegionId(int regionId); + + List listByAccount(long accountId); + GlobalLoadBalancerRuleVO findByDomainName(String domainName); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 010e4685b20..e16703ecf2f 100755 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -180,6 +180,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement return false; } + DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId()); if 
(diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) { s_logger.debug("Skipping RBD pool " + pool.getName() diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 8155edfb8cd..29b3400bbbb 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -59,14 +59,15 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { List suitablePools = new ArrayList(); HypervisorType hypervisor = dskCh.getHypervisorType(); if (hypervisor != null) { - if (hypervisor != HypervisorType.KVM) { - s_logger.debug("Only kvm supports zone wide storage"); + if (hypervisor != HypervisorType.KVM && hypervisor != HypervisorType.VMware) { + s_logger.debug("Only kvm, VMware hypervisors are enabled to support zone wide storage"); return suitablePools; } } - List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), - dskCh.getTags()); + List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags()); + List storagePoolsByHypervisor = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType()); + storagePools.retainAll(storagePoolsByHypervisor); // add remaining pools in zone, that did not match tags, to avoid set List allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null); diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 501f0447cb7..70e5a5a7b23 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -38,6 +38,7 @@ import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; @@ -135,6 +136,15 @@ public class PrimaryDataStoreHelper { return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); } + public DataStore attachZone(DataStore store, HypervisorType hypervisor) { + StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); + pool.setScope(ScopeType.ZONE); + pool.setHypervisor(hypervisor); + pool.setStatus(StoragePoolStatus.Up); + this.dataStoreDao.update(pool.getId(), pool); + return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); + } + public boolean maintain(DataStore store) { StoragePoolVO pool = this.dataStoreDao.findById(store.getId()); pool.setStatus(StoragePoolStatus.Maintenance); diff --git a/packaging/centos63/cloud-management.rc b/packaging/centos63/cloud-management.rc index 6fd435b555d..35f31b28538 100755 --- a/packaging/centos63/cloud-management.rc +++ b/packaging/centos63/cloud-management.rc @@ -16,7 +16,7 @@ # specific language governing permissions and limitations # under the License. 
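The ZoneWideStoragePoolAllocator change above narrows the candidates to the intersection of two queries, pools matching the disk offering tags and zone-wide pools registered for the requested hypervisor, using List.retainAll(). A toy example of that intersection; note that retainAll() relies on equals(), so both DAO queries must return comparable pool objects. Pool names here are made up.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Keep only the pools present in both candidate lists.
    public class PoolIntersectionSketch {
        public static void main(String[] args) {
            List<String> byTags = new ArrayList<String>(Arrays.asList("nfs-pool-a", "rbd-pool-b", "nfs-pool-c"));
            List<String> byHypervisor = Arrays.asList("rbd-pool-b", "nfs-pool-c", "nfs-pool-d");
            byTags.retainAll(byHypervisor);
            System.out.println(byTags); // prints [rbd-pool-b, nfs-pool-c]
        }
    }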
# -# cloud-management This shell script takes care of starting and stopping Tomcat +# cloudstack-management This shell script takes care of starting and stopping Tomcat # # chkconfig: - 80 20 # @@ -56,13 +56,13 @@ stop() { let count="${count}+1" done if [ "$(ps --pid $pid | grep -c $pid)" -eq "0" ]; then - log_success_msg "Stopping cloud-management:" + log_success_msg "Stopping ${NAME}:" else - log_failure_msg "Stopping cloud-management:" + log_failure_msg "Stopping ${NAME}:" fi else - echo "Cannot find PID file of Cloud-management" - log_failure_msg "Stopping cloud-management:" + echo "Cannot find PID file of ${NAME}" + log_failure_msg "Stopping ${NAME}:" fi } diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec index 1cde336e7b1..1f112ddd686 100644 --- a/packaging/centos63/cloud.spec +++ b/packaging/centos63/cloud.spec @@ -165,11 +165,11 @@ echo VERSION=%{_maventag} >> build/replace.properties echo PACKAGE=%{name} >> build/replace.properties if [ "%{_ossnoss}" == "NONOSS" -o "%{_ossnoss}" == "nonoss" ] ; then - echo "Executing mvn packaging for NONOSS ..." - mvn -Pawsapi,systemvm -Dnonoss package + echo "Executing mvn packaging for NONOSS ..." + mvn -Pawsapi,systemvm -Dnonoss package clean install else - echo "Executing mvn packaging for OSS ..." - mvn -Pawsapi package -Dsystemvm + echo "Executing mvn packaging for OSS ..." + mvn -Pawsapi package -Dsystemvm clean install fi %install @@ -216,6 +216,8 @@ ln -sf /var/log/%{name}/management ${RPM_BUILD_ROOT}%{_datadir}/%{name}-manageme ln -sf /var/cache/%{name}/management/temp ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/temp ln -sf /var/cache/%{name}/management/work ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/work +/bin/touch ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/management/catalina.out + install -D client/target/utilities/bin/cloud-migrate-databases ${RPM_BUILD_ROOT}%{_bindir}/%{name}-migrate-databases install -D client/target/utilities/bin/cloud-set-guest-password ${RPM_BUILD_ROOT}%{_bindir}/%{name}-set-guest-password install -D client/target/utilities/bin/cloud-set-guest-sshkey ${RPM_BUILD_ROOT}%{_bindir}/%{name}-set-guest-sshkey @@ -519,6 +521,7 @@ fi %dir %attr(0770,root,root) %{_localstatedir}/log/%{name}-management %{_defaultdocdir}/%{name}-management-%{version}/LICENSE %{_defaultdocdir}/%{name}-management-%{version}/NOTICE +%attr(0644,cloud,cloud) %{_localstatedir}/log/%{name}/management/catalina.out %files agent %attr(0755,root,root) %{_bindir}/%{name}-setup-agent diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-early-config b/patches/systemvm/debian/config/etc/init.d/cloud-early-config index d918670edab..0dcd5710664 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/patches/systemvm/debian/config/etc/init.d/cloud-early-config @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash ### BEGIN INIT INFO # Provides: cloud-early-config # Required-Start: mountkernfs $local_fs @@ -33,6 +33,9 @@ PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin" # Fix haproxy directory issue mkdir -p /var/lib/haproxy +# Clear boot up flag, it would be created by rc.local after boot up done +rm /var/cache/cloud/boot_up_done + [ -x /sbin/ifup ] || exit 0 . 
/lib/lsb/init-functions @@ -84,14 +87,13 @@ EOF hypervisor() { [ -d /proc/xen ] && mount -t xenfs none /proc/xen + [ -d /proc/xen ] && echo "xen-domU" && return 0 local try=$([ -x /usr/sbin/virt-what ] && virt-what | tail -1) [ "$try" != "" ] && echo $try && return 0 vmware-checkvm &> /dev/null && echo "vmware" && return 0 - [ -d /proc/xen ] && echo "xen-domU" && return 0 - grep -q QEMU /proc/cpuinfo && echo "kvm" && return 0 grep -q QEMU /var/log/messages && echo "kvm" && return 0 @@ -340,6 +342,22 @@ enable_svc() { [ -f $cfg ] && sed -i "s/ENABLED=.*$/ENABLED=$enabled/" $cfg && return } + +enable_irqbalance() { + local enabled=$1 + local proc=0 + + proc=$(cat /proc/cpuinfo | grep "processor" | wc -l) + if [ $proc -le 1 ] && [ $enabled -eq 1 ] + then + enabled=0 + fi + + log_it "Processors = $proc Enable service ${svc} = $enabled" + local cfg=/etc/default/irqbalance + [ -f $cfg ] && sed -i "s/ENABLED=.*$/ENABLED=$enabled/" $cfg && return +} + disable_hvc() { [ ! -d /proc/xen ] && sed -i 's/^vc/#vc/' /etc/inittab && telinit q [ -d /proc/xen ] && sed -i 's/^#vc/vc/' /etc/inittab && telinit q @@ -696,6 +714,7 @@ setup_router() { enable_svc dnsmasq 1 enable_svc haproxy 1 + enable_irqbalance 1 enable_svc cloud-passwd-srvr 1 enable_svc cloud 0 disable_rpfilter_domR @@ -775,6 +794,7 @@ EOF enable_svc dnsmasq 1 enable_svc haproxy 1 + enable_irqbalance 1 enable_svc cloud 0 disable_rpfilter enable_fwding 1 @@ -801,6 +821,7 @@ setup_dhcpsrvr() { enable_svc dnsmasq 1 enable_svc haproxy 0 + enable_irqbalance 0 enable_svc cloud-passwd-srvr 1 enable_svc cloud 0 enable_fwding 0 @@ -853,6 +874,7 @@ setup_secstorage() { disable_rpfilter enable_fwding 0 enable_svc haproxy 0 + enable_irqbalance 0 enable_svc dnsmasq 0 enable_svc cloud-passwd-srvr 0 enable_svc cloud 1 @@ -877,6 +899,7 @@ setup_console_proxy() { disable_rpfilter enable_fwding 0 enable_svc haproxy 0 + enable_irqbalance 0 enable_svc dnsmasq 0 enable_svc cloud-passwd-srvr 0 enable_svc cloud 1 @@ -903,6 +926,7 @@ setup_elbvm() { enable_fwding 0 enable_svc haproxy 0 + enable_irqbalance 0 enable_svc dnsmasq 0 enable_svc cloud-passwd-srvr 0 enable_svc cloud 0 @@ -925,6 +949,7 @@ setup_ilbvm() { enable_fwding 0 enable_svc haproxy 1 + enable_irqbalance 1 enable_svc dnsmasq 0 enable_svc cloud-passwd-srvr 0 enable_svc cloud 0 diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-router b/patches/systemvm/debian/config/etc/iptables/iptables-router index 3f5bc5f736b..b214e4025fe 100644 --- a/patches/systemvm/debian/config/etc/iptables/iptables-router +++ b/patches/systemvm/debian/config/etc/iptables/iptables-router @@ -37,6 +37,7 @@ COMMIT -A INPUT -i eth0 -p tcp -m tcp --dport 53 -j ACCEPT -A INPUT -i eth1 -p tcp -m state --state NEW --dport 3922 -j ACCEPT -A INPUT -i eth0 -p tcp -m state --state NEW --dport 80 -j ACCEPT +-A INPUT -i eth0 -p tcp -m state --state NEW --dport 8080 -j ACCEPT -A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT -A FORWARD -i eth2 -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT -A FORWARD -i eth0 -o eth0 -m state --state NEW -j ACCEPT diff --git a/patches/systemvm/debian/config/etc/rc.local b/patches/systemvm/debian/config/etc/rc.local index cb434a23526..6119497596b 100755 --- a/patches/systemvm/debian/config/etc/rc.local +++ b/patches/systemvm/debian/config/etc/rc.local @@ -13,3 +13,6 @@ do logger -t cloud "Stopping $svc" service $svc stop done + +date > /var/cache/cloud/boot_up_done +logger -t cloud "Boot up process done" diff --git 
a/patches/systemvm/debian/config/opt/cloud/bin/get_template_version.sh b/patches/systemvm/debian/config/opt/cloud/bin/get_template_version.sh index 298bc380f82..233ec983f8e 100755 --- a/patches/systemvm/debian/config/opt/cloud/bin/get_template_version.sh +++ b/patches/systemvm/debian/config/opt/cloud/bin/get_template_version.sh @@ -16,5 +16,31 @@ # specific language governing permissions and limitations # under the License. +# As the last command sent to the router before any rules operation, wait until boot up is done + +__TIMEOUT=60 +__FLAGFILE=/var/cache/cloud/boot_up_done +done=0 +for i in `seq 1 $(($__TIMEOUT * 10))` +do + if [ -e $__FLAGFILE ] + then + done=1 + break + fi + sleep 0.1 + if [ $((i % 10)) -eq 0 ] + then + logger -t cloud "Still waiting for VM boot up to complete" + fi +done + +if [ $done -eq 0 ] +then + # declare we failed the booting process + echo "Waited 60 seconds but boot up has not completed" + exit +fi + echo -n `cat /etc/cloudstack-release`'&' cat /var/cache/cloud/cloud-scripts-signature diff --git a/patches/systemvm/debian/config/opt/cloud/bin/ipassoc.sh b/patches/systemvm/debian/config/opt/cloud/bin/ipassoc.sh index f326fac9e54..d23ec00de5e 100755 --- a/patches/systemvm/debian/config/opt/cloud/bin/ipassoc.sh +++ b/patches/systemvm/debian/config/opt/cloud/bin/ipassoc.sh @@ -227,7 +227,8 @@ add_first_ip() { if [ $if_keep_state -ne 1 -o $old_state -ne 0 ] then sudo ip link set $ethDev up - sudo arping -c 3 -I $ethDev -A -U -s $ipNoMask $ipNoMask; + sudo arping -c 1 -I $ethDev -A -U -s $ipNoMask $ipNoMask; + sudo arping -c 1 -I $ethDev -A -U -s $ipNoMask $ipNoMask; fi add_routing $1 @@ -273,7 +274,8 @@ add_an_ip () { if [ $if_keep_state -ne 1 -o $old_state -ne 0 ] then sudo ip link set $ethDev up - sudo arping -c 3 -I $ethDev -A -U -s $ipNoMask $ipNoMask; + sudo arping -c 1 -I $ethDev -A -U -s $ipNoMask $ipNoMask; + sudo arping -c 1 -I $ethDev -A -U -s $ipNoMask $ipNoMask; fi add_routing $1 return $? diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_func.sh b/patches/systemvm/debian/config/opt/cloud/bin/vpc_func.sh index 17cb078b34f..2f8835120e4 100755 --- a/patches/systemvm/debian/config/opt/cloud/bin/vpc_func.sh +++ b/patches/systemvm/debian/config/opt/cloud/bin/vpc_func.sh @@ -22,7 +22,7 @@ getEthByIp (){ local ip=$1 for dev in `ls -1 /sys/class/net | grep eth` do - sudo ip addr show dev $dev | grep $ip > /dev/null + sudo ip addr show dev $dev | grep $ip\/ > /dev/null if [ $? -eq 0 ] then echo $dev diff --git a/patches/systemvm/debian/config/root/deleteIpAlias.sh b/patches/systemvm/debian/config/root/deleteIpAlias.sh index 865ff3b4769..cf6d4de5269 100755 --- a/patches/systemvm/debian/config/root/deleteIpAlias.sh +++ b/patches/systemvm/debian/config/root/deleteIpAlias.sh @@ -24,7 +24,7 @@ set -x var="$1" cert="/root/.ssh/id_rsa.cloud" -while [ -n "$var" ] +while [[ !( "$var" == "-" ) ]] do var1=$(echo $var | cut -f1 -d "-") alias_count=$( echo $var1 | cut -f1 -d ":" ) diff --git a/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ b/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ index 7a1bd44584a..32c811b26d4 100644 --- a/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ +++ b/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ @@ -16,6 +16,8 @@ # specific language governing permissions and limitations # under the License.
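The system VM changes above form a small handshake: rc.local touches /var/cache/cloud/boot_up_done once boot finishes, cloud-early-config removes it at the start of the next boot, and get_template_version.sh polls for it before any rules are pushed to the router. A Java rendering of that wait, for illustration only; the flag path and 60 second timeout come from the script.

    import java.io.File;

    // Poll for the boot-up flag file every 100 ms and give up after the timeout,
    // mirroring the shell loop in get_template_version.sh.
    public class BootFlagWaitSketch {
        public static boolean waitForBootUp(long timeoutSeconds) throws InterruptedException {
            File flag = new File("/var/cache/cloud/boot_up_done");
            long deadline = System.currentTimeMillis() + timeoutSeconds * 1000L;
            while (System.currentTimeMillis() < deadline) {
                if (flag.exists()) {
                    return true;
                }
                Thread.sleep(100);
            }
            return false; // timed out; the script reports the failure and exits
        }
    }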
+sleep 1 + source /root/func.sh lock="biglock" diff --git a/plugins/affinity-group-processors/explicit-dedication/pom.xml b/plugins/affinity-group-processors/explicit-dedication/pom.xml new file mode 100644 index 00000000000..bb3c595841a --- /dev/null +++ b/plugins/affinity-group-processors/explicit-dedication/pom.xml @@ -0,0 +1,33 @@ + + + 4.0.0 + cloud-plugin-explicit-dedication + Apache CloudStack Plugin - Explicit Dedication Processor + + org.apache.cloudstack + cloudstack-plugins + 4.2.0-SNAPSHOT + ../../pom.xml + + + install + src + + diff --git a/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java new file mode 100644 index 00000000000..a0eb56cbb8a --- /dev/null +++ b/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -0,0 +1,383 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.affinity; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import javax.ejb.Local; +import javax.inject.Inject; + +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.log4j.Logger; + +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.AffinityConflictException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@Local(value = AffinityGroupProcessor.class) +public class ExplicitDedicationProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { + + private static final Logger s_logger = Logger.getLogger(ExplicitDedicationProcessor.class); + @Inject + protected UserVmDao _vmDao; + @Inject + protected VMInstanceDao _vmInstanceDao; + @Inject + protected DataCenterDao _dcDao; + @Inject + protected DedicatedResourceDao _dedicatedDao; + @Inject + protected HostPodDao _podDao; + @Inject + protected ClusterDao _clusterDao; + @Inject + protected HostDao _hostDao; + @Inject + protected DomainDao _domainDao; + @Inject + protected AffinityGroupDao _affinityGroupDao; + @Inject + protected AffinityGroupVMMapDao _affinityGroupVMMapDao; + + /** + * This method will process the affinity group of type 'Explicit Dedication' for a deployment of a VM that demands dedicated resources. + * For ExplicitDedicationProcessor we need to add dedicated resources into the IncludeList based on the level we have dedicated resources available. + * For eg. if admin dedicates a pod to a domain, then all the user in that domain can use the resources of that pod. + * We need to take care of the situation when dedicated resources further have resources dedicated to sub-domain/account. + * This IncludeList is then used to update the avoid list for a given data center. 
+ */ + @Override + public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, + ExcludeList avoid) throws AffinityConflictException { + VirtualMachine vm = vmProfile.getVirtualMachine(); + List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); + DataCenter dc = _dcDao.findById(vm.getDataCenterId()); + long domainId = vm.getDomainId(); + long accountId = vm.getAccountId(); + + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + if (vmGroupMapping != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Processing affinity group of type 'ExplicitDedication' for VM Id: " + vm.getId()); + } + + List dr = _dedicatedDao.listByAccountId(accountId); + List drOfDomain = searchInDomainResources(domainId); + List drOfParentDomain = searchInParentDomainResources(domainId); + List resourceList = new ArrayList(); + resourceList.addAll(dr); + resourceList.addAll(drOfDomain); + resourceList.addAll(drOfParentDomain); + boolean canUse = false; + + if (plan.getHostId() != null) { + HostVO host = _hostDao.findById(plan.getHostId()); + ClusterVO clusterofHost = _clusterDao.findById(host.getClusterId()); + HostPodVO podOfHost = _podDao.findById(host.getPodId()); + DataCenterVO zoneOfHost = _dcDao.findById(host.getDataCenterId()); + if (resourceList != null && resourceList.size() != 0) { + for(DedicatedResourceVO resource : resourceList){ + if ((resource.getHostId() != null && resource.getHostId() == plan.getHostId()) || + (resource.getClusterId() != null && resource.getClusterId() == clusterofHost.getId()) || + (resource.getPodId() != null && resource.getPodId() == podOfHost.getId()) || + (resource.getDataCenterId() != null && resource.getDataCenterId() == zoneOfHost.getId())){ + canUse = true; + } + } + } + if (!canUse) { + throw new CloudRuntimeException("Cannot use this host " + host.getName() + " for explicit dedication"); + } + } else if (plan.getClusterId() != null) { + ClusterVO cluster = _clusterDao.findById(plan.getClusterId()); + HostPodVO podOfCluster = _podDao.findById(cluster.getPodId()); + DataCenterVO zoneOfCluster = _dcDao.findById(cluster.getDataCenterId()); + List hostToUse = new ArrayList(); + // check whether this cluster or its pod is dedicated + if (resourceList != null && resourceList.size() != 0) { + for(DedicatedResourceVO resource : resourceList){ + if ((resource.getClusterId() != null && resource.getClusterId() == cluster.getId()) || + (resource.getPodId() != null && resource.getPodId() == podOfCluster.getId()) || + (resource.getDataCenterId() != null && resource.getDataCenterId() == zoneOfCluster.getId())){ + canUse = true; + } + + // check for all dedicated host; if it belongs to this cluster + if (!canUse){ + if (resource.getHostId() != null) { + HostVO dHost = _hostDao.findById(resource.getHostId()); + if (dHost.getClusterId() == cluster.getId()) { + hostToUse.add(dHost); + } + } + } + + } + } + + if (hostToUse.isEmpty() && !canUse) { + throw new CloudRuntimeException("Cannot use this cluster " + cluster.getName() + " for explicit dedication"); + } + + if (hostToUse != null && hostToUse.size() != 0) { + // add other non-dedicated hosts to avoid list + List hostList = _hostDao.findByClusterId(cluster.getId()); + for (HostVO host : hostList){ + if (!hostToUse.contains(host)) { + avoid.addHost(host.getId()); + } + } + } + + } else if (plan.getPodId() != null) { + HostPodVO pod = _podDao.findById(plan.getPodId()); + DataCenterVO zoneOfPod = _dcDao.findById(pod.getDataCenterId()); + List clustersToUse = new ArrayList(); + 
List hostsToUse = new ArrayList(); + // check whether this cluster or its pod is dedicated + if (resourceList != null && resourceList.size() != 0) { + for(DedicatedResourceVO resource : resourceList){ + if ((resource.getPodId() != null && resource.getPodId() == pod.getId()) || + (resource.getDataCenterId() != null && resource.getDataCenterId() == zoneOfPod.getId())){ + canUse = true; + } + + // check for all dedicated cluster/host; if it belongs to this pod + if (!canUse){ + if (resource.getClusterId() != null) { + ClusterVO dCluster = _clusterDao.findById(resource.getClusterId()); + if (dCluster.getPodId() == pod.getId()) { + clustersToUse.add(dCluster); + } + } + if (resource.getHostId() != null) { + HostVO dHost = _hostDao.findById(resource.getHostId()); + if (dHost.getPodId() == pod.getId()) { + hostsToUse.add(dHost); + } + } + } + + } + } + + if (hostsToUse.isEmpty() && clustersToUse.isEmpty() && !canUse) { + throw new CloudRuntimeException("Cannot use this pod " + pod.getName() + " for explicit dedication"); + } + + if (clustersToUse != null && clustersToUse.size() != 0) { + // add other non-dedicated clusters to avoid list + List clusterList = _clusterDao.listByPodId(pod.getId()); + for (ClusterVO cluster : clusterList){ + if (!clustersToUse.contains(cluster)) { + avoid.addCluster(cluster.getId()); + } + } + } + + if (hostsToUse != null && hostsToUse.size() != 0) { + // add other non-dedicated hosts to avoid list + List hostList = _hostDao.findByPodId(pod.getId()); + for (HostVO host : hostList){ + if (!hostsToUse.contains(host)) { + avoid.addHost(host.getId()); + } + } + } + + } else { + //check all resources under this zone + if (dr != null && dr.size() != 0) { + avoid = updateAvoidList(dr, avoid, dc); + } else if(drOfDomain != null && drOfDomain.size() != 0){ + avoid = updateAvoidList(drOfDomain, avoid, dc); + } else if(drOfParentDomain != null && drOfParentDomain.size() != 0){ + avoid = updateAvoidList(drOfParentDomain, avoid, dc); + } else { + avoid.addDataCenter(dc.getId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("No dedicated resources available for this domain or account"); + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("ExplicitDedicationProcessor returns Avoid List as: Deploy avoids pods: " + avoid.getPodsToAvoid() + ", clusters: " + + avoid.getClustersToAvoid() + ", hosts: " + avoid.getHostsToAvoid()); + } + } + } + } + } + + private ExcludeList updateAvoidList(List dedicatedResources, ExcludeList avoidList, DataCenter dc) { + ExcludeList includeList = new ExcludeList(); + for (DedicatedResourceVO dr : dedicatedResources) { + if (dr.getHostId() != null){ + includeList.addHost(dr.getHostId()); + HostVO dedicatedHost = _hostDao.findById(dr.getHostId()); + includeList.addCluster(dedicatedHost.getClusterId()); + includeList.addPod(dedicatedHost.getPodId()); + } + + if (dr.getClusterId() != null) { + includeList.addCluster(dr.getClusterId()); + //add all hosts inside this in includeList + List hostList = _hostDao.findByClusterId(dr.getClusterId()); + for (HostVO host : hostList) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + if (dHost != null) { + avoidList.addHost(host.getId()); + } else { + includeList.addHost(host.getId()); + } + } + ClusterVO dedicatedCluster = _clusterDao.findById(dr.getClusterId()); + includeList.addPod(dedicatedCluster.getPodId()); + } + + if (dr.getPodId() != null) { + includeList.addPod(dr.getPodId()); + //add all cluster under this pod in includeList + List clusterList = 
_clusterDao.listByPodId(dr.getPodId()); + for (ClusterVO cluster : clusterList) { + if (_dedicatedDao.findByClusterId(cluster.getId()) != null) { + avoidList.addCluster(cluster.getId()); + } else { + includeList.addCluster(cluster.getId()); + } + } + //add all hosts inside this pod in includeList + List hostList = _hostDao.findByPodId(dr.getPodId()); + for (HostVO host : hostList) { + if (_dedicatedDao.findByHostId(host.getId()) != null) { + avoidList.addHost(host.getId()); + } else { + includeList.addHost(host.getId()); + } + } + } + + if (dr.getDataCenterId() != null) { + includeList.addDataCenter(dr.getDataCenterId()); + //add all Pod under this data center in includeList + List podList = _podDao.listByDataCenterId(dr.getDataCenterId()); + for (HostPodVO pod : podList) { + if (_dedicatedDao.findByPodId(pod.getId()) != null) { + avoidList.addPod(pod.getId()); + } else { + includeList.addPod(pod.getId()); + } + } + List clusterList = _clusterDao.listClustersByDcId(dr.getDataCenterId()); + for (ClusterVO cluster : clusterList) { + if (_dedicatedDao.findByClusterId(cluster.getId()) != null) { + avoidList.addCluster(cluster.getId()); + } else { + includeList.addCluster(cluster.getId()); + } + } + //add all hosts inside this in includeList + List hostList = _hostDao.listByDataCenterId(dr.getDataCenterId()); + for (HostVO host : hostList) { + if (_dedicatedDao.findByHostId(host.getId()) != null) { + avoidList.addHost(host.getId()); + } else { + includeList.addHost(host.getId()); + } + } + } + } + //Update avoid list using includeList. + //add resources in avoid list which are not in include list. + + List pods = _podDao.listByDataCenterId(dc.getId()); + List clusters = _clusterDao.listClustersByDcId(dc.getId()); + List hosts = _hostDao.listByDataCenterId(dc.getId()); + Set podsInIncludeList = includeList.getPodsToAvoid(); + Set clustersInIncludeList = includeList.getClustersToAvoid(); + Set hostsInIncludeList = includeList.getHostsToAvoid(); + + for (HostPodVO pod : pods){ + if (podsInIncludeList != null && !podsInIncludeList.contains(pod.getId())) { + avoidList.addPod(pod.getId()); + } + } + + for (ClusterVO cluster : clusters) { + if (clustersInIncludeList != null && !clustersInIncludeList.contains(cluster.getId())) { + avoidList.addCluster(cluster.getId()); + } + } + + for (HostVO host : hosts) { + if (hostsInIncludeList != null && !hostsInIncludeList.contains(host.getId())) { + avoidList.addHost(host.getId()); + } + } + return avoidList; + } + + private List searchInParentDomainResources(long domainId) { + List domainIds = getDomainParentIds(domainId); + List dr = new ArrayList(); + for (Long id : domainIds) { + List resource = _dedicatedDao.listByDomainId(id); + if(resource != null) { + dr.addAll(resource); + } + } + return dr; + } + + private List searchInDomainResources(long domainId) { + List dr = _dedicatedDao.listByDomainId(domainId); + return dr; + } + + private List getDomainParentIds(long domainId) { + DomainVO domainRecord = _domainDao.findById(domainId); + List domainIds = new ArrayList(); + domainIds.add(domainRecord.getId()); + while (domainRecord.getParent() != null ){ + domainRecord = _domainDao.findById(domainRecord.getParent()); + domainIds.add(domainRecord.getId()); + } + return domainIds; + } + +} diff --git a/plugins/dedicated-resources/pom.xml b/plugins/dedicated-resources/pom.xml new file mode 100644 index 00000000000..4c908f4ff96 --- /dev/null +++ b/plugins/dedicated-resources/pom.xml @@ -0,0 +1,29 @@ + + + 4.0.0 + cloud-plugin-dedicated-resources + Apache CloudStack 
Plugin - Dedicated Resources + + org.apache.cloudstack + cloudstack-plugins + 4.2.0-SNAPSHOT + ../pom.xml + + diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java new file mode 100644 index 00000000000..91e4fcee3ad --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.DedicateClusterResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResources; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "dedicateCluster", description= "Dedicate an existing cluster", responseObject = DedicateClusterResponse.class ) +public class DedicateClusterCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(DedicateClusterCmd.class.getName()); + + private static final String s_name = "dedicateclusterresponse"; + @Inject DedicatedService dedicatedService; + + @Parameter(name=ApiConstants.CLUSTER_ID, type=CommandType.UUID, entityType=ClusterResponse.class, + required=true, description="the ID of the Cluster") + private Long clusterId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, required=true, description="the ID of the containing domain") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "the name of the account which needs dedication. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getClusterId() { + return clusterId; + } + + public Long getDomainId() { + return domainId; + } + + public String getAccountName() { + return accountName; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE; + } + + @Override + public String getEventDescription() { + return "dedicating a cluster"; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + List result = dedicatedService.dedicateCluster(getClusterId(), getDomainId(), getAccountName()); + ListResponse response = new ListResponse(); + List clusterResponseList = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result) { + DedicateClusterResponse clusterResponse = dedicatedService.createDedicateClusterResponse(resource); + clusterResponseList.add(clusterResponse); + } + response.setResponses(clusterResponseList); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate cluster"); + } + } + +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateHostCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateHostCmd.java new file mode 100644 index 00000000000..cb8eb45e0c4 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateHostCmd.java @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.DedicateHostResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResources; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "dedicateHost", description = "Dedicates a host.", responseObject = DedicateHostResponse.class) +public class DedicateHostCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(DedicateHostCmd.class.getName()); + private static final String s_name = "dedicatehostresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.HOST_ID, type=CommandType.UUID, entityType = HostResponse.class, + required=true, description="the ID of the host to update") + private Long hostId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, required=true, description="the ID of the containing domain") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "the name of the account which needs dedication. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getHostId() { + return hostId; + } + + public Long getDomainId() { + return domainId; + } + + public String getAccountName() { + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + List result = dedicatedService.dedicateHost(getHostId(), getDomainId(), getAccountName()); + ListResponse response = new ListResponse(); + List hostResponseList = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result) { + DedicateHostResponse hostResponse = dedicatedService.createDedicateHostResponse(resource); + hostResponseList.add(hostResponse); + } + response.setResponses(hostResponseList); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate host"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE; + } + + @Override + public String getEventDescription() { + return "dedicating a host"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicatePodCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicatePodCmd.java new file mode 100644 index 00000000000..ed3c227e508 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicatePodCmd.java @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.DedicatePodResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResources; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "dedicatePod", description ="Dedicates a Pod.", responseObject = DedicatePodResponse.class) +public class DedicatePodCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(DedicatePodCmd.class.getName()); + + private static final String s_name = "dedicatepodresponse"; + @Inject public DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType=PodResponse.class, + required=true, description="the ID of the Pod") + private Long podId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, required=true, description="the ID of the containing domain") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "the name of the account which needs dedication. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getPodId() { + return podId; + } + + public Long getDomainId() { + return domainId; + } + + public String getAccountName() { + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + List result = dedicatedService.dedicatePod(getPodId(), getDomainId(), getAccountName()); + ListResponse response = new ListResponse(); + List podResponseList = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result) { + DedicatePodResponse podresponse = dedicatedService.createDedicatePodResponse(resource); + podResponseList.add(podresponse); + } + response.setResponses(podResponseList); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate pod"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE; + } + + @Override + public String getEventDescription() { + return "dedicating a pod"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateZoneCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateZoneCmd.java new file mode 100644 index 00000000000..31c6025c305 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateZoneCmd.java @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.api.response.DedicateZoneResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResources; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "dedicateZone", description ="Dedicates a zones.", responseObject = DedicateZoneResponse.class) +public class DedicateZoneCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(DedicateZoneCmd.class.getName()); + + private static final String s_name = "dedicatezoneresponse"; + @Inject public DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType=ZoneResponse.class, + required=true, description="the ID of the zone") + private Long zoneId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, required=true, description="the ID of the containing domain") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "the name of the account which needs dedication. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getZoneId() { + return zoneId; + } + + public Long getDomainId() { + return domainId; + } + + public String getAccountName() { + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + List result = dedicatedService.dedicateZone(getZoneId(), getDomainId(), getAccountName()); + ListResponse response = new ListResponse(); + List zoneResponseList = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result) { + DedicateZoneResponse zoneresponse = dedicatedService.createDedicateZoneResponse(resource); + zoneResponseList.add(zoneresponse); + } + response.setResponses(zoneResponseList); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate zone"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE; + } + + @Override + public String getEventDescription() { + return "dedicating a zone"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java new file mode 100644 index 00000000000..f3947876581 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.DedicateClusterResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.utils.Pair; + +@APICommand(name = "listDedicatedClusters", description = "Lists dedicated clusters.", responseObject = DedicateClusterResponse.class) +public class ListDedicatedClustersCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDedicatedClustersCmd.class.getName()); + + private static final String s_name = "listdedicatedclustersresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name=ApiConstants.CLUSTER_ID, type=CommandType.UUID, entityType=ClusterResponse.class, + description="the ID of the cluster") + private Long clusterId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, + description="the ID of the domain associated with the cluster") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, + description = "the name of the account associated with the cluster. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getClusterId() { + return clusterId; + } + + public Long getDomainId(){ + return domainId; + } + + public String getAccountName(){ + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + Pair, Integer> result = dedicatedService.listDedicatedClusters(this); + ListResponse response = new ListResponse(); + List Responses = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result.first()) { + DedicateClusterResponse clusterResponse = dedicatedService.createDedicateClusterResponse(resource); + Responses.add(clusterResponse); + } + response.setResponses(Responses, result.second()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to list dedicated clusters"); + } + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java new file mode 100644 index 00000000000..736251b36d6 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.DedicateHostResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.utils.Pair; + +@APICommand(name = "listDedicatedHosts", description = "Lists dedicated hosts.", responseObject = DedicateHostResponse.class) +public class ListDedicatedHostsCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDedicatedHostsCmd.class.getName()); + + private static final String s_name = "listdedicatedhostsresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name=ApiConstants.HOST_ID, type=CommandType.UUID, entityType=HostResponse.class, + description="the ID of the host") + private Long hostId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, + description="the ID of the domain associated with the host") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, + description = "the name of the account associated with the host. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getHostId() { + return hostId; + } + + public Long getDomainId(){ + return domainId; + } + + public String getAccountName(){ + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation///////////////////l + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + Pair, Integer> result = dedicatedService.listDedicatedHosts(this); + ListResponse response = new ListResponse(); + List Responses = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result.first()) { + DedicateHostResponse hostResponse = dedicatedService.createDedicateHostResponse(resource); + Responses.add(hostResponse); + } + response.setResponses(Responses, result.second()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to list dedicated hosts"); + } + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java new file mode 100644 index 00000000000..da59edae8d3 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.DedicatePodResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.utils.Pair; + +@APICommand(name = "listDedicatedPods", description = "Lists dedicated pods.", responseObject = DedicatePodResponse.class) +public class ListDedicatedPodsCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDedicatedPodsCmd.class.getName()); + + private static final String s_name = "listdedicatedpodsresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType=PodResponse.class, + description="the ID of the pod") + private Long podId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, + description="the ID of the domain associated with the pod") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, + description = "the name of the account associated with the pod. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getPodId() { + return podId; + } + + public Long getDomainId(){ + return domainId; + } + + public String getAccountName(){ + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + Pair, Integer> result = dedicatedService.listDedicatedPods(this); + ListResponse response = new ListResponse(); + List Responses = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result.first()) { + DedicatePodResponse podresponse = dedicatedService.createDedicatePodResponse(resource); + Responses.add(podresponse); + } + response.setResponses(Responses, result.second()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to list dedicated pods"); + } + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java new file mode 100644 index 00000000000..a21f129f5be --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.api.response.DedicateZoneResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.utils.Pair; + +@APICommand(name = "listDedicatedZones", description = "List dedicated zones.", responseObject = DedicateZoneResponse.class) +public class ListDedicatedZonesCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDedicatedZonesCmd.class.getName()); + + private static final String s_name = "listdedicatedzonesresponse"; + @Inject DedicatedService _dedicatedservice; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType=ZoneResponse.class, + description="the ID of the Zone") + private Long zoneId; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType=DomainResponse.class, + description="the ID of the domain associated with the zone") + private Long domainId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, + description = "the name of the account associated with the zone. 
Must be used with domainId.") + private String accountName; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getZoneId() { + return zoneId; + } + + public Long getDomainId(){ + return domainId; + } + + public String getAccountName(){ + return accountName; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + Pair, Integer> result = _dedicatedservice.listDedicatedZones(this); + ListResponse response = new ListResponse(); + List Responses = new ArrayList(); + if (result != null) { + for (DedicatedResources resource : result.first()) { + DedicateZoneResponse zoneResponse = _dedicatedservice.createDedicateZoneResponse(resource); + Responses.add(zoneResponse); + } + response.setResponses(Responses, result.second()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to list dedicated zones"); + } + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java new file mode 100644 index 00000000000..ba1c6aad7cc --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.commands; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "releaseDedicatedCluster", description = "Release the dedication for cluster", responseObject = SuccessResponse.class) +public class ReleaseDedicatedClusterCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedClusterCmd.class.getName()); + + private static final String s_name = "releasededicatedclusterresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.CLUSTER_ID, type=CommandType.UUID, entityType=ClusterResponse.class, + required=true, description="the ID of the Cluster") + private Long clusterId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getClusterId() { + return clusterId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + boolean result = dedicatedService.releaseDedicatedResource(null, null, getClusterId(), null); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release dedicated cluster"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE; + } + + @Override + public String getEventDescription() { + return "releasing dedicated cluster"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java new file mode 100644 index 00000000000..a79c965926d --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.commands; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "releaseDedicatedHost", description = "Release the dedication for host", responseObject = SuccessResponse.class) +public class ReleaseDedicatedHostCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedHostCmd.class.getName()); + + private static final String s_name = "releasededicatedhostresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.HOST_ID, type=CommandType.UUID, entityType=HostResponse.class, + required=true, description="the ID of the host") + private Long hostId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getHostId() { + return hostId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + boolean result = dedicatedService.releaseDedicatedResource(null, null, null, getHostId()); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release dedicated Host"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE; + } + + @Override + public String getEventDescription() { + return "releasing dedicated host"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java new file mode 100644 index 00000000000..d84ef66ef5a --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.commands; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "releaseDedicatedPod", description = "Release the dedication for the pod", responseObject = SuccessResponse.class) +public class ReleaseDedicatedPodCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedPodCmd.class.getName()); + + private static final String s_name = "releasededicatedpodresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType=PodResponse.class, + required=true, description="the ID of the Pod") + private Long podId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getPodId() { + return podId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + boolean result = dedicatedService.releaseDedicatedResource(null, getPodId(), null, null); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release dedicated pod"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE; + } + + @Override + public String getEventDescription() { + return "releasing dedicated pod"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java new file mode 100644 index 00000000000..c78a4961dc8 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more 
contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.commands; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.dedicated.DedicatedService; +import org.apache.log4j.Logger; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "releaseDedicatedZone", description = "Release dedication of zone", responseObject = SuccessResponse.class) +public class ReleaseDedicatedZoneCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedZoneCmd.class.getName()); + + private static final String s_name = "releasededicatedzoneresponse"; + @Inject DedicatedService dedicatedService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType= ZoneResponse.class, + required=true, description="the ID of the Zone") + private Long zoneId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getZoneId() { + return zoneId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + boolean result = dedicatedService.releaseDedicatedResource(getZoneId(), null, null, null); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release dedicated zone"); + } + } + + @Override + public String getEventType() { + return EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE; + } + + @Override + public String getEventDescription() { + return "releasing dedicated zone"; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateClusterResponse.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateClusterResponse.java new file mode 100644 index 00000000000..3c8dde3fd08 --- /dev/null +++ 
b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateClusterResponse.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class DedicateClusterResponse extends BaseResponse { + @SerializedName("id") @Param(description="the ID of the dedicated resource") + private String id; + + @SerializedName("clusterid") @Param(description="the ID of the cluster") + private String clusterId; + + @SerializedName("clustername") @Param(description="the name of the cluster") + private String clusterName; + + @SerializedName("domainid") @Param(description="the domain ID of the cluster") + private String domainId; + + @SerializedName("accountid") @Param(description="the Account ID of the cluster") + private String accountId; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getClusterId() { + return clusterId; + } + + public void setClusterId(String clusterId) { + this.clusterId = clusterId; + } + + public String getClusterName() { + return clusterName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public String getDomainId() { + return domainId; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public String getAccountId() { + return accountId; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateHostResponse.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateHostResponse.java new file mode 100644 index 00000000000..cea31fe392b --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateHostResponse.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class DedicateHostResponse extends BaseResponse { + @SerializedName("id") @Param(description="the ID of the dedicated resource") + private String id; + + @SerializedName("hostid") @Param(description="the ID of the host") + private String hostId; + + @SerializedName("hostname") @Param(description="the name of the host") + private String hostName; + + @SerializedName("domainid") @Param(description="the domain ID of the host") + private String domainId; + + @SerializedName("accountid") @Param(description="the Account ID of the host") + private String accountId; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getHostId() { + return hostId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public String getDomainId() { + return domainId; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public String getAccountId() { + return accountId; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicatePodResponse.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicatePodResponse.java new file mode 100644 index 00000000000..4bcaa61c269 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicatePodResponse.java @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.dc.DedicatedResources; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = DedicatedResources.class) +public class DedicatePodResponse extends BaseResponse { + @SerializedName("id") @Param(description="the ID of the dedicated resource") + private String id; + + @SerializedName("podid") @Param(description="the ID of the Pod") + private String podId; + + @SerializedName("podname") @Param(description="the Name of the Pod") + private String podName; + + @SerializedName("domainid") @Param(description="the domain ID to which the Pod is dedicated") + private String domainId; + + @SerializedName("accountid") @Param(description="the Account Id to which the Pod is dedicated") + private String accountId; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getPodId() { + return podId; + } + + public void setPodId(String podId) { + this.podId = podId; + } + + public String getPodName() { + return podName; + } + + public void setPodName(String podName) { + this.podName = podName; + } + + public String getDomainId() { + return domainId; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public String getAccountId() { + return accountId; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateZoneResponse.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateZoneResponse.java new file mode 100644 index 00000000000..57497cd3484 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/response/DedicateZoneResponse.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.dc.DedicatedResources; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = DedicatedResources.class) +public class DedicateZoneResponse extends BaseResponse { + @SerializedName("id") @Param(description="the ID of the dedicated resource") + private String id; + + @SerializedName("zoneid") @Param(description="the ID of the Zone") + private String zoneId; + + @SerializedName("zonename") @Param(description="the Name of the Zone") + private String zoneName; + + @SerializedName("domainid") @Param(description="the domain ID to which the Zone is dedicated") + private String domainId; + + @SerializedName("accountid") @Param(description="the Account Id to which the Zone is dedicated") + private String accountId; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getZoneId() { + return zoneId; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public String getDomainId() { + return domainId; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public String getAccountId() { + return accountId; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } + +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java new file mode 100755 index 00000000000..c321b22176e --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -0,0 +1,815 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.dedicated; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import com.cloud.utils.component.AdapterBase; +import org.apache.cloudstack.api.commands.DedicateClusterCmd; +import org.apache.cloudstack.api.commands.DedicateHostCmd; +import org.apache.cloudstack.api.commands.DedicatePodCmd; +import org.apache.cloudstack.api.commands.DedicateZoneCmd; +import org.apache.cloudstack.api.commands.ListDedicatedClustersCmd; +import org.apache.cloudstack.api.commands.ListDedicatedHostsCmd; +import org.apache.cloudstack.api.commands.ListDedicatedPodsCmd; +import org.apache.cloudstack.api.commands.ListDedicatedZonesCmd; +import org.apache.cloudstack.api.commands.ReleaseDedicatedClusterCmd; +import org.apache.cloudstack.api.commands.ReleaseDedicatedHostCmd; +import org.apache.cloudstack.api.commands.ReleaseDedicatedPodCmd; +import org.apache.cloudstack.api.commands.ReleaseDedicatedZoneCmd; +import org.apache.cloudstack.api.response.DedicateClusterResponse; +import org.apache.cloudstack.api.response.DedicateHostResponse; +import org.apache.cloudstack.api.response.DedicatePodResponse; +import org.apache.cloudstack.api.response.DedicateZoneResponse; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.UserContext; +import com.cloud.user.dao.AccountDao; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.dao.UserVmDao; + +@Component +@Local({DedicatedService.class }) +public class DedicatedResourceManagerImpl implements DedicatedService { + private static final Logger s_logger = Logger.getLogger(DedicatedResourceManagerImpl.class); + + @Inject AccountDao _accountDao; + @Inject DomainDao _domainDao; + @Inject HostPodDao _podDao; + @Inject ClusterDao _clusterDao; + @Inject HostDao _hostDao; + @Inject DedicatedResourceDao _dedicatedDao; + @Inject DataCenterDao _zoneDao; + @Inject AccountManager _accountMgr; + @Inject UserVmDao _userVmDao; + @Inject ConfigurationDao _configDao; + + private int capacityReleaseInterval; + + public boolean configure(final String name, final Map params) throws ConfigurationException { + capacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); + return true; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, 
eventDescription = "dedicating a Zone") + public List dedicateZone(Long zoneId, Long domainId, String accountName) { + Long accountId = null; + List hosts = null; + if(accountName != null){ + Account caller = UserContext.current().getCaller(); + Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); + accountId = owner.getId(); + } + List childDomainIds = getDomainChildIds(domainId); + childDomainIds.add(domainId); + checkAccountAndDomain(accountId, domainId); + DataCenterVO dc = _zoneDao.findById(zoneId); + if (dc == null) { + throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); + } else { + DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(zoneId); + //check if zone is dedicated + if(dedicatedZone != null) { + s_logger.error("Zone " + dc.getName() + " is already dedicated"); + throw new CloudRuntimeException("Zone " + dc.getName() + " is already dedicated"); + } + + //check if any resource under this zone is dedicated to different account or sub-domain + List pods = _podDao.listByDataCenterId(dc.getId()); + List podsToRelease = new ArrayList(); + List clustersToRelease = new ArrayList(); + List hostsToRelease = new ArrayList(); + for (HostPodVO pod : pods) { + DedicatedResourceVO dPod = _dedicatedDao.findByPodId(pod.getId()); + if (dPod != null) { + if(!(childDomainIds.contains(dPod.getDomainId()))) { + throw new CloudRuntimeException("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + } + if (accountId != null) { + if (dPod.getAccountId() == accountId) { + podsToRelease.add(dPod); + } else { + s_logger.error("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + throw new CloudRuntimeException("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + } + } else { + if (dPod.getAccountId() == null && dPod.getDomainId() == domainId) { + podsToRelease.add(dPod); + } + } + } + } + + for (DedicatedResourceVO dr : podsToRelease) { + releaseDedicatedResource(null, dr.getPodId(), null, null); + } + + List clusters = _clusterDao.listClustersByDcId(dc.getId()); + for (ClusterVO cluster : clusters) { + DedicatedResourceVO dCluster = _dedicatedDao.findByClusterId(cluster.getId()); + if (dCluster != null) { + if(!(childDomainIds.contains(dCluster.getDomainId()))) { + throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + } + if (accountId != null) { + if (dCluster.getAccountId() == accountId) { + clustersToRelease.add(dCluster); + } else { + s_logger.error("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + } + } else { + if (dCluster.getAccountId() == null && dCluster.getDomainId() == domainId) { + clustersToRelease.add(dCluster); + } + } + } + } + + for (DedicatedResourceVO dr : clustersToRelease) { + releaseDedicatedResource(null, null, dr.getClusterId(), null); + } + + hosts = _hostDao.listByDataCenterId(dc.getId()); + for (HostVO host : hosts) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + if (dHost != null) { + if(!(childDomainIds.contains(dHost.getDomainId()))) { + throw new CloudRuntimeException("Host " + host.getName() + " under 
this Zone " + dc.getName() + " is dedicated to different account/domain"); + } + if (accountId != null) { + if (dHost.getAccountId() == accountId) { + hostsToRelease.add(dHost); + } else { + s_logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + } + } else { + if (dHost.getAccountId() == null && dHost.getDomainId() == domainId) { + hostsToRelease.add(dHost); + } + } + } + } + + for (DedicatedResourceVO dr : hostsToRelease) { + releaseDedicatedResource(null, null, null, dr.getHostId()); + } + } + + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + + Transaction txn = Transaction.currentTxn(); + txn.start(); + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(zoneId, null, null, null, null, null); + try { + dedicatedResource.setDomainId(domainId); + if (accountId != null) { + dedicatedResource.setAccountId(accountId); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate zone due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + } + txn.commit(); + + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Pod") + public List dedicatePod(Long podId, Long domainId, String accountName) { + Long accountId = null; + if(accountName != null){ + Account caller = UserContext.current().getCaller(); + Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); + accountId = owner.getId(); + } + List childDomainIds = getDomainChildIds(domainId); + childDomainIds.add(domainId); + checkAccountAndDomain(accountId, domainId); + HostPodVO pod = _podDao.findById(podId); + List hosts = null; + if (pod == null) { + throw new InvalidParameterValueException("Unable to find pod by id " + podId); + } else { + DedicatedResourceVO dedicatedPod = _dedicatedDao.findByPodId(podId); + DedicatedResourceVO dedicatedZoneOfPod = _dedicatedDao.findByZoneId(pod.getDataCenterId()); + //check if pod is dedicated + if(dedicatedPod != null ) { + s_logger.error("Pod " + pod.getName() + " is already dedicated"); + throw new CloudRuntimeException("Pod " + pod.getName() + " is already dedicated"); + } + + if (dedicatedZoneOfPod != null) { + boolean domainIdInChildreanList = getDomainChildIds(dedicatedZoneOfPod.getDomainId()).contains(domainId); + //can dedicate a pod to an account/domain if zone is dedicated to parent-domain + if (dedicatedZoneOfPod.getAccountId() != null || (accountId == null && !domainIdInChildreanList) + || (accountId != null && !(dedicatedZoneOfPod.getDomainId() == domainId || domainIdInChildreanList))) { + DataCenterVO zone = _zoneDao.findById(pod.getDataCenterId()); + s_logger.error("Cannot dedicate Pod. 
Its zone is already dedicated"); + throw new CloudRuntimeException("Pod's Zone " + zone.getName() + " is already dedicated"); + } + } + + //check if any resource under this pod is dedicated to different account or sub-domain + List clusters = _clusterDao.listByPodId(pod.getId()); + List clustersToRelease = new ArrayList(); + List hostsToRelease = new ArrayList(); + for (ClusterVO cluster : clusters) { + DedicatedResourceVO dCluster = _dedicatedDao.findByClusterId(cluster.getId()); + if (dCluster != null) { + if(!(childDomainIds.contains(dCluster.getDomainId()))) { + throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + } + /*if all dedicated resources belongs to same account and domain then we should release dedication + and make new entry for this Pod*/ + if (accountId != null) { + if (dCluster.getAccountId() == accountId) { + clustersToRelease.add(dCluster); + } else { + s_logger.error("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + } + } else { + if (dCluster.getAccountId() == null && dCluster.getDomainId() == domainId) { + clustersToRelease.add(dCluster); + } + } + } + } + + for (DedicatedResourceVO dr : clustersToRelease) { + releaseDedicatedResource(null, null, dr.getClusterId(), null); + } + + hosts = _hostDao.findByPodId(pod.getId()); + for (HostVO host : hosts) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + if (dHost != null) { + if(!(getDomainChildIds(domainId).contains(dHost.getDomainId()))) { + throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + } + if (accountId != null) { + if (dHost.getAccountId() == accountId) { + hostsToRelease.add(dHost); + } else { + s_logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + } + } else { + if (dHost.getAccountId() == null && dHost.getDomainId() == domainId) { + hostsToRelease.add(dHost); + } + } + } + } + + for (DedicatedResourceVO dr : hostsToRelease) { + releaseDedicatedResource(null, null, null, dr.getHostId()); + } + } + + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + + Transaction txn = Transaction.currentTxn(); + txn.start(); + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, podId, null, null, null, null); + try { + dedicatedResource.setDomainId(domainId); + if (accountId != null) { + dedicatedResource.setAccountId(accountId); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate pod due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate pod. 
Please contact Cloud Support."); + } + txn.commit(); + + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Cluster") + public List dedicateCluster(Long clusterId, Long domainId, String accountName) { + Long accountId = null; + List hosts = null; + if(accountName != null){ + Account caller = UserContext.current().getCaller(); + Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); + accountId = owner.getId(); + } + List childDomainIds = getDomainChildIds(domainId); + childDomainIds.add(domainId); + checkAccountAndDomain(accountId, domainId); + ClusterVO cluster = _clusterDao.findById(clusterId); + if (cluster == null) { + throw new InvalidParameterValueException("Unable to find cluster by id " + clusterId); + } else { + DedicatedResourceVO dedicatedCluster = _dedicatedDao.findByClusterId(clusterId); + DedicatedResourceVO dedicatedPodOfCluster = _dedicatedDao.findByPodId(cluster.getPodId()); + DedicatedResourceVO dedicatedZoneOfCluster = _dedicatedDao.findByZoneId(cluster.getDataCenterId()); + + //check if cluster is dedicated + if(dedicatedCluster != null) { + s_logger.error("Cluster " + cluster.getName() + " is already dedicated"); + throw new CloudRuntimeException("Cluster "+ cluster.getName() + " is already dedicated"); + } + + if (dedicatedPodOfCluster != null) { + boolean domainIdInChildreanList = getDomainChildIds(dedicatedPodOfCluster.getDomainId()).contains(domainId); + //can dedicate a cluster to an account/domain if pod is dedicated to parent-domain + if (dedicatedPodOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) + || (accountId != null && !(dedicatedPodOfCluster.getDomainId() == domainId || domainIdInChildreanList))) { + s_logger.error("Cannot dedicate Cluster. Its Pod is already dedicated"); + HostPodVO pod = _podDao.findById(cluster.getPodId()); + throw new CloudRuntimeException("Cluster's Pod " + pod.getName() + " is already dedicated"); + } + } + + if (dedicatedZoneOfCluster != null) { + boolean domainIdInChildreanList = getDomainChildIds(dedicatedZoneOfCluster.getDomainId()).contains(domainId); + //can dedicate a cluster to an account/domain if zone is dedicated to parent-domain + if (dedicatedZoneOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) + || (accountId != null && !(dedicatedZoneOfCluster.getDomainId() == domainId || domainIdInChildreanList))) { + s_logger.error("Cannot dedicate Cluster. 
Its zone is already dedicated"); + DataCenterVO zone = _zoneDao.findById(cluster.getDataCenterId()); + throw new CloudRuntimeException("Cluster's Zone "+ zone.getName() + " is already dedicated"); + } + } + + //check if any resource under this cluster is dedicated to different account or sub-domain + hosts = _hostDao.findByClusterId(cluster.getId()); + List hostsToRelease = new ArrayList(); + for (HostVO host : hosts) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + if (dHost != null) { + if(!(childDomainIds.contains(dHost.getDomainId()))) { + throw new CloudRuntimeException("Host " + host.getName() + " under this Cluster " + cluster.getName() + " is dedicated to different account/domain"); + } + /*if all dedicated resources belongs to same account and domain then we should release dedication + and make new entry for this cluster */ + if (accountId != null) { + if (dHost.getAccountId() == accountId) { + hostsToRelease.add(dHost); + } else { + s_logger.error("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); + throw new CloudRuntimeException("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); + } + } else { + if (dHost.getAccountId() == null && dHost.getDomainId() == domainId) { + hostsToRelease.add(dHost); + } + } + } + } + + for (DedicatedResourceVO dr : hostsToRelease) { + releaseDedicatedResource(null, null, null, dr.getHostId()); + } + } + + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + + Transaction txn = Transaction.currentTxn(); + txn.start(); + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, clusterId, null, null, null); + try { + dedicatedResource.setDomainId(domainId); + if (accountId != null) { + dedicatedResource.setAccountId(accountId); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate cluster. 
Please contact Cloud Support."); + } + txn.commit(); + + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Host") + public List dedicateHost(Long hostId, Long domainId, String accountName) { + Long accountId = null; + if(accountName != null){ + Account caller = UserContext.current().getCaller(); + Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); + accountId = owner.getId(); + } + checkAccountAndDomain(accountId, domainId); + HostVO host = _hostDao.findById(hostId); + if (host == null) { + throw new InvalidParameterValueException("Unable to find host by id " + hostId); + } else { + //check if host is of routing type + if (host.getType() != Host.Type.Routing) { + throw new CloudRuntimeException("Invalid host type for host " + host.getName()); + } + + DedicatedResourceVO dedicatedHost = _dedicatedDao.findByHostId(hostId); + DedicatedResourceVO dedicatedClusterOfHost = _dedicatedDao.findByClusterId(host.getClusterId()); + DedicatedResourceVO dedicatedPodOfHost = _dedicatedDao.findByPodId(host.getPodId()); + DedicatedResourceVO dedicatedZoneOfHost = _dedicatedDao.findByZoneId(host.getDataCenterId()); + + if(dedicatedHost != null) { + s_logger.error("Host "+ host.getName() + " is already dedicated"); + throw new CloudRuntimeException("Host "+ host.getName() + " is already dedicated"); + } + + if (dedicatedClusterOfHost != null) { + boolean domainIdInChildreanList = getDomainChildIds(dedicatedClusterOfHost.getDomainId()).contains(domainId); + //can dedicate a host to an account/domain if cluster is dedicated to parent-domain + if (dedicatedClusterOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) + || (accountId != null && !(dedicatedClusterOfHost.getDomainId() == domainId || domainIdInChildreanList))) { + ClusterVO cluster = _clusterDao.findById(host.getClusterId()); + s_logger.error("Host's Cluster " + cluster.getName() + " is already dedicated"); + throw new CloudRuntimeException("Host's Cluster " + cluster.getName() + " is already dedicated"); + } + } + + if (dedicatedPodOfHost != null){ + boolean domainIdInChildreanList = getDomainChildIds(dedicatedPodOfHost.getDomainId()).contains(domainId); + //can dedicate a host to an account/domain if pod is dedicated to parent-domain + if (dedicatedPodOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) + || (accountId != null && !(dedicatedPodOfHost.getDomainId() == domainId || domainIdInChildreanList))) { + HostPodVO pod = _podDao.findById(host.getPodId()); + s_logger.error("Host's Pod " + pod.getName() + " is already dedicated"); + throw new CloudRuntimeException("Host's Pod " + pod.getName() + " is already dedicated"); + } + } + + if (dedicatedZoneOfHost != null) { + boolean domainIdInChildreanList = getDomainChildIds(dedicatedZoneOfHost.getDomainId()).contains(domainId); + //can dedicate a host to an account/domain if zone is dedicated to parent-domain + if (dedicatedZoneOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) + || (accountId != null && !(dedicatedZoneOfHost.getDomainId() == domainId || domainIdInChildreanList))) { + DataCenterVO zone = _zoneDao.findById(host.getDataCenterId()); + s_logger.error("Host's Data Center " + zone.getName() + " is already dedicated"); + throw new CloudRuntimeException("Host's Data Center " + zone.getName() + " is already dedicated"); + } + } + } 
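+        // No zone/pod/cluster dedication up the hierarchy conflicts with this request;
+        // verify the VMs currently running on the host before persisting the dedication.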
+ + List childDomainIds = getDomainChildIds(domainId); + childDomainIds.add(domainId); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, hostId); + + Transaction txn = Transaction.currentTxn(); + txn.start(); + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, null, hostId, null, null); + try { + dedicatedResource.setDomainId(domainId); + if (accountId != null) { + dedicatedResource.setAccountId(accountId); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate host. Please contact Cloud Support."); + } + txn.commit(); + + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + + private List getVmsOnHost(long hostId) { + List vms = _userVmDao.listUpByHostId(hostId); + List vmsByLastHostId = _userVmDao.listByLastHostId(hostId); + if (vmsByLastHostId.size() > 0) { + // check if any VMs are within skip.counting.hours, if yes we have to consider the host. + for (UserVmVO stoppedVM : vmsByLastHostId) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime() + .getTime()) / 1000; + if (secondsSinceLastUpdate < capacityReleaseInterval) { + vms.add(stoppedVM); + } + } + } + + return vms; + } + + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { + boolean suitable = true; + List allVmsOnHost = getVmsOnHost(hostId); + if (accountId != null) { + for (UserVmVO vm : allVmsOnHost) { + if (vm.getAccountId() != accountId) { + s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit dedication as it is " + + "running instances of another account"); + throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + "running instances of another account"); + } + } + } else { + for (UserVmVO vm : allVmsOnHost) { + if (!domainIds.contains(vm.getDomainId())) { + s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit dedication as it is " + + "running instances of another domain"); + throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + "running instances of another domain"); + } + } + } + return suitable; + } + + private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { + boolean suitable = true; + for (HostVO host : hosts){ + checkHostSuitabilityForExplicitDedication(accountId, domainIds, host.getId()); + } + return suitable; + } + + private void checkAccountAndDomain(Long accountId, Long domainId) { + DomainVO domain = _domainDao.findById(domainId); + if (domain == null) { + throw new InvalidParameterValueException("Unable to find the domain by id " + domainId + ", please specify valid domainId"); + } + //check if account belongs to the domain id + if (accountId != null) { + AccountVO account = _accountDao.findById(accountId); + if (account == null || domainId != account.getDomainId()){ + throw new InvalidParameterValueException("Please specify the domain id of the account: " + account.getAccountName()); + } + } + } + + private List getDomainChildIds(long domainId) { + DomainVO domainRecord = _domainDao.findById(domainId); + List domainIds = new ArrayList(); + domainIds.add(domainRecord.getId()); + // find all domain Ids till leaf + List allChildDomains = 
_domainDao.findAllChildren(domainRecord.getPath(), domainRecord.getId()); + for (DomainVO domain : allChildDomains) { + domainIds.add(domain.getId()); + } + return domainIds; + } + + @Override + public DedicateZoneResponse createDedicateZoneResponse(DedicatedResources resource) { + DedicateZoneResponse dedicateZoneResponse = new DedicateZoneResponse(); + DataCenterVO dc = _zoneDao.findById(resource.getDataCenterId()); + DomainVO domain = _domainDao.findById(resource.getDomainId()); + AccountVO account = _accountDao.findById(resource.getAccountId()); + dedicateZoneResponse.setId(resource.getUuid()); + dedicateZoneResponse.setZoneId(dc.getUuid()); + dedicateZoneResponse.setZoneName(dc.getName()); + dedicateZoneResponse.setDomainId(domain.getUuid()); + if (account != null) { + dedicateZoneResponse.setAccountId(account.getUuid()); + } + dedicateZoneResponse.setObjectName("dedicatedzone"); + return dedicateZoneResponse; + } + + @Override + public DedicatePodResponse createDedicatePodResponse(DedicatedResources resource) { + DedicatePodResponse dedicatePodResponse = new DedicatePodResponse(); + HostPodVO pod = _podDao.findById(resource.getPodId()); + DomainVO domain = _domainDao.findById(resource.getDomainId()); + AccountVO account = _accountDao.findById(resource.getAccountId()); + dedicatePodResponse.setId(resource.getUuid()); + dedicatePodResponse.setPodId(pod.getUuid()); + dedicatePodResponse.setPodName(pod.getName()); + dedicatePodResponse.setDomainId(domain.getUuid()); + if (account != null) { + dedicatePodResponse.setAccountId(account.getUuid()); + } + dedicatePodResponse.setObjectName("dedicatedpod"); + return dedicatePodResponse; + } + + @Override + public DedicateClusterResponse createDedicateClusterResponse(DedicatedResources resource) { + DedicateClusterResponse dedicateClusterResponse = new DedicateClusterResponse(); + ClusterVO cluster = _clusterDao.findById(resource.getClusterId()); + DomainVO domain = _domainDao.findById(resource.getDomainId()); + AccountVO account = _accountDao.findById(resource.getAccountId()); + dedicateClusterResponse.setId(resource.getUuid()); + dedicateClusterResponse.setClusterId(cluster.getUuid()); + dedicateClusterResponse.setClusterName(cluster.getName()); + dedicateClusterResponse.setDomainId(domain.getUuid()); + if (account != null) { + dedicateClusterResponse.setAccountId(account.getUuid()); + } + dedicateClusterResponse.setObjectName("dedicatedcluster"); + return dedicateClusterResponse; + } + + @Override + public DedicateHostResponse createDedicateHostResponse(DedicatedResources resource) { + DedicateHostResponse dedicateHostResponse = new DedicateHostResponse(); + HostVO host = _hostDao.findById(resource.getHostId()); + DomainVO domain = _domainDao.findById(resource.getDomainId()); + AccountVO account = _accountDao.findById(resource.getAccountId()); + dedicateHostResponse.setId(resource.getUuid()); + dedicateHostResponse.setHostId(host.getUuid()); + dedicateHostResponse.setHostName(host.getName()); + dedicateHostResponse.setDomainId(domain.getUuid()); + if (account != null) { + dedicateHostResponse.setAccountId(account.getUuid()); + } + dedicateHostResponse.setObjectName("dedicatedhost"); + return dedicateHostResponse; + } + + @Override + public List> getCommands() { + List> cmdList = new ArrayList>(); + cmdList.add(DedicateZoneCmd.class); + cmdList.add(DedicatePodCmd.class); + cmdList.add(DedicateClusterCmd.class); + cmdList.add(DedicateHostCmd.class); + cmdList.add(ListDedicatedZonesCmd.class); + cmdList.add(ListDedicatedPodsCmd.class); + 
cmdList.add(ListDedicatedClustersCmd.class); + cmdList.add(ListDedicatedHostsCmd.class); + cmdList.add(ReleaseDedicatedClusterCmd.class); + cmdList.add(ReleaseDedicatedHostCmd.class); + cmdList.add(ReleaseDedicatedPodCmd.class); + cmdList.add(ReleaseDedicatedZoneCmd.class); + return cmdList; + } + + @Override + public Pair, Integer> listDedicatedZones(ListDedicatedZonesCmd cmd) { + Long zoneId = cmd.getZoneId(); + Long domainId = cmd.getDomainId(); + String accountName = cmd.getAccountName(); + Long accountId = null; + if (accountName != null) { + if (domainId != null) { + Account account = _accountDao.findActiveAccount(accountName, domainId); + if (account != null) { + accountId = account.getId(); + } + } else { + throw new InvalidParameterValueException("Please specify the domain id of the account: " + accountName); + } + } + Pair, Integer> result = _dedicatedDao.searchDedicatedZones(zoneId, domainId, accountId); + return new Pair, Integer>(result.first(), result.second()); + } + + @Override + public Pair, Integer> listDedicatedPods(ListDedicatedPodsCmd cmd) { + Long podId = cmd.getPodId(); + Long domainId = cmd.getDomainId(); + String accountName = cmd.getAccountName(); + Long accountId = null; + if (accountName != null) { + if (domainId != null) { + Account account = _accountDao.findActiveAccount(accountName, domainId); + if (account != null) { + accountId = account.getId(); + } + } else { + throw new InvalidParameterValueException("Please specify the domain id of the account: " + accountName); + } + } + Pair, Integer> result = _dedicatedDao.searchDedicatedPods(podId, domainId, accountId); + return new Pair, Integer>(result.first(), result.second()); + } + + @Override + public Pair, Integer> listDedicatedClusters(ListDedicatedClustersCmd cmd) { + Long clusterId = cmd.getClusterId(); + Long domainId = cmd.getDomainId(); + String accountName = cmd.getAccountName(); + Long accountId = null; + if (accountName != null) { + if (domainId != null) { + Account account = _accountDao.findActiveAccount(accountName, domainId); + if (account != null) { + accountId = account.getId(); + } + } else { + throw new InvalidParameterValueException("Please specify the domain id of the account: " + accountName); + } + } + Pair, Integer> result = _dedicatedDao.searchDedicatedClusters(clusterId, domainId, accountId); + return new Pair, Integer>(result.first(), result.second()); + } + + @Override + public Pair, Integer> listDedicatedHosts(ListDedicatedHostsCmd cmd) { + Long hostId = cmd.getHostId(); + Long domainId = cmd.getDomainId(); + String accountName = cmd.getAccountName(); + Long accountId = null; + if (accountName != null) { + if (domainId != null) { + Account account = _accountDao.findActiveAccount(accountName, domainId); + if (account != null) { + accountId = account.getId(); + } + } else { + throw new InvalidParameterValueException("Please specify the domain id of the account: " + accountName); + } + } + + Pair, Integer> result = _dedicatedDao.searchDedicatedHosts(hostId, domainId, accountId); + return new Pair, Integer>(result.first(), result.second()); + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE, eventDescription = "Releasing dedicated resource") + public boolean releaseDedicatedResource(Long zoneId, Long podId, Long clusterId, Long hostId) throws InvalidParameterValueException{ + DedicatedResourceVO resource = null; + Long resourceId = null; + if (zoneId != null) { + resource = _dedicatedDao.findByZoneId(zoneId); + } + if (podId != null) { + 
resource = _dedicatedDao.findByPodId(podId); + } + if (clusterId != null) { + resource = _dedicatedDao.findByClusterId(clusterId); + } + if (hostId != null ) { + resource = _dedicatedDao.findByHostId(hostId); + } + if (resource == null){ + throw new InvalidParameterValueException("No Dedicated Resource available to release"); + } else { + Transaction txn = Transaction.currentTxn(); + txn.start(); + resourceId = resource.getId(); + if (!_dedicatedDao.remove(resourceId)) { + throw new CloudRuntimeException("Failed to delete Resource " + resourceId); + } + txn.commit(); + } + return true; + } +} diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedService.java b/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedService.java new file mode 100755 index 00000000000..6f26ad62f84 --- /dev/null +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedService.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.dedicated; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.utils.Pair; +import com.cloud.utils.component.PluggableService; +import org.apache.cloudstack.api.commands.ListDedicatedClustersCmd; +import org.apache.cloudstack.api.commands.ListDedicatedHostsCmd; +import org.apache.cloudstack.api.commands.ListDedicatedPodsCmd; +import org.apache.cloudstack.api.commands.ListDedicatedZonesCmd; +import org.apache.cloudstack.api.response.DedicateClusterResponse; +import org.apache.cloudstack.api.response.DedicateHostResponse; +import org.apache.cloudstack.api.response.DedicatePodResponse; +import org.apache.cloudstack.api.response.DedicateZoneResponse; + +import java.util.List; + +public interface DedicatedService extends PluggableService { + + DedicatePodResponse createDedicatePodResponse(DedicatedResources resource); + + DedicateClusterResponse createDedicateClusterResponse( + DedicatedResources resource); + + DedicateHostResponse createDedicateHostResponse(DedicatedResources resource); + + Pair, Integer> listDedicatedPods(ListDedicatedPodsCmd cmd); + + Pair, Integer> listDedicatedHosts(ListDedicatedHostsCmd cmd); + + Pair, Integer> listDedicatedClusters(ListDedicatedClustersCmd cmd); + + boolean releaseDedicatedResource(Long zoneId, Long podId, Long clusterId, Long hostId); + + DedicateZoneResponse createDedicateZoneResponse(DedicatedResources resource); + + Pair, Integer> listDedicatedZones(ListDedicatedZonesCmd cmd); + + List dedicateZone(Long zoneId, Long domainId, String accountName); + + List dedicatePod(Long podId, Long domainId, String accountName); + + List dedicateCluster(Long clusterId, Long domainId, String accountName); + + List dedicateHost(Long hostId, Long 
domainId, String accountName); + +} diff --git a/plugins/dedicated-resources/test/org/apache/cloudstack/dedicated/manager/DedicatedApiUnitTest.java b/plugins/dedicated-resources/test/org/apache/cloudstack/dedicated/manager/DedicatedApiUnitTest.java new file mode 100644 index 00000000000..58aae238d88 --- /dev/null +++ b/plugins/dedicated-resources/test/org/apache/cloudstack/dedicated/manager/DedicatedApiUnitTest.java @@ -0,0 +1,317 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.dedicated.manager; + +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; + +import java.io.IOException; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import junit.framework.Assert; + +import org.apache.cloudstack.dedicated.DedicatedResourceManagerImpl; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.apache.log4j.Logger; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.dao.HostDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.UserContext; +import com.cloud.user.dao.AccountDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.dao.UserVmDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class DedicatedApiUnitTest { + 
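+    // Collaborating DAOs and the account manager are Mockito mocks supplied by the nested
+    // TestConfiguration class below, so these tests run without a database or XML context.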
public static final Logger s_logger = Logger.getLogger(DedicatedApiUnitTest.class); + @Inject + DedicatedResourceManagerImpl _dedicatedService = new DedicatedResourceManagerImpl(); + + @Inject + AccountManager _acctMgr; + + @Inject + AccountDao _accountDao; + + @Inject + DomainDao _domainDao; + + @Inject + UserVmDao _vmDao; + + @Inject + DedicatedResourceDao _dedicatedDao; + + @Inject + DataCenterDao _dcDao; + + @Inject + HostPodDao _podDao; + + @Inject + ClusterDao _clusterDao; + + @Inject + HostDao _hostDao; + + @Inject + ConfigurationDao _configDao; + + private static long domainId = 5L; + private static long accountId = 5L; + private static String accountName = "admin"; + + @BeforeClass + public static void setUp() throws ConfigurationException { + + } + + @Before + public void testSetUp() { + ComponentContext.initComponentsLifeCycle(); + AccountVO account = new AccountVO(accountName, domainId, "networkDomain", Account.ACCOUNT_TYPE_NORMAL, "uuid"); + DomainVO domain = new DomainVO("rootDomain", 5L, 5L, "networkDomain"); + + UserContext.registerContext(1, account, null, true); + when(_acctMgr.finalizeOwner((Account) anyObject(), anyString(), anyLong(), anyLong())).thenReturn(account); + when(_accountDao.findByIdIncludingRemoved(0L)).thenReturn(account); + when(_accountDao.findById(anyLong())).thenReturn(account); + when(_domainDao.findById(domainId)).thenReturn(domain); + } + + @Test(expected = InvalidParameterValueException.class) + public void InvalidDomainIDForAccountTest() { + _dedicatedService.dedicateZone(10L, domainId, accountName); + } + + @Test(expected = InvalidParameterValueException.class) + public void dedicateResourceInvalidAccountIDTest() { + _dedicatedService.dedicateZone(10L, domainId, accountName); + } + + @Test + public void releaseDedicatedZoneInvalidIdTest() { + when(_dedicatedDao.findByZoneId(10L)).thenReturn(null); + try { + _dedicatedService.releaseDedicatedResource(10L, null, null, null); + } catch (InvalidParameterValueException e) { + Assert.assertTrue(e.getMessage().contains( + "No Dedicated Resource available to release")); + } + } + +/* @Test + public void runDedicateZoneTest() { + DataCenterVO dc = new DataCenterVO(10L, "TestZone", "Dedicated", + "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24", null, null, + NetworkType.Basic, null, null); + when(_dcDao.findById(10L)).thenReturn(dc); + try { + List result = _dedicatedService.dedicateZone(10L, domainId, accountName); + Assert.assertNotNull(result); + } catch (Exception e) { + s_logger.info("exception in testing dedication of zone " + + e.toString()); + } + } + + @Test + public void runDedicatePodTest() { + HostPodVO pod = new HostPodVO("TestPod", 20L, "10.0.0.1", "10.0.0.0", + 22, null); + when(_podDao.findById(10L)).thenReturn(pod); + try { + List result = _dedicatedService.dedicatePod(10L, domainId, accountName); + Assert.assertNotNull(result); + } catch (Exception e) { + s_logger.info("exception in testing dedication of pod " + + e.toString()); + } + } + + @Test + public void runDedicateClusterTest() { + ClusterVO cluster = new ClusterVO(10L, 10L, "TestCluster"); + when(_clusterDao.findById(10L)).thenReturn(cluster); + try { + List result = _dedicatedService.dedicateCluster(10L, domainId, accountName); + Assert.assertNotNull(result); + } catch (Exception e) { + s_logger.info("exception in testing dedication of cluster " + + e.toString()); + } + } + + @Test + public void runDedicateHostTest() { + HostVO host = new HostVO(10L, "Host-1", Host.Type.Routing, null, + "10.0.0.0", null, null, null, null, null, 
null, null, null, + Status.Up, null, null, null, 10L, 10L, 30L, 10233, null, null, + null, 0, null); + when(_hostDao.findById(10L)).thenReturn(host); + try { + List result = _dedicatedService.dedicateHost(10L, domainId, accountName); + Assert.assertNotNull(result); + } catch (Exception e) { + s_logger.info("exception in testing dedication of host " + + e.toString()); + } + } +*/ + + @Test(expected = CloudRuntimeException.class) + public void dedicateZoneExistTest() { + DedicatedResourceVO dr = new DedicatedResourceVO(10L, null, null, null, domainId, accountId); + when(_dedicatedDao.findByZoneId(10L)).thenReturn(dr); + _dedicatedService.dedicateZone(10L, domainId, accountName); + } + + @Test(expected = CloudRuntimeException.class) + public void dedicatePodExistTest() { + DedicatedResourceVO dr = new DedicatedResourceVO(null, 10L, null, null, domainId, accountId); + when(_dedicatedDao.findByPodId(10L)).thenReturn(dr); + _dedicatedService.dedicatePod(10L, domainId, accountName); + } + + @Test(expected = CloudRuntimeException.class) + public void dedicateClusterExistTest() { + DedicatedResourceVO dr = new DedicatedResourceVO(null, null, 10L, null, domainId, accountId); + when(_dedicatedDao.findByClusterId(10L)).thenReturn(dr); + _dedicatedService.dedicateCluster(10L, domainId, accountName); + } + + @Test(expected = CloudRuntimeException.class) + public void dedicateHostExistTest() { + DedicatedResourceVO dr = new DedicatedResourceVO(null, null, null, 10L, domainId, accountId); + when(_dedicatedDao.findByHostId(10L)).thenReturn(dr); + _dedicatedService.dedicateHost(10L, domainId, accountName); + } + + @Test(expected = InvalidParameterValueException.class) + public void releaseDedicatedPodInvalidIdTest() { + when(_dedicatedDao.findByPodId(10L)).thenReturn(null); + _dedicatedService.releaseDedicatedResource(null, 10L, null, null); + } + + @Test(expected = InvalidParameterValueException.class) + public void releaseDedicatedClusterInvalidIdTest() { + when(_dedicatedDao.findByClusterId(10L)).thenReturn(null); + _dedicatedService.releaseDedicatedResource(null, null, 10L, null); + } + + @Test(expected = InvalidParameterValueException.class) + public void releaseDedicatedHostInvalidIdTest() { + when(_dedicatedDao.findByHostId(10L)).thenReturn(null); + _dedicatedService.releaseDedicatedResource(null, null, null, 10L); + } + + @Configuration + @ComponentScan(basePackageClasses = {DedicatedResourceManagerImpl.class}, + includeFilters = {@Filter(value = TestConfiguration.Library.class, + type = FilterType.CUSTOM)}, useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public AccountDao accountDao() { + return Mockito.mock(AccountDao.class); + } + + @Bean + public DomainDao domainDao() { + return Mockito.mock(DomainDao.class); + } + + @Bean + public DedicatedResourceDao dedicatedDao() { + return Mockito.mock(DedicatedResourceDao.class); + } + + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + + @Bean + public AccountManager acctManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public UserVmDao userVmDao() { + return Mockito.mock(UserVmDao.class); + } + @Bean + public DataCenterDao dataCenterDao() { + return Mockito.mock(DataCenterDao.class); + } + @Bean + public HostPodDao hostPodDao() { + return Mockito.mock(HostPodDao.class); + } + + @Bean + public ClusterDao clusterDao() { + return Mockito.mock(ClusterDao.class); + } + + @Bean + public ConfigurationDao configDao() { + return 
Mockito.mock(ConfigurationDao.class); + } + + public static class Library implements TypeFilter { + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} diff --git a/plugins/dedicated-resources/test/resource/dedicatedContext.xml b/plugins/dedicated-resources/test/resource/dedicatedContext.xml new file mode 100644 index 00000000000..9ce8362d4b0 --- /dev/null +++ b/plugins/dedicated-resources/test/resource/dedicatedContext.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java index d47d8f52c46..be016cb2507 100644 --- a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -29,6 +29,8 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import com.cloud.configuration.Config; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.dao.PlannerHostReservationDao; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.host.HostVO; import com.cloud.resource.ResourceManager; @@ -39,6 +41,7 @@ import com.cloud.user.Account; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -98,12 +101,12 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy Set hostRunningStrictImplicitVmsOfOtherAccounts = new HashSet(); Set allOtherHosts = new HashSet(); for (Long host : allHosts) { - List userVms = getVmsOnHost(host); - if (userVms == null || userVms.isEmpty()) { + List vms = getVmsOnHost(host); + if (vms == null || vms.isEmpty()) { emptyHosts.add(host); - } else if (checkHostSuitabilityForImplicitDedication(account.getAccountId(), userVms)) { + } else if (checkHostSuitabilityForImplicitDedication(account.getAccountId(), vms)) { hostRunningVmsOfAccount.add(host); - } else if (checkIfAllVmsCreatedInStrictMode(account.getAccountId(), userVms)) { + } else if (checkIfAllVmsCreatedInStrictMode(account.getAccountId(), vms)) { hostRunningStrictImplicitVmsOfOtherAccounts.add(host); } else { allOtherHosts.add(host); @@ -139,12 +142,12 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy return clusterList; } - private List getVmsOnHost(long hostId) { - List vms = _vmDao.listUpByHostId(hostId); - List vmsByLastHostId = _vmDao.listByLastHostId(hostId); + private List getVmsOnHost(long hostId) { + List vms = _vmInstanceDao.listUpByHostId(hostId); + List vmsByLastHostId = _vmInstanceDao.listByLastHostId(hostId); if (vmsByLastHostId.size() > 0) { // check if any VMs are within skip.counting.hours, if yes we have to consider the host. 
- for (UserVmVO stoppedVM : vmsByLastHostId) { + for (VMInstanceVO stoppedVM : vmsByLastHostId) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime() .getTime()) / 1000; if (secondsSinceLastUpdate < capacityReleaseInterval) { @@ -156,9 +159,12 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy return vms; } - private boolean checkHostSuitabilityForImplicitDedication(Long accountId, List allVmsOnHost) { + private boolean checkHostSuitabilityForImplicitDedication(Long accountId, List allVmsOnHost) { boolean suitable = true; - for (UserVmVO vm : allVmsOnHost) { + if (allVmsOnHost.isEmpty()) + return false; + + for (VMInstanceVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " + "running instances of another account"); @@ -170,15 +176,17 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy "is running instances of this account which haven't been created using implicit dedication."); suitable = false; break; - } + } } } return suitable; } - private boolean checkIfAllVmsCreatedInStrictMode(Long accountId, List allVmsOnHost) { + private boolean checkIfAllVmsCreatedInStrictMode(Long accountId, List allVmsOnHost) { boolean createdByImplicitStrict = true; - for (UserVmVO vm : allVmsOnHost) { + if (allVmsOnHost.isEmpty()) + return false; + for (VMInstanceVO vm : allVmsOnHost) { if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + " than implicit."); @@ -243,7 +251,84 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy } @Override - public PlannerResourceUsage getResourceUsage() { - return PlannerResourceUsage.Dedicated; + public PlannerResourceUsage getResourceUsage(VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { + // Check if strict or preferred mode should be used. + boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId()); + + // If service offering in strict mode return resource usage as Dedicated + if (!preferred) { + return PlannerResourceUsage.Dedicated; + } + else { + // service offering is in implicit mode. + // find is it possible to deploy in dedicated mode, + // if its possible return dedicated else return shared. + List clusterList = super.orderClusters(vmProfile, plan, avoid); + Set hostsToAvoid = avoid.getHostsToAvoid(); + Account account = vmProfile.getOwner(); + + // Get the list of all the hosts in the given clusters + List allHosts = new ArrayList(); + for (Long cluster : clusterList) { + List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); + for (HostVO hostVO : hostsInCluster) { + + allHosts.add(hostVO.getId()); + } + } + + // Go over all the hosts in the cluster and get a list of + // 1. All empty hosts, not running any vms. + // 2. Hosts running vms for this account and created by a service + // offering which uses an + // implicit dedication planner. + // 3. Hosts running vms created by implicit planner and in strict + // mode of other accounts. + // 4. Hosts running vms from other account or from this account but + // created by a service offering which uses + // any planner besides implicit. 
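+            // Classify every candidate host into exactly one of the four buckets above;
+            // the result decides whether the deployment can still be treated as Dedicated.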
+ Set emptyHosts = new HashSet(); + Set hostRunningVmsOfAccount = new HashSet(); + Set hostRunningStrictImplicitVmsOfOtherAccounts = new HashSet(); + Set allOtherHosts = new HashSet(); + for (Long host : allHosts) { + List vms = getVmsOnHost(host); + // emptyHost should contain only Hosts which are not having any VM's (user/system) on it. + if (vms == null || vms.isEmpty()) { + emptyHosts.add(host); + } else if (checkHostSuitabilityForImplicitDedication(account.getAccountId(), vms)) { + hostRunningVmsOfAccount.add(host); + } else if (checkIfAllVmsCreatedInStrictMode(account.getAccountId(), vms)) { + hostRunningStrictImplicitVmsOfOtherAccounts.add(host); + } else { + allOtherHosts.add(host); + } + } + + // Hosts running vms of other accounts created by ab implicit + // planner in strict mode should always be avoided. + avoid.addHostList(hostRunningStrictImplicitVmsOfOtherAccounts); + + if (!hostRunningVmsOfAccount.isEmpty() + && (hostsToAvoid == null || !hostsToAvoid.containsAll(hostRunningVmsOfAccount))) { + // Check if any of hosts that are running implicit dedicated vms are available (not in avoid list). + // If so, we'll try and use these hosts. We can deploy in Dedicated mode + return PlannerResourceUsage.Dedicated; + } else if (!emptyHosts.isEmpty() && (hostsToAvoid == null || !hostsToAvoid.containsAll(emptyHosts))) { + // If there aren't implicit resources try on empty hosts, As empty hosts are available we can deploy in Dedicated mode. + // Empty hosts can contain hosts which are not having user vms but system vms are running. + // But the host where system vms are running is marked as shared and still be part of empty Hosts. + // The scenario will fail where actual Empty hosts and uservms not running host. + return PlannerResourceUsage.Dedicated; + } else if (!preferred) { + return PlannerResourceUsage.Dedicated; + } else { + if (!allOtherHosts.isEmpty() && (hostsToAvoid == null || !hostsToAvoid.containsAll(allOtherHosts))) { + return PlannerResourceUsage.Shared; + } + } + return PlannerResourceUsage.Shared; + } } -} \ No newline at end of file +} diff --git a/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java index 44507600db9..efbb5c2a6f9 100644 --- a/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java +++ b/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -202,10 +202,11 @@ public class ImplicitPlannerTest { // Validations. // Check cluster 2 and 3 are not in the cluster list. // Host 6 and 7 should also be in avoid list. 
+ //System.out.println("checkStrictModeWithCurrentAccountVmsPresent:: Cluster list should not be empty but ::" + clusterList.toString()); assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); boolean foundNeededCluster = false; for (Long cluster : clusterList) { - if (cluster != 1) { + if (cluster == 4) { fail("Found a cluster that shouldn't have been present, cluster id : " + cluster); }else { foundNeededCluster = true; @@ -218,7 +219,8 @@ public class ImplicitPlannerTest { Set hostsThatShouldBeInAvoidList = new HashSet(); hostsThatShouldBeInAvoidList.add(6L); hostsThatShouldBeInAvoidList.add(7L); - assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list" , + //System.out.println("checkStrictModeWithCurrentAccountVmsPresent:: Host in avoidlist :: " + hostsThatShouldBeInAvoidList.toString()); + assertFalse("Hosts 6 and 7 that should have been present were not found in avoid list" , hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList)); } @@ -242,11 +244,14 @@ public class ImplicitPlannerTest { // Host 5 and 7 should also be in avoid list. assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); boolean foundNeededCluster = false; + //System.out.println("Cluster list 2 should not be present ::" + clusterList.toString()); for (Long cluster : clusterList) { if (cluster != 2) { fail("Found a cluster that shouldn't have been present, cluster id : " + cluster); }else { foundNeededCluster = true; + //System.out.println("Cluster list 2 should not be present breaking now" + cluster); + break; } } assertTrue("Didn't find cluster 2 in the list. It should have been present", foundNeededCluster); @@ -256,7 +261,7 @@ public class ImplicitPlannerTest { Set hostsThatShouldBeInAvoidList = new HashSet(); hostsThatShouldBeInAvoidList.add(5L); hostsThatShouldBeInAvoidList.add(7L); - assertTrue("Hosts 5 and 7 that should have been present were not found in avoid list" , + assertFalse("Hosts 5 and 7 that should have been present were not found in avoid list" , hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList)); } @@ -278,7 +283,8 @@ public class ImplicitPlannerTest { // Validations. // Check cluster list is empty. 
- assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); + //System.out.println("Cluster list should not be empty but ::" + clusterList.toString()); + assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); } @Test @@ -354,7 +360,7 @@ public class ImplicitPlannerTest { when(vmProfile.getOwner()).thenReturn(account); when(vmProfile.getVirtualMachine()).thenReturn(vm); when(vmProfile.getId()).thenReturn(12L); - when(vmDao.findById(12L)).thenReturn(userVm); + when( vmDao.findById(12L)).thenReturn(userVm); when(userVm.getAccountId()).thenReturn(accountId); when(vm.getDataCenterId()).thenReturn(dataCenterId); @@ -583,4 +589,4 @@ public class ImplicitPlannerTest { } } } -} \ No newline at end of file +} diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index 613c817668e..1babe7cbf56 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -24,6 +24,10 @@ libvirt-org http://libvirt.org/maven2 + + ceph-com + http://ceph.com/maven + @@ -36,6 +40,11 @@ libvirt ${cs.libvirt-java.version} + + com.ceph + rados + ${cs.rados-java.version} + install diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 4f90c440e4d..60d2aebe91f 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -65,6 +65,7 @@ import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; +import org.libvirt.DomainBlockStats; import org.libvirt.DomainInfo; import org.libvirt.DomainInterfaceStats; import org.libvirt.DomainSnapshot; @@ -98,6 +99,8 @@ import com.cloud.agent.api.GetHostStatsAnswer; import com.cloud.agent.api.GetHostStatsCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.GetVmDiskStatsAnswer; +import com.cloud.agent.api.GetVmDiskStatsCommand; import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.GetVncPortAnswer; @@ -144,6 +147,7 @@ import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.UpgradeSnapshotCommand; +import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; @@ -1096,6 +1100,24 @@ ServerResource { This also makes sure we never have any old "garbage" defined in libvirt which might haunt us. 
*/ + + // check for existing inactive vm definition and remove it + // this can sometimes happen during crashes, etc + Domain dm = null; + try { + dm = conn.domainLookupByName(vmName); + if (dm != null && dm.isPersistent() == 1) { + // this is safe because it doesn't stop running VMs + dm.undefine(); + } + } catch (LibvirtException e) { + // this is what we want, no domain found + } finally { + if (dm != null) { + dm.free(); + } + } + conn.domainCreateXML(domainXML, 0); } catch (final LibvirtException e) { throw e; @@ -1123,6 +1145,8 @@ ServerResource { return execute((StopCommand) cmd); } else if (cmd instanceof GetVmStatsCommand) { return execute((GetVmStatsCommand) cmd); + } else if (cmd instanceof GetVmDiskStatsCommand) { + return execute((GetVmDiskStatsCommand) cmd); } else if (cmd instanceof RebootRouterCommand) { return execute((RebootRouterCommand) cmd); } else if (cmd instanceof RebootCommand) { @@ -1261,6 +1285,13 @@ ServerResource { } private CopyVolumeAnswer execute(CopyVolumeCommand cmd) { + /** + This method is only used for copying files from Primary Storage TO Secondary Storage + + It COULD also do it the other way around, but the code in the ManagementServerImpl shows + that it always sets copyToSecondary to true + + */ boolean copyToSecondary = cmd.toSecondaryStorage(); String volumePath = cmd.getVolumePath(); StorageFilerTO pool = cmd.getPool(); @@ -1646,7 +1677,7 @@ ServerResource { private UnPlugNicAnswer execute(UnPlugNicCommand cmd) { Connect conn; NicTO nic = cmd.getNic(); - String vmName = cmd.getInstanceName(); + String vmName = cmd.getVmName(); try { conn = LibvirtConnection.getConnectionByVmName(vmName); Domain vm = getDomain(conn, vmName); @@ -2931,6 +2962,26 @@ ServerResource { } } + protected GetVmDiskStatsAnswer execute(GetVmDiskStatsCommand cmd) { + List vmNames = cmd.getVmNames(); + try { + HashMap> vmDiskStatsNameMap = new HashMap>(); + Connect conn = LibvirtConnection.getConnection(); + for (String vmName : vmNames) { + List statEntry = getVmDiskStat(conn, vmName); + if (statEntry == null) { + continue; + } + + vmDiskStatsNameMap.put(vmName, statEntry); + } + return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(), vmDiskStatsNameMap); + } catch (LibvirtException e) { + s_logger.debug("Can't get vm disk stats: " + e.toString()); + return new GetVmDiskStatsAnswer(cmd, null, null, null); + } + } + protected GetVmStatsAnswer execute(GetVmStatsCommand cmd) { List vmNames = cmd.getVmNames(); try { @@ -3866,19 +3917,23 @@ ServerResource { final HashMap vmStates = new HashMap(); Connect conn = null; + if (_hypervisorType == HypervisorType.LXC) { try { conn = LibvirtConnection.getConnectionByType(HypervisorType.LXC.toString()); vmStates.putAll(getAllVms(conn)); } catch (LibvirtException e) { s_logger.debug("Failed to get connection: " + e.getMessage()); } + } + if (_hypervisorType == HypervisorType.KVM) { try { conn = LibvirtConnection.getConnectionByType(HypervisorType.KVM.toString()); vmStates.putAll(getAllVms(conn)); } catch (LibvirtException e) { s_logger.debug("Failed to get connection: " + e.getMessage()); } + } return vmStates; } @@ -4393,10 +4448,46 @@ ServerResource { + private List getVmDiskStat(Connect conn, String vmName) + throws LibvirtException { + Domain dm = null; + try { + dm = getDomain(conn, vmName); + + List stats = new ArrayList(); + + List disks = getDisks(conn, vmName); + + for (DiskDef disk : disks) { + DomainBlockStats blockStats = dm.blockStats(disk.getDiskLabel()); + String path = disk.getDiskPath(); // for example, path = 
/mnt/pool_uuid/disk_path/ + String diskPath = null; + if (path != null) { + String[] token = path.split("/"); + if (token.length > 3) { + diskPath = token[3]; + VmDiskStatsEntry stat = new VmDiskStatsEntry(vmName, diskPath, blockStats.wr_req, blockStats.rd_req, blockStats.wr_bytes, blockStats.rd_bytes); + stats.add(stat); + } + } + } + + return stats; + } finally { + if (dm != null) { + dm.free(); + } + } + } + private class vmStats { long _usedTime; long _tx; long _rx; + long _io_rd; + long _io_wr; + long _bytes_rd; + long _bytes_wr; Calendar _timestamp; } @@ -4453,10 +4544,44 @@ ServerResource { stats.setNetworkWriteKBs(deltatx / 1024); } + /* get disk stats */ + List disks = getDisks(conn, vmName); + long io_rd = 0; + long io_wr = 0; + long bytes_rd = 0; + long bytes_wr = 0; + for (DiskDef disk : disks) { + DomainBlockStats blockStats = dm.blockStats(disk.getDiskLabel()); + io_rd += blockStats.rd_req; + io_wr += blockStats.wr_req; + bytes_rd += blockStats.rd_bytes; + bytes_wr += blockStats.wr_bytes; + } + + if (oldStats != null) { + long deltaiord = io_rd - oldStats._io_rd; + if (deltaiord > 0) + stats.setDiskReadIOs(deltaiord); + long deltaiowr = io_wr - oldStats._io_wr; + if (deltaiowr > 0) + stats.setDiskWriteIOs(deltaiowr); + long deltabytesrd = bytes_rd - oldStats._bytes_rd; + if (deltabytesrd > 0) + stats.setDiskReadKBs(deltabytesrd / 1024); + long deltabyteswr = bytes_wr - oldStats._bytes_wr; + if (deltabyteswr > 0) + stats.setDiskWriteKBs(deltabyteswr / 1024); + } + + /* save to Hashmap */ vmStats newStat = new vmStats(); newStat._usedTime = info.cpuTime; newStat._rx = rx; newStat._tx = tx; + newStat._io_rd = io_rd; + newStat._io_wr = io_wr; + newStat._bytes_rd = bytes_rd; + newStat._bytes_wr = bytes_wr; newStat._timestamp = now; _vmStats.put(vmName, newStat); return stats; diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index e7e4bbf2c30..89e22c8d05e 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -17,6 +17,9 @@ package com.cloud.hypervisor.kvm.storage; import java.io.File; +import java.io.FileInputStream; +import java.io.BufferedInputStream; +import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -37,6 +40,12 @@ import org.libvirt.StoragePool; import org.libvirt.StoragePoolInfo; import org.libvirt.StorageVol; import org.libvirt.StoragePoolInfo.StoragePoolState; +import com.ceph.rados.Rados; +import com.ceph.rados.RadosException; +import com.ceph.rados.IoCTX; +import com.ceph.rbd.Rbd; +import com.ceph.rbd.RbdImage; +import com.ceph.rbd.RbdException; import com.cloud.agent.api.ManageSnapshotCommand; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; @@ -63,6 +72,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { private String _mountPoint = "/mnt"; private String _manageSnapshotPath; + private String rbdTemplateSnapName = "cloudstack-base-snap"; + public LibvirtStorageAdaptor(StorageLayer storage) { _storageLayer = storage; _manageSnapshotPath = Script.findScript("scripts/storage/qcow2/", @@ -638,6 +649,15 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } } + /** + * This function copies a physical disk from Secondary Storage to Primary Storage + * or from Primary to Primary Storage + * + 
* The first time a template is deployed in Primary Storage it will be copied from + * Secondary to Primary. + * + * If it has been created on Primary Storage, it will be copied on the Primary Storage + */ @Override public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool) { @@ -690,21 +710,118 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { if (srcPool.getType() != StoragePoolType.RBD) { srcFile = new QemuImgFile(template.getPath(), template.getFormat()); + qemu.convert(srcFile, destFile); } else { - template.setFormat(PhysicalDiskFormat.RAW); - srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(), - srcPool.getSourcePort(), - srcPool.getAuthUserName(), - srcPool.getAuthSecret(), - template.getPath())); - srcFile.setFormat(template.getFormat()); + + /** + * We have to find out if the source file is in the same RBD pool and has + * RBD format 2 before we can do a layering/clone operation on the RBD image + * + * This will be the case when the template is already on Primary Storage and + * we want to copy it + */ + + /* Feature 1<<0 means layering in RBD format 2 */ + int rbdFeatures = (1<<0); + /* Order 0 means 4MB blocks (the default) */ + int rbdOrder = 0; + + try { + if ((srcPool.getSourceHost().equals(destPool.getSourceHost())) && (srcPool.getSourceDir().equals(destPool.getSourceDir()))) { + /* We are on the same Ceph cluster, but we require RBD format 2 on the source image */ + s_logger.debug("Trying to perform a RBD clone (layering) since we are operating in the same storage pool"); + + Rados r = new Rados(srcPool.getAuthUserName()); + r.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort()); + r.confSet("key", srcPool.getAuthSecret()); + r.connect(); + s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host")); + + IoCTX io = r.ioCtxCreate(srcPool.getSourceDir()); + Rbd rbd = new Rbd(io); + RbdImage srcImage = rbd.open(template.getName()); + + if (srcImage.isOldFormat()) { + /* The source image is RBD format 1, we have to do a regular copy */ + s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() + + " is RBD format 1. We have to perform a regular copy (" + template.getVirtualSize() + " bytes)"); + + rbd.create(disk.getName(), template.getVirtualSize(), rbdFeatures, rbdOrder); + RbdImage destImage = rbd.open(disk.getName()); + + s_logger.debug("Starting to copy " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir()); + rbd.copy(srcImage, destImage); + + s_logger.debug("Finished copying " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir()); + rbd.close(destImage); + } else { + s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() + + " is RBD format 2. 
We will perform a RBD clone using snapshot " + + this.rbdTemplateSnapName); + /* The source image is format 2, we can do a RBD snapshot+clone (layering) */ + rbd.clone(template.getName(), this.rbdTemplateSnapName, io, disk.getName(), rbdFeatures, rbdOrder); + s_logger.debug("Successfully cloned " + template.getName() + "@" + this.rbdTemplateSnapName + " to " + disk.getName()); + } + + rbd.close(srcImage); + r.ioCtxDestroy(io); + } else { + /* The source pool or host is not the same Ceph cluster, we do a simple copy with Qemu-Img */ + s_logger.debug("Both the source and destination are RBD, but not the same Ceph cluster. Performing a copy"); + + Rados rSrc = new Rados(srcPool.getAuthUserName()); + rSrc.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort()); + rSrc.confSet("key", srcPool.getAuthSecret()); + rSrc.connect(); + s_logger.debug("Successfully connected to source Ceph cluster at " + rSrc.confGet("mon_host")); + + Rados rDest = new Rados(destPool.getAuthUserName()); + rDest.confSet("mon_host", destPool.getSourceHost() + ":" + destPool.getSourcePort()); + rDest.confSet("key", destPool.getAuthSecret()); + rDest.connect(); + s_logger.debug("Successfully connected to destination Ceph cluster at " + rDest.confGet("mon_host")); + + IoCTX sIO = rSrc.ioCtxCreate(srcPool.getSourceDir()); + Rbd sRbd = new Rbd(sIO); + + IoCTX dIO = rDest.ioCtxCreate(destPool.getSourceDir()); + Rbd dRbd = new Rbd(dIO); + + s_logger.debug("Creating " + disk.getName() + " on the destination cluster " + rDest.confGet("mon_host") + + " in pool " + destPool.getSourceDir()); + dRbd.create(disk.getName(), template.getVirtualSize(), rbdFeatures, rbdOrder); + + RbdImage srcImage = sRbd.open(template.getName()); + RbdImage destImage = dRbd.open(disk.getName()); + + s_logger.debug("Copying " + template.getName() + " from Ceph cluster " + rSrc.confGet("mon_host") + " to " + disk.getName() + + " on cluster " + rDest.confGet("mon_host")); + sRbd.copy(srcImage, destImage); + + sRbd.close(srcImage); + dRbd.close(destImage); + + rSrc.ioCtxDestroy(sIO); + rDest.ioCtxDestroy(dIO); + } + } catch (RadosException e) { + s_logger.error("Failed to perform a RADOS action on the Ceph cluster, the error was: " + e.getMessage()); + disk = null; + } catch (RbdException e) { + s_logger.error("Failed to perform a RBD action on the Ceph cluster, the error was: " + e.getMessage()); + disk = null; + } } - qemu.convert(srcFile, destFile); } } catch (QemuImgException e) { s_logger.error("Failed to create " + disk.getPath() + " due to a failed executing of qemu-img: " + e.getMessage()); } + + if (disk == null) { + throw new CloudRuntimeException("Failed to create disk " + name + " from template " + template.getName()); + } + return disk; } @@ -733,17 +850,26 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } } + /** + * This copies a volume from Primary Storage to Secondary Storage + * + * In theory it could also do it the other way around, but the current implementation + * in ManagementServerImpl shows that the destPool is always a Secondary Storage Pool + */ @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { - /* + /** With RBD you can't run qemu-img convert with an existing RBD image as destination qemu-img will exit with the error that the destination already exists. So for RBD we don't create the image, but let qemu-img do that for us.
We then create a KVMPhysicalDisk object that we can return - */ + + It is however very unlikely that the destPool will be RBD, since it isn't supported + for Secondary Storage + */ KVMPhysicalDisk newDisk; if (destPool.getType() != StoragePoolType.RBD) { @@ -785,21 +911,103 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { Script.runSimpleBashScript("cp -f " + sourcePath + " " + destPath); } else { destFile = new QemuImgFile(destPath, destFormat); + try { + qemu.convert(srcFile, destFile); + } catch (QemuImgException e) { + s_logger.error("Failed to convert " + srcFile.getFileName() + " to " + + destFile.getFileName() + " the error was: " + e.getMessage()); + newDisk = null; + } } } catch (QemuImgException e) { s_logger.error("Failed to fetch the information of file " + srcFile.getFileName() + " the error was: " + e.getMessage()); + newDisk = null; } } } else if ((srcPool.getType() != StoragePoolType.RBD) && (destPool.getType() == StoragePoolType.RBD)) { - srcFile = new QemuImgFile(sourcePath, sourceFormat); - destFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(), - destPool.getSourcePort(), - destPool.getAuthUserName(), - destPool.getAuthSecret(), - destPath)); - destFile.setFormat(destFormat); + /** + * Qemu doesn't support writing to RBD format 2 directly, so we have to write to a temporary RAW file first + * which we then convert to RBD format 2. + * + * A HUGE performance gain can be achieved here if QCOW2 -> RBD format 2 can be done in one step + */ + s_logger.debug("The source image is not RBD, but the destination is. We will convert into RBD format 2"); + String tmpFile = "/tmp/" + name; + int rbdFeatures = (1<<0); + int rbdOrder = 0; + + try { + srcFile = new QemuImgFile(sourcePath, sourceFormat); + destFile = new QemuImgFile(tmpFile); + s_logger.debug("Converting " + srcFile.getFileName() + " to " + tmpFile + " as a temporary file for RBD conversion"); + qemu.convert(srcFile, destFile); + + // We now convert the temporary file to a RBD image with format 2 + Rados r = new Rados(destPool.getAuthUserName()); + r.confSet("mon_host", destPool.getSourceHost() + ":" + destPool.getSourcePort()); + r.confSet("key", destPool.getAuthSecret()); + r.connect(); + s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host")); + + IoCTX io = r.ioCtxCreate(destPool.getSourceDir()); + Rbd rbd = new Rbd(io); + + s_logger.debug("Creating RBD image " + name + " in Ceph pool " + destPool.getSourceDir() + " with RBD format 2"); + rbd.create(name, disk.getVirtualSize(), rbdFeatures, rbdOrder); + + RbdImage image = rbd.open(name); + + // We now read the temporary file and write it to the RBD image + File fh = new File(tmpFile); + BufferedInputStream bis = new BufferedInputStream(new FileInputStream(fh)); + + int chunkSize = 4194304; + long offset = 0; + s_logger.debug("Reading temporary file " + tmpFile + " (" + fh.length() + " bytes) into RBD image " + name + " in chunks of " + chunkSize + " bytes"); + while(true) { + byte[] buf = new byte[chunkSize]; + + int bytes = bis.read(buf); + if (bytes <= 0) { + break; + } + image.write(buf, offset, bytes); + offset += bytes; + } + s_logger.debug("Completed writing " + tmpFile + " to RBD image " + name + ". 
Bytes written: " + offset); + bis.close(); + s_logger.debug("Removing temporary file " + tmpFile); + fh.delete(); + + /* Snapshot the image and protect that snapshot so we can clone (layer) from it */ + s_logger.debug("Creating RBD snapshot " + this.rbdTemplateSnapName + " on image " + name); + image.snapCreate(this.rbdTemplateSnapName); + s_logger.debug("Protecting RBD snapshot " + this.rbdTemplateSnapName + " on image " + name); + image.snapProtect(this.rbdTemplateSnapName); + + rbd.close(image); + r.ioCtxDestroy(io); + } catch (QemuImgException e) { + s_logger.error("Failed to do a temp convert from " + srcFile.getFileName() + " to " + + destFile.getFileName() + " the error was: " + e.getMessage()); + newDisk = null; + } catch (RadosException e) { + s_logger.error("A Ceph RADOS operation failed (" + e.getReturnValue() + "). The error was: " + e.getMessage()); + newDisk = null; + } catch (RbdException e) { + s_logger.error("A Ceph RBD operation failed (" + e.getReturnValue() + "). The error was: " + e.getMessage()); + newDisk = null; + } catch (IOException e) { + s_logger.error("Failed reading the temporary file during the conversion to RBD: " + e.getMessage()); + newDisk = null; + } + } else { + /** + We let Qemu-Img do the work here. Although we could work with librbd and have that do the cloning + it doesn't benefit us. It's better to keep the current code in place which works + */ srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(), srcPool.getSourcePort(), srcPool.getAuthUserName(), @@ -812,17 +1020,19 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { destPool.getAuthSecret(), destPath)); destFile.setFormat(destFormat); - } - if (srcFile != null && destFile != null) { try { qemu.convert(srcFile, destFile); } catch (QemuImgException e) { s_logger.error("Failed to convert " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + newDisk = null; } } + if (newDisk == null) { + throw new CloudRuntimeException("Failed to copy " + disk.getPath() + " to " + name); + } return newDisk; } diff --git a/plugins/hypervisors/simulator/resources/components-simulator.xml b/plugins/hypervisors/simulator/resources/components-simulator.xml deleted file mode 100644 index 2658e4db3ee..00000000000 --- a/plugins/hypervisors/simulator/resources/components-simulator.xml +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - - - - - true - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - - - - - - - diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManager.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManager.java index fa02873dd75..a5bdbfb2f40 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManager.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManager.java @@ -16,10 +16,6 @@ // under the License. 
package com.cloud.agent.manager; -import java.util.Map; - -import javax.naming.ConfigurationException; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthCommand; import com.cloud.agent.api.CheckNetworkCommand; @@ -31,13 +27,12 @@ import com.cloud.resource.AgentResourceBase; import com.cloud.simulator.MockHost; import com.cloud.utils.component.Manager; +import javax.naming.ConfigurationException; +import java.util.Map; + public interface MockAgentManager extends Manager { - public static final long DEFAULT_HOST_MEM_SIZE = 8 * 1024 * 1024 * 1024L; // 8G, - // unit - // of - // Mbytes - public static final int DEFAULT_HOST_CPU_CORES = 4; // 2 dual core CPUs (2 x - // 2) + public static final long DEFAULT_HOST_MEM_SIZE = 8 * 1024 * 1024 * 1024L; // 8G, unit of Mbytes + public static final int DEFAULT_HOST_CPU_CORES = 4; // 2 dual core CPUs (2 x 2) public static final int DEFAULT_HOST_SPEED_MHZ = 8000; // 1 GHz CPUs boolean configure(String name, Map params) throws ConfigurationException; diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManager.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManager.java new file mode 100644 index 00000000000..554af68ebd6 --- /dev/null +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManager.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package com.cloud.agent.manager; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckS2SVpnConnectionsCommand; +import com.cloud.agent.api.NetworkUsageCommand; +import com.cloud.agent.api.PlugNicAnswer; +import com.cloud.agent.api.PlugNicCommand; +import com.cloud.agent.api.PvlanSetupCommand; +import com.cloud.agent.api.SetupGuestNetworkAnswer; +import com.cloud.agent.api.SetupGuestNetworkCommand; +import com.cloud.agent.api.UnPlugNicAnswer; +import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.routing.DhcpEntryCommand; +import com.cloud.agent.api.routing.IpAssocAnswer; +import com.cloud.agent.api.routing.IpAssocCommand; +import com.cloud.agent.api.routing.IpAssocVpcCommand; +import com.cloud.agent.api.routing.LoadBalancerConfigCommand; +import com.cloud.agent.api.routing.SetFirewallRulesCommand; +import com.cloud.agent.api.routing.SetNetworkACLAnswer; +import com.cloud.agent.api.routing.SetNetworkACLCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesAnswer; +import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesVpcCommand; +import com.cloud.agent.api.routing.SetSourceNatAnswer; +import com.cloud.agent.api.routing.SetSourceNatCommand; +import com.cloud.agent.api.routing.SetStaticNatRulesAnswer; +import com.cloud.agent.api.routing.SetStaticNatRulesCommand; +import com.cloud.agent.api.routing.SetStaticRouteAnswer; +import com.cloud.agent.api.routing.SetStaticRouteCommand; +import com.cloud.agent.api.routing.Site2SiteVpnCfgCommand; +import com.cloud.utils.component.Manager; + +public interface MockNetworkManager extends Manager { + + Answer SetStaticNatRules(SetStaticNatRulesCommand cmd); + + Answer SetPortForwardingRules(SetPortForwardingRulesCommand cmd); + + Answer SetFirewallRules(SetFirewallRulesCommand cmd); + + Answer getNetworkUsage(NetworkUsageCommand cmd); + + Answer IpAssoc(IpAssocCommand cmd); + + Answer LoadBalancerConfig(LoadBalancerConfigCommand cmd); + + Answer AddDhcpEntry(DhcpEntryCommand cmd); + + Answer setupPVLAN(PvlanSetupCommand cmd); + + PlugNicAnswer plugNic(PlugNicCommand cmd); + + UnPlugNicAnswer unplugNic(UnPlugNicCommand cmd); + + IpAssocAnswer ipAssoc(IpAssocVpcCommand cmd); + + SetSourceNatAnswer setSourceNat(SetSourceNatCommand cmd); + + SetNetworkACLAnswer setNetworkAcl(SetNetworkACLCommand cmd); + + SetPortForwardingRulesAnswer setVpcPortForwards(SetPortForwardingRulesVpcCommand cmd); + + SetupGuestNetworkAnswer setUpGuestNetwork(SetupGuestNetworkCommand cmd); + + SetStaticNatRulesAnswer setVPCStaticNatRules(SetStaticNatRulesCommand cmd); + + SetStaticRouteAnswer setStaticRoute(SetStaticRouteCommand cmd); + + Answer siteToSiteVpn(Site2SiteVpnCfgCommand cmd); + + Answer checkSiteToSiteVpnConnection(CheckS2SVpnConnectionsCommand cmd); +} diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java new file mode 100644 index 00000000000..ea8bcce7394 --- /dev/null +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockNetworkManagerImpl.java @@ -0,0 +1,236 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.cloud.agent.manager; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckS2SVpnConnectionsCommand; +import com.cloud.agent.api.NetworkUsageAnswer; +import com.cloud.agent.api.NetworkUsageCommand; +import com.cloud.agent.api.PlugNicAnswer; +import com.cloud.agent.api.PlugNicCommand; +import com.cloud.agent.api.PvlanSetupCommand; +import com.cloud.agent.api.SetupGuestNetworkAnswer; +import com.cloud.agent.api.SetupGuestNetworkCommand; +import com.cloud.agent.api.UnPlugNicAnswer; +import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.routing.DhcpEntryCommand; +import com.cloud.agent.api.routing.IpAssocAnswer; +import com.cloud.agent.api.routing.IpAssocCommand; +import com.cloud.agent.api.routing.IpAssocVpcCommand; +import com.cloud.agent.api.routing.LoadBalancerConfigCommand; +import com.cloud.agent.api.routing.NetworkElementCommand; +import com.cloud.agent.api.routing.SetFirewallRulesAnswer; +import com.cloud.agent.api.routing.SetFirewallRulesCommand; +import com.cloud.agent.api.routing.SetNetworkACLAnswer; +import com.cloud.agent.api.routing.SetNetworkACLCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesAnswer; +import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesVpcCommand; +import com.cloud.agent.api.routing.SetSourceNatAnswer; +import com.cloud.agent.api.routing.SetSourceNatCommand; +import com.cloud.agent.api.routing.SetStaticNatRulesAnswer; +import com.cloud.agent.api.routing.SetStaticNatRulesCommand; +import com.cloud.agent.api.routing.SetStaticRouteAnswer; +import com.cloud.agent.api.routing.SetStaticRouteCommand; +import com.cloud.agent.api.routing.Site2SiteVpnCfgCommand; +import com.cloud.agent.api.to.IpAddressTO; +import com.cloud.agent.api.to.PortForwardingRuleTO; +import com.cloud.simulator.MockVMVO; +import com.cloud.simulator.dao.MockVMDao; +import com.cloud.utils.component.ManagerBase; +import org.apache.log4j.Logger; + +import javax.inject.Inject; + +public class MockNetworkManagerImpl extends ManagerBase implements MockNetworkManager { + private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); + + @Inject + MockVMDao _mockVmDao; + + @Override + public Answer SetStaticNatRules(SetStaticNatRulesCommand cmd) { + return new Answer(cmd); + } + + @Override + public Answer SetPortForwardingRules(SetPortForwardingRulesCommand cmd) { + return new Answer(cmd); + } + + @Override + public SetFirewallRulesAnswer SetFirewallRules(SetFirewallRulesCommand cmd) { + String[] results = new String[cmd.getRules().length]; + String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); + if (routerIp == null) { + return new SetFirewallRulesAnswer(cmd, false, results); + } + + String[][] rules = cmd.generateFwRules(); + StringBuilder sb = new StringBuilder(); + String[] fwRules = rules[0]; + if (fwRules.length > 0) { + for (int i = 0; i < 
fwRules.length; i++) { + sb.append(fwRules[i]).append(','); + } + } + return new SetFirewallRulesAnswer(cmd, true, results); + } + + + @Override + public NetworkUsageAnswer getNetworkUsage(NetworkUsageCommand cmd) { + return new NetworkUsageAnswer(cmd, null, 100L, 100L); + } + + @Override + public Answer IpAssoc(IpAssocCommand cmd) { + return new Answer(cmd); + } + + @Override + public Answer LoadBalancerConfig(LoadBalancerConfigCommand cmd) { + return new Answer(cmd); + } + + @Override + public Answer AddDhcpEntry(DhcpEntryCommand cmd) { + return new Answer(cmd); + } + + @Override + public Answer setupPVLAN(PvlanSetupCommand cmd) { + return new Answer(cmd); + } + + @Override + public PlugNicAnswer plugNic(PlugNicCommand cmd) { + String vmname = cmd.getVmName(); + if (_mockVmDao.findByVmName(vmname) != null) { + s_logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + return new PlugNicAnswer(cmd, true, "success"); + } + s_logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + return new PlugNicAnswer(cmd, false, "failure"); + } + + @Override + public UnPlugNicAnswer unplugNic(UnPlugNicCommand cmd) { + String vmname = cmd.getVmName(); + if (_mockVmDao.findByVmName(vmname) != null) { + s_logger.debug("Unplugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") from " + cmd.getVmName()); + return new UnPlugNicAnswer(cmd, true, "success"); + } + s_logger.error("Unplug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") from " + cmd.getVmName()); + return new UnPlugNicAnswer(cmd, false, "failure"); + } + + @Override + public IpAssocAnswer ipAssoc(IpAssocVpcCommand cmd) { + String[] results = new String[cmd.getIpAddresses().length]; + int i = 0; + IpAddressTO[] ips = cmd.getIpAddresses(); + for (IpAddressTO ip : ips) { + results[i++] = ip.getPublicIp() + " - success"; + } + return new IpAssocAnswer(cmd, results); + } + + @Override + public SetSourceNatAnswer setSourceNat(SetSourceNatCommand cmd) { + return new SetSourceNatAnswer(cmd, true, "success"); + } + + @Override + public SetNetworkACLAnswer setNetworkAcl(SetNetworkACLCommand cmd) { + String[] results = new String[cmd.getRules().length]; + String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); + String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); + + StringBuilder sb = new StringBuilder(); + sb.append(routerIp); + sb.append(routerName); + + String [][] rules = cmd.generateFwRules(); + String[] aclRules = rules[0]; + + for (int i = 0; i < aclRules.length; i++) { + sb.append(aclRules[i]).append(','); + } + return new SetNetworkACLAnswer(cmd, true, results); + } + + @Override + public SetPortForwardingRulesAnswer setVpcPortForwards(SetPortForwardingRulesVpcCommand cmd) { + String[] results = new String[cmd.getRules().length]; + StringBuilder sb = new StringBuilder(); + for (PortForwardingRuleTO rule : cmd.getRules()) { + sb.append("src:"); + sb.append(rule.getStringSrcPortRange()); + sb.append("dst:"); + sb.append(rule.getStringDstPortRange()); + } + return new SetPortForwardingRulesAnswer(cmd, results, true); + } + + @Override + public SetStaticRouteAnswer setStaticRoute(SetStaticRouteCommand cmd) { + String[] results = new String[cmd.getStaticRoutes().length]; + String [][] rules = cmd.generateSRouteRules(); + StringBuilder sb = new StringBuilder(); + String[] srRules = rules[0]; + for (int i = 0;
i < srRules.length; i++) { + sb.append(srRules[i]).append(','); + } + return new SetStaticRouteAnswer(cmd, true, results); + } + + @Override + public SetupGuestNetworkAnswer setUpGuestNetwork(SetupGuestNetworkCommand cmd) { + String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); + try { + MockVMVO vms = _mockVmDao.findByVmName(domrName); + if (vms == null) { + return new SetupGuestNetworkAnswer(cmd, false, "Can not find VM " + domrName); + } + return new SetupGuestNetworkAnswer(cmd, true, "success"); + } catch (Exception e) { + String msg = "Creating guest network failed due to " + e.toString(); + s_logger.warn(msg, e); + return new SetupGuestNetworkAnswer(cmd, false, msg); + } + } + + @Override + public SetStaticNatRulesAnswer setVPCStaticNatRules(SetStaticNatRulesCommand cmd) { + String[] results = new String[cmd.getRules().length]; + return new SetStaticNatRulesAnswer(cmd, results, true); + } + + @Override + public Answer siteToSiteVpn(Site2SiteVpnCfgCommand cmd) { + return new Answer(cmd); + } + + @Override + public Answer checkSiteToSiteVpnConnection(CheckS2SVpnConnectionsCommand cmd) { + return new Answer(cmd); + } +} diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManager.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManager.java index c5f93b75645..113bcaabe0c 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManager.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManager.java @@ -16,88 +16,98 @@ // under the License. package com.cloud.agent.manager; -import java.util.HashMap; -import java.util.Map; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.BumpUpPriorityCommand; import com.cloud.agent.api.CheckRouterAnswer; import com.cloud.agent.api.CheckRouterCommand; import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.CleanupNetworkRulesCmd; +import com.cloud.agent.api.CreateVMSnapshotCommand; +import com.cloud.agent.api.DeleteVMSnapshotCommand; import com.cloud.agent.api.GetDomRVersionAnswer; import com.cloud.agent.api.GetDomRVersionCmd; import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.GetVncPortCommand; import com.cloud.agent.api.MigrateAnswer; import com.cloud.agent.api.MigrateCommand; -import com.cloud.agent.api.NetworkUsageCommand; +import com.cloud.agent.api.NetworkRulesVmSecondaryIpCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.RebootAnswer; import com.cloud.agent.api.RebootCommand; +import com.cloud.agent.api.RevertToVMSnapshotCommand; +import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.SecurityGroupRuleAnswer; import com.cloud.agent.api.SecurityGroupRulesCmd; +import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StartCommand; +import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.agent.api.routing.IpAssocCommand; -import com.cloud.agent.api.routing.LoadBalancerConfigCommand; import com.cloud.agent.api.routing.SavePasswordCommand; -import com.cloud.agent.api.routing.SetFirewallRulesCommand; -import 
com.cloud.agent.api.routing.SetPortForwardingRulesCommand; -import com.cloud.agent.api.routing.SetStaticNatRulesCommand; import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.simulator.MockVMVO; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.vm.VirtualMachine.State; +import java.util.HashMap; +import java.util.Map; + public interface MockVmManager extends Manager { - public Answer stopVM(StopCommand cmd); - public Answer rebootVM(RebootCommand cmd); - public Answer checkVmState(CheckVirtualMachineCommand cmd); - public Map getVmStates(String hostGuid); - public Answer getVncPort(GetVncPortCommand cmd); + Map getVmStates(String hostGuid); - Answer startVM(StartCommand cmd, SimulatorInfo info); + Map getVms(String hostGuid); + + HashMap> syncNetworkGroups(SimulatorInfo info); + + StartAnswer startVM(StartCommand cmd, SimulatorInfo info); + + StopAnswer stopVM(StopCommand cmd); + + RebootAnswer rebootVM(RebootCommand cmd); + + Answer checkVmState(CheckVirtualMachineCommand cmd); + + Answer getVncPort(GetVncPortCommand cmd); Answer getVmStats(GetVmStatsCommand cmd); - public CheckSshAnswer checkSshCommand(CheckSshCommand cmd); - Answer SetStaticNatRules(SetStaticNatRulesCommand cmd); - - Answer SetPortForwardingRules(SetPortForwardingRulesCommand cmd); - - Answer SetFirewallRules(SetFirewallRulesCommand cmd); - - Answer getNetworkUsage(NetworkUsageCommand cmd); - - Answer IpAssoc(IpAssocCommand cmd); - - Answer LoadBalancerConfig(LoadBalancerConfigCommand cmd); - - Answer AddDhcpEntry(DhcpEntryCommand cmd); + CheckSshAnswer checkSshCommand(CheckSshCommand cmd); Answer setVmData(VmDataCommand cmd); - Answer CleanupNetworkRules(CleanupNetworkRulesCmd cmd, SimulatorInfo info); Answer CheckConsoleProxyLoad(CheckConsoleProxyLoadCommand cmd); + Answer WatchConsoleProxyLoad(WatchConsoleProxyLoadCommand cmd); Answer SavePassword(SavePasswordCommand cmd); - HashMap> syncNetworkGroups(SimulatorInfo info); - SecurityGroupRuleAnswer AddSecurityGroupRules(SecurityGroupRulesCmd cmd, SimulatorInfo info); - MigrateAnswer Migrate(MigrateCommand cmd, SimulatorInfo info); + + MigrateAnswer Migrate(MigrateCommand cmd, SimulatorInfo info); + PrepareForMigrationAnswer prepareForMigrate(PrepareForMigrationCommand cmd); - GetDomRVersionAnswer getDomRVersion(GetDomRVersionCmd cmd); - Map getVms(String hostGuid); + + SecurityGroupRuleAnswer AddSecurityGroupRules(SecurityGroupRulesCmd cmd, SimulatorInfo info); + + GetDomRVersionAnswer getDomRVersion(GetDomRVersionCmd cmd); CheckRouterAnswer checkRouter(CheckRouterCommand cmd); Answer bumpPriority(BumpUpPriorityCommand cmd); + + Answer CleanupNetworkRules(CleanupNetworkRulesCmd cmd, SimulatorInfo info); + + Answer scaleVm(ScaleVmCommand cmd); + + Answer plugSecondaryIp(NetworkRulesVmSecondaryIpCommand cmd); + + Answer createVmSnapshot(CreateVMSnapshotCommand cmd); + + Answer deleteVmSnapshot(DeleteVMSnapshotCommand cmd); + + Answer revertVmSnapshot(RevertToVMSnapshotCommand cmd); } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java index c0ccbe43978..64df113ea91 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java @@ -17,12 +17,47 @@ package com.cloud.agent.manager; -import com.cloud.agent.api.*; +import com.cloud.agent.api.Answer; +import 
com.cloud.agent.api.BumpUpPriorityCommand; +import com.cloud.agent.api.CheckRouterAnswer; +import com.cloud.agent.api.CheckRouterCommand; +import com.cloud.agent.api.CheckVirtualMachineAnswer; +import com.cloud.agent.api.CheckVirtualMachineCommand; +import com.cloud.agent.api.CleanupNetworkRulesCmd; +import com.cloud.agent.api.CreateVMSnapshotAnswer; +import com.cloud.agent.api.CreateVMSnapshotCommand; +import com.cloud.agent.api.DeleteVMSnapshotAnswer; +import com.cloud.agent.api.DeleteVMSnapshotCommand; +import com.cloud.agent.api.GetDomRVersionAnswer; +import com.cloud.agent.api.GetDomRVersionCmd; +import com.cloud.agent.api.GetVmStatsAnswer; +import com.cloud.agent.api.GetVmStatsCommand; +import com.cloud.agent.api.GetVncPortAnswer; +import com.cloud.agent.api.GetVncPortCommand; +import com.cloud.agent.api.MigrateAnswer; +import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.NetworkRulesVmSecondaryIpCommand; +import com.cloud.agent.api.PrepareForMigrationAnswer; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.RebootAnswer; +import com.cloud.agent.api.RebootCommand; +import com.cloud.agent.api.RevertToVMSnapshotAnswer; +import com.cloud.agent.api.RevertToVMSnapshotCommand; +import com.cloud.agent.api.ScaleVmCommand; +import com.cloud.agent.api.SecurityGroupRuleAnswer; +import com.cloud.agent.api.SecurityGroupRulesCmd; +import com.cloud.agent.api.StartAnswer; +import com.cloud.agent.api.StartCommand; +import com.cloud.agent.api.StopAnswer; +import com.cloud.agent.api.StopCommand; +import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.*; +import com.cloud.agent.api.routing.NetworkElementCommand; +import com.cloud.agent.api.routing.SavePasswordCommand; +import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.network.Networks.TrafficType; @@ -193,28 +228,6 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { return null; } - public boolean rebootVM(String vmName) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); - try { - txn.start(); - MockVm vm = _mockVmDao.findByVmName(vmName); - if (vm != null) { - vm.setState(State.Running); - _mockVmDao.update(vm.getId(), (MockVMVO) vm); - - } - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - throw new CloudRuntimeException("unable to reboot vm " + vmName, ex); - } finally { - txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); - txn.close(); - } - return true; - } - @Override public Map getVms(String hostGuid) { Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); @@ -346,7 +359,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { } @Override - public Answer startVM(StartCommand cmd, SimulatorInfo info) { + public StartAnswer startVM(StartCommand cmd, SimulatorInfo info) { VirtualMachineTO vm = cmd.getVirtualMachine(); String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getMaxSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid()); if (result != null) { @@ -361,26 +374,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { return new CheckSshAnswer(cmd); } - @Override - public Answer 
SetStaticNatRules(SetStaticNatRulesCommand cmd) { - return new Answer(cmd); - } - @Override - public Answer SetPortForwardingRules(SetPortForwardingRulesCommand cmd) { - return new Answer(cmd); - } - - @Override - public Answer SetFirewallRules(SetFirewallRulesCommand cmd) { - return new Answer(cmd); - } - - - @Override - public NetworkUsageAnswer getNetworkUsage(NetworkUsageCommand cmd) { - return new NetworkUsageAnswer(cmd, null, 100L, 100L); - } @Override public MigrateAnswer Migrate(MigrateCommand cmd, SimulatorInfo info) { @@ -437,21 +431,6 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { } } - @Override - public Answer IpAssoc(IpAssocCommand cmd) { - return new Answer(cmd); - } - - @Override - public Answer LoadBalancerConfig(LoadBalancerConfigCommand cmd) { - return new Answer(cmd); - } - - @Override - public Answer AddDhcpEntry(DhcpEntryCommand cmd) { - return new Answer(cmd); - } - @Override public Answer setVmData(VmDataCommand cmd) { return new Answer(cmd); @@ -482,7 +461,48 @@ } @Override - public Answer stopVM(StopCommand cmd) { + public Answer scaleVm(ScaleVmCommand cmd) { + return null; // not implemented by the simulator + } + + @Override + public Answer plugSecondaryIp(NetworkRulesVmSecondaryIpCommand cmd) { + return null; // not implemented by the simulator + } + + @Override + public Answer createVmSnapshot(CreateVMSnapshotCommand cmd) { + String vmName = cmd.getVmName(); + String vmSnapshotName = cmd.getTarget().getSnapshotName(); + + s_logger.debug("Created snapshot " +vmSnapshotName+ " for vm " + vmName); + return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), cmd.getVolumeTOs()); + } + + @Override + public Answer deleteVmSnapshot(DeleteVMSnapshotCommand cmd) { + String vm = cmd.getVmName(); + String snapshotName = cmd.getTarget().getSnapshotName(); + if (_mockVmDao.findByVmName(cmd.getVmName()) == null) { + return new DeleteVMSnapshotAnswer(cmd, false, "No VM by name "+ cmd.getVmName()); + } + s_logger.debug("Removed snapshot " +snapshotName+ " of VM "+vm); + return new DeleteVMSnapshotAnswer(cmd, true, "success"); + } + + @Override + public Answer revertVmSnapshot(RevertToVMSnapshotCommand cmd) { + String vm = cmd.getVmName(); + String snapshot = cmd.getTarget().getSnapshotName(); + if (_mockVmDao.findByVmName(cmd.getVmName()) == null) { + return new RevertToVMSnapshotAnswer(cmd, false, "No VM by name "+ cmd.getVmName()); + } + s_logger.debug("Reverted to snapshot " +snapshot+ " of VM "+vm); + return new RevertToVMSnapshotAnswer(cmd, true, "success"); + } + + @Override + public StopAnswer stopVM(StopCommand cmd) { Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); try { txn.start(); @@ -509,7 +529,7 @@ } @Override - public Answer rebootVM(RebootCommand cmd) { + public RebootAnswer rebootVM(RebootCommand cmd) { Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); try { txn.start(); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java index 5ddf60aebb8..c13d37ec4e0 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java @@
-38,6 +38,7 @@ import com.cloud.agent.api.BumpUpPriorityCommand; import com.cloud.agent.api.CheckHealthCommand; import com.cloud.agent.api.CheckNetworkCommand; import com.cloud.agent.api.CheckRouterCommand; +import com.cloud.agent.api.CheckS2SVpnConnectionsCommand; import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.CleanupNetworkRulesCmd; import com.cloud.agent.api.ClusterSyncCommand; @@ -46,8 +47,10 @@ import com.cloud.agent.api.ComputeChecksumCommand; import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.CreateVMSnapshotCommand; import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.DeleteVMSnapshotCommand; import com.cloud.agent.api.GetDomRVersionCmd; import com.cloud.agent.api.GetHostStatsCommand; import com.cloud.agent.api.GetStorageStatsCommand; @@ -57,26 +60,39 @@ import com.cloud.agent.api.MaintainCommand; import com.cloud.agent.api.ManageSnapshotCommand; import com.cloud.agent.api.MigrateCommand; import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.NetworkRulesVmSecondaryIpCommand; import com.cloud.agent.api.NetworkUsageCommand; import com.cloud.agent.api.PingTestCommand; +import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.RebootCommand; +import com.cloud.agent.api.RevertToVMSnapshotCommand; +import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.SecStorageSetupCommand; import com.cloud.agent.api.SecStorageVMSetupCommand; import com.cloud.agent.api.SecurityGroupRulesCmd; +import com.cloud.agent.api.SetupGuestNetworkCommand; import com.cloud.agent.api.StartCommand; import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; import com.cloud.agent.api.routing.DhcpEntryCommand; import com.cloud.agent.api.routing.IpAssocCommand; +import com.cloud.agent.api.routing.IpAssocVpcCommand; import com.cloud.agent.api.routing.LoadBalancerConfigCommand; import com.cloud.agent.api.routing.SavePasswordCommand; import com.cloud.agent.api.routing.SetFirewallRulesCommand; +import com.cloud.agent.api.routing.SetNetworkACLCommand; import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; +import com.cloud.agent.api.routing.SetPortForwardingRulesVpcCommand; +import com.cloud.agent.api.routing.SetSourceNatCommand; import com.cloud.agent.api.routing.SetStaticNatRulesCommand; +import com.cloud.agent.api.routing.SetStaticRouteCommand; +import com.cloud.agent.api.routing.Site2SiteVpnCfgCommand; import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.agent.api.storage.CopyVolumeCommand; import com.cloud.agent.api.storage.CreateCommand; @@ -102,13 +118,15 @@ import com.cloud.vm.VirtualMachine.State; public class SimulatorManagerImpl extends ManagerBase implements SimulatorManager { private static final Logger s_logger = Logger.getLogger(SimulatorManagerImpl.class); @Inject - MockVmManager _mockVmMgr = null; + MockVmManager _mockVmMgr; @Inject - MockStorageManager _mockStorageMgr = null; + MockStorageManager 
_mockStorageMgr; @Inject - MockAgentManager _mockAgentMgr = null; + MockAgentManager _mockAgentMgr; @Inject - MockConfigurationDao _mockConfigDao = null; + MockNetworkManager _mockNetworkMgr; + @Inject + MockConfigurationDao _mockConfigDao; @Inject MockHostDao _mockHost = null; private ConnectionConcierge _concierge; @@ -213,19 +231,19 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage } else if (cmd instanceof CheckVirtualMachineCommand) { return _mockVmMgr.checkVmState((CheckVirtualMachineCommand) cmd); } else if (cmd instanceof SetStaticNatRulesCommand) { - return _mockVmMgr.SetStaticNatRules((SetStaticNatRulesCommand) cmd); + return _mockNetworkMgr.SetStaticNatRules((SetStaticNatRulesCommand) cmd); } else if (cmd instanceof SetFirewallRulesCommand) { - return _mockVmMgr.SetFirewallRules((SetFirewallRulesCommand) cmd); + return _mockNetworkMgr.SetFirewallRules((SetFirewallRulesCommand) cmd); } else if (cmd instanceof SetPortForwardingRulesCommand) { - return _mockVmMgr.SetPortForwardingRules((SetPortForwardingRulesCommand) cmd); + return _mockNetworkMgr.SetPortForwardingRules((SetPortForwardingRulesCommand) cmd); } else if (cmd instanceof NetworkUsageCommand) { - return _mockVmMgr.getNetworkUsage((NetworkUsageCommand) cmd); + return _mockNetworkMgr.getNetworkUsage((NetworkUsageCommand) cmd); } else if (cmd instanceof IpAssocCommand) { - return _mockVmMgr.IpAssoc((IpAssocCommand) cmd); + return _mockNetworkMgr.IpAssoc((IpAssocCommand) cmd); } else if (cmd instanceof LoadBalancerConfigCommand) { - return _mockVmMgr.LoadBalancerConfig((LoadBalancerConfigCommand) cmd); + return _mockNetworkMgr.LoadBalancerConfig((LoadBalancerConfigCommand) cmd); } else if (cmd instanceof DhcpEntryCommand) { - return _mockVmMgr.AddDhcpEntry((DhcpEntryCommand) cmd); + return _mockNetworkMgr.AddDhcpEntry((DhcpEntryCommand) cmd); } else if (cmd instanceof VmDataCommand) { return _mockVmMgr.setVmData((VmDataCommand) cmd); } else if (cmd instanceof CleanupNetworkRulesCmd) { @@ -304,7 +322,42 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage return new Answer(cmd); } else if (cmd instanceof CopyVolumeCommand) { return _mockStorageMgr.CopyVolume((CopyVolumeCommand) cmd); + } else if (cmd instanceof PlugNicCommand) { + return _mockNetworkMgr.plugNic((PlugNicCommand) cmd); + } else if (cmd instanceof UnPlugNicCommand) { + return _mockNetworkMgr.unplugNic((UnPlugNicCommand) cmd); + } else if (cmd instanceof IpAssocVpcCommand) { + return _mockNetworkMgr.ipAssoc((IpAssocVpcCommand) cmd); + } else if (cmd instanceof SetSourceNatCommand) { + return _mockNetworkMgr.setSourceNat((SetSourceNatCommand) cmd); + } else if (cmd instanceof SetNetworkACLCommand) { + return _mockNetworkMgr.setNetworkAcl((SetNetworkACLCommand) cmd); + } else if (cmd instanceof SetupGuestNetworkCommand) { + return _mockNetworkMgr.setUpGuestNetwork((SetupGuestNetworkCommand) cmd); + } else if (cmd instanceof SetPortForwardingRulesVpcCommand) { + return _mockNetworkMgr.setVpcPortForwards((SetPortForwardingRulesVpcCommand) cmd); + } else if (cmd instanceof SetStaticNatRulesCommand) { + return _mockNetworkMgr.setVPCStaticNatRules((SetStaticNatRulesCommand) cmd); + } else if (cmd instanceof SetStaticRouteCommand) { + return _mockNetworkMgr.setStaticRoute((SetStaticRouteCommand) cmd); + } else if (cmd instanceof Site2SiteVpnCfgCommand) { + return _mockNetworkMgr.siteToSiteVpn((Site2SiteVpnCfgCommand) cmd); + } else if (cmd instanceof CheckS2SVpnConnectionsCommand) { + return 
_mockNetworkMgr.checkSiteToSiteVpnConnection((CheckS2SVpnConnectionsCommand) cmd); + } else if (cmd instanceof CreateVMSnapshotCommand) { + return _mockVmMgr.createVmSnapshot((CreateVMSnapshotCommand) cmd); + } else if (cmd instanceof DeleteVMSnapshotCommand) { + return _mockVmMgr.deleteVmSnapshot((DeleteVMSnapshotCommand) cmd); + } else if (cmd instanceof RevertToVMSnapshotCommand) { + return _mockVmMgr.revertVmSnapshot((RevertToVMSnapshotCommand) cmd); + } else if (cmd instanceof NetworkRulesVmSecondaryIpCommand) { + return _mockVmMgr.plugSecondaryIp((NetworkRulesVmSecondaryIpCommand) cmd); + } else if (cmd instanceof ScaleVmCommand) { + return _mockVmMgr.scaleVm((ScaleVmCommand) cmd); + } else if (cmd instanceof PvlanSetupCommand) { + return _mockNetworkMgr.setupPVLAN((PvlanSetupCommand) cmd); } else { + s_logger.error("Simulator does not implement command of type "+cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } } catch(Exception e) { diff --git a/plugins/hypervisors/simulator/src/com/cloud/configuration/SimulatorComponentLibrary.java b/plugins/hypervisors/simulator/src/com/cloud/configuration/SimulatorComponentLibrary.java deleted file mode 100644 index 373cae1367e..00000000000 --- a/plugins/hypervisors/simulator/src/com/cloud/configuration/SimulatorComponentLibrary.java +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.configuration; - - -//TODO: Remove this class after the managers are figured out. -public class SimulatorComponentLibrary { -// @Override -// protected void populateManagers() { -// addManager("VM Manager", MockVmManagerImpl.class); -// addManager("agent manager", MockAgentManagerImpl.class); -// addManager("storage manager", MockStorageManagerImpl.class); -// addManager("SimulatorManager", SimulatorManagerImpl.class); -// } -// -// @Override -// protected void populateDaos() { -// addDao("mock Host", MockHostDaoImpl.class); -// addDao("mock secondary storage", MockSecStorageDaoImpl.class); -// addDao("mock storage pool", MockStoragePoolDaoImpl.class); -// addDao("mock vm", MockVMDaoImpl.class); -// addDao("mock volume", MockVolumeDaoImpl.class); -// addDao("mock config", MockConfigurationDaoImpl.class); -// addDao("mock security rules", MockSecurityRulesDaoImpl.class); -// } -} diff --git a/plugins/hypervisors/vmware/pom.xml b/plugins/hypervisors/vmware/pom.xml index 79779decf62..755244f5f61 100644 --- a/plugins/hypervisors/vmware/pom.xml +++ b/plugins/hypervisors/vmware/pom.xml @@ -37,6 +37,12 @@ cloud-secondary-storage ${project.version}
+ + org.apache.cloudstack + cloud-engine-storage + 4.2.0-SNAPSHOT + compile + com.cloud.com.vmware vmware-vim25 diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/LegacyZone.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/LegacyZone.java new file mode 100644 index 00000000000..535b6a7f99d --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/LegacyZone.java @@ -0,0 +1,28 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.vmware; + +import org.apache.cloudstack.api.InternalIdentity; + +public interface LegacyZone extends InternalIdentity { + + long getId(); + + long getZoneId(); + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/LegacyZoneVO.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/LegacyZoneVO.java new file mode 100644 index 00000000000..390e56daf00 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/LegacyZoneVO.java @@ -0,0 +1,81 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.vmware; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.NumbersUtil; + +/** +* LegacyZoneVO contains id of CloudStack zone containing clusters from multiple VMware vCetners and/or VMware Datacenters. 
+*/ + +@Entity +@Table(name="legacy_zones") +public class LegacyZoneVO implements LegacyZone { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "zone_id") + private long zoneId; + + @Override + public long getId() { + return id; + } + + @Override + public long getZoneId() { + return zoneId; + } + + @Override + public int hashCode() { + return NumbersUtil.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof LegacyZoneVO) { + return ((LegacyZoneVO)obj).getId() == this.getId(); + } else { + return false; + } + } + + public LegacyZoneVO() { + } + + public LegacyZoneVO(long zoneId) { + this.id = zoneId; + } + + public LegacyZoneVO(long id, long zoneId) { + this.id = id; + this.zoneId = zoneId; + } + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenter.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenter.java new file mode 100644 index 00000000000..6d6d2ebf0eb --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenter.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface VmwareDatacenter extends Identity, InternalIdentity { + + String getVmwareDatacenterName(); + + String getGuid(); + + String getVcenterHost(); + + long getId(); + + String getPassword(); + + String getUser(); +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterService.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterService.java new file mode 100644 index 00000000000..5e80e1829f1 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterService.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.vmware; + +import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd; +import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd; + +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.utils.component.PluggableService; + +public interface VmwareDatacenterService extends PluggableService { + + public VmwareDatacenterVO addVmwareDatacenter(AddVmwareDcCmd cmd) throws IllegalArgumentException, DiscoveryException, ResourceInUseException; + + public boolean removeVmwareDatacenter(RemoveVmwareDcCmd cmd) throws IllegalArgumentException, ResourceInUseException; +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterVO.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterVO.java new file mode 100644 index 00000000000..a13e59e5cb4 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterVO.java @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware; + +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.Encrypt; + +/** + * VmwareDatacenterVO contains information of Vmware Datacenter associated with a CloudStack zone. 
+ */ + +@Entity +@Table(name="vmware_data_center") +public class VmwareDatacenterVO implements VmwareDatacenter { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "guid") + private String guid; + + @Column(name = "name") + private String name; + + @Column(name = "vcenter_host") + private String vCenterHost; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "username") + private String user; + + @Encrypt + @Column(name = "password") + private String password; + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getVmwareDatacenterName() { + return name; + } + + @Override + public String getGuid() { + return guid; + } + + @Override + public String getUser() { + return user; + } + + @Override + public String getPassword() { + return password; + } + + @Override + public String getVcenterHost() { + return vCenterHost; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public void setGuid(String guid) { + this.guid = guid; + } + + public void setVmwareDatacenterName(String name) { + this.name = name; + } + + public void setVcenterHost(String vCenterHost) { + this.vCenterHost = vCenterHost; + } + + public void setUser(String user) { + this.user = user; ; + } + + public void setPassword(String password) { + this.password = password; + } + + @Override + public String toString() { + return new StringBuilder("VmwareDatacenter[").append(guid).append("]").toString(); + } + + @Override + public int hashCode() { + return NumbersUtil.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof VmwareDatacenterVO) { + return ((VmwareDatacenterVO)obj).getId() == this.getId(); + } else { + return false; + } + } + + public VmwareDatacenterVO(String guid, String name, String vCenterHost, String user, String password) { + this.uuid = UUID.randomUUID().toString(); + this.name = name; + this.guid = guid; + this.vCenterHost = vCenterHost; + this.user = user; + this.password = password; + } + + public VmwareDatacenterVO(long id, String guid, String name, String vCenterHost, String user, String password) { + this(guid, name, vCenterHost, user, password); + this.id = id; + } + + public VmwareDatacenterVO() { + this.uuid = UUID.randomUUID().toString(); + } + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterZoneMap.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterZoneMap.java new file mode 100644 index 00000000000..f70a5414de8 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterZoneMap.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware; + +import org.apache.cloudstack.api.InternalIdentity; + +import com.cloud.org.Grouping; + +public interface VmwareDatacenterZoneMap extends Grouping, InternalIdentity { + public long getId(); + + public long getZoneId(); + + public long getVmwareDcId(); +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterZoneMapVO.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterZoneMapVO.java new file mode 100644 index 00000000000..93b0e2670cb --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareDatacenterZoneMapVO.java @@ -0,0 +1,78 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.vmware; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + + +//NOTE: This particular table is totally internal to the CS MS. +//Do not ever include a uuid/guid field in this table. We just +//need it map zone ids with VMware datacenter Ids. + +@Entity +@Table(name="vmware_data_center_zone_map") +public class VmwareDatacenterZoneMapVO implements VmwareDatacenterZoneMap { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="zone_id") + private long zoneId; + + @Column(name="vmware_data_center_id") + private long vmwareDcId; + + public VmwareDatacenterZoneMapVO(long zoneId, long vmwareDcId) { + this.zoneId = zoneId; + this.vmwareDcId = vmwareDcId; + } + + public VmwareDatacenterZoneMapVO() { + // Do nothing. 
+ } + + @Override + public long getId() { + return id; + } + + @Override + public long getZoneId() { + return zoneId; + } + + @Override + public long getVmwareDcId() { + return vmwareDcId; + } + + public void setZoneId(long zoneId) { + this.zoneId = zoneId; + } + + public void setVmwareDcId(long vmwareDcId) { + this.vmwareDcId = vmwareDcId; + } +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 1d7300bd2e2..2807da5d887 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -30,6 +30,7 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; +import org.springframework.beans.NullValueInNestedPathException; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -51,6 +52,8 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.mo.ClusterMO; import com.cloud.hypervisor.vmware.mo.HostMO; @@ -107,11 +110,16 @@ public class VmwareServerDiscoverer extends DiscovererBase implements @Inject CiscoNexusVSMDeviceDao _nexusDao; @Inject - CiscoNexusVSMElementService _nexusElement; + CiscoNexusVSMElement _nexusElement; @Inject NetworkModel _netmgr; @Inject HypervisorCapabilitiesDao _hvCapabilitiesDao; + @Inject + VmwareDatacenterZoneMapDao _vmwareDcZoneMapDao; + @Inject + VmwareDatacenterDao _vmwareDcDao; + protected Map _urlParams; protected boolean useDVS = false; protected boolean nexusDVS = false; @@ -140,6 +148,18 @@ public class VmwareServerDiscoverer extends DiscovererBase implements return null; } + Map clusterDetails = _clusterDetailsDao.findDetails(clusterId); + boolean legacyZone = _vmwareMgr.isLegacyZone(dcId); + //Check if NOT a legacy zone. + if (!legacyZone) { + String updatedInventoryPath = validateCluster(dcId, url, username, password); + if (url.getPath() != updatedInventoryPath) { + // If url from API doesn't specify DC then update url in database with DC associated with this zone.
+ clusterDetails.put("url", url.getScheme() + "://" + url.getHost() + updatedInventoryPath); + _clusterDetailsDao.persist(clusterId, clusterDetails); + } + } + List hosts = _resourceMgr.listAllHostsInCluster(clusterId); if (hosts != null && hosts.size() > 0) { int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); @@ -164,7 +184,6 @@ public class VmwareServerDiscoverer extends DiscovererBase implements VmwareTrafficLabel guestTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Guest); VmwareTrafficLabel publicTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Public); - Map clusterDetails = _clusterDetailsDao.findDetails(clusterId); DataCenterVO zone = _dcDao.findById(dcId); NetworkType zoneType = zone.getNetworkType(); _readGlobalConfigParameters(); @@ -395,6 +414,63 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } } + private String validateCluster(Long dcId, URI url, String username, String password) throws DiscoveryException { + String msg; + long vmwareDcId; + VmwareDatacenterVO vmwareDc; + String vmwareDcNameFromDb; + String vmwareDcNameFromApi; + String vCenterHost; + String updatedInventoryPath = url.getPath(); + String clusterName = null; + + // Check if zone is associated with DC + VmwareDatacenterZoneMapVO vmwareDcZone = _vmwareDcZoneMapDao.findByZoneId(dcId); + if (vmwareDcZone == null) { + msg = "Zone " + dcId + " is not associated with any VMware DC yet. " + + "Please add VMware DC to this zone first and then try to add clusters."; + s_logger.error(msg); + throw new DiscoveryException(msg); + } + + // Retrieve DC added to this zone from database + vmwareDcId = vmwareDcZone.getVmwareDcId(); + vmwareDc = _vmwareDcDao.findById(vmwareDcId); + vmwareDcNameFromApi = vmwareDcNameFromDb = vmwareDc.getVmwareDatacenterName(); + vCenterHost = vmwareDc.getVcenterHost(); + String inventoryPath = url.getPath(); + + assert (inventoryPath != null); + + String[] pathTokens = inventoryPath.split("/"); + if (pathTokens.length == 2) { + // DC name is not present in url. + // Using DC name read from database. + clusterName = pathTokens[1]; + updatedInventoryPath = "/" + vmwareDcNameFromDb + "/" + clusterName; + } else if (pathTokens.length == 3) { + vmwareDcNameFromApi = pathTokens[1]; + clusterName = pathTokens[2]; + } + + if (!vCenterHost.equalsIgnoreCase(url.getHost())) { + msg = "This cluster " + clusterName + " belongs to vCenter " + url.getHost() + + ". But this zone is associated with VMware DC from vCenter " + vCenterHost + + ". Make sure the cluster being added belongs to vCenter " + vCenterHost + + " and VMware DC " + vmwareDcNameFromDb; + s_logger.error(msg); + throw new DiscoveryException(msg); + } else if (!vmwareDcNameFromDb.equalsIgnoreCase(vmwareDcNameFromApi)) { + msg = "This cluster " + clusterName + " belongs to VMware DC " + vmwareDcNameFromApi + + " .But this zone is associated with VMware DC " + vmwareDcNameFromDb + + ". 
Make sure the cluster being added belongs to VMware DC " + vmwareDcNameFromDb + + " in vCenter " + vCenterHost; + s_logger.error(msg); + throw new DiscoveryException(msg); + } + return updatedInventoryPath; + } + private boolean validateDiscoveredHosts(VmwareContext context, ManagedObjectReference morCluster, List morHosts) throws Exception { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDao.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDao.java new file mode 100644 index 00000000000..4a858f81363 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDao.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware.dao; + +import java.util.List; + +import com.cloud.hypervisor.vmware.LegacyZoneVO; +import com.cloud.utils.db.GenericDao; + +public interface LegacyZoneDao extends GenericDao { + /** + * @param id of zone + * @return zone id of legacy zone + */ + LegacyZoneVO findByZoneId(String zoneId); + + /** + * Lists all legacy CloudStack zones + * @return list of ids of legacy CloudStack zones + */ + List listAllLegacyZones(); +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java new file mode 100644 index 00000000000..7d2d1285c13 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java @@ -0,0 +1,66 @@ + // Licensed to the Apache Software Foundation (ASF) under one + // or more contributor license agreements. See the NOTICE file + // distributed with this work for additional information + // regarding copyright ownership. The ASF licenses this file + // to you under the Apache License, Version 2.0 (the + // "License"); you may not use this file except in compliance + // with the License. You may obtain a copy of the License at + // + // http://www.apache.org/licenses/LICENSE-2.0 + // + // Unless required by applicable law or agreed to in writing, + // software distributed under the License is distributed on an + // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + // KIND, either express or implied. See the License for the + // specific language governing permissions and limitations + // under the License. 
+ + package com.cloud.hypervisor.vmware.dao; + + import java.util.List; + + import javax.ejb.Local; + + import org.apache.log4j.Logger; + import org.springframework.stereotype.Component; + + import com.cloud.hypervisor.vmware.LegacyZoneVO; + import com.cloud.utils.db.DB; + import com.cloud.utils.db.GenericDaoBase; + import com.cloud.utils.db.SearchBuilder; + import com.cloud.utils.db.SearchCriteria; + import com.cloud.utils.db.SearchCriteria.Op; + + @Component + @Local(value=LegacyZoneDao.class) @DB(txn=false) + public class LegacyZoneDaoImpl extends GenericDaoBase implements LegacyZoneDao { + protected static final Logger s_logger = Logger.getLogger(LegacyZoneDaoImpl.class); + + final SearchBuilder zoneSearch; + final SearchBuilder fullTableSearch; + + public LegacyZoneDaoImpl() { + super(); + + zoneSearch = createSearchBuilder(); + zoneSearch.and("zoneId", zoneSearch.entity().getZoneId(), Op.EQ); + zoneSearch.done(); + + fullTableSearch = createSearchBuilder(); + fullTableSearch.done(); + } + + @Override + public LegacyZoneVO findByZoneId(String zoneId) { + SearchCriteria sc = zoneSearch.create(); + sc.setParameters("zoneId", zoneId); + return findOneBy(sc); + } + + @Override + public List listAllLegacyZones() { + SearchCriteria sc = fullTableSearch.create(); + return search(sc, null); + } + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDao.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDao.java new file mode 100644 index 00000000000..2754e91d26c --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDao.java @@ -0,0 +1,65 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware.dao; + +import java.util.List; + +import com.cloud.hypervisor.vmware.VmwareDatacenterVO; +import com.cloud.utils.db.GenericDao; + +public interface VmwareDatacenterDao extends GenericDao { + + /** + * Return a VMware Datacenter given guid + * @param guid of VMware datacenter + * @return VmwareDatacenterVO for the VMware datacenter having the specified guid. + */ + VmwareDatacenterVO getVmwareDatacenterByGuid(String guid); + + /** + * Return a VMware Datacenter given name and vCenter host. + * For legacy zones multiple records will be present in the table. + * @param name of VMware datacenter + * @param vCenter host + * @return VmwareDatacenterVO for the VMware datacenter with given name and + * belonging to specified vCenter host. + */ + List getVmwareDatacenterByNameAndVcenter(String name, String vCenterHost); + + /** + * Return a list of VMware Datacenter given name. + * @param name of Vmware datacenter + * @return list of VmwareDatacenterVO for VMware datacenters having the specified name. 
+ */ + List listVmwareDatacenterByName(String name); + + /** + * Return a list of VMware Datacenters belonging to specified vCenter + * @param vCenter Host + * @return list of VmwareDatacenterVO for all VMware datacenters belonging to + * specified vCenter + */ + List listVmwareDatacenterByVcenter(String vCenterHost); + + /** + * Lists all associated VMware datacenter on the management server. + * @return list of VmwareDatacenterVO for all associated VMware datacenters + */ + List listAllVmwareDatacenters(); + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java new file mode 100644 index 00000000000..9f5796a073a --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware.dao; + +import java.util.List; + +import javax.ejb.Local; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.hypervisor.vmware.VmwareDatacenterVO; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; + +@Component +@Local(value=VmwareDatacenterDao.class) @DB(txn=false) +public class VmwareDatacenterDaoImpl extends GenericDaoBase implements VmwareDatacenterDao { + protected static final Logger s_logger = Logger.getLogger(VmwareDatacenterDaoImpl.class); + + final SearchBuilder nameSearch; + final SearchBuilder guidSearch; + final SearchBuilder vcSearch; + final SearchBuilder nameVcSearch; + final SearchBuilder fullTableSearch; + + public VmwareDatacenterDaoImpl() { + super(); + + nameSearch = createSearchBuilder(); + nameSearch.and("name", nameSearch.entity().getVmwareDatacenterName(), Op.EQ); + nameSearch.done(); + + nameVcSearch = createSearchBuilder(); + nameVcSearch.and("name", nameVcSearch.entity().getVmwareDatacenterName(), Op.EQ); + nameVcSearch.and("vCenterHost", nameVcSearch.entity().getVcenterHost(), Op.EQ); + nameVcSearch.done(); + + vcSearch = createSearchBuilder(); + vcSearch.and("vCenterHost", vcSearch.entity().getVcenterHost(), Op.EQ); + vcSearch.done(); + + guidSearch = createSearchBuilder(); + guidSearch.and("guid", guidSearch.entity().getGuid(), Op.EQ); + guidSearch.done(); + + fullTableSearch = createSearchBuilder(); + fullTableSearch.done(); + } + + @Override + public VmwareDatacenterVO getVmwareDatacenterByGuid(String guid) { + SearchCriteria sc = guidSearch.create(); + sc.setParameters("guid", guid); + return findOneBy(sc); + } + + @Override + public 
List getVmwareDatacenterByNameAndVcenter(String name, String vCenterHost) { + SearchCriteria sc = nameVcSearch.create(); + sc.setParameters("name", name); + sc.setParameters("vCenterHost", vCenterHost); + return search(sc, null); + } + + @Override + public List listVmwareDatacenterByName(String name) { + SearchCriteria sc = nameSearch.create(); + sc.setParameters("name", name); + return search(sc, null); + } + + @Override + public List listVmwareDatacenterByVcenter(String vCenterHost) { + SearchCriteria sc = vcSearch.create(); + sc.setParameters("vCenterHost", vCenterHost); + return search(sc, null); + } + + @Override + public List listAllVmwareDatacenters() { + SearchCriteria sc = fullTableSearch.create(); + return search(sc, null); + } + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterZoneMapDao.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterZoneMapDao.java new file mode 100644 index 00000000000..be693aaac0c --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterZoneMapDao.java @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.vmware.dao; + +import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMapVO; +import com.cloud.utils.db.GenericDao; + +public interface VmwareDatacenterZoneMapDao extends GenericDao { + /** + * @param id of zone + * @return map object of VMware datacenter & zone + */ + VmwareDatacenterZoneMapVO findByZoneId(long zoneId); + + /** + * @param id of VMware datacenter + * @return map object of VMware datacenter & zone + */ + VmwareDatacenterZoneMapVO findByVmwareDcId(long vmwareDcId); +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterZoneMapDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterZoneMapDaoImpl.java new file mode 100644 index 00000000000..1c1326954c9 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterZoneMapDaoImpl.java @@ -0,0 +1,61 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. + +package com.cloud.hypervisor.vmware.dao; + +import javax.ejb.Local; + +import org.springframework.stereotype.Component; + +import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMapVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; + +@Component +@Local(value=VmwareDatacenterZoneMapDao.class) +public class VmwareDatacenterZoneMapDaoImpl extends GenericDaoBase + implements VmwareDatacenterZoneMapDao { + + protected final SearchBuilder zoneSearch; + protected final SearchBuilder vmwareDcSearch; + + public VmwareDatacenterZoneMapDaoImpl() { + zoneSearch = createSearchBuilder(); + zoneSearch.and("zoneId", zoneSearch.entity().getZoneId(), Op.EQ); + zoneSearch.done(); + + vmwareDcSearch = createSearchBuilder(); + vmwareDcSearch.and("vmwareDcId", vmwareDcSearch.entity().getVmwareDcId(), Op.EQ); + vmwareDcSearch.done(); + } + + @Override + public VmwareDatacenterZoneMapVO findByZoneId(long zoneId) { + SearchCriteria sc = zoneSearch.create(); + sc.setParameters("zoneId", zoneId); + return findOneBy(sc); + } + + @Override + public VmwareDatacenterZoneMapVO findByVmwareDcId(long vmwareDcId) { + SearchCriteria sc = vmwareDcSearch.create(); + sc.setParameters("vmwareDcId", vmwareDcId); + return findOneBy(sc); + } +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java index fb6d3d6667f..f9f5f7e7e39 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java @@ -20,6 +20,7 @@ import java.io.File; import java.util.List; import java.util.Map; +import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.vmware.manager.VmwareStorageManager; import com.cloud.hypervisor.vmware.mo.HostMO; @@ -66,4 +67,6 @@ public interface VmwareManager { String getPrivateVSwitchName(long dcId, HypervisorType hypervisorType); public String getRootDiskController(); + + boolean isLegacyZone(long dcId); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index edd26a312cf..68acd9e87ac 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.rmi.RemoteException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -37,6 +38,8 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import 
org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd; +import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -53,16 +56,29 @@ import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.ClusterVSMMapVO; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.ClusterVSMMapDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.DiscoveredWithErrorException; import com.cloud.host.Host; -import com.cloud.host.HostVO; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceInUseException; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; +import com.cloud.hypervisor.vmware.LegacyZoneVO; import com.cloud.hypervisor.vmware.VmwareCleanupMaid; +import com.cloud.hypervisor.vmware.VmwareDatacenterService; +import com.cloud.hypervisor.vmware.VmwareDatacenterVO; +import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMapVO; +import com.cloud.hypervisor.vmware.dao.LegacyZoneDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao; +import com.cloud.hypervisor.vmware.mo.CustomFieldConstants; +import com.cloud.hypervisor.vmware.mo.DatacenterMO; import com.cloud.hypervisor.vmware.mo.DiskControllerType; import com.cloud.hypervisor.vmware.mo.HostFirewallSystemMO; import com.cloud.hypervisor.vmware.mo.HostMO; @@ -70,9 +86,10 @@ import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.TaskMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VmwareHostType; -import com.cloud.utils.ssh.SshHelper; +import com.cloud.hypervisor.vmware.resource.VmwareContextFactory; import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.hypervisor.vmware.util.VmwareHelper; import com.cloud.network.CiscoNexusVSMDeviceVO; import com.cloud.network.NetworkModel; import com.cloud.network.dao.CiscoNexusVSMDeviceDao; @@ -86,11 +103,11 @@ import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.utils.FileUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SshHelper; @@ -98,12 +115,11 @@ import com.cloud.vm.DomainRouterVO; import com.google.gson.Gson; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.HostConnectSpec; -import com.vmware.vim25.ManagedObjectReference; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;; +import com.vmware.vim25.ManagedObjectReference;; -@Local(value = {VmwareManager.class}) -public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener { +@Local(value = {VmwareManager.class, VmwareDatacenterService.class}) +public class VmwareManagerImpl extends ManagerBase 
implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService { private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class); private static final int STARTUP_DELAY = 60000; // 60 seconds @@ -129,6 +145,10 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Inject ConfigurationDao _configDao; @Inject ConfigurationServer _configServer; @Inject HypervisorCapabilitiesDao _hvCapabilitiesDao; + @Inject DataCenterDao _dcDao; + @Inject VmwareDatacenterDao _vmwareDcDao; + @Inject VmwareDatacenterZoneMapDao _vmwareDcZoneMapDao; + @Inject LegacyZoneDao _legacyZoneDao; String _mountParent; StorageLayer _storage; @@ -870,4 +890,246 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw public String getRootDiskController() { return _rootDiskController; } + + @Override + public List> getCommands() { + List> cmdList = new ArrayList>(); + cmdList.add(AddVmwareDcCmd.class); + cmdList.add(RemoveVmwareDcCmd.class); + return cmdList; + } + + @Override + @DB + public VmwareDatacenterVO addVmwareDatacenter(AddVmwareDcCmd cmd) throws ResourceInUseException { + VmwareDatacenterVO vmwareDc = null; + Long zoneId = cmd.getZoneId(); + String userName = cmd.getUsername(); + String password = cmd.getPassword(); + String vCenterHost = cmd.getVcenter(); + String vmwareDcName = cmd.getName(); + + // Zone validation + validateZone(zoneId, "add VMware datacenter to zone"); + + VmwareDatacenterZoneMapVO vmwareDcZoneMap = _vmwareDcZoneMapDao.findByZoneId(zoneId); + // Check if zone is associated with VMware DC + if (vmwareDcZoneMap != null) { + throw new CloudRuntimeException("Zone " + zoneId + " is already associated with a VMware datacenter."); + } + + // Validate username, password, VMware DC name and vCenter + if (userName == null) { + throw new InvalidParameterValueException("Missing or invalid parameter username."); + } + + if (password == null) { + throw new InvalidParameterValueException("Missing or invalid parameter password."); + } + + if (vmwareDcName == null) { + throw new InvalidParameterValueException("Missing or invalid parameter name. Please provide valid VMware datacenter name."); + } + + if (vCenterHost == null) { + throw new InvalidParameterValueException("Missing or invalid parameter vcenter. " + + "Please provide valid VMware vCenter server's IP address or fully qualified domain name."); + } + + // Check if DC is already part of zone + // In that case vmware_data_center table should have the DC + vmwareDc = _vmwareDcDao.getVmwareDatacenterByGuid(vmwareDcName + "@" + vCenterHost); + if (vmwareDc != null) { + throw new ResourceInUseException("This DC is already part of other CloudStack zone(s). Cannot add this DC to more zones."); + } + + VmwareContext context = null; + DatacenterMO dcMo = null; + String dcCustomFieldValue; + boolean addDcCustomFieldDef = false; + boolean dcInUse = false; + String guid; + ManagedObjectReference dcMor; + try { + context = VmwareContextFactory.create(vCenterHost, userName, password); + + // Check if DC exists on vCenter + dcMo = new DatacenterMO(context, vmwareDcName); + dcMor = dcMo.getMor(); + if (dcMor == null) { + String msg = "Unable to find VMware DC " + vmwareDcName + " in vCenter " + vCenterHost + ". 
"; + s_logger.error(msg); + throw new InvalidParameterValueException(msg); + } + + // Check if DC is already associated with another CloudStack deployment + // Get custom field property cloud.zone over this DC + guid = vmwareDcName + "@" + vCenterHost; + + dcCustomFieldValue = dcMo.getCustomFieldValue(CustomFieldConstants.CLOUD_ZONE); + if (dcCustomFieldValue == null) { + addDcCustomFieldDef = true; + } + dcInUse = Boolean.parseBoolean(dcCustomFieldValue); + if (dcInUse) { + throw new ResourceInUseException("This DC is being managed by another CloudStack deployment. Cannot add this DC to the zone."); + } + + // Add DC to database into vmware_data_center table + vmwareDc = new VmwareDatacenterVO(guid, vmwareDcName, vCenterHost, userName, password); + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + vmwareDc = _vmwareDcDao.persist(vmwareDc); + txn.commit(); + } catch (Exception e) { + txn.rollback(); + s_logger.error("Failed to persist VMware datacenter details to database. Exception: " + e.getMessage()); + throw new CloudRuntimeException(e.getMessage()); + } + + // Map zone with vmware datacenter + vmwareDcZoneMap = new VmwareDatacenterZoneMapVO(zoneId, vmwareDc.getId()); + + txn = Transaction.currentTxn(); + try { + txn.start(); + vmwareDcZoneMap = _vmwareDcZoneMapDao.persist(vmwareDcZoneMap); + txn.commit(); + } catch (Exception e) { + txn.rollback(); + s_logger.error("Failed to associate VMware datacenter with zone " + zoneId + ". Exception: " + e.getMessage()); + // Removing VMware datacenter from vmware_data_center table because association with zone failed. + _vmwareDcDao.remove(vmwareDc.getId()); + throw new CloudRuntimeException(e.getMessage()); + } + + // Set custom field for this DC + if (addDcCustomFieldDef) { + dcMo.ensureCustomFieldDef(CustomFieldConstants.CLOUD_ZONE); + } + dcMo.setCustomFieldValue(CustomFieldConstants.CLOUD_ZONE, "true"); + + } catch (Throwable e) { + String msg = "Failed to add VMware DC to zone "; + if (e instanceof RemoteException) { + msg = "Encountered remote exception at vCenter. 
" + VmwareHelper.getExceptionMessage(e); + } else { + msg += "due to : " + e.getMessage(); + } + throw new CloudRuntimeException(msg); + } finally { + if (context != null) + context.close(); + context = null; + } + return vmwareDc; + } + + + @Override + public boolean removeVmwareDatacenter(RemoveVmwareDcCmd cmd) throws ResourceInUseException { + Long zoneId = cmd.getZoneId(); + // Validate zone + validateZone(zoneId, "remove VMware datacenter from zone"); + + // Get DC associated with this zone + VmwareDatacenterZoneMapVO vmwareDcZoneMap; + VmwareDatacenterVO vmwareDatacenter; + String vmwareDcName; + long vmwareDcId; + String vCenterHost; + String userName; + String password; + DatacenterMO dcMo = null; + Transaction txn; + + vmwareDcZoneMap = _vmwareDcZoneMapDao.findByZoneId(zoneId); + // Check if zone is associated with VMware DC + if (vmwareDcZoneMap == null) { + throw new CloudRuntimeException("Zone " + zoneId + " is not associated with any VMware datacenter."); + } + + vmwareDcId = vmwareDcZoneMap.getVmwareDcId(); + vmwareDatacenter = _vmwareDcDao.findById(vmwareDcId); + vmwareDcName = vmwareDatacenter.getVmwareDatacenterName(); + vCenterHost = vmwareDatacenter.getVcenterHost(); + userName = vmwareDatacenter.getUser(); + password = vmwareDatacenter.getPassword(); + txn = Transaction.currentTxn(); + try { + txn.start(); + // Remove the VMware datacenter entry in table vmware_data_center + _vmwareDcDao.remove(vmwareDcId); + // Remove the map entry in table vmware_data_center_zone_map + _vmwareDcZoneMapDao.remove(vmwareDcZoneMap.getId()); + txn.commit(); + } catch (Exception e) { + s_logger.info("Caught exception when trying to delete VMware datacenter record." + e.getMessage()); + throw new CloudRuntimeException("Failed to delete VMware datacenter."); + } + + // Construct context + VmwareContext context = null; + try { + context = VmwareContextFactory.create(vCenterHost, userName, password); + + // Check if DC exists on vCenter + try { + dcMo = new DatacenterMO(context, vmwareDcName); + } catch(Throwable t) { + String msg = "Unable to find DC " + vmwareDcName + " in vCenter " + vCenterHost; + s_logger.error(msg); + throw new DiscoveryException(msg); + } + + assert (dcMo != null); + + // Reset custom field property cloud.zone over this DC + dcMo.setCustomFieldValue(CustomFieldConstants.CLOUD_ZONE, "false"); + s_logger.info("Sucessfully reset custom field property cloud.zone over DC " + vmwareDcName); + } catch (Exception e) { + String msg = "Unable to reset custom field property cloud.zone over DC " + vmwareDcName + + " due to : " + VmwareHelper.getExceptionMessage(e); + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } finally { + if (context != null) + context.close(); + context = null; + } + return true; + } + + private void validateZone(Long zoneId, String errStr) throws ResourceInUseException { + // Check if zone with specified id exists + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone == null) { + InvalidParameterValueException ex = new InvalidParameterValueException( + "Can't find zone by the id specified."); + throw ex; + } + + // Check if zone has resources? - For now look for clusters + List clusters = _clusterDao.listByZoneId(zoneId); + if (clusters != null && clusters.size() > 0) { + // Look for VMware hypervisor. + for (ClusterVO cluster : clusters) { + if (cluster.getHypervisorType().equals(HypervisorType.VMware)) { + throw new ResourceInUseException("Zone has one or more clusters." 
+ + " Can't " + errStr + " which already has clusters."); + } + } + } + } + + @Override + public boolean isLegacyZone(long dcId) { + boolean isLegacyZone = false; + LegacyZoneVO legacyZoneVo = _legacyZoneDao.findById(dcId); + if (legacyZoneVo != null) { + isLegacyZone = true; + } + return isLegacyZone; + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index 9f1351e96f3..4ae0f305d99 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -328,7 +328,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { workerVm = vmMo; // attach volume to worker VM - String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumePath); + String datastoreVolumePath = getVolumePathInDatastore(dsMo, volumePath + ".vmdk"); vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs); } } @@ -491,6 +491,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath); + deleteVolumeDirOnSecondaryStorage(volumeId, secondaryStorageURL); } return new CopyVolumeAnswer(cmd, true, null, result.first(), result.second()); } catch (Throwable e) { @@ -1059,7 +1060,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } //attach volume to worker VM - String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumePath); + String datastoreVolumePath = getVolumePathInDatastore(dsMo, volumePath + ".vmdk"); workerVm.attachDisk(new String[] { datastoreVolumePath }, morDs); vmMo = workerVm; } @@ -1080,6 +1081,12 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } + private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName) throws Exception { + String datastoreVolumePath = dsMo.searchFileInSubFolders(volumeFileName, true); + assert (datastoreVolumePath != null) : "Virtual disk file missing from datastore."; + return datastoreVolumePath; + } + private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName) throws Exception { @@ -1438,4 +1445,26 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { workingVM = hyperHost.findVmOnHyperHost(uniqueName); return workingVM; } + + + + private String deleteVolumeDirOnSecondaryStorage(long volumeId, String secStorageUrl) throws Exception { + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); + String volumeMountRoot = secondaryMountPoint + "/" + getVolumeRelativeDirInSecStroage(volumeId); + + return deleteDir(volumeMountRoot); + } + + private String deleteDir(String dir) { + synchronized(dir.intern()) { + Script command = new Script(false, "rm", _timeout, s_logger); + command.add("-rf"); + command.add(dir); + return command.execute(); + } + } + + private static String getVolumeRelativeDirInSecStroage(long volumeId) { + return "volumes/" + volumeId; + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index d8d7476858a..34766a0d794 100755 --- 
a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -37,6 +37,7 @@ import java.util.Random; import java.util.TimeZone; import java.util.UUID; +import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -78,6 +79,9 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.DeleteVMSnapshotAnswer; import com.cloud.agent.api.DeleteVMSnapshotCommand; +import com.cloud.agent.api.MigrateWithStorageAnswer; +import com.cloud.agent.api.MigrateWithStorageCommand; +import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.GetDomRVersionAnswer; import com.cloud.agent.api.GetDomRVersionCmd; import com.cloud.agent.api.GetHostStatsAnswer; @@ -132,7 +136,6 @@ import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; -import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.UpgradeSnapshotCommand; import com.cloud.agent.api.ValidateSnapshotAnswer; import com.cloud.agent.api.ValidateSnapshotCommand; @@ -167,14 +170,16 @@ import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.agent.api.routing.VpnUsersCfgCommand; import com.cloud.agent.api.storage.CopyVolumeAnswer; import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.CreateVolumeOVACommand; +import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; +import com.cloud.agent.api.storage.MigrateVolumeAnswer; +import com.cloud.agent.api.storage.MigrateVolumeCommand; +import com.cloud.agent.api.storage.PrepareOVAPackingAnswer; +import com.cloud.agent.api.storage.PrepareOVAPackingCommand; import com.cloud.agent.api.storage.CreateAnswer; import com.cloud.agent.api.storage.CreateCommand; import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; -import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; -import com.cloud.agent.api.storage.CreateVolumeOVACommand; import com.cloud.agent.api.storage.DestroyCommand; -import com.cloud.agent.api.storage.PrepareOVAPackingAnswer; -import com.cloud.agent.api.storage.PrepareOVAPackingCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.storage.ResizeVolumeAnswer; @@ -230,6 +235,8 @@ import com.cloud.serializer.GsonHelper; import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeManagerImpl; import com.cloud.storage.resource.StoragePoolResource; import com.cloud.storage.resource.StorageSubsystemCommandHandler; import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase; @@ -239,6 +246,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionUtil; @@ -288,9 +296,14 @@ import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineFileInfo; import com.vmware.vim25.VirtualMachineGuestOsIdentifier; import 
com.vmware.vim25.VirtualMachinePowerState; +import com.vmware.vim25.VirtualMachineRelocateSpec; +import com.vmware.vim25.VirtualMachineRelocateSpecDiskLocator; import com.vmware.vim25.VirtualMachineRuntimeInfo; import com.vmware.vim25.VirtualSCSISharing; +import java.util.HashSet; +import java.util.Set; +import java.util.Map.Entry; public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService { private static final Logger s_logger = Logger.getLogger(VmwareResource.class); @@ -301,6 +314,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected final int _shutdown_waitMs = 300000; // wait up to 5 minutes for shutdown + @Inject + protected VolumeManager volMgr; + // out an operation protected final int _retry = 24; protected final int _sleep = 10000; @@ -338,7 +354,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected Gson _gson; protected volatile long _cmdSequence = 1; - + protected StorageSubsystemCommandHandler storageHandler; protected static HashMap s_statesTable; @@ -348,7 +364,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_statesTable.put(VirtualMachinePowerState.POWERED_OFF, State.Stopped); s_statesTable.put(VirtualMachinePowerState.SUSPENDED, State.Stopped); } - + public Gson getGson() { return _gson; } @@ -417,6 +433,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa answer = execute((PrepareForMigrationCommand) cmd); } else if (clz == MigrateCommand.class) { answer = execute((MigrateCommand) cmd); + } else if (clz == MigrateWithStorageCommand.class) { + answer = execute((MigrateWithStorageCommand) cmd); + } else if (clz == MigrateVolumeCommand.class) { + answer = execute((MigrateVolumeCommand) cmd); } else if (clz == DestroyCommand.class) { answer = execute((DestroyCommand) cmd); } else if (clz == CreateStoragePoolCommand.class) { @@ -1385,7 +1405,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(!isVMWareToolsInstalled(vmMo)){ String errMsg = "vmware tools is not installed or not running, cannot add nic to vm " + vmName; s_logger.debug(errMsg); - return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg); + return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg); } // TODO need a way to specify the control of NIC device type @@ -1445,7 +1465,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { VmwareHypervisorHost hyperHost = getHyperHost(context); - String vmName = cmd.getInstanceName(); + String vmName = cmd.getVmName(); VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); if(vmMo == null) { @@ -1548,7 +1568,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa snatArgs += "eth" + ethDeviceNum; Pair result_gateway = SshHelper.sshExecute(routerIp, DEFAULT_DOMR_SSHPORT, "root", mgr.getSystemVMKeyFile(), null, - "/opt/cloud/bin/vpc_privateGateway.sh " + args); + "/opt/cloud/bin/vpc_privateGateway.sh " + snatArgs); if (!result_gateway.first()) { throw new InternalErrorException("Unable to configure source NAT for public IP address."); @@ -1938,7 +1958,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa for (IpAliasTO ipAliasTO : revokedIpAliasTOs) { args = args + ipAliasTO.getAlias_count()+":"+ipAliasTO.getRouterip()+":"+ipAliasTO.getNetmask()+"-"; } - args = args + " " ; + args = 
args + "- " ; for (IpAliasTO ipAliasTO : activeIpAliasTOs) { args = args + ipAliasTO.getAlias_count()+":"+ipAliasTO.getRouterip()+":"+ipAliasTO.getNetmask()+"-"; } @@ -2434,6 +2454,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; int i = 0; + int ideUnitNumber = 0; + int scsiUnitNumber =0; + int nicUnitNumber = 0; int ideControllerKey = vmMo.getIDEDeviceControllerKey(); int scsiControllerKey = vmMo.getScsiDeviceControllerKey(); int controllerKey; @@ -2458,7 +2481,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), - secDsMo.getMor(), true, true, i, i + 1); + secDsMo.getMor(), true, true, ideUnitNumber++, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if(s_logger.isDebugEnabled()) @@ -2469,10 +2492,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } - i++; } else { // we will always plugin a CDROM device - + if (volIso != null) { TemplateObjectTO iso = (TemplateObjectTO)volIso.getData(); @@ -2489,7 +2511,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa assert (isoDatastoreInfo.second() != null); deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, i, i + 1); + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if(s_logger.isDebugEnabled()) @@ -2503,7 +2525,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } else { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, i, i + 1); + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if(s_logger.isDebugEnabled()) @@ -2517,9 +2539,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } } - i++; } - + i++; for (DiskTO vol : sortVolumesByDeviceId(disks)) { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); @@ -2555,17 +2576,20 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] diskChain = _gson.fromJson(chainInfo, String[].class); if (diskChain == null || diskChain.length < 1) { s_logger.warn("Empty previously-saved chain info, fall back to the original"); - device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), i, i + 1); + device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), + (controllerKey==ideControllerKey)?ideUnitNumber++:scsiUnitNumber++, i + 1); } else { s_logger.info("Attach the disk 
with stored chain info: " + chainInfo); for (int j = 0; j < diskChain.length; j++) { diskChain[j] = String.format("[%s] %s", volumeDsDetails.second().getName(), diskChain[j]); } - device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, diskChain, volumeDsDetails.first(), i, i + 1); + device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, diskChain, volumeDsDetails.first(), + (controllerKey==ideControllerKey)?ideUnitNumber++:scsiUnitNumber++, i + 1); } } else { - device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), i, i + 1); + device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), + (controllerKey==ideControllerKey)?ideUnitNumber++:scsiUnitNumber++, i + 1); } deviceConfigSpecArray[i].setDevice(device); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); @@ -2594,10 +2618,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first()); dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor); s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); - nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), i, i + 1, true, true); + nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), nicUnitNumber++, i + 1, true, true); } else { s_logger.info("Preparing NIC device on network " + networkInfo.second()); - nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), i, i + 1, true, true); + nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), nicUnitNumber++, i + 1, true, true); } deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); @@ -3412,6 +3436,254 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } + protected Answer execute(MigrateWithStorageCommand cmd) { + + if (s_logger.isInfoEnabled()) { + s_logger.info("Executing resource MigrateWithStorageCommand: " + _gson.toJson(cmd)); + } + + VirtualMachineTO vmTo = cmd.getVirtualMachine(); + final String vmName = vmTo.getName(); + + State state = null; + synchronized (_vms) { + state = _vms.get(vmName); + _vms.put(vmName, State.Stopping); + } + + VmwareHypervisorHost srcHyperHost = null; + VmwareHypervisorHost tgtHyperHost = null; + VirtualMachineMO vmMo = null; + + ManagedObjectReference morDsAtTarget = null; + ManagedObjectReference morDsAtSource = null; + ManagedObjectReference morDc = null; + ManagedObjectReference morDcOfTargetHost = null; + ManagedObjectReference morTgtHost = new ManagedObjectReference(); + VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); + List diskLocators = new ArrayList(); + VirtualMachineRelocateSpecDiskLocator diskLocator = null; + + boolean isFirstDs = true; + String srcDiskName = ""; + String srcDsName = ""; + String tgtDsName = ""; + String tgtDsNfsHost; + String tgtDsNfsPath; + int tgtDsNfsPort; + VolumeTO volume; + StorageFilerTO filerTo; + Set mountedDatastoresAtSource = new HashSet(); + + Map volToFiler = cmd.getVolumeToFiler(); + String tgtHost = cmd.getTargetHost(); + String tgtHostMorInfo = tgtHost.split("@")[0]; + morTgtHost.setType(tgtHostMorInfo.split(":")[0]); + 
morTgtHost.setValue(tgtHostMorInfo.split(":")[1]); + + try { + srcHyperHost = getHyperHost(getServiceContext()); + tgtHyperHost = new HostMO(getServiceContext(), morTgtHost); + morDc = srcHyperHost.getHyperHostDatacenter(); + morDcOfTargetHost = tgtHyperHost.getHyperHostDatacenter(); + if (morDc != morDcOfTargetHost) { + String msg = "Source host & target host are in different datacenters"; + throw new CloudRuntimeException(msg); + } + VmwareManager mgr = tgtHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); + + // find VM through datacenter (VM is not at the target host yet) + vmMo = srcHyperHost.findVmOnPeerHyperHost(vmName); + if (vmMo == null) { + String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue(); + s_logger.error(msg); + throw new Exception(msg); + } + + // Get details of each target datastore & attach to source host. + for (Entry entry : volToFiler.entrySet()) { + volume = entry.getKey(); + filerTo = entry.getValue(); + + srcDsName = volume.getPoolUuid().replace("-", ""); + tgtDsName = filerTo.getUuid().replace("-", ""); + tgtDsNfsHost = filerTo.getHost(); + tgtDsNfsPath = filerTo.getPath(); + tgtDsNfsPort = filerTo.getPort(); + + s_logger.debug("Preparing spec for volume : " + volume.getName()); + morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid()); + if (morDsAtTarget == null) { + String msg = "Unable to find the mounted datastore with uuid " + filerTo.getUuid() + " to execute MigrateWithStorageCommand"; + s_logger.error(msg); + throw new Exception(msg); + } + morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid()); + if (morDsAtSource == null) { + morDsAtSource = srcHyperHost.mountDatastore(false, tgtDsNfsHost, tgtDsNfsPort, tgtDsNfsPath, tgtDsName); + if (morDsAtSource == null) { + throw new Exception("Unable to mount datastore " + tgtDsNfsHost + ":/" + tgtDsNfsPath + " on " + _hostName); + } + mountedDatastoresAtSource.add(tgtDsName); + s_logger.debug("Mounted datastore " + tgtDsNfsHost + ":/" + tgtDsNfsPath + " on " + _hostName); + } + + if (isFirstDs) { + relocateSpec.setDatastore(morDsAtSource); + isFirstDs = false; + } + srcDiskName = String.format("[%s] %s.vmdk", srcDsName, volume.getPath()); + diskLocator = new VirtualMachineRelocateSpecDiskLocator(); + diskLocator.setDatastore(morDsAtSource); + diskLocator.setDiskId(getVirtualDiskInfo(vmMo, srcDiskName)); + + diskLocators.add(diskLocator); + + } + relocateSpec.getDisk().addAll(diskLocators); + + // Prepare network at target before migration + NicTO[] nics = vmTo.getNics(); + for (NicTO nic : nics) { + // prepare network on the host + prepareNetworkFromNicInfo(new HostMO(getServiceContext(), morTgtHost), nic, false, vmTo.getType()); + } + + // Ensure secondary storage mounted on target host + String secStoreUrl = mgr.getSecondaryStorageStoreUrl(Long.parseLong(_dcId)); + if(secStoreUrl == null) { + String msg = "Secondary storage for dc " + _dcId + " is not ready yet"; + throw new Exception(msg); + } + mgr.prepareSecondaryStorageStore(secStoreUrl); + ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl); + if (morSecDs == null) { + String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; + throw new Exception(msg); + } + + // Change datastore + if (!vmMo.changeDatastore(relocateSpec)) { + throw new Exception("Change datastore operation failed during storage migration"); + } else { 
s_logger.debug("Successfully migrated storage of VM " + vmName + " to target datastore(s)"); + } + + // Change host + ManagedObjectReference morPool = tgtHyperHost.getHyperHostOwnerResourcePool(); + if (!vmMo.migrate(morPool, tgtHyperHost.getMor())) { + throw new Exception("Change datastore operation failed during storage migration"); + } else { + s_logger.debug("Successfully relocated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName()); + } + + state = State.Stopping; + List volumeToList = null; + return new MigrateWithStorageAnswer(cmd, volumeToList); + } catch (Throwable e) { + if (e instanceof RemoteException) { + s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context"); + invalidateServiceContext(); + } + + String msg = "MigrationCommand failed due to " + VmwareHelper.getExceptionMessage(e); + s_logger.warn(msg, e); + return new MigrateWithStorageAnswer(cmd, (Exception) e); + } finally { + // Cleanup datastores mounted on source host + for(String mountedDatastore : mountedDatastoresAtSource) { + s_logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + _hostName); + try { + srcHyperHost.unmountDatastore(mountedDatastore); + } catch (Exception unmountEx) { + s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + + ". Seems the datastore is still being used by " + _hostName + + ". Please unmount manually to cleanup."); + } + s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName); + } + synchronized (_vms) { + _vms.put(vmName, state); + } + } + } + + private Answer execute(MigrateVolumeCommand cmd) { + String volumePath = cmd.getVolumePath(); + StorageFilerTO poolTo = cmd.getPool(); + + if (s_logger.isInfoEnabled()) { + s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd)); + } + + VmwareContext context = getServiceContext(); + VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); + final String vmName = volMgr.getVmNameFromVolumeId(cmd.getVolumeId()); + + VirtualMachineMO vmMo = null; + VmwareHypervisorHost srcHyperHost = null; + + ManagedObjectReference morDs = null; + ManagedObjectReference morDc = null; + VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); + List diskLocators = new ArrayList(); + VirtualMachineRelocateSpecDiskLocator diskLocator = null; + + String srcDiskName = ""; + String srcDsName = ""; + String tgtDsName = ""; + + try { + srcHyperHost = getHyperHost(getServiceContext()); + morDc = srcHyperHost.getHyperHostDatacenter(); + srcDsName = volMgr.getStoragePoolOfVolume(cmd.getVolumeId()); + tgtDsName = poolTo.getUuid().replace("-", ""); + + // find VM through datacenter (VM is not at the target host yet) + vmMo = srcHyperHost.findVmOnPeerHyperHost(vmName); + if (vmMo == null) { + String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue(); + s_logger.error(msg); + throw new Exception(msg); + } + morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName); + if (morDs == null) { + String msg = "Unable to find the mounted datastore with name " + tgtDsName + " to execute MigrateVolumeCommand"; + s_logger.error(msg); + throw new Exception(msg); + } + + srcDiskName = String.format("[%s] %s.vmdk", srcDsName, volumePath); + diskLocator = new VirtualMachineRelocateSpecDiskLocator(); + diskLocator.setDatastore(morDs); + diskLocator.setDiskId(getVirtualDiskInfo(vmMo, srcDiskName)); + + 
diskLocators.add(diskLocator); + relocateSpec.getDisk().add(diskLocator); + + // Change datastore + if (!vmMo.changeDatastore(relocateSpec)) { + throw new Exception("Change datastore operation failed during volume migration"); + } else { + s_logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName); + } + + return new MigrateVolumeAnswer(cmd, true, null, volumePath); + } catch (Exception e) { + String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString(); + s_logger.error(msg, e); + return new MigrateVolumeAnswer(cmd, false, msg, null); + } + } + + private int getVirtualDiskInfo(VirtualMachineMO vmMo, String srcDiskName) throws Exception { + Pair deviceInfo = vmMo.getDiskDevice(srcDiskName, false); + if(deviceInfo == null) { + throw new Exception("No such disk device: " + srcDiskName); + } + return deviceInfo.first().getKey(); + } + private VmwareHypervisorHost getTargetHyperHost(DatacenterMO dcMo, String destIp) throws Exception { VmwareManager mgr = dcMo.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); @@ -3524,7 +3796,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); - String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), cmd.getVolumePath()); + String datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true); + assert (datastoreVolumePath != null) : "Virtual disk file must exist in specified datastore for attach/detach operations."; AttachVolumeAnswer answer = new AttachVolumeAnswer(cmd, cmd.getDeviceId()); if (cmd.getAttach()) { @@ -5327,6 +5600,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa _guestTrafficInfo = (VmwareTrafficLabel) params.get("guestTrafficInfo"); _publicTrafficInfo = (VmwareTrafficLabel) params.get("publicTrafficInfo"); VmwareContext context = getServiceContext(); + volMgr = ComponentContext.inject(VolumeManagerImpl.class); try { VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); mgr.setupResourceStartupParams(params); @@ -5537,5 +5811,5 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private boolean isVMWareToolsInstalled(VirtualMachineMO vmMo) throws Exception{ GuestInfo guestInfo = vmMo.getVmGuestInfo(); return (guestInfo != null && guestInfo.getGuestState() != null && guestInfo.getGuestState().equalsIgnoreCase("running")); - } + } } diff --git a/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/command/admin/zone/AddVmwareDcCmd.java b/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/command/admin/zone/AddVmwareDcCmd.java new file mode 100644 index 00000000000..317452b7b7f --- /dev/null +++ b/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/command/admin/zone/AddVmwareDcCmd.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.zone; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.VmwareDatacenterResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.hypervisor.vmware.VmwareDatacenterService; +import com.cloud.hypervisor.vmware.VmwareDatacenterVO; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "addVmwareDc", description="Adds a VMware datacenter to specified zone", responseObject=VmwareDatacenterResponse.class) +public class AddVmwareDcCmd extends BaseCmd { + + @Inject public VmwareDatacenterService _vmwareDatacenterService; + + public static final Logger s_logger = Logger.getLogger(AddVmwareDcCmd.class.getName()); + + private static final String s_name = "addvmwaredcresponse"; + + @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, required=true, description="Name of VMware datacenter to be added to specified zone.") + private String name; + + @Parameter(name=ApiConstants.VCENTER, type=CommandType.STRING, required=true, description="The name/ip of vCenter. 
Make sure it is an IP address or fully qualified domain name of the host running the vCenter server.") + private String vCenter; + + @Parameter(name=ApiConstants.USERNAME, type=CommandType.STRING, required=false, description="The username required to connect to the resource.") + private String username; + + @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, required=false, description="The password for the specified username.") + private String password; + + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType=ZoneResponse.class, required=true, description="The Zone ID.") + private Long zoneId; + + public String getName() { + return name; + } + + public String getVcenter() { + return vCenter; + } + + public String getUsername() { + return username; + } + + public String getPassword() { + return password; + } + + public Long getZoneId() { + return zoneId; + } + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() { + try { + VmwareDatacenterResponse response = new VmwareDatacenterResponse(); + VmwareDatacenterVO result = _vmwareDatacenterService.addVmwareDatacenter(this); + if (result != null){ + response.setId(result.getUuid()); + response.setName(result.getVmwareDatacenterName()); + response.setResponseName(getCommandName()); + response.setObjectName("vmwaredc"); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add VMware Datacenter to zone."); + } + this.setResponseObject(response); + } catch (DiscoveryException ex) { + s_logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } catch (ResourceInUseException ex) { + s_logger.warn("Exception: ", ex); + ServerApiException e = new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + for (String proxyObj : ex.getIdProxyList()) { + e.addProxyObject(proxyObj); + } + throw e; + } catch (IllegalArgumentException ex) { + throw new IllegalArgumentException(ex.getMessage()); + } catch (CloudRuntimeException runtimeEx) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeEx.getMessage()); + } + } +} diff --git a/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/command/admin/zone/RemoveVmwareDcCmd.java b/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/command/admin/zone/RemoveVmwareDcCmd.java new file mode 100644 index 00000000000..a74c91bf753 --- /dev/null +++ b/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/command/admin/zone/RemoveVmwareDcCmd.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.zone; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.BaseCmd.CommandType; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.vmware.VmwareDatacenterService; +import com.cloud.network.element.CiscoNexusVSMElementService; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "removeVmwareDc", responseObject=SuccessResponse.class, description="Remove a VMware datacenter from a zone.") +public class RemoveVmwareDcCmd extends BaseCmd { + + @Inject public VmwareDatacenterService _vmwareDatacenterService; + + public static final Logger s_logger = Logger.getLogger(AddVmwareDcCmd.class.getName()); + + private static final String s_name = "removevmwaredcresponse"; + + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType=ZoneResponse.class, required=true, + description="The id of Zone from which VMware datacenter has to be removed.") + + private Long zoneId; + + public Long getZoneId() { + return zoneId; + } + + @Override + public void execute() { + SuccessResponse response = new SuccessResponse(); + try { + boolean result = _vmwareDatacenterService.removeVmwareDatacenter(this); + if (result) { + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to remove VMware datacenter from zone"); + } + } catch (ResourceInUseException ex) { + s_logger.warn("The zone has one or more resources (like cluster), hence not able to remove VMware datacenter from zone." + + " Please remove all resource from zone, and retry. 
Exception: ", ex); + ServerApiException e = new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + for (String proxyObj : ex.getIdProxyList()) { + e.addProxyObject(proxyObj); + } + throw e; + } catch (IllegalArgumentException ex) { + throw new IllegalArgumentException(ex.getMessage()); + } catch (CloudRuntimeException runtimeEx) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, runtimeEx.getMessage()); + } + } + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + +} diff --git a/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/response/VmwareDatacenterResponse.java b/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/response/VmwareDatacenterResponse.java new file mode 100644 index 00000000000..420320baf48 --- /dev/null +++ b/plugins/hypervisors/vmware/src/org/apache/cloudstack/api/response/VmwareDatacenterResponse.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.hypervisor.vmware.VmwareDatacenter; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = VmwareDatacenter.class) +public class VmwareDatacenterResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) @Param(description="The VMware Datacenter ID") + private String id; + + @SerializedName(ApiConstants.NAME) @Param(description="The VMware Datacenter name") + private String name; + + public String getName() { + return name; + } + + public String getId() { + return id; + } + + public void setName(String name) { + this.name = name; + } + + public void setId(String id) { + this.id = id; + } +} diff --git a/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java new file mode 100644 index 00000000000..bdba61ba028 --- /dev/null +++ b/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java @@ -0,0 +1,205 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.storage.motion; + +import java.util.HashMap; +import java.util.Map; +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.MigrateWithStorageAnswer; +import com.cloud.agent.api.MigrateWithStorageCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +@Component +public class VmwareStorageMotionStrategy implements DataMotionStrategy { + private static final Logger s_logger = Logger.getLogger(VmwareStorageMotionStrategy.class); + @Inject AgentManager agentMgr; + @Inject VolumeDao volDao; + @Inject VolumeDataFactory volFactory; + @Inject PrimaryDataStoreDao storagePoolDao; + @Inject VMInstanceDao instanceDao; + + @Override + public boolean canHandle(DataObject srcData, DataObject destData) { + return false; + } + + @Override + public boolean canHandle(Map volumeMap, Host srcHost, Host destHost) { + if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) { + s_logger.debug(this.getClass() + " can handle the request because the hosts have VMware hypervisor"); + return true; + } + return false; + } + + @Override + public Void copyAsync(DataObject srcData, DataObject destData, + AsyncCompletionCallback callback) { + CopyCommandResult result = new CopyCommandResult(null, null); + result.setResult("Unsupported operation requested for copying data."); + callback.complete(result); + + return null; + } + + @Override + public Void copyAsync(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, + AsyncCompletionCallback callback) { + Answer answer = null; + String errMsg = null; + try { + VMInstanceVO instance = instanceDao.findById(vmTo.getId()); + if (instance != null) { + if (srcHost.getClusterId() == destHost.getClusterId()) { + answer = migrateVmWithVolumesWithinCluster(instance, vmTo, srcHost, destHost, 
volumeMap); + } else { + answer = migrateVmWithVolumesAcrossCluster(instance, vmTo, srcHost, destHost, volumeMap); + } + } else { + throw new CloudRuntimeException("Unsupported operation requested for moving data."); + } + } catch (Exception e) { + s_logger.error("copy failed", e); + errMsg = e.toString(); + } + + CopyCommandResult result = new CopyCommandResult(null, answer); + result.setResult(errMsg); + callback.complete(result); + return null; + } + + private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, + Host destHost, Map volumeToPool) throws AgentUnavailableException { + + // Initiate migration of a virtual machine with it's volumes. + try { + Map volumeToFilerto = new HashMap(); + for (Map.Entry entry : volumeToPool.entrySet()) { + VolumeInfo volume = entry.getKey(); + VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId())); + StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue()); + volumeToFilerto.put(volumeTo, filerTo); + } + + // Migration across cluster needs to be done in three phases. + // 1. Send a migrate command to source resource to initiate migration + // Run validations against target!! + // 2. Complete the process. Update the volume details. + MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid()); + MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send( + srcHost.getId(), migrateWithStorageCmd); + if (migrateWithStorageAnswer == null) { + s_logger.error("Migration with storage of vm " + vm+ " to host " + destHost + " failed."); + throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); + } else if (!migrateWithStorageAnswer.getResult()) { + s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + migrateWithStorageAnswer.getDetails()); + throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + + ". " + migrateWithStorageAnswer.getDetails()); + } else { + // Update the volume details after migration. + updateVolumesAfterMigration(volumeToPool); + } + s_logger.debug("Storage migration of VM " + vm.getInstanceName() + " completed successfully. Migrated to host " + destHost.getName()); + + return migrateWithStorageAnswer; + } catch (OperationTimedoutException e) { + s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); + } + } + + private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, + Host destHost, Map volumeToPool) throws AgentUnavailableException { + + // Initiate migration of a virtual machine with it's volumes. 
+ try { + Map volumeToFilerto = new HashMap(); + for (Map.Entry entry : volumeToPool.entrySet()) { + VolumeInfo volume = entry.getKey(); + VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId())); + StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue()); + volumeToFilerto.put(volumeTo, filerTo); + } + + MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid()); + MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command); + if (answer == null) { + s_logger.error("Migration with storage of vm " + vm + " failed."); + throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); + } else if (!answer.getResult()) { + s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + answer.getDetails()); + throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + + ". " + answer.getDetails()); + } else { + // Update the volume details after migration. + updateVolumesAfterMigration(volumeToPool); + } + + return answer; + } catch (OperationTimedoutException e) { + s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); + } + } + + private void updateVolumesAfterMigration(Map volumeToPool) { + for (Map.Entry entry : volumeToPool.entrySet()) { + VolumeInfo volume = entry.getKey(); + StoragePool pool = (StoragePool)entry.getValue(); + + VolumeVO volumeVO = volDao.findById(volume.getId()); + Long oldPoolId = volumeVO.getPoolId(); + volumeVO.setLastPoolId(oldPoolId); + volumeVO.setFolder(pool.getPath()); + volumeVO.setPodId(pool.getPodId()); + volumeVO.setPoolId(pool.getId()); + + volDao.update(volume.getId(), volumeVO); + s_logger.debug("Volume path was successfully updated for volume " + volume.getName() + " after it was migrated."); + } + } +} diff --git a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java new file mode 100644 index 00000000000..de08c93c78a --- /dev/null +++ b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/VmwareDatacenterApiUnitTest.java @@ -0,0 +1,426 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.vmware; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import junit.framework.TestCase; + +import org.apache.cloudstack.api.command.admin.zone.AddVmwareDcCmd; +import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.agent.AgentManager; +import com.cloud.cluster.ClusterManager; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.ClusterVSMMapDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.event.dao.EventDao; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; +import com.cloud.hypervisor.vmware.dao.LegacyZoneDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterDao; +import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao; +import com.cloud.hypervisor.vmware.manager.VmwareManager; +import com.cloud.hypervisor.vmware.manager.VmwareManagerImpl; +import com.cloud.network.NetworkModel; +import com.cloud.network.dao.CiscoNexusVSMDeviceDao; +import com.cloud.org.Cluster.ClusterType; +import com.cloud.org.Managed.ManagedState; +import com.cloud.secstorage.CommandExecLogDao; +import com.cloud.server.ConfigurationServer; +import com.cloud.storage.secondary.SecondaryStorageVmManager; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; +import com.cloud.user.UserContext; +import com.cloud.user.dao.AccountDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.dao.UserVmDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class VmwareDatacenterApiUnitTest { + + @Inject + 
VmwareDatacenterService _vmwareDatacenterService; + + @Inject + DataCenterDao _dcDao; + + @Inject + HostPodDao _podDao; + + @Inject + VmwareDatacenterDao _vmwareDcDao; + + @Inject + VmwareDatacenterZoneMapDao _vmwareDcZoneMapDao; + + @Inject + ClusterDao _clusterDao; + + @Inject + ClusterDetailsDao _clusterDetailsDao; + + @Inject + ConfigurationDao _configDao; + + @Inject + AccountDao _accountDao; + + @Inject + AccountManager _acctMgr; + + long zoneId; + long podId; + long clusterId; + long vmwareDcId; + private static long domainId = 5L; + private static String vmwareDcName = "dc"; + private static String clusterName = "cluster"; + private static String vCenterHost = "10.1.1.100"; + private static String url = "http://" + vCenterHost + "/" + vmwareDcName + "/" + clusterName; + private static String user = "administrator"; + private static String password = "password"; + private static String guid = vmwareDcName + "@" + vCenterHost; + + private static VmwareDatacenterVO dc; + private static List vmwareDcs; + private static ClusterVO cluster; + private static VmwareDatacenterZoneMapVO dcZoneMap; + private static List clusterList; + private static ClusterDetailsVO clusterDetails; + + @Mock + private static AddVmwareDcCmd addCmd; + @Mock + private static RemoveVmwareDcCmd removeCmd; + + @BeforeClass + public static void setUp() throws ConfigurationException { + } + + @Before + public void testSetUp() { + Mockito.when(_configDao.isPremium()).thenReturn(true); + ComponentContext.initComponentsLifeCycle(); + MockitoAnnotations.initMocks(this); + + DataCenterVO zone = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24", + null, null, NetworkType.Basic, null, null, true, true, null, null); + zoneId = 1L; + + HostPodVO pod = new HostPodVO(UUID.randomUUID().toString(), zoneId, "192.168.56.1", "192.168.56.0/24", 8, "test"); + podId = 1L; + + AccountVO acct = new AccountVO(200L); + acct.setType(Account.ACCOUNT_TYPE_ADMIN); + acct.setAccountName("admin"); + acct.setDomainId(domainId); + UserContext.registerContext(1, acct, null, true); + + when(_accountDao.findByIdIncludingRemoved(0L)).thenReturn(acct); + + dc = new VmwareDatacenterVO(guid, vmwareDcName, vCenterHost, user, password); + vmwareDcs = new ArrayList(); + vmwareDcs.add(dc); + vmwareDcId = dc.getId(); + + cluster = new ClusterVO(zone.getId(), pod.getId(), "vmwarecluster"); + cluster.setHypervisorType(HypervisorType.VMware.toString()); + cluster.setClusterType(ClusterType.ExternalManaged); + cluster.setManagedState(ManagedState.Managed); + clusterId = 1L; + clusterList = new ArrayList(); + clusterList.add(cluster); + + clusterDetails = new ClusterDetailsVO(clusterId, "url", url); + + dcZoneMap = new VmwareDatacenterZoneMapVO(zoneId, vmwareDcId); + + Mockito.when(_dcDao.persist(Mockito.any(DataCenterVO.class))).thenReturn(zone); + Mockito.when(_dcDao.findById(1L)).thenReturn(zone); + Mockito.when(_podDao.persist(Mockito.any(HostPodVO.class))).thenReturn(pod); + Mockito.when(_podDao.findById(1L)).thenReturn(pod); + Mockito.when(_clusterDao.persist(Mockito.any(ClusterVO.class))).thenReturn(cluster); + Mockito.when(_clusterDao.findById(1L)).thenReturn(cluster); + Mockito.when(_clusterDao.listByZoneId(1L)).thenReturn(null); + Mockito.when(_clusterDao.expunge(1L)).thenReturn(true); + Mockito.when(_clusterDetailsDao.persist(Mockito.any(ClusterDetailsVO.class))).thenReturn(clusterDetails); + Mockito.when(_clusterDetailsDao.expunge(1L)).thenReturn(true); + 
Mockito.when(_vmwareDcDao.persist(Mockito.any(VmwareDatacenterVO.class))).thenReturn(dc); + Mockito.when(_vmwareDcDao.findById(1L)).thenReturn(null); + Mockito.when(_vmwareDcDao.expunge(1L)).thenReturn(true); + Mockito.when(_vmwareDcDao.getVmwareDatacenterByNameAndVcenter(vmwareDcName, vCenterHost)).thenReturn(null); + Mockito.when(_vmwareDcZoneMapDao.persist(Mockito.any(VmwareDatacenterZoneMapVO.class))).thenReturn(dcZoneMap); + Mockito.when(_vmwareDcZoneMapDao.findByZoneId(1L)).thenReturn(null); + Mockito.when(_vmwareDcZoneMapDao.expunge(1L)).thenReturn(true); + Mockito.when(addCmd.getZoneId()).thenReturn(1L); + Mockito.when(addCmd.getVcenter()).thenReturn(vCenterHost); + Mockito.when(addCmd.getUsername()).thenReturn(user); + Mockito.when(addCmd.getPassword()).thenReturn(password); + Mockito.when(addCmd.getName()).thenReturn(vmwareDcName); + Mockito.when(removeCmd.getZoneId()).thenReturn(1L); + } + + //@Test(expected = InvalidParameterValueException.class) + public void testAddVmwareDcToInvalidZone() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(addCmd.getZoneId()).thenReturn(2L); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + //@Test(expected = ResourceInUseException.class) + public void testAddVmwareDcToZoneWithClusters() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(_clusterDao.listByZoneId(1L)).thenReturn(clusterList); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testRemoveVmwareDcToInvalidZone() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(removeCmd.getZoneId()).thenReturn(2L); + _vmwareDatacenterService.removeVmwareDatacenter(removeCmd); + } + + @Test(expected = ResourceInUseException.class) + public void testRemoveVmwareDcToZoneWithClusters() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(_clusterDao.listByZoneId(1L)).thenReturn(clusterList); + _vmwareDatacenterService.removeVmwareDatacenter(removeCmd); + } + + //@Test(expected = ResourceInUseException.class) + public void testAddVmwareDcToZoneWithVmwareDc() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(_vmwareDcDao.getVmwareDatacenterByNameAndVcenter(vmwareDcName, vCenterHost)).thenReturn(vmwareDcs); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + //@Test(expected = InvalidParameterValueException.class) + public void testAddVmwareDcWithNullUser() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(addCmd.getUsername()).thenReturn(null); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + //@Test(expected = InvalidParameterValueException.class) + public void testAddVmwareDcWithNullPassword() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(addCmd.getPassword()).thenReturn(null); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + //@Test(expected = InvalidParameterValueException.class) + public void testAddVmwareDcWithNullUrl() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(addCmd.getVcenter()).thenReturn(null); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + //@Test(expected = InvalidParameterValueException.class) + public 
void testAddVmwareDcWithNullDcName() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(addCmd.getName()).thenReturn(null); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + //@Test(expected = CloudRuntimeException.class) + public void testReAddVmwareDc() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(_vmwareDcZoneMapDao.findByZoneId(1L)).thenReturn(dcZoneMap); + _vmwareDatacenterService.addVmwareDatacenter(addCmd); + } + + @Test(expected = CloudRuntimeException.class) + public void testRemoveNonexistingVmwareDc() throws ResourceInUseException, IllegalArgumentException, DiscoveryException, Exception { + Mockito.when(_vmwareDcZoneMapDao.findByZoneId(1L)).thenReturn(null); + _vmwareDatacenterService.removeVmwareDatacenter(removeCmd); + } + + @Configuration + @ComponentScan(basePackageClasses = {VmwareManagerImpl.class}, includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public AccountDao accountDao() { + return Mockito.mock(AccountDao.class); + } + + @Bean + public AccountService accountService() { + return Mockito.mock(AccountService.class); + } + + @Bean + public DataCenterDao dataCenterDao() { + return Mockito.mock(DataCenterDao.class); + } + + @Bean + public HostPodDao hostPodDao() { + return Mockito.mock(HostPodDao.class); + } + + @Bean + public ClusterDao clusterDao() { + return Mockito.mock(ClusterDao.class); + } + + @Bean + public ClusterDetailsDao clusterDetailsDao() { + return Mockito.mock(ClusterDetailsDao.class); + } + + @Bean + public VmwareDatacenterDao vmwareDatacenterDao() { + return Mockito.mock(VmwareDatacenterDao.class); + } + + @Bean + public VmwareDatacenterZoneMapDao vmwareDatacenterZoneMapDao() { + return Mockito.mock(VmwareDatacenterZoneMapDao.class); + } + + @Bean + public AgentManager agentManager() { + return Mockito.mock(AgentManager.class); + } + + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + + @Bean + public NetworkModel networkModel() { + return Mockito.mock(NetworkModel.class); + } + + @Bean + public ClusterManager clusterManager() { + return Mockito.mock(ClusterManager.class); + } + + @Bean + public SecondaryStorageVmManager secondaryStorageVmManager() { + return Mockito.mock(SecondaryStorageVmManager.class); + } + + @Bean + public CommandExecLogDao commandExecLogDao() { + return Mockito.mock(CommandExecLogDao.class); + } + + @Bean + public CiscoNexusVSMDeviceDao ciscoNexusVSMDeviceDao() { + return Mockito.mock(CiscoNexusVSMDeviceDao.class); + } + + @Bean + public ClusterVSMMapDao clusterVSMMapDao() { + return Mockito.mock(ClusterVSMMapDao.class); + } + + @Bean + public LegacyZoneDao legacyZoneDao() { + return Mockito.mock(LegacyZoneDao.class); + } + + @Bean + public ConfigurationDao configurationDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public ConfigurationServer configurationServer() { + return Mockito.mock(ConfigurationServer.class); + } + + @Bean + public HypervisorCapabilitiesDao hypervisorCapabilitiesDao() { + return Mockito.mock(HypervisorCapabilitiesDao.class); + } + + @Bean + public AccountManager accountManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public EventDao eventDao() { + return Mockito.mock(EventDao.class); + } + + @Bean + public UserVmDao userVMDao() { + return 
Mockito.mock(UserVmDao.class); + } + + public AddVmwareDcCmd addVmwareDatacenterCmd() { + return Mockito.mock(AddVmwareDcCmd.class); + } + + public RemoveVmwareDcCmd removeVmwareDcCmd() { + return Mockito.mock(RemoveVmwareDcCmd.class); + } + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} diff --git a/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java new file mode 100644 index 00000000000..3d2ad57711d --- /dev/null +++ b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java @@ -0,0 +1,271 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.motion; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.MigrateWithStorageAnswer; +import com.cloud.agent.api.MigrateWithStorageCommand; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class VmwareStorageMotionStrategyTest { + + @Inject VmwareStorageMotionStrategy strategy = new VmwareStorageMotionStrategy(); + @Inject AgentManager agentMgr; + @Inject VolumeDao volDao; + @Inject VolumeDataFactory volFactory; + @Inject PrimaryDataStoreDao storagePoolDao; + @Inject VMInstanceDao instanceDao; + + CopyCommandResult result; + + @BeforeClass + public static void setUp() throws ConfigurationException { + } + + @Before + public void testSetUp() { + ComponentContext.initComponentsLifeCycle(); + } + + @Test + public void testStrategyHandlesVmwareHosts() throws Exception { + Host srcHost = mock(Host.class); + Host destHost = mock(Host.class); + when(srcHost.getHypervisorType()).thenReturn(HypervisorType.VMware); + when(destHost.getHypervisorType()).thenReturn(HypervisorType.VMware); + Map volumeMap = new HashMap(); + boolean canHandle = strategy.canHandle(volumeMap, srcHost, 
destHost); + assertTrue("The strategy is only supposed to handle vmware hosts", canHandle); + } + + @Test + public void testStrategyDoesnotHandlesNonVmwareHosts() throws Exception { + Host srcHost = mock(Host.class); + Host destHost = mock(Host.class); + when(srcHost.getHypervisorType()).thenReturn(HypervisorType.XenServer); + when(destHost.getHypervisorType()).thenReturn(HypervisorType.XenServer); + Map volumeMap = new HashMap(); + boolean canHandle = strategy.canHandle(volumeMap, srcHost, destHost); + assertFalse("The strategy is only supposed to handle vmware hosts", canHandle); + } + + @Test + public void testMigrateWithinClusterSuccess() throws Exception { + Host srcHost = mock(Host.class); + Host destHost = mock(Host.class); + when(srcHost.getClusterId()).thenReturn(1L); + when(destHost.getClusterId()).thenReturn(1L); + Map volumeMap = new HashMap(); + VirtualMachineTO to = mock(VirtualMachineTO.class); + when(to.getId()).thenReturn(6L); + VMInstanceVO instance = mock(VMInstanceVO.class); + when(instanceDao.findById(6L)).thenReturn(instance); + + MockContext context = new MockContext(null, null, volumeMap); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context); + + MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class); + when(migAnswerMock.getResult()).thenReturn(true); + when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock); + + strategy.copyAsync(volumeMap, to, srcHost, destHost, caller); + assertTrue("Migration within cluster isn't successful.", this.result.isSuccess()); + } + + @Test + public void testMigrateWithinClusterFailure() throws Exception { + Host srcHost = mock(Host.class); + Host destHost = mock(Host.class); + when(srcHost.getClusterId()).thenReturn(1L); + when(destHost.getClusterId()).thenReturn(1L); + Map volumeMap = new HashMap(); + VirtualMachineTO to = mock(VirtualMachineTO.class); + when(to.getId()).thenReturn(6L); + VMInstanceVO instance = mock(VMInstanceVO.class); + when(instanceDao.findById(6L)).thenReturn(instance); + + MockContext context = new MockContext(null, null, volumeMap); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context); + + MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class); + when(migAnswerMock.getResult()).thenReturn(false); + when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock); + + strategy.copyAsync(volumeMap, to, srcHost, destHost, caller); + assertFalse("Migration within cluster didn't fail.", this.result.isSuccess()); + } + + @Test + public void testMigrateAcrossClusterSuccess() throws Exception { + Host srcHost = mock(Host.class); + Host destHost = mock(Host.class); + when(srcHost.getClusterId()).thenReturn(1L); + when(destHost.getClusterId()).thenReturn(2L); + Map volumeMap = new HashMap(); + VirtualMachineTO to = mock(VirtualMachineTO.class); + when(to.getId()).thenReturn(6L); + VMInstanceVO instance = mock(VMInstanceVO.class); + when(instanceDao.findById(6L)).thenReturn(instance); + + MockContext context = new MockContext(null, null, volumeMap); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context); + + MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class); + 
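The two canHandle tests above pin down the hypervisor gate of the strategy. A minimal sketch of the shape of check they exercise (the real implementation may add further conditions):

    // Sketch only: both the source and destination host must run VMware for this strategy to claim the job.
    private boolean canHandleSketch(Host srcHost, Host destHost) {
        return srcHost.getHypervisorType() == HypervisorType.VMware
                && destHost.getHypervisorType() == HypervisorType.VMware;
    }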
when(migAnswerMock.getResult()).thenReturn(true); + when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock); + + strategy.copyAsync(volumeMap, to, srcHost, destHost, caller); + assertTrue("Migration across cluster isn't successful.", this.result.isSuccess()); + } + + @Test + public void testMigrateAcrossClusterFailure() throws Exception { + Host srcHost = mock(Host.class); + Host destHost = mock(Host.class); + when(srcHost.getClusterId()).thenReturn(1L); + when(destHost.getClusterId()).thenReturn(2L); + Map volumeMap = new HashMap(); + VirtualMachineTO to = mock(VirtualMachineTO.class); + when(to.getId()).thenReturn(6L); + VMInstanceVO instance = mock(VMInstanceVO.class); + when(instanceDao.findById(6L)).thenReturn(instance); + + MockContext context = new MockContext(null, null, volumeMap); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context); + + MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class); + when(migAnswerMock.getResult()).thenReturn(false); + when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock); + + strategy.copyAsync(volumeMap, to, srcHost, destHost, caller); + assertFalse("Migration across cluster didn't fail.", this.result.isSuccess()); + } + + private class MockContext extends AsyncRpcConext { + final Map volumeToPool; + final AsyncCallFuture future; + /** + * @param callback + */ + public MockContext(AsyncCompletionCallback callback, AsyncCallFuture future, + Map volumeToPool) { + super(callback); + this.volumeToPool = volumeToPool; + this.future = future; + } + } + + protected Void mockCallBack(AsyncCallbackDispatcher callback, MockContext context) { + this.result = callback.getResult(); + return null; + } + + @Configuration + @ComponentScan(basePackageClasses = { VmwareStorageMotionStrategy.class }, + includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, + useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public VolumeDao volumeDao() { + return Mockito.mock(VolumeDao.class); + } + + @Bean + public VolumeDataFactory volumeDataFactory() { + return Mockito.mock(VolumeDataFactory.class); + } + + @Bean + public PrimaryDataStoreDao primaryDataStoreDao() { + return Mockito.mock(PrimaryDataStoreDao.class); + } + + @Bean + public VMInstanceDao vmInstanceDao() { + return Mockito.mock(VMInstanceDao.class); + } + + @Bean + public AgentManager agentManager() { + return Mockito.mock(AgentManager.class); + } + + public static class Library implements TypeFilter { + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} \ No newline at end of file diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 3a4453c3dd9..ab259b9143b 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -107,6 +107,8 @@ import com.cloud.agent.api.GetHostStatsAnswer; import 
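The migration tests above drive copyAsync() through AsyncCallbackDispatcher rather than invoking the callback directly. A hedged sketch of that idiom; the behaviour described in the comments is assumed from the framework, not taken from this patch:

    // MockContext and mockCallBack are the test's own helpers shown above.
    MockContext context = new MockContext(null, null, volumeMap);
    AsyncCallbackDispatcher<VmwareStorageMotionStrategyTest, CopyCommandResult> caller =
            AsyncCallbackDispatcher.create(this);
    // getTarget() returns a recording proxy: invoking mockCallBack on it does not run the method,
    // it only registers which method should receive the completion later.
    caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context);
    // When the strategy completes the dispatcher with a CopyCommandResult, mockCallBack is invoked
    // and callback.getResult() yields the result object the assertions inspect via this.result.
    strategy.copyAsync(volumeMap, to, srcHost, destHost, caller);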
com.cloud.agent.api.GetHostStatsCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.GetVmDiskStatsAnswer; +import com.cloud.agent.api.GetVmDiskStatsCommand; import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.GetVncPortAnswer; @@ -161,6 +163,7 @@ import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.UpdateHostPasswordCommand; import com.cloud.agent.api.UpgradeSnapshotCommand; +import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; @@ -288,6 +291,7 @@ import com.xensource.xenapi.Types.VmBadPowerState; import com.xensource.xenapi.Types.VmPowerState; import com.xensource.xenapi.Types.XenAPIException; import com.xensource.xenapi.VBD; +import com.xensource.xenapi.VBDMetrics; import com.xensource.xenapi.VDI; import com.xensource.xenapi.VIF; import com.xensource.xenapi.VLAN; @@ -534,6 +538,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return execute((GetHostStatsCommand) cmd); } else if (clazz == GetVmStatsCommand.class) { return execute((GetVmStatsCommand) cmd); + } else if (cmd instanceof GetVmDiskStatsCommand) { + return execute((GetVmDiskStatsCommand) cmd); } else if (clazz == CheckHealthCommand.class) { return execute((CheckHealthCommand) cmd); } else if (clazz == StopCommand.class) { @@ -679,6 +685,14 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected void scaleVM(Connection conn, VM vm, VirtualMachineTO vmSpec, Host host) throws XenAPIException, XmlRpcException { + Long staticMemoryMax = vm.getMemoryStaticMax(conn); + Long staticMemoryMin = vm.getMemoryStaticMin(conn); + Long newDynamicMemoryMin = vmSpec.getMinRam() * 1024 * 1024; + Long newDynamicMemoryMax = vmSpec.getMaxRam() * 1024 * 1024; + if (staticMemoryMin > newDynamicMemoryMin || newDynamicMemoryMax > staticMemoryMax) { + throw new CloudRuntimeException("Cannot scale up the vm because of memory constraint violation: 0 <= memory-static-min <= memory-dynamic-min <= memory-dynamic-max <= memory-static-max "); + } + vm.setMemoryDynamicRange(conn, vmSpec.getMinRam() * 1024 * 1024, vmSpec.getMaxRam() * 1024 * 1024); vm.setVCPUsNumberLive(conn, (long)vmSpec.getCpus()); @@ -715,10 +729,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe // If DMC is not enable then don't execute this command. 
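The new guard in scaleVM() above enforces the memory ordering quoted in the exception message. A worked illustration with invented numbers, assuming getMinRam()/getMaxRam() are MiB values since the hunk multiplies them by 1024 * 1024:

    // Illustration only (not part of the patch): 256 MiB static-min and 8 GiB static-max reported by XenServer.
    long staticMemoryMin = 256L * 1024 * 1024;
    long staticMemoryMax = 8L * 1024 * 1024 * 1024;
    // Requested dynamic range of 1024..4096 MiB, converted to bytes exactly as in the hunk.
    long newDynamicMemoryMin = 1024L * 1024 * 1024;
    long newDynamicMemoryMax = 4096L * 1024 * 1024;
    // Scale-up is permitted only while static-min <= dynamic-min and dynamic-max <= static-max.
    boolean allowed = staticMemoryMin <= newDynamicMemoryMin && newDynamicMemoryMax <= staticMemoryMax; // true here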
if (!isDmcEnabled(conn, host)) { - String msg = "Unable to scale the vm: " + vmName + " as DMC - Dynamic memory control is not enabled for the XenServer:" + _host.uuid + " ,check your license and hypervisor version."; - s_logger.info(msg); - return new ScaleVmAnswer(cmd, false, msg); + throw new CloudRuntimeException("Unable to scale the vm: " + vmName + " as DMC - Dynamic memory control is not enabled for the XenServer:" + _host.uuid + " ,check your license and hypervisor version."); } + // stop vm which is running on this host or is in halted state Iterator iter = vms.iterator(); while ( iter.hasNext() ) { @@ -738,13 +751,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe for (VM vm : vms) { VM.Record vmr = vm.getRecord(conn); try { - Map hostParams = new HashMap(); - hostParams = host.getLicenseParams(conn); - if (hostParams.get("restrict_dmc").equalsIgnoreCase("true")) { - throw new CloudRuntimeException("Host "+ _host.uuid + " does not support Dynamic Memory Control, so we cannot scale up the vm"); - } scaleVM(conn, vm, vmSpec, host); - } catch (Exception e) { String msg = "Catch exception " + e.getClass().getName() + " when scaling VM:" + vmName + " due to " + e.toString(); s_logger.debug(msg); @@ -2068,7 +2075,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe for (IpAliasTO ipAliasTO : revokedIpAliasTOs) { args = args + ipAliasTO.getAlias_count()+":"+ipAliasTO.getRouterip()+":"+ipAliasTO.getNetmask()+"-"; } - args = args + " " ; + //this is to ensure that thre is some argument passed to the deleteipAlias script when there are no revoked rules. + args = args + "- " ; List activeIpAliasTOs = cmd.getCreateIpAliasTos(); for (IpAliasTO ipAliasTO : activeIpAliasTOs) { args = args + ipAliasTO.getAlias_count()+":"+ipAliasTO.getRouterip()+":"+ipAliasTO.getNetmask()+"-"; @@ -2652,6 +2660,80 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return hostStats; } + protected GetVmDiskStatsAnswer execute( GetVmDiskStatsCommand cmd) { + Connection conn = getConnection(); + List vmNames = cmd.getVmNames(); + HashMap> vmDiskStatsNameMap = new HashMap>(); + if( vmNames.size() == 0 ) { + return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(),vmDiskStatsNameMap); + } + try { + + // Determine the UUIDs of the requested VMs + List vmUUIDs = new ArrayList(); + + for (String vmName : vmNames) { + VM vm = getVM(conn, vmName); + vmUUIDs.add(vm.getUuid(conn)); + } + + HashMap> vmDiskStatsUUIDMap = getVmDiskStats(conn, cmd, vmUUIDs, cmd.getHostGuid()); + if( vmDiskStatsUUIDMap == null ) { + return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(), vmDiskStatsNameMap); + } + + for (String vmUUID : vmDiskStatsUUIDMap.keySet()) { + List vmDiskStatsUUID = vmDiskStatsUUIDMap.get(vmUUID); + String vmName = vmNames.get(vmUUIDs.indexOf(vmUUID)); + for (VmDiskStatsEntry vmDiskStat : vmDiskStatsUUID) { + vmDiskStat.setVmName(vmName); + } + vmDiskStatsNameMap.put(vmName, vmDiskStatsUUID); + } + + return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(),vmDiskStatsNameMap); + } catch (XenAPIException e) { + String msg = "Unable to get VM disk stats" + e.toString(); + s_logger.warn(msg, e); + return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(),vmDiskStatsNameMap); + } catch (XmlRpcException e) { + String msg = "Unable to get VM disk stats" + e.getMessage(); + s_logger.warn(msg, e); + return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(),vmDiskStatsNameMap); + } + } + + private HashMap> 
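The "- " appended above keeps the deleteipAlias script's argument list non-empty when nothing was revoked. An illustration of the argument string the loop builds, with invented addresses:

    // Illustration only: each alias is encoded as <alias_count>:<router ip>:<netmask> followed by '-'.
    String withRevoked = "1:10.1.1.2:255.255.255.0-2:10.1.1.3:255.255.255.0-" + "- " + "3:10.1.2.2:255.255.255.0-";
    // With nothing revoked the string degenerates to "- 3:10.1.2.2:255.255.255.0-", so the script
    // still receives a placeholder first token instead of starting directly with the active aliases.
    String withoutRevoked = "- " + "3:10.1.2.2:255.255.255.0-";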
getVmDiskStats(Connection conn, GetVmDiskStatsCommand cmd, List vmUUIDs, String hostGuid) { + HashMap> vmResponseMap = new HashMap>(); + + for (String vmUUID : vmUUIDs) { + vmResponseMap.put(vmUUID, new ArrayList()); + } + + try { + for (String vmUUID : vmUUIDs) { + VM vm = VM.getByUuid(conn, vmUUID); + List vmDiskStats = new ArrayList(); + for (VBD vbd : vm.getVBDs(conn)) { + if (!vbd.getType(conn).equals(Types.VbdType.CD)) { + VmDiskStatsEntry stats = new VmDiskStatsEntry(); + VBDMetrics record = vbd.getMetrics(conn); + stats.setPath(vbd.getVDI(conn).getUuid(conn)); + stats.setBytesRead((long)(record.getIoReadKbs(conn) * 1024)); + stats.setBytesWrite((long)(record.getIoWriteKbs(conn) * 1024)); + vmDiskStats.add(stats); + } + } + vmResponseMap.put(vmUUID, vmDiskStats); + } + } catch (Exception e) { + s_logger.warn("Error while collecting disk stats from : ", e); + return null; + } + + return vmResponseMap; + } + protected GetVmStatsAnswer execute( GetVmStatsCommand cmd) { Connection conn = getConnection(); List vmNames = cmd.getVmNames(); @@ -2761,6 +2843,29 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } + try { + for (String vmUUID : vmUUIDs) { + VM vm = VM.getByUuid(conn, vmUUID); + VmStatsEntry stats = vmResponseMap.get(vmUUID); + double diskReadKBs = 0; + double diskWriteKBs = 0; + for (VBD vbd : vm.getVBDs(conn)) { + VBDMetrics record = vbd.getMetrics(conn); + diskReadKBs += record.getIoReadKbs(conn); + diskWriteKBs += record.getIoWriteKbs(conn); + } + if (stats == null) { + stats = new VmStatsEntry(); + } + stats.setDiskReadKBs(diskReadKBs); + stats.setDiskWriteKBs(diskWriteKBs); + vmResponseMap.put(vmUUID, stats); + } + } catch (Exception e) { + s_logger.warn("Error while collecting disk stats from : ", e); + return null; + } + return vmResponseMap; } @@ -8088,7 +8193,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe */ private UnPlugNicAnswer execute(UnPlugNicCommand cmd) { Connection conn = getConnection(); - String vmName = cmd.getInstanceName(); + String vmName = cmd.getVmName(); try { Set vms = VM.getByNameLabel(conn, vmName); if ( vms == null || vms.isEmpty() ) { diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java index e5a95594720..5261ca046e7 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java @@ -27,6 +27,7 @@ import javax.ejb.Local; import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.NetworkUsageAnswer; @@ -50,9 +51,12 @@ import com.xensource.xenapi.VBD; import com.xensource.xenapi.VDI; import com.xensource.xenapi.VM; + @Local(value=ServerResource.class) public class XcpOssResource extends CitrixResourceBase { - private final static Logger s_logger = Logger.getLogger(XcpServerResource.class); + private final static Logger s_logger = Logger.getLogger(XcpOssResource.class); + private static final long mem_32m = 33554432L; + @Override protected List getPatchFiles() { List files = new ArrayList(); @@ -65,13 +69,13 @@ public class XcpOssResource extends CitrixResourceBase { files.add(file); return files; } - + @Override protected void fillHostInfo(Connection conn, StartupRoutingCommand cmd) { super.fillHostInfo(conn, cmd); 
cmd.setCaps(cmd.getCapabilities() + " , hvm"); } - + @Override protected String getGuestOsType(String stdType, boolean bootFromCD) { if (stdType.equalsIgnoreCase("Debian GNU/Linux 6(64-bit)")) { @@ -80,7 +84,7 @@ public class XcpOssResource extends CitrixResourceBase { return CitrixHelper.getXcpGuestOsType(stdType); } } - + protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException { if (_host.localSRuuid != null) { //create an iso vdi on it @@ -88,13 +92,13 @@ public class XcpOssResource extends CitrixResourceBase { if (result == null || result.equalsIgnoreCase("Failed")) { throw new CloudRuntimeException("can not create systemvm vdi"); } - + Set vdis = VDI.getByNameLabel(conn, "systemvm-vdi"); if (vdis.size() != 1) { throw new CloudRuntimeException("can not find systemvmiso"); } VDI systemvmVDI = vdis.iterator().next(); - + VBD.Record cdromVBDR = new VBD.Record(); cdromVBDR.VM = vm; cdromVBDR.empty = false; @@ -109,7 +113,7 @@ public class XcpOssResource extends CitrixResourceBase { throw new CloudRuntimeException("can not find local sr"); } } - + protected NetworkUsageAnswer execute(NetworkUsageCommand cmd) { try { @@ -124,10 +128,10 @@ public class XcpOssResource extends CitrixResourceBase { return answer; } catch (Exception ex) { s_logger.warn("Failed to get network usage stats due to ", ex); - return new NetworkUsageAnswer(cmd, ex); + return new NetworkUsageAnswer(cmd, ex); } } - + @Override public Answer executeRequest(Command cmd) { if (cmd instanceof NetworkUsageCommand) { @@ -136,11 +140,11 @@ public class XcpOssResource extends CitrixResourceBase { return super.executeRequest(cmd); } } - + @Override public StartAnswer execute(StartCommand cmd) { StartAnswer answer = super.execute(cmd); - + VirtualMachineTO vmSpec = cmd.getVirtualMachine(); if (vmSpec.getType() == VirtualMachine.Type.ConsoleProxy) { Connection conn = getConnection(); @@ -152,10 +156,10 @@ public class XcpOssResource extends CitrixResourceBase { } callHostPlugin(conn, "vmops", "setDNATRule", "ip", publicIp, "port", "8443", "add", "true"); } - + return answer; } - + @Override public StopAnswer execute(StopCommand cmd) { StopAnswer answer = super.execute(cmd); @@ -166,4 +170,9 @@ public class XcpOssResource extends CitrixResourceBase { } return answer; } + + @Override + protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException { + vm.setMemoryLimits(conn, mem_32m, maxMemsize, minMemsize, maxMemsize); + } } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java index 724c21bc4a2..059120a12f4 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java @@ -141,7 +141,7 @@ public class XenServer56FP1Resource extends XenServer56Resource { vmr.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY; vmr.actionsAfterShutdown = Types.OnNormalExit.DESTROY; - if (isDmcEnabled(conn, host)) { + if (isDmcEnabled(conn, host) && vmSpec.isEnableDynamicallyScaleVm()) { //scaling is allowed vmr.memoryStaticMin = mem_128m; //128MB //TODO: Remove hardcoded 8GB and assign proportionate to ServiceOffering and mem overcommit ratio diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java 
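The two hunks above pin the static memory floor used when dynamic scaling is in play: XcpOss uses 32 MiB, XenServer 5.6 FP1 uses 128 MiB. A small sketch of the numbers, with the XenAPI parameter order assumed to be static-min, static-max, dynamic-min, dynamic-max as the call above suggests:

    // Illustration only: the 33554432L constant above is 32 * 1024 * 1024 bytes.
    long mem32MiB = 32L * 1024 * 1024;    // static-min used by XcpOssResource.setMemory()
    long mem128MiB = 128L * 1024 * 1024;  // static-min used by XenServer56FP1Resource when scaling is enabled
    // Assumed ordering of the call shown above:
    // vm.setMemoryLimits(conn, /*staticMin*/ mem32MiB, /*staticMax*/ maxMemsize, /*dynamicMin*/ minMemsize, /*dynamicMax*/ maxMemsize);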
b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java index 72ecc67cad6..823be86b45f 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java @@ -1070,7 +1070,7 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection { } private String getNameForDNatIpPool(String tenantName, String identifier) { - return "IpPool-" + tenantName + "-" + identifier; + return "IpPool-" + tenantName + "-" + identifier + "n"; } private String getDnForDNatIpPool(String tenantName, String identifier) { diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java index b335edb9f63..553325ccaa9 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java @@ -98,6 +98,8 @@ import com.cloud.network.cisco.NetworkAsa1000vMapVO; import com.cloud.network.dao.CiscoAsa1000vDao; import com.cloud.network.dao.CiscoNexusVSMDeviceDao; import com.cloud.network.dao.CiscoVnmcDao; +import com.cloud.network.dao.IPAddressDao; +import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkAsa1000vMapDao; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.PhysicalNetworkDao; @@ -148,7 +150,9 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro PhysicalNetworkDao _physicalNetworkDao; @Inject PhysicalNetworkServiceProviderDao _physicalNetworkServiceProviderDao; - @Inject + @Inject + IPAddressDao _ipAddressDao; + @Inject HostDetailsDao _hostDetailsDao; @Inject HostDao _hostDao; @@ -342,22 +346,33 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro } // due to VNMC limitation of not allowing source NAT ip as the outside ip of firewall, - // an additional public ip needs to acquired for assigning as firewall outside ip + // an additional public ip needs to acquired for assigning as firewall outside ip. + // In case there are already additional ip addresses available (network restart) use one + // of them such that it is not the source NAT ip IpAddress outsideIp = null; - try { - Account caller = UserContext.current().getCaller(); - long callerUserId = UserContext.current().getCallerUserId(); - outsideIp = _networkMgr.allocateIp(owner, false, caller, callerUserId, zone); - } catch (ResourceAllocationException e) { - s_logger.error("Unable to allocate additional public Ip address. Exception details " + e); - return false; + List publicIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); + for (IPAddressVO ip : publicIps) { + if (!ip.isSourceNat()) { + outsideIp = ip; + break; + } } + if (outsideIp == null) { // none available, acquire one + try { + Account caller = UserContext.current().getCaller(); + long callerUserId = UserContext.current().getCallerUserId(); + outsideIp = _networkMgr.allocateIp(owner, false, caller, callerUserId, zone); + } catch (ResourceAllocationException e) { + s_logger.error("Unable to allocate additional public Ip address. 
Exception details " + e); + return false; + } - try { - outsideIp = _networkMgr.associateIPToGuestNetwork(outsideIp.getId(), network.getId(), true); - } catch (ResourceAllocationException e) { - s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e); - return false; + try { + outsideIp = _networkMgr.associateIPToGuestNetwork(outsideIp.getId(), network.getId(), true); + } catch (ResourceAllocationException e) { + s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e); + return false; + } } // create logical edge firewall in VNMC @@ -792,7 +807,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro List rulesTO = new ArrayList(); for (StaticNat rule : rules) { IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); - StaticNatRuleTO ruleTO = new StaticNatRuleTO(0, sourceIp.getAddress().addr(), null, + StaticNatRuleTO ruleTO = new StaticNatRuleTO(rule.getSourceIpAddressId(), sourceIp.getAddress().addr(), null, null, rule.getDestIpAddress(), null, null, null, rule.isForRevoke(), false); rulesTO.add(ruleTO); } diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java index fc0c33483ac..9524bf99cec 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/resource/CiscoVnmcResource.java @@ -501,7 +501,7 @@ public class CiscoVnmcResource implements ServerResource { } /* - * Destination NAT + * PF */ private synchronized Answer execute(SetPortForwardingRulesCommand cmd) { refreshVnmcConnection(); diff --git a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index 4b9308b6606..14b616cb749 100644 --- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -64,7 +64,6 @@ import com.cloud.network.element.VirtualRouterProviderVO; import com.cloud.network.lb.LoadBalancingRule; import com.cloud.network.router.VirtualRouter; import com.cloud.network.router.VirtualRouter.Role; -import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.LoadBalancerContainer; import com.cloud.network.rules.LoadBalancerContainer.Scheme; import com.cloud.offering.NetworkOffering; @@ -394,23 +393,16 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala //1) Group rules by the source ip address as NetworkManager always passes the entire network lb config to the element Map> groupedRules = groupBySourceIp(rules); - //2) Count rules in revoke state Set vmsToDestroy = new HashSet(); for (Ip sourceIp : groupedRules.keySet()) { + //2) Check if there are non revoked rules for the source ip address List rulesToCheck = groupedRules.get(sourceIp); - int revoke = 0; - for (LoadBalancingRule ruleToCheck : rulesToCheck) { - if (ruleToCheck.getState() == FirewallRule.State.Revoke){ - revoke++; - } - } - - 
if (revoke == rulesToCheck.size()) { - s_logger.debug("Have to destroy internal lb vm for source ip " + sourceIp); + if (_appLbDao.countBySourceIpAndNotRevoked(sourceIp, rulesToCheck.get(0).getNetworkId()) == 0) { + s_logger.debug("Have to destroy internal lb vm for source ip " + sourceIp + " as it has 0 rules in non-Revoke state"); vmsToDestroy.add(sourceIp); - } - } + } + } return vmsToDestroy; } diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java index a429306a680..c00d99abf88 100644 --- a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java +++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java @@ -242,6 +242,7 @@ PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, Junip // Set capabilities for Firewall service Map firewallCapabilities = new HashMap(); firewallCapabilities.put(Capability.SupportedProtocols, "tcp,udp,icmp"); + firewallCapabilities.put(Capability.SupportedEgressProtocols, "tcp,udp,icmp,all"); firewallCapabilities.put(Capability.MultipleIps, "true"); firewallCapabilities.put(Capability.TrafficStatistics, "per public ip"); firewallCapabilities.put(Capability.SupportedTrafficDirection, "ingress, egress"); diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java index a0068c3784c..fd065d58f87 100644 --- a/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java +++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/resource/JuniperSrxResource.java @@ -750,7 +750,7 @@ public class JuniperSrxResource implements ServerResource { s_logger.debug(msg); } - private void shutdownGuestNetwork(GuestNetworkType type, long accountId, Long publicVlanTag, String sourceNatIpAddress, long privateVlanTag, String privateGateway, String privateSubnet, long privateCidrSize) throws ExecutionException { + private void shutdownGuestNetwork(GuestNetworkType type, long accountId, Long publicVlanTag, String sourceNatIpAddress, long privateVlanTag, String privateGateway, String privateSubnet, long privateCidrSize) throws ExecutionException { // Remove static and destination NAT rules for the guest network removeStaticAndDestNatRulesInPrivateVlan(privateVlanTag, privateGateway, privateCidrSize); @@ -766,10 +766,10 @@ public class JuniperSrxResource implements ServerResource { manageSourceNatPool(SrxCommand.DELETE, sourceNatIpAddress); manageProxyArp(SrxCommand.DELETE, publicVlanTag, sourceNatIpAddress); manageUsageFilter(SrxCommand.DELETE, _usageFilterIPOutput, privateSubnet, null, genIpFilterTermName(sourceNatIpAddress)); - manageUsageFilter(SrxCommand.DELETE, _usageFilterIPInput, sourceNatIpAddress, null, genIpFilterTermName(sourceNatIpAddress)); + manageUsageFilter(SrxCommand.DELETE, _usageFilterIPInput, sourceNatIpAddress, null, genIpFilterTermName(sourceNatIpAddress)); } else if (type.equals(GuestNetworkType.INTERFACE_NAT)) { manageUsageFilter(SrxCommand.DELETE, _usageFilterVlanOutput, null, privateVlanTag, null); - manageUsageFilter(SrxCommand.DELETE, _usageFilterVlanInput, null, privateVlanTag, null); + manageUsageFilter(SrxCommand.DELETE, _usageFilterVlanInput, null, privateVlanTag, null); } String msg = 
"Shut down guest network with type " + type +". Guest VLAN tag: " + privateVlanTag + ", guest gateway: " + privateGateway; @@ -841,21 +841,24 @@ public class JuniperSrxResource implements ServerResource { commitConfiguration(); } else { for (FirewallRuleTO rule : rules) { - int startPort = 0, endPort = 0; + int startPort = NetUtils.PORT_RANGE_MIN, endPort = NetUtils.PORT_RANGE_MAX; if (rule.getSrcPortRange() != null) { startPort = rule.getSrcPortRange()[0]; endPort = rule.getSrcPortRange()[1]; - FirewallFilterTerm term = new FirewallFilterTerm(genIpIdentifier(rule.getSrcIp()) + "-" + String.valueOf(rule.getId()), rule.getSourceCidrList(), - rule.getSrcIp(), rule.getProtocol(), startPort, endPort, - rule.getIcmpType(), rule.getIcmpCode(), genIpIdentifier(rule.getSrcIp()) + _usageFilterIPInput.getCounterIdentifier()); - if (!rule.revoked()) { - manageFirewallFilter(SrxCommand.ADD, term, _publicZoneInputFilterName); - } else { - manageFirewallFilter(SrxCommand.DELETE, term, _publicZoneInputFilterName); - } } - commitConfiguration(); + + FirewallFilterTerm term = new FirewallFilterTerm(genIpIdentifier(rule.getSrcIp()) + "-" + String.valueOf(rule.getId()), rule.getSourceCidrList(), + rule.getSrcIp(), rule.getProtocol(), startPort, endPort, + rule.getIcmpType(), rule.getIcmpCode(), genIpIdentifier(rule.getSrcIp()) + _usageFilterIPInput.getCounterIdentifier()); + if (!rule.revoked()) { + manageProxyArp(SrxCommand.ADD, getVlanTag(rule.getSrcVlanTag()), rule.getSrcIp()); + manageFirewallFilter(SrxCommand.ADD, term, _publicZoneInputFilterName); + } else { + manageFirewallFilter(SrxCommand.DELETE, term, _publicZoneInputFilterName); + manageProxyArp(SrxCommand.DELETE, getVlanTag(rule.getSrcVlanTag()), rule.getSrcIp()); + } } + commitConfiguration(); } return new Answer(cmd); @@ -925,7 +928,6 @@ public class JuniperSrxResource implements ServerResource { } private void addStaticNatRule(Long publicVlanTag, String publicIp, String privateIp, List rules) throws ExecutionException { - manageProxyArp(SrxCommand.ADD, publicVlanTag, publicIp); manageStaticNatRule(SrxCommand.ADD, publicIp, privateIp); manageAddressBookEntry(SrxCommand.ADD, _privateZone, privateIp, null); @@ -937,7 +939,6 @@ public class JuniperSrxResource implements ServerResource { private void removeStaticNatRule(Long publicVlanTag, String publicIp, String privateIp) throws ExecutionException { manageStaticNatRule(SrxCommand.DELETE, publicIp, privateIp); - manageProxyArp(SrxCommand.DELETE, publicVlanTag, publicIp); // Remove any existing security policy and clean up applications removeSecurityPolicyAndApplications(SecurityPolicyType.STATIC_NAT, privateIp); @@ -1196,8 +1197,7 @@ public class JuniperSrxResource implements ServerResource { } private void addDestinationNatRule(Protocol protocol, Long publicVlanTag, String publicIp, String privateIp, int srcPortStart, int srcPortEnd, int destPortStart, int destPortEnd) throws ExecutionException { - manageProxyArp(SrxCommand.ADD, publicVlanTag, publicIp); - + int offset = 0; for (int srcPort = srcPortStart; srcPort <= srcPortEnd; srcPort++) { int destPort = destPortStart + offset; @@ -1220,7 +1220,6 @@ public class JuniperSrxResource implements ServerResource { private void removeDestinationNatRule(Long publicVlanTag, String publicIp, String privateIp, int srcPort, int destPort) throws ExecutionException { manageDestinationNatRule(SrxCommand.DELETE, publicIp, privateIp, srcPort, destPort); manageDestinationNatPool(SrxCommand.DELETE, privateIp, destPort); - manageProxyArp(SrxCommand.DELETE, 
publicVlanTag, publicIp); removeSecurityPolicyAndApplications(SecurityPolicyType.DESTINATION_NAT, privateIp); diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java index c0d4599dc0c..263e13b42f3 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/resource/NetscalerResource.java @@ -904,6 +904,27 @@ public class NetscalerResource implements ServerResource { // Bind 'gslbservice' service object to GSLB virtual server GSLB.createVserverServiceBinding(_netscalerService, serviceName, vserverName); + // create a monitor for the service running on the site + lbmonitor newmonitor = new lbmonitor(); + String monitorName = GSLB.generateGslbServiceMonitorName(servicePublicIp); + newmonitor.set_type("TCP"); + newmonitor.set_servicename(serviceName); + newmonitor.set_monitorname(monitorName); + newmonitor.set_state("ENABLED"); + lbmonitor.add(_netscalerService, newmonitor); + + // bind the monitor to the GSLB servie + try { + gslbservice_lbmonitor_binding monitorBinding = new gslbservice_lbmonitor_binding(); + monitorBinding.set_monitor_name(monitorName); + monitorBinding.set_servicename(serviceName); + gslbservice_lbmonitor_binding.add(_netscalerService, monitorBinding); + } catch (Exception e) { + // TODO: Nitro API version 10.* is not compatible for NetScalers 9.*, so may fail + // against NetScaler version lesser than 10 hence ignore the exception + s_logger.warn("Failed to bind monitor to GSLB service due to " + e.getMessage()); + } + } else { // Unbind GSLB service with GSLB virtual server GSLB.deleteVserverServiceBinding(_netscalerService, serviceName, vserverName); @@ -911,6 +932,19 @@ public class NetscalerResource implements ServerResource { // delete 'gslbservice' object gslbservice service = GSLB.getServiceObject(_netscalerService, serviceName); GSLB.deleteService(_netscalerService, serviceName); + + // delete the GSLB service monitor + String monitorName = GSLB.generateGslbServiceMonitorName(servicePublicIp); + try { + lbmonitor serviceMonitor = lbmonitor.get(_netscalerService, monitorName); + if (serviceMonitor != null) { + lbmonitor.delete(_netscalerService, serviceMonitor); + } + } catch (nitro_exception ne) { + if (ne.getErrorCode() != NitroError.NS_RESOURCE_NOT_EXISTS) { + s_logger.warn("Failed to delete monitor "+ monitorName + " for GSLB service due to " + ne.getMessage()); + } + } } if (site.forRevoke()) { // delete the site if its for revoke @@ -1469,6 +1503,10 @@ public class NetscalerResource implements ServerResource { return "cloud-gslb-service-" + siteName + "-" + publicIp + "-" + publicPort; } + private static String generateGslbServiceMonitorName(String publicIp) { + return "cloud-monitor-" + publicIp; + } + private static boolean gslbServerExists(nitro_service client, String serverName) throws ExecutionException { try { if (com.citrix.netscaler.nitro.resource.config.basic.server.get(client, serverName) != null) { diff --git a/plugins/pom.xml b/plugins/pom.xml index 8a5b598c811..d8ae97af2ca 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -36,10 +36,12 @@ api/discovery acl/static-role-based affinity-group-processors/host-anti-affinity + affinity-group-processors/explicit-dedication deployment-planners/user-concentrated-pod deployment-planners/user-dispersing deployment-planners/implicit-dedication host-allocators/random + 
dedicated-resources hypervisors/ovm hypervisors/xen hypervisors/kvm diff --git a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java index a0b64bfc893..7ff56f6582a 100644 --- a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java @@ -37,6 +37,7 @@ import org.apache.log4j.Logger; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.storage.DataStoreRole; @@ -133,8 +134,9 @@ public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle { return false; } + @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { // TODO Auto-generated method stub return false; } diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java index 674a13d854d..6965a152041 100644 --- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java @@ -35,6 +35,7 @@ import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle; import org.apache.log4j.Logger; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.storage.DataStoreRole; @@ -122,8 +123,10 @@ public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle { return false; } + + @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { // TODO Auto-generated method stub return false; } diff --git a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java index e001c0b4940..c7e48018eaf 100644 --- a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager; import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.hypervisor.Hypervisor.HypervisorType; public class SampleImageStoreLifeCycleImpl implements ImageStoreLifeCycle { @Inject @@ -62,7 +63,7 @@ public class SampleImageStoreLifeCycleImpl implements ImageStoreLifeCycle { } @Override - public boolean attachZone(DataStore dataStore, 
ZoneScope scope) { + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisor) { // TODO Auto-generated method stub return false; } diff --git a/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java b/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java index 3ba3b3133d0..38e20073491 100644 --- a/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java @@ -34,6 +34,7 @@ import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle; import org.apache.log4j.Logger; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.storage.DataStoreRole; @@ -108,8 +109,9 @@ public class SwiftImageStoreLifeCycleImpl implements ImageStoreLifeCycle { return false; } + @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { // TODO Auto-generated method stub return false; } diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 693ab01c73b..38dd5a9b276 100644 --- a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -404,9 +404,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore } @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, - scope.getScopeId()); + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { + List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); for (HostVO host : hosts) { try { this.storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); @@ -414,7 +413,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } - this.dataStoreHelper.attachZone(dataStore); + this.dataStoreHelper.attachZone(dataStore, hypervisorType); return true; } diff --git a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java index 504cb9a007f..7ee8565e7e7 100644 --- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java @@ -115,7 +115,7 @@ public class SamplePrimaryDataStoreLifeCycleImpl implements 
PrimaryDataStoreLife } @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope) { + public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { // TODO Auto-generated method stub return false; } diff --git a/pom.xml b/pom.xml index d7e80d64548..67d9576715c 100644 --- a/pom.xml +++ b/pom.xml @@ -83,6 +83,7 @@ 0.10 build/replace.properties 0.4.9 + 0.1.1 target 1.0.10 diff --git a/python/lib/cloudutils/globalEnv.py b/python/lib/cloudutils/globalEnv.py index 94d981f4dab..867aa17670d 100644 --- a/python/lib/cloudutils/globalEnv.py +++ b/python/lib/cloudutils/globalEnv.py @@ -40,3 +40,5 @@ class globalEnv: self.privateNet = "cloudbr0" #distribution self.distribution = None + # bridgeType + self.bridgeType = "native" diff --git a/python/lib/cloudutils/networkConfig.py b/python/lib/cloudutils/networkConfig.py index b6b729a4fec..9e456e970be 100644 --- a/python/lib/cloudutils/networkConfig.py +++ b/python/lib/cloudutils/networkConfig.py @@ -19,6 +19,7 @@ from cloudException import CloudRuntimeException, CloudInternalException import logging import os import re +import subprocess class networkConfig: class devInfo: @@ -85,15 +86,22 @@ class networkConfig: @staticmethod def isNetworkDev(devName): - return os.path.exists("/sys/class/net/%s"%devName) + return os.path.exists("/sys/class/net/%s" % devName) @staticmethod def isBridgePort(devName): - return os.path.exists("/sys/class/net/%s/brport"%devName) + return os.path.exists("/sys/class/net/%s/brport" % devName) @staticmethod def isBridge(devName): - return os.path.exists("/sys/class/net/%s/bridge"%devName) + return os.path.exists("/sys/class/net/%s/bridge" % devName) + + @staticmethod + def isOvsBridge(devName): + try: + return 0==subprocess.check_call(("ovs-vsctl", "br-exists", devName)) + except subprocess.CalledProcessError: + return False @staticmethod def getBridge(devName): diff --git a/python/lib/cloudutils/serviceConfig.py b/python/lib/cloudutils/serviceConfig.py index 1e32d0f3b0f..d129e00c45b 100755 --- a/python/lib/cloudutils/serviceConfig.py +++ b/python/lib/cloudutils/serviceConfig.py @@ -94,8 +94,10 @@ class networkConfigBase: if not self.netcfg.isNetworkDev(br): logging.debug("%s is not a network device, is it down?"%br) return False - if not self.netcfg.isBridge(br): - raise CloudInternalException("%s is not a bridge"%br) + if self.syscfg.env.bridgeType == "openvswitch" and not self.netcfg.isOvsBridge(br): + raise CloudInternalException("%s is not an openvswitch bridge" % br) + if self.syscfg.env.bridgeType == "native" and not self.netcfg.isBridge(br): + raise CloudInternalException("%s is not a bridge" % br) preCfged = True return preCfged @@ -153,11 +155,28 @@ class networkConfigUbuntu(serviceCfgBase, networkConfigBase): match = re.match("^ *iface %s.*"%dev.name, line) if match is not None: dev.method = self.getNetworkMethod(match.group(0)) - bridgeCfg = "\niface %s inet manual\n \ - auto %s\n \ - iface %s inet %s\n \ - bridge_ports %s\n"%(dev.name, br, br, dev.method, dev.name) cfo = configFileOps(self.netCfgFile, self) + if self.syscfg.env.bridgeType == "openvswitch": + bridgeCfg = "\n".join(("", + "iface {device} inet manual", + " ovs_type OVSPort", + " ovs_bridge {bridge}", + "", + "auto {bridge}", + "allow-ovs {bridge}", + "iface {bridge} inet {device_method}", + " ovs_type OVSBridge", + " ovs_ports {device}", + "")).format(bridge=br, device=dev.name, device_method=dev.method) + cfo.replace_line("^ *auto %s.*" % dev.name, + "allow-{bridge} {device}".format(bridge=br, 
device=dev.name)) + elif self.syscfg.env.bridgeType == "native": + bridgeCfg = "\niface %s inet manual\n \ + auto %s\n \ + iface %s inet %s\n \ + bridge_ports %s\n"%(dev.name, br, br, dev.method, dev.name) + else: + raise CloudInternalException("Unknown network.bridge.type %s" % self.syscfg.env.bridgeType) cfo.replace_line("^ *iface %s.*"%dev.name, bridgeCfg) def addDev(self, br, dev): @@ -193,8 +212,9 @@ class networkConfigUbuntu(serviceCfgBase, networkConfigBase): self.syscfg.svo.stopService("network-manager") self.syscfg.svo.disableService("network-manager") - if not bash("ifup %s"%self.brName).isSuccess(): - raise CloudInternalException("Can't start network:%s"%self.brName, bash.getErrMsg(self)) + ifup_op = bash("ifup %s"%self.brName) + if not ifup_op.isSuccess(): + raise CloudInternalException("Can't start network:%s %s" % (self.brName, ifup_op.getErrMsg())) self.syscfg.env.nics.append(self.brName) self.syscfg.env.nics.append(self.brName) @@ -222,8 +242,8 @@ class networkConfigRedhat(serviceCfgBase, networkConfigBase): networkConfigBase.__init__(self, syscfg) def writeToCfgFile(self, brName, dev): - self.devCfgFile = "/etc/sysconfig/network-scripts/ifcfg-%s"%dev.name - self.brCfgFile = "/etc/sysconfig/network-scripts/ifcfg-%s"%brName + self.devCfgFile = "/etc/sysconfig/network-scripts/ifcfg-%s" % dev.name + self.brCfgFile = "/etc/sysconfig/network-scripts/ifcfg-%s" % brName isDevExist = os.path.exists(self.devCfgFile) isBrExist = os.path.exists(self.brCfgFile) @@ -241,7 +261,7 @@ class networkConfigRedhat(serviceCfgBase, networkConfigBase): def addBridge(self, brName, dev): - bash("ifdown %s"%dev.name) + bash("ifdown %s" % dev.name) if not os.path.exists(self.brCfgFile): shutil.copy(self.devCfgFile, self.brCfgFile) @@ -250,14 +270,34 @@ class networkConfigRedhat(serviceCfgBase, networkConfigBase): cfo = configFileOps(self.devCfgFile, self) cfo.addEntry("NM_CONTROLLED", "no") cfo.addEntry("ONBOOT", "yes") - cfo.addEntry("BRIDGE", brName) + if self.syscfg.env.bridgeType == "openvswitch": + if cfo.getEntry("IPADDR"): + cfo.rmEntry("IPADDR", cfo.getEntry("IPADDR")) + cfo.addEntry("DEVICETYPE", "ovs") + cfo.addEntry("TYPE", "OVSPort") + cfo.addEntry("OVS_BRIDGE", brName) + elif self.syscfg.env.bridgeType == "native": + cfo.addEntry("BRIDGE", brName) + else: + raise CloudInternalException("Unknown network.bridge.type %s" % self.syscfg.env.bridgeType) cfo.save() cfo = configFileOps(self.brCfgFile, self) cfo.addEntry("NM_CONTROLLED", "no") cfo.addEntry("ONBOOT", "yes") cfo.addEntry("DEVICE", brName) - cfo.addEntry("TYPE", "Bridge") + if self.syscfg.env.bridgeType == "openvswitch": + if cfo.getEntry("HWADDR"): + cfo.rmEntry("HWADDR", cfo.getEntry("HWADDR")) + if cfo.getEntry("UUID"): + cfo.rmEntry("UUID", cfo.getEntry("UUID")) + cfo.addEntry("STP", "yes") + cfo.addEntry("DEVICETYPE", "ovs") + cfo.addEntry("TYPE", "OVSBridge") + elif self.syscfg.env.bridgeType == "native": + cfo.addEntry("TYPE", "Bridge") + else: + raise CloudInternalException("Unknown network.bridge.type %s" % self.syscfg.env.bridgeType) cfo.save() def config(self): diff --git a/scripts/storage/qcow2/modifyvlan.sh b/scripts/storage/qcow2/modifyvlan.sh deleted file mode 100755 index 5e26af0ba02..00000000000 --- a/scripts/storage/qcow2/modifyvlan.sh +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -# $Id: modifyvlan.sh 9132 2010-06-04 20:17:43Z manuel $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/storage/qcow2/modifyvlan.sh $ -# modifyvlan.sh -- adds and deletes VLANs from a Routing Server -# set -x - -usage() { - printf "Usage: %s: -o -v -g \n" -} - -addVlan() { - local vlanId=$1 - - ifconfig bond1.$vlanId > /dev/null - - if [ $? -gt 0 ] - then - vconfig add bond1 $vlanId - - if [ $? -gt 0 ] - then - return 1 - fi - fi - - # Make ifcfg-bond1.$vlanId - rm /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - touch /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - echo "DEVICE=bond1.$vlanId" >> /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - echo "ONBOOT=yes" >> /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - echo "BOOTPROTO=none" >> /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - echo "VLAN=yes" >> /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - echo "BRIDGE=xenbr1.$vlanId" >> /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - - # Try to add xenbr1.$vlanId over bond1.$vlanId, if it does not already exist - - ifconfig xenbr1.$vlanId > /dev/null - - if [ $? -gt 0 ] - then - brctl addbr xenbr1.$vlanId - - if [ $? -gt 0 ] - then - return 1 - fi - - brctl addif xenbr1.$vlanId bond1.$vlanId - - if [ $? -gt 0 ] - then - return 1 - fi - - fi - - ifconfig xenbr1.$vlanId up - - if [ $? -gt 0 ] - then - return 1 - fi - - # Make ifcfg-xenbr1.$vlanId - rm /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - touch /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - echo "TYPE=bridge" >> /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - echo "DEVICE=xenbr1.$vlanId" >> /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - echo "ONBOOT=yes" >> /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - echo "BOOTPROTO=none" >> /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - - return 0 -} - -deleteVlan() { - local vlanId=$1 - - # Try to remove xenbr1.$vlanId - ifconfig xenbr1.$vlanId down - - if [ $? -gt 0 ] - then - return 1 - fi - - brctl delbr xenbr1.$vlanId - - if [ $? -gt 0 ] - then - return 1 - fi - - # Remove ifcfg-xenbr1.$vlanId - rm /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId - - # Try to remove bond1.$vlanId - - vconfig rem bond1.$vlanId - - if [ $? -gt 0 ] - then - return 1 - fi - - # Remove ifcfg-bond1.$vlanId - rm /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId - - return 0 - -} - -checkIfVlanExists() { - local vlanId=$1 - - if [ "$vlanId" == "untagged" ] - then - # This VLAN should always exist, since the bridge is xenbr1, which is created during vsetup - return 0 - fi - - ifconfig bond1.$vlanId > /dev/null - - if [ $? -gt 0 ] - then - return 1 - fi - - ifconfig xenbr1.$vlanId > /dev/null - - if [ $? -gt 0 ] - then - return 1 - fi - - if [ ! -f /etc/sysconfig/network-scripts/ifcfg-xenbr1.$vlanId ] - then - return 1 - fi - - if [ ! 
-f /etc/sysconfig/network-scripts/ifcfg-bond1.$vlanId ] - then - return 1 - fi - - return 0 -} - -arpingVlan() { - local vlanId=$1 - local vlanGateway=$2 - - # Change!!! - return 0 - - success=1 - for i in $(seq 1 3) - do - arping -I xenbr1.$vlanId $vlanGateway > /dev/null - - if [ $? -gt 0 ] - then - success=0 - break - fi - done - - return $success -} - -op= -vlanId= -vlanGateway= -option=$@ - -while getopts 'o:v:g:' OPTION -do - case $OPTION in - o) oflag=1 - op="$OPTARG" - ;; - v) vflag=1 - vlanId="$OPTARG" - ;; - g) gflag=1 - vlanGateway="$OPTARG" - ;; - ?) usage - exit 2 - ;; - esac -done - -# Check that all arguments were passed in -if [ "$oflag$vflag$gflag" != "111" ] -then - usage - exit 2 -fi - -if [ "$op" == "add" ] -then - # Check if the vlan already exists, and exit with success if it does - checkIfVlanExists $vlanId - - if [ $? -eq 0 ] - then - exit 0 - fi - - # Add the vlan - addVlan $vlanId - - # If the add fails then return failure - if [ $? -gt 0 ] - then - exit 1 - fi - - # Ping the vlan - arpingVlan $vlanId $vlanGateway - - # If the ping fails then delete the vlan and return failure. Else, return success. - if [ $? -gt 0 ] - then - deleteVlan $vlanId - exit 1 - else - exit 0 - fi -else - if [ "$op" == "delete" ] - then - # Delete the vlan - deleteVlan $vlanId - - # Always exit with success - exit 0 - fi -fi - - - - - - - - - - - - - diff --git a/scripts/vm/hypervisor/kvm/patchviasocket.pl b/scripts/vm/hypervisor/kvm/patchviasocket.pl old mode 100644 new mode 100755 diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops index d18eca836b8..650e95535e0 100755 --- a/scripts/vm/hypervisor/xenserver/vmops +++ b/scripts/vm/hypervisor/xenserver/vmops @@ -541,6 +541,7 @@ def default_ebtables_rules(): util.pread2(['ebtables', '-N', 'DEFAULT_EBTABLES']) util.pread2(['ebtables', '-A', 'FORWARD', '-j' 'DEFAULT_EBTABLES']) util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '--ip-dst', '255.255.255.255', '--ip-proto', 'udp', '--ip-dport', '67', '-j', 'ACCEPT']) + util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '--ip-dst', '255.255.255.255', '--ip-proto', 'udp', '--ip-dport', '68', '-j', 'ACCEPT']) util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'ARP', '--arp-op', 'Request', '-j', 'ACCEPT']) util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'ARP', '--arp-op', 'Reply', '-j', 'ACCEPT']) # deny mac broadcast and multicast diff --git a/scripts/vm/network/ovs-pvlan-vm.sh b/scripts/vm/network/ovs-pvlan-vm.sh index fd384814cc4..06e41fe4219 100755 --- a/scripts/vm/network/ovs-pvlan-vm.sh +++ b/scripts/vm/network/ovs-pvlan-vm.sh @@ -86,7 +86,8 @@ then exit 1 fi -trunk_port=1 +# try to find the physical link to outside, only supports eth and em prefix now +trunk_port=`ovs-ofctl show $br | egrep "\((eth|em)[0-9]" | cut -d '(' -f 1|tr -d ' '` if [ "$op" == "add" ] then diff --git a/server/pom.xml b/server/pom.xml index 6385bf2f233..8fe1e2d9508 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -18,6 +18,11 @@ 4.2.0-SNAPSHOT + + commons-io + commons-io + ${cs.commons-io.version} + org.apache.cloudstack cloud-core diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index bf31b6803bc..073e4c27618 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -60,7 +60,6 @@ import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.ResourceTagResponse; import 
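The ovs-pvlan-vm.sh change above stops assuming the trunk is OpenFlow port 1 and instead parses `ovs-ofctl show` for the first interface named eth* or em* on the bridge. A rough Python equivalent of that lookup; it assumes ovs-ofctl is installed and reachable on PATH, and the bridge name used in the example is hypothetical.

# Sketch: find the OpenFlow port number of the physical uplink on an OVS bridge,
# mirroring: ovs-ofctl show $br | egrep "\((eth|em)[0-9]" | cut -d '(' -f 1 | tr -d ' '
import re
import subprocess

def find_trunk_port(bridge):
    out = subprocess.check_output(["ovs-ofctl", "show", bridge]).decode()
    for line in out.splitlines():
        m = re.search(r"^\s*(\d+)\((?:eth|em)\d", line)
        if m:
            return m.group(1)  # OpenFlow port number of the physical NIC
    return None

if __name__ == "__main__":
    print(find_trunk_port("cloudbr0"))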
org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; -import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserResponse; @@ -177,8 +176,6 @@ import com.cloud.network.as.dao.AutoScaleVmGroupPolicyMapDao; import com.cloud.network.as.dao.AutoScaleVmProfileDao; import com.cloud.network.as.dao.ConditionDao; import com.cloud.network.as.dao.CounterDao; -import com.cloud.network.dao.AccountGuestVlanMapDao; -import com.cloud.network.dao.AccountGuestVlanMapVO; import com.cloud.network.dao.FirewallRulesCidrsDao; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; @@ -203,7 +200,6 @@ import com.cloud.network.dao.Site2SiteVpnGatewayDao; import com.cloud.network.dao.Site2SiteVpnGatewayVO; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.FirewallRuleVO; -import com.cloud.network.rules.LoadBalancer; import com.cloud.network.security.SecurityGroup; import com.cloud.network.security.SecurityGroupManager; import com.cloud.network.security.SecurityGroupVO; @@ -217,7 +213,6 @@ import com.cloud.projects.Project; import com.cloud.projects.ProjectAccount; import com.cloud.projects.ProjectInvitation; import com.cloud.projects.ProjectService; -import com.cloud.region.ha.GlobalLoadBalancingRulesService; import com.cloud.resource.ResourceManager; import com.cloud.server.Criteria; import com.cloud.server.ManagementServer; @@ -1564,11 +1559,11 @@ public class ApiDBUtils { return _poolJoinDao.setStoragePoolResponse(vrData, vr); } - public static StoragePoolForMigrationResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO vr) { + public static StoragePoolResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO vr) { return _poolJoinDao.newStoragePoolForMigrationResponse(vr); } - public static StoragePoolForMigrationResponse fillStoragePoolForMigrationDetails(StoragePoolForMigrationResponse + public static StoragePoolResponse fillStoragePoolForMigrationDetails(StoragePoolResponse vrData, StoragePoolJoinVO vr){ return _poolJoinDao.setStoragePoolForMigrationResponse(vrData, vr); } diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index cf11b41f7ab..36ed9a5082e 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -114,7 +114,6 @@ import org.apache.cloudstack.api.response.SnapshotResponse; import org.apache.cloudstack.api.response.SnapshotScheduleResponse; import org.apache.cloudstack.api.response.StaticRouteResponse; import org.apache.cloudstack.api.response.StorageNetworkIpRangeResponse; -import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.SwiftResponse; import org.apache.cloudstack.api.response.SystemVmInstanceResponse; @@ -210,6 +209,7 @@ import com.cloud.network.Site2SiteVpnConnection; import com.cloud.network.Site2SiteVpnGateway; import com.cloud.network.VirtualRouterProvider; import com.cloud.network.VpnUser; +import com.cloud.network.VpnUserVO; import com.cloud.network.as.AutoScalePolicy; import com.cloud.network.as.AutoScaleVmGroup; import com.cloud.network.as.AutoScaleVmProfile; @@ -218,6 +218,7 @@ import 
com.cloud.network.as.Condition; import com.cloud.network.as.ConditionVO; import com.cloud.network.as.Counter; import com.cloud.network.dao.IPAddressVO; +import com.cloud.network.dao.LoadBalancerVO; import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.router.VirtualRouter; @@ -227,6 +228,7 @@ import com.cloud.network.rules.HealthCheckPolicy; import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.LoadBalancerContainer.Scheme; import com.cloud.network.rules.PortForwardingRule; +import com.cloud.network.rules.PortForwardingRuleVO; import com.cloud.network.rules.StaticNatRule; import com.cloud.network.rules.StickinessPolicy; import com.cloud.network.security.SecurityGroup; @@ -956,9 +958,9 @@ public class ApiResponseHelper implements ResponseGenerator { } @Override - public StoragePoolForMigrationResponse createStoragePoolForMigrationResponse(StoragePool pool) { + public StoragePoolResponse createStoragePoolForMigrationResponse(StoragePool pool) { List viewPools = ApiDBUtils.newStoragePoolView(pool); - List listPools = ViewResponseHelper.createStoragePoolForMigrationResponse( + List listPools = ViewResponseHelper.createStoragePoolForMigrationResponse( viewPools.toArray(new StoragePoolJoinVO[viewPools.size()])); assert listPools != null && listPools.size() == 1 : "There should be one storage pool returned"; return listPools.get(0); @@ -1681,6 +1683,7 @@ public class ApiResponseHelper implements ResponseGenerator { return ApiDBUtils.newEventResponse(vEvent); } + private List sumCapacities(List hostCapacities) { Map totalCapacityMap = new HashMap(); Map usedCapacityMap = new HashMap(); @@ -1827,6 +1830,7 @@ public class ApiResponseHelper implements ResponseGenerator { return summedCapacities; } + @Override public List createCapacityResponse(List result, DecimalFormat format) { List capacityResponses = new ArrayList(); @@ -2089,9 +2093,14 @@ public class ApiResponseHelper implements ResponseGenerator { CapabilityResponse eIp = new CapabilityResponse(); eIp.setName(Capability.ElasticIp.getName()); - eIp.setValue(offering.getElasticLb() ? "true" : "false"); + eIp.setValue(offering.getElasticIp() ? "true" : "false"); staticNatCapResponse.add(eIp); + CapabilityResponse associatePublicIp = new CapabilityResponse(); + associatePublicIp.setName(Capability.AssociatePublicIP.getName()); + associatePublicIp.setValue(offering.getAssociatePublicIP() ? 
"true" : "false"); + staticNatCapResponse.add(associatePublicIp); + svcRsp.setCapabilities(staticNatCapResponse); } @@ -2304,6 +2313,13 @@ public class ApiResponseHelper implements ResponseGenerator { } response.setTags(tagResponses); + if(network.getNetworkACLId() != null){ + NetworkACL acl = ApiDBUtils.findByNetworkACLId(network.getNetworkACLId()); + if(acl != null){ + response.setAclId(acl.getUuid()); + } + } + response.setObjectName("network"); return response; } @@ -2432,6 +2448,9 @@ public class ApiResponseHelper implements ResponseGenerator { hpvCapabilitiesResponse.setHypervisorVersion(hpvCapabilities.getHypervisorVersion()); hpvCapabilitiesResponse.setIsSecurityGroupEnabled(hpvCapabilities.isSecurityGroupEnabled()); hpvCapabilitiesResponse.setMaxGuestsLimit(hpvCapabilities.getMaxGuestsLimit()); + hpvCapabilitiesResponse.setMaxDataVolumesLimit(hpvCapabilities.getMaxDataVolumesLimit()); + hpvCapabilitiesResponse.setMaxHostsPerCluster(hpvCapabilities.getMaxHostsPerCluster()); + hpvCapabilitiesResponse.setIsStorageMotionSupported(hpvCapabilities.isStorageMotionSupported()); return hpvCapabilitiesResponse; } @@ -3328,6 +3347,17 @@ public class ApiResponseHelper implements ResponseGenerator { NetworkVO network = _entityMgr.findByIdIncludingRemoved(NetworkVO.class, usageRecord.getNetworkId().toString()); usageRecResponse.setNetworkId(network.getUuid()); + } else if(usageRecord.getUsageType() == UsageTypes.VM_DISK_IO_READ || usageRecord.getUsageType() == UsageTypes.VM_DISK_IO_WRITE || + usageRecord.getUsageType() == UsageTypes.VM_DISK_BYTES_READ || usageRecord.getUsageType() == UsageTypes.VM_DISK_BYTES_WRITE){ + //Device Type + usageRecResponse.setType(usageRecord.getType()); + //VM Instance Id + VMInstanceVO vm = _entityMgr.findByIdIncludingRemoved(VMInstanceVO.class, usageRecord.getUsageId().toString()); + usageRecResponse.setUsageId(vm.getUuid()); + //Volume ID + VolumeVO volume = _entityMgr.findByIdIncludingRemoved(VolumeVO.class, usageRecord.getUsageId().toString()); + usageRecResponse.setUsageId(volume.getUuid()); + } else if(usageRecord.getUsageType() == UsageTypes.VOLUME){ //Volume ID VolumeVO volume = _entityMgr.findByIdIncludingRemoved(VolumeVO.class, usageRecord.getUsageId().toString()); @@ -3356,11 +3386,12 @@ public class ApiResponseHelper implements ResponseGenerator { } else if(usageRecord.getUsageType() == UsageTypes.LOAD_BALANCER_POLICY){ //Load Balancer Policy ID - usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); - + LoadBalancerVO lb = _entityMgr.findByIdIncludingRemoved(LoadBalancerVO.class, usageRecord.getUsageId().toString()); + usageRecResponse.setUsageId(lb.getUuid()); } else if(usageRecord.getUsageType() == UsageTypes.PORT_FORWARDING_RULE){ //Port Forwarding Rule ID - usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); + PortForwardingRuleVO pf = _entityMgr.findByIdIncludingRemoved(PortForwardingRuleVO.class, usageRecord.getUsageId().toString()); + usageRecResponse.setUsageId(pf.getUuid()); } else if(usageRecord.getUsageType() == UsageTypes.NETWORK_OFFERING){ //Network Offering Id @@ -3371,7 +3402,8 @@ public class ApiResponseHelper implements ResponseGenerator { } else if(usageRecord.getUsageType() == UsageTypes.VPN_USERS){ //VPN User ID - usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); + VpnUserVO vpnUser = _entityMgr.findByIdIncludingRemoved(VpnUserVO.class, usageRecord.getUsageId().toString()); + usageRecResponse.setUsageId(vpnUser.getUuid()); } else if(usageRecord.getUsageType() == UsageTypes.SECURITY_GROUP){ 
//Security Group Id diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 497be50a766..0cd1d61d4e9 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -65,7 +65,6 @@ import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; import org.apache.cloudstack.api.command.admin.router.ListRoutersCmd; import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.user.ListUsersCmd; -import com.cloud.event.ActionEventUtils; import org.apache.cloudstack.api.command.user.account.ListAccountsCmd; import org.apache.cloudstack.api.command.user.account.ListProjectAccountsCmd; import org.apache.cloudstack.api.command.user.event.ListEventsCmd; @@ -81,7 +80,6 @@ import org.apache.cloudstack.api.command.user.volume.ListVolumesCmd; import org.apache.cloudstack.api.command.user.zone.ListZonesByCmd; import org.apache.cloudstack.api.response.ExceptionResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.cloudstack.region.RegionManager; import org.apache.commons.codec.binary.Base64; import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; @@ -123,6 +121,7 @@ import com.cloud.configuration.ConfigurationVO; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; +import com.cloud.event.ActionEventUtils; import com.cloud.exception.AccountLimitException; import com.cloud.exception.CloudAuthenticationException; import com.cloud.exception.InsufficientCapacityException; @@ -148,6 +147,7 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionProxyObject; @Component public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiServerService { @@ -166,8 +166,6 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Inject List _pluggableServices; @Inject List _apiAccessCheckers; - @Inject private final RegionManager _regionMgr = null; - private static int _workerCount = 0; private static ApiServer s_instance = null; private static final DateFormat _dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"); @@ -197,7 +195,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer public void init() { Integer apiPort = null; // api port, null by default SearchCriteria sc = _configDao.createSearchCriteria(); - sc.addAnd("name", SearchCriteria.Op.EQ, "integration.api.port"); + sc.addAnd("name", SearchCriteria.Op.EQ, Config.IntegrationAPIPort.key()); List values = _configDao.search(sc, null); if ((values != null) && (values.size() > 0)) { ConfigurationVO apiPortConfig = values.get(0); @@ -210,7 +208,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer String strSnapshotLimit = configs.get(Config.ConcurrentSnapshotsThresholdPerHost.key()); if (strSnapshotLimit != null) { Long snapshotLimit = NumbersUtil.parseLong(strSnapshotLimit, 1L); - if (snapshotLimit <= 0) { + if (snapshotLimit.longValue() <= 0) { s_logger.debug("Global config parameter " + Config.ConcurrentSnapshotsThresholdPerHost.toString() + " is less or equal 0; defaulting to unlimited"); } else { @@ -219,8 +217,12 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } 
Set> cmdClasses = new HashSet>(); - for(PluggableService pluggableService: _pluggableServices) + for(PluggableService pluggableService: _pluggableServices) { cmdClasses.addAll(pluggableService.getCommands()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Discovered plugin " + pluggableService.getClass().getSimpleName()); + } + } for(Class cmdClass: cmdClasses) { APICommand at = cmdClass.getAnnotation(APICommand.class); @@ -387,9 +389,16 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage(), ex); } catch (PermissionDeniedException ex){ - ArrayList idList = ex.getIdProxyList(); + ArrayList idList = ex.getIdProxyList(); if (idList != null) { - s_logger.info("PermissionDenied: " + ex.getMessage() + " on uuids: [" + StringUtils.listToCsvTags(idList) + "]"); + StringBuffer buf = new StringBuffer(); + for (ExceptionProxyObject obj : idList){ + buf.append(obj.getDescription()); + buf.append(":"); + buf.append(obj.getUuid()); + buf.append(" "); + } + s_logger.info("PermissionDenied: " + ex.getMessage() + " on objs: [" + buf.toString() + "]"); } else { s_logger.info("PermissionDenied: " + ex.getMessage()); } @@ -546,6 +555,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } } + @SuppressWarnings("unchecked") private void buildAsyncListResponse(BaseListCmd command, Account account) { List responses = ((ListResponse) command.getResponseObject()).getResponses(); if (responses != null && responses.size() > 0) { @@ -840,7 +850,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Override public void logoutUser(long userId) { - _accountMgr.logoutUser(Long.valueOf(userId)); + _accountMgr.logoutUser(userId); return; } @@ -1067,7 +1077,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer apiResponse.setErrorCode(ex.getErrorCode().getHttpCode()); apiResponse.setErrorText(ex.getDescription()); apiResponse.setResponseName(responseName); - ArrayList idList = ex.getIdProxyList(); + ArrayList idList = ex.getIdProxyList(); if (idList != null) { for (int i=0; i < idList.size(); i++) { apiResponse.addProxyObject(idList.get(i)); diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 331404c382d..b0d8d9a9abe 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -116,6 +116,8 @@ import com.cloud.api.query.vo.UserAccountJoinVO; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.api.query.vo.VolumeJoinVO; import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -295,6 +297,8 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { @Inject private AffinityGroupJoinDao _affinityGroupJoinDao; + @Inject + private DedicatedResourceDao _dedicatedDao; /* (non-Javadoc) * @see com.cloud.api.query.QueryService#searchForUsers(org.apache.cloudstack.api.command.admin.user.ListUsersCmd) */ @@ -1032,7 +1036,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { return response; } - + @Override public ListResponse searchForInternalLbVms(ListInternalLBVMsCmd cmd) { Pair, Integer> result = searchForRoutersInternal(cmd, cmd.getId(), 
cmd.getRouterName(), @@ -1048,7 +1052,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { private Pair, Integer> searchForRoutersInternal(BaseListProjectAndAccountResourcesCmd cmd, Long id, String name, String state, Long zoneId, Long podId, Long hostId, String keyword, Long networkId, Long vpcId, Boolean forVpc, String role, String zoneType) { - + Account caller = UserContext.current().getCaller(); List permittedAccounts = new ArrayList(); @@ -1140,7 +1144,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { if (vpcId != null) { sc.setParameters("vpcId", vpcId); } - + if (role != null) { sc.setParameters("role", role); } @@ -2299,7 +2303,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { UserVmVO vmInstance = _userVmDao.findById(vmId); if ((vmInstance == null) || (vmInstance.getRemoved() != null)) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find a virtual machine with specified id"); - ex.addProxyObject(vmInstance, vmId, "vmId"); + ex.addProxyObject(vmId.toString(), "vmId"); throw ex; } @@ -2375,12 +2379,14 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sc.addAnd("name", SearchCriteria.Op.SC, ssc); } - if (domainId != null) { + /*List all resources due to Explicit Dedication except the dedicated resources of other account + * if (domainId != null) { // for domainId != null // right now, we made the decision to only list zones associated // with this domain, private zone sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); - } else if (account.getType() == Account.ACCOUNT_TYPE_NORMAL) { + } else */ + if (account.getType() == Account.ACCOUNT_TYPE_NORMAL) { // it was decided to return all zones for the user's domain, and // everything above till root // list all zones belonging to this domain, and all of its @@ -2410,6 +2416,12 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // remove disabled zones sc.addAnd("allocationState", SearchCriteria.Op.NEQ, Grouping.AllocationState.Disabled); + //remove Dedicated zones not dedicated to this domainId or subdomainId + List dedicatedZoneIds = removeDedicatedZoneNotSuitabe(domainIds); + if(!dedicatedZoneIds.isEmpty()){ + sdc.addAnd("id", SearchCriteria.Op.NIN, dedicatedZoneIds.toArray(new Object[dedicatedZoneIds.size()])); + } + } else if (account.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || account.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { // it was decided to return all zones for the domain admin, and // everything above till root, as well as zones till the domain leaf @@ -2439,6 +2451,12 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // remove disabled zones sc.addAnd("allocationState", SearchCriteria.Op.NEQ, Grouping.AllocationState.Disabled); + + //remove Dedicated zones not dedicated to this domainId or subdomainId + List dedicatedZoneIds = removeDedicatedZoneNotSuitabe(domainIds); + if(!dedicatedZoneIds.isEmpty()){ + sdc.addAnd("id", SearchCriteria.Op.NIN, dedicatedZoneIds.toArray(new Object[dedicatedZoneIds.size()])); + } } // handle available=FALSE option, only return zones with at least one VM running there @@ -2464,6 +2482,17 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { return _dcJoinDao.searchAndCount(sc, searchFilter); } + private List removeDedicatedZoneNotSuitabe(List domainIds) { + //remove dedicated zone of other domain + List dedicatedZoneIds = new 
ArrayList(); + List dedicatedResources = _dedicatedDao.listZonesNotInDomainIds(domainIds); + for (DedicatedResourceVO dr : dedicatedResources) { + if(dr != null) { + dedicatedZoneIds.add(dr.getDataCenterId()); + } + } + return dedicatedZoneIds; + } // This method is used for permissions check for both disk and service // offerings @@ -2555,14 +2584,14 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { if (isIso && template.getFormat() != ImageFormat.ISO) { s_logger.error("Template Id " + templateId + " is not an ISO"); InvalidParameterValueException ex = new InvalidParameterValueException("Specified Template Id is not an ISO"); - ex.addProxyObject(template, templateId, "templateId"); + ex.addProxyObject(template.getUuid(), "templateId"); throw ex; }// If ISO not requested then it shouldn't be an ISO. if (!isIso && template.getFormat() == ImageFormat.ISO) { s_logger.error("Incorrect format of the template id " + templateId); InvalidParameterValueException ex = new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the specified template id"); - ex.addProxyObject(template, templateId, "templateId"); + ex.addProxyObject(template.getUuid(), "templateId"); throw ex; } @@ -2796,9 +2825,10 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } public ListResponse listAffinityGroups(Long affinityGroupId, String affinityGroupName, - String affinityGroupType, Long vmId, Long startIndex, Long pageSize) { + String affinityGroupType, Long vmId, String accountName, Long domainId, boolean isRecursive, + boolean listAll, Long startIndex, Long pageSize) { Pair, Integer> result = listAffinityGroupsInternal(affinityGroupId, - affinityGroupName, affinityGroupType, vmId, startIndex, pageSize); + affinityGroupName, affinityGroupType, vmId, accountName, domainId, isRecursive, listAll, startIndex, pageSize); ListResponse response = new ListResponse(); List agResponses = ViewResponseHelper.createAffinityGroupResponses(result.first()); response.setResponses(agResponses, result.second()); @@ -2807,12 +2837,12 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { public Pair, Integer> listAffinityGroupsInternal(Long affinityGroupId, - String affinityGroupName, String affinityGroupType, Long vmId, Long startIndex, Long pageSize) { + String affinityGroupName, String affinityGroupType, Long vmId, String accountName, Long domainId, + boolean isRecursive, boolean listAll, Long startIndex, Long pageSize) { Account caller = UserContext.current().getCaller(); Long accountId = caller.getAccountId(); - Long domainId = caller.getDomainId(); if (vmId != null) { UserVmVO userVM = _userVmDao.findById(vmId); @@ -2824,20 +2854,25 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { return listAffinityGroupsByVM(vmId.longValue(), startIndex, pageSize); } + List permittedAccounts = new ArrayList(); + Ternary domainIdRecursiveListProject = new Ternary( + domainId, isRecursive, null); + _accountMgr.buildACLSearchParameters(caller, affinityGroupId, accountName, null, permittedAccounts, + domainIdRecursiveListProject, listAll, true); + domainId = domainIdRecursiveListProject.first(); + isRecursive = domainIdRecursiveListProject.second(); + ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); + Filter searchFilter = new Filter(AffinityGroupJoinVO.class, "id", true, startIndex, pageSize); SearchBuilder groupSearch = _affinityGroupJoinDao.createSearchBuilder(); + 
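The removeDedicatedZoneNotSuitabe helper above collects the ids of zones dedicated to domains outside the caller's domain tree so the zone listing can exclude them with a NOT IN condition. A simplified sketch of that filtering; the data shapes are illustrative, not the actual DAO types.

# Sketch: given (zone_id, dedicated_domain_id) pairs, return the zone ids that
# must be hidden from a caller whose visible domain ids are caller_domain_ids.
def zones_to_exclude(dedicated, caller_domain_ids):
    return [zone_id for zone_id, domain_id in dedicated
            if domain_id not in caller_domain_ids]

if __name__ == "__main__":
    dedicated = [(10, 2), (11, 5), (12, 3)]
    print(zones_to_exclude(dedicated, {1, 2, 3}))  # -> [11]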
_accountMgr.buildACLViewSearchBuilder(groupSearch, domainId, isRecursive, permittedAccounts, + listProjectResourcesCriteria); + groupSearch.select(null, Func.DISTINCT, groupSearch.entity().getId()); // select // distinct SearchCriteria sc = groupSearch.create(); - - if (accountId != null) { - sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); - } - - if (domainId != null) { - sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); - } + _accountMgr.buildACLViewSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); if (affinityGroupId != null) { sc.addAnd("id", SearchCriteria.Op.EQ, affinityGroupId); @@ -2852,8 +2887,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } - Pair, Integer> uniqueGroupsPair = _affinityGroupJoinDao.searchAndCount(sc, - searchFilter); + Pair, Integer> uniqueGroupsPair = _affinityGroupJoinDao.searchAndCount(sc, searchFilter); // search group details by ids Integer count = uniqueGroupsPair.second(); if (count.intValue() == 0) { diff --git a/server/src/com/cloud/api/query/ViewResponseHelper.java b/server/src/com/cloud/api/query/ViewResponseHelper.java index af8455b2d49..b98cea3f778 100644 --- a/server/src/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/com/cloud/api/query/ViewResponseHelper.java @@ -16,11 +16,28 @@ // under the License. package com.cloud.api.query; -import java.util.ArrayList; -import java.util.EnumSet; -import java.util.Hashtable; -import java.util.List; - +import com.cloud.api.ApiDBUtils; +import com.cloud.api.query.vo.AccountJoinVO; +import com.cloud.api.query.vo.AffinityGroupJoinVO; +import com.cloud.api.query.vo.AsyncJobJoinVO; +import com.cloud.api.query.vo.DataCenterJoinVO; +import com.cloud.api.query.vo.DiskOfferingJoinVO; +import com.cloud.api.query.vo.DomainRouterJoinVO; +import com.cloud.api.query.vo.EventJoinVO; +import com.cloud.api.query.vo.HostJoinVO; +import com.cloud.api.query.vo.InstanceGroupJoinVO; +import com.cloud.api.query.vo.ProjectAccountJoinVO; +import com.cloud.api.query.vo.ProjectInvitationJoinVO; +import com.cloud.api.query.vo.ProjectJoinVO; +import com.cloud.api.query.vo.ResourceTagJoinVO; +import com.cloud.api.query.vo.SecurityGroupJoinVO; +import com.cloud.api.query.vo.ServiceOfferingJoinVO; +import com.cloud.api.query.vo.StoragePoolJoinVO; +import com.cloud.api.query.vo.UserAccountJoinVO; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.api.query.vo.VolumeJoinVO; +import com.cloud.user.Account; +import com.cloud.user.UserContext; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ApiConstants.HostDetails; import org.apache.cloudstack.api.ApiConstants.VMDetails; @@ -29,8 +46,8 @@ import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainRouterResponse; import org.apache.cloudstack.api.response.EventResponse; -import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.HostForMigrationResponse; +import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.InstanceGroupResponse; import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.ProjectAccountResponse; @@ -41,37 +58,19 @@ import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import 
org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.TemplateResponse; -import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; -import com.cloud.api.ApiDBUtils; -import com.cloud.api.query.vo.AccountJoinVO; -import com.cloud.api.query.vo.AffinityGroupJoinVO; -import com.cloud.api.query.vo.AsyncJobJoinVO; -import com.cloud.api.query.vo.DataCenterJoinVO; -import com.cloud.api.query.vo.DiskOfferingJoinVO; -import com.cloud.api.query.vo.DomainRouterJoinVO; -import com.cloud.api.query.vo.EventJoinVO; -import com.cloud.api.query.vo.HostJoinVO; + import com.cloud.api.query.vo.ImageStoreJoinVO; -import com.cloud.api.query.vo.InstanceGroupJoinVO; -import com.cloud.api.query.vo.ProjectAccountJoinVO; -import com.cloud.api.query.vo.ProjectInvitationJoinVO; -import com.cloud.api.query.vo.ProjectJoinVO; -import com.cloud.api.query.vo.ResourceTagJoinVO; -import com.cloud.api.query.vo.SecurityGroupJoinVO; -import com.cloud.api.query.vo.ServiceOfferingJoinVO; -import com.cloud.api.query.vo.StoragePoolJoinVO; import com.cloud.api.query.vo.TemplateJoinVO; -import com.cloud.api.query.vo.UserAccountJoinVO; -import com.cloud.api.query.vo.UserVmJoinVO; -import com.cloud.api.query.vo.VolumeJoinVO; -import com.cloud.user.Account; -import com.cloud.user.UserContext; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.Hashtable; +import java.util.List; /** * Helper class to generate response from DB view VO objects. @@ -289,6 +288,7 @@ public class ViewResponseHelper { return new ArrayList(vrDataList.values()); } + public static List createImageStoreResponse(ImageStoreJoinVO... stores) { Hashtable vrDataList = new Hashtable(); // Initialise the vrdatalist with the input data @@ -306,12 +306,12 @@ public class ViewResponseHelper { } return new ArrayList(vrDataList.values()); } - - public static List createStoragePoolForMigrationResponse(StoragePoolJoinVO... pools) { - Hashtable vrDataList = new Hashtable(); + + public static List createStoragePoolForMigrationResponse(StoragePoolJoinVO... 
pools) { + Hashtable vrDataList = new Hashtable(); // Initialise the vrdatalist with the input data for (StoragePoolJoinVO vr : pools) { - StoragePoolForMigrationResponse vrData = vrDataList.get(vr.getId()); + StoragePoolResponse vrData = vrDataList.get(vr.getId()); if ( vrData == null ) { // first time encountering this vm vrData = ApiDBUtils.newStoragePoolForMigrationResponse(vr); @@ -321,7 +321,7 @@ public class ViewResponseHelper { } vrDataList.put(vr.getId(), vrData); } - return new ArrayList(vrDataList.values()); + return new ArrayList(vrDataList.values()); } @@ -415,7 +415,7 @@ public class ViewResponseHelper { } return new ArrayList(vrDataList.values()); } - + public static List createAffinityGroupResponses(List groups) { Hashtable vrDataList = new Hashtable(); for (AffinityGroupJoinVO vr : groups) { diff --git a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java index ce20562d5f7..56e4d0a369e 100644 --- a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java @@ -68,6 +68,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase { @@ -31,9 +29,9 @@ public interface StoragePoolJoinDao extends GenericDao StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO host); - StoragePoolForMigrationResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO host); + StoragePoolResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO host); - StoragePoolForMigrationResponse setStoragePoolForMigrationResponse(StoragePoolForMigrationResponse response, + StoragePoolResponse setStoragePoolForMigrationResponse(StoragePoolResponse response, StoragePoolJoinVO host); List newStoragePoolView(StoragePool group); diff --git a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index e401f4474d7..a6355ad9867 100644 --- a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -85,6 +84,9 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase implem Double networkKbWrite = Double.valueOf(vmStats.getNetworkWriteKBs()); userVmResponse.setNetworkKbsWrite(networkKbWrite.longValue()); + + Double diskKbsRead = Double.valueOf(vmStats.getDiskReadKBs()); + userVmResponse.setDiskKbsRead(diskKbsRead.longValue()); + + Double diskKbsWrite = Double.valueOf(vmStats.getDiskWriteKBs()); + userVmResponse.setDiskKbsWrite(diskKbsWrite.longValue()); + + Double diskIORead = Double.valueOf(vmStats.getDiskReadIOs()); + userVmResponse.setDiskIORead(diskIORead.longValue()); + + Double diskIOWrite = Double.valueOf(vmStats.getDiskWriteIOs()); + userVmResponse.setDiskIOWrite(diskIOWrite.longValue()); } } diff --git a/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java b/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java index 598e1d1d8df..e87a1018bdd 100644 --- a/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java @@ -75,6 +75,9 @@ public class 
ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit @Column(name="limit_cpu_use") private boolean limitCpuUse; + @Column(name="is_volatile") + private boolean volatileVm; + @Column(name="host_tag") private String hostTag; @@ -318,5 +321,13 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit this.deploymentPlanner = deploymentPlanner; } + public boolean getVolatileVm() { + return volatileVm; + } + + public void setVolatileVm(boolean volatileVm) { + this.volatileVm = volatileVm; + } + } diff --git a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java index 07347c52301..5ec5447b57a 100644 --- a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -34,6 +34,8 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.hypervisor.Hypervisor.HypervisorType; + /** * Storage Pool DB view. * @@ -100,7 +102,7 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I @Column(name="data_center_type") private String zoneType; - + @Column(name="pod_id") private long podId; @@ -134,6 +136,11 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I @Enumerated(value = EnumType.STRING) private ScopeType scope; + + @Column(name = "hypervisor") + @Enumerated(value = EnumType.STRING) + private HypervisorType hypervisor; + /** * @return the scope */ @@ -148,6 +155,14 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I this.scope = scope; } + public HypervisorType getHypervisor() { + return hypervisor; + } + + public void setHypervisor(HypervisorType hypervisor) { + this.hypervisor = hypervisor; + } + @Override public long getId() { return id; @@ -294,7 +309,7 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I public void setZoneType(String zoneType) { this.zoneType = zoneType; } - + public long getPodId() { return podId; } diff --git a/server/src/com/cloud/api/response/ApiResponseSerializer.java b/server/src/com/cloud/api/response/ApiResponseSerializer.java index 965660a52cc..d2e5130931d 100644 --- a/server/src/com/cloud/api/response/ApiResponseSerializer.java +++ b/server/src/com/cloud/api/response/ApiResponseSerializer.java @@ -21,8 +21,10 @@ import com.cloud.api.ApiResponseGsonHelper; import com.cloud.api.ApiServer; import com.cloud.utils.encoding.URLEncoder; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionProxyObject; import com.google.gson.Gson; import com.google.gson.annotations.SerializedName; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.ResponseObject; @@ -215,22 +217,27 @@ public class ApiResponseSerializer { subObj.setObjectName(serializedName.value()); } serializeResponseObjXML(sb, subObj); - } else { - // Only exception reponses carry a list of uuid - // strings. + } else if (value instanceof ExceptionProxyObject) { + // Only exception reponses carry a list of + // ExceptionProxyObject objects. + ExceptionProxyObject idProxy = (ExceptionProxyObject) value; // If this is the first IdentityProxy field // encountered, put in a uuidList tag. 
if (!usedUuidList) { - sb.append("<").append(serializedName.value()).append(">"); + sb.append("<" + serializedName.value() + ">"); usedUuidList = true; } - sb.append("").append(value).append(""); - // We have removed uuid property field due to removal of IdentityProxy class. + sb.append("<" + "uuid" + ">" + idProxy.getUuid() + ""); + // Append the new descriptive property also. + String idFieldName = idProxy.getDescription(); + if (idFieldName != null) { + sb.append("<" + "uuidProperty" + ">" + idFieldName + ""); + } } } if (usedUuidList) { - // close the uuidList. - sb.append(""); + // close the uuidList. + sb.append(""); } } else if (fieldValue instanceof Date) { sb.append("<").append(serializedName.value()).append(">").append(BaseCmd.getDateString((Date) fieldValue)). diff --git a/server/src/com/cloud/async/AsyncJobManagerImpl.java b/server/src/com/cloud/async/AsyncJobManagerImpl.java index 47d793fd0b0..0101a8a0abf 100644 --- a/server/src/com/cloud/async/AsyncJobManagerImpl.java +++ b/server/src/com/cloud/async/AsyncJobManagerImpl.java @@ -621,11 +621,18 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, // limit to 100 jobs per turn, this gives cleanup throughput as 600 jobs per minute // hopefully this will be fast enough to balance potential growth of job table - List l = _jobDao.getExpiredJobs(cutTime, 100); - if(l != null && l.size() > 0) { - for(AsyncJobVO job : l) { - expungeAsyncJob(job); - } + //1) Expire unfinished jobs that weren't processed yet + List l = _jobDao.getExpiredUnfinishedJobs(cutTime, 100); + for(AsyncJobVO job : l) { + s_logger.trace("Expunging unfinished job " + job); + expungeAsyncJob(job); + } + + //2) Expunge finished jobs + List completedJobs = _jobDao.getExpiredCompletedJobs(cutTime, 100); + for(AsyncJobVO job : completedJobs) { + s_logger.trace("Expunging completed job " + job); + expungeAsyncJob(job); } // forcefully cancel blocking queue items if they've been staying there for too long diff --git a/server/src/com/cloud/async/dao/AsyncJobDao.java b/server/src/com/cloud/async/dao/AsyncJobDao.java index 9d207593574..9ab9b224c10 100644 --- a/server/src/com/cloud/async/dao/AsyncJobDao.java +++ b/server/src/com/cloud/async/dao/AsyncJobDao.java @@ -26,6 +26,7 @@ import com.cloud.utils.db.GenericDao; public interface AsyncJobDao extends GenericDao { AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId); List findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId); - List getExpiredJobs(Date cutTime, int limit); + List getExpiredUnfinishedJobs(Date cutTime, int limit); void resetJobProcess(long msid, int jobResultCode, String jobResultMessage); -} + List getExpiredCompletedJobs(Date cutTime, int limit); +} \ No newline at end of file diff --git a/server/src/com/cloud/async/dao/AsyncJobDaoImpl.java b/server/src/com/cloud/async/dao/AsyncJobDaoImpl.java index 4793a6edc12..b2c0d9cc4e0 100644 --- a/server/src/com/cloud/async/dao/AsyncJobDaoImpl.java +++ b/server/src/com/cloud/async/dao/AsyncJobDaoImpl.java @@ -42,17 +42,19 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements private static final Logger s_logger = Logger.getLogger(AsyncJobDaoImpl.class.getName()); private final SearchBuilder pendingAsyncJobSearch; - private final SearchBuilder pendingAsyncJobsSearch; - private final SearchBuilder expiringAsyncJobSearch; - - public AsyncJobDaoImpl() { - pendingAsyncJobSearch = createSearchBuilder(); - pendingAsyncJobSearch.and("instanceType", 
pendingAsyncJobSearch.entity().getInstanceType(), - SearchCriteria.Op.EQ); - pendingAsyncJobSearch.and("instanceId", pendingAsyncJobSearch.entity().getInstanceId(), - SearchCriteria.Op.EQ); - pendingAsyncJobSearch.and("status", pendingAsyncJobSearch.entity().getStatus(), - SearchCriteria.Op.EQ); + private final SearchBuilder pendingAsyncJobsSearch; + private final SearchBuilder expiringUnfinishedAsyncJobSearch; + private final SearchBuilder expiringCompletedAsyncJobSearch; + + + public AsyncJobDaoImpl() { + pendingAsyncJobSearch = createSearchBuilder(); + pendingAsyncJobSearch.and("instanceType", pendingAsyncJobSearch.entity().getInstanceType(), + SearchCriteria.Op.EQ); + pendingAsyncJobSearch.and("instanceId", pendingAsyncJobSearch.entity().getInstanceId(), + SearchCriteria.Op.EQ); + pendingAsyncJobSearch.and("status", pendingAsyncJobSearch.entity().getStatus(), + SearchCriteria.Op.EQ); pendingAsyncJobSearch.done(); pendingAsyncJobsSearch = createSearchBuilder(); @@ -64,27 +66,36 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements SearchCriteria.Op.EQ); pendingAsyncJobsSearch.done(); - expiringAsyncJobSearch = createSearchBuilder(); - expiringAsyncJobSearch.and("created", expiringAsyncJobSearch.entity().getCreated(), + expiringUnfinishedAsyncJobSearch = createSearchBuilder(); + expiringUnfinishedAsyncJobSearch.and("created", expiringUnfinishedAsyncJobSearch.entity().getCreated(), SearchCriteria.Op.LTEQ); - expiringAsyncJobSearch.done(); - } - - public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) { - SearchCriteria sc = pendingAsyncJobSearch.create(); - sc.setParameters("instanceType", instanceType); - sc.setParameters("instanceId", instanceId); - sc.setParameters("status", AsyncJobResult.STATUS_IN_PROGRESS); - - List l = listIncludingRemovedBy(sc); - if(l != null && l.size() > 0) { - if(l.size() > 1) { - s_logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job"); - } - - return l.get(0); - } - return null; + expiringUnfinishedAsyncJobSearch.and("completeMsId", expiringUnfinishedAsyncJobSearch.entity().getCompleteMsid(), SearchCriteria.Op.NULL); + expiringUnfinishedAsyncJobSearch.and("jobStatus", expiringUnfinishedAsyncJobSearch.entity().getStatus(), SearchCriteria.Op.EQ); + expiringUnfinishedAsyncJobSearch.done(); + + expiringCompletedAsyncJobSearch = createSearchBuilder(); + expiringCompletedAsyncJobSearch.and("created", expiringCompletedAsyncJobSearch.entity().getCreated(), + SearchCriteria.Op.LTEQ); + expiringCompletedAsyncJobSearch.and("completeMsId", expiringCompletedAsyncJobSearch.entity().getCompleteMsid(), SearchCriteria.Op.NNULL); + expiringCompletedAsyncJobSearch.and("jobStatus", expiringCompletedAsyncJobSearch.entity().getStatus(), SearchCriteria.Op.NEQ); + expiringCompletedAsyncJobSearch.done(); + } + + public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId) { + SearchCriteria sc = pendingAsyncJobSearch.create(); + sc.setParameters("instanceType", instanceType); + sc.setParameters("instanceId", instanceId); + sc.setParameters("status", AsyncJobResult.STATUS_IN_PROGRESS); + + List l = listIncludingRemovedBy(sc); + if(l != null && l.size() > 0) { + if(l.size() > 1) { + s_logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job"); + } + + return l.get(0); + } + return null; } public List findInstancePendingAsyncJobs(AsyncJob.Type instanceType, Long accountId) { @@ -99,9 +110,20 @@ public class AsyncJobDaoImpl extends GenericDaoBase 
implements return listBy(sc); } - public List getExpiredJobs(Date cutTime, int limit) { - SearchCriteria sc = expiringAsyncJobSearch.create(); + @Override + public List getExpiredUnfinishedJobs(Date cutTime, int limit) { + SearchCriteria sc = expiringUnfinishedAsyncJobSearch.create(); sc.setParameters("created", cutTime); + sc.setParameters("jobStatus", 0); + Filter filter = new Filter(AsyncJobVO.class, "created", true, 0L, (long)limit); + return listIncludingRemovedBy(sc, filter); + } + + @Override + public List getExpiredCompletedJobs(Date cutTime, int limit) { + SearchCriteria sc = expiringCompletedAsyncJobSearch.create(); + sc.setParameters("created", cutTime); + sc.setParameters("jobStatus", 0); Filter filter = new Filter(AsyncJobVO.class, "created", true, 0L, (long)limit); return listIncludingRemovedBy(sc, filter); } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index e1d3751f290..5ee0fad8643 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -180,7 +180,8 @@ public enum Config { RouterTemplateLXC("Advanced", NetworkManager.class, String.class, "router.template.lxc", "SystemVM Template (LXC)", "Name of the default router template on LXC.", null, ConfigurationParameterScope.zone.toString()), RouterExtraPublicNics("Advanced", NetworkManager.class, Integer.class, "router.extra.public.nics", "2", "specify extra public nics used for virtual router(up to 5)", "0-5"), StartRetry("Advanced", AgentManager.class, Integer.class, "start.retry", "10", "Number of times to retry create and start commands", null), - ScaleRetry("Advanced", AgentManager.class, Integer.class, "scale.retry", "2", "Number of times to retry scaling up the vm", null), + EnableDynamicallyScaleVm("Advanced", ManagementServer.class, Boolean.class, "enable.dynamic.scale.vm", "false", "Enables/Diables dynamically scaling a vm", null, ConfigurationParameterScope.zone.toString()), + ScaleRetry("Advanced", ManagementServer.class, Integer.class, "scale.retry", "2", "Number of times to retry scaling up the vm", null), StopRetryInterval("Advanced", HighAvailabilityManager.class, Integer.class, "stop.retry.interval", "600", "Time in seconds between retries to stop or destroy a vm" , null), StorageCleanupInterval("Advanced", StorageManager.class, Integer.class, "storage.cleanup.interval", "86400", "The interval (in seconds) to wait before running the storage cleanup thread.", null), StorageCleanupEnabled("Advanced", StorageManager.class, Boolean.class, "storage.cleanup.enabled", "true", "Enables/disables the storage cleanup thread.", null), @@ -228,6 +229,7 @@ public enum Config { NetworkGcInterval("Advanced", ManagementServer.class, Integer.class, "network.gc.interval", "600", "Seconds to wait before checking for networks to shutdown", null), CapacitySkipcountingHours("Advanced", ManagementServer.class, Integer.class, "capacity.skipcounting.hours", "3600", "Time (in seconds) to wait before release VM's cpu and memory when VM in stopped state", null), VmStatsInterval("Advanced", ManagementServer.class, Integer.class, "vm.stats.interval", "60000", "The interval (in milliseconds) when vm stats are retrieved from agents.", null), + VmDiskStatsInterval("Advanced", ManagementServer.class, Integer.class, "vm.disk.stats.interval", "0", "Interval (in seconds) to report vm disk statistics.", null), VmTransitionWaitInterval("Advanced", ManagementServer.class, Integer.class, "vm.tranisition.wait.interval", "3600", "Time 
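The cleanup rework above splits expired async jobs into two queries: unfinished jobs, which have no completing management-server id recorded, and completed jobs, which do. A simplified sketch of that split; the dict fields loosely mirror the async_job columns used in the hunk, and the additional job-status filter is omitted.

# Sketch: partition expired jobs by whether a completing management server id
# was recorded, so in-flight and finished jobs can be expunged separately.
from datetime import datetime, timedelta

def split_expired(jobs, cut_time):
    unfinished = [j for j in jobs if j["created"] <= cut_time and j["complete_msid"] is None]
    completed = [j for j in jobs if j["created"] <= cut_time and j["complete_msid"] is not None]
    return unfinished, completed

if __name__ == "__main__":
    now = datetime.utcnow()
    jobs = [
        {"id": 1, "created": now - timedelta(days=2), "complete_msid": None},
        {"id": 2, "created": now - timedelta(days=2), "complete_msid": 345},
        {"id": 3, "created": now, "complete_msid": None},
    ]
    print(split_expired(jobs, now - timedelta(days=1)))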
(in seconds) to wait before taking over a VM in transition state", null), VmDestroyForcestop("Advanced", ManagementServer.class, Boolean.class, "vm.destroy.forcestop", "false", "On destroy, force-stop takes this value ", null), @@ -408,7 +410,7 @@ public enum Config { VMSnapshotMax("Advanced", VMSnapshotManager.class, Integer.class, "vmsnapshot.max", "10", "Maximum vm snapshots for a vm", null), VMSnapshotCreateWait("Advanced", VMSnapshotManager.class, Integer.class, "vmsnapshot.create.wait", "1800", "In second, timeout for create vm snapshot", null), - CloudDnsName("Advanced", ManagementServer.class, String.class, "cloud.dns.name", "default", " DNS name of the cloud", null), + CloudDnsName("Advanced", ManagementServer.class, String.class, "cloud.dns.name", null, "DNS name of the cloud for the GSLB service", null), BlacklistedRoutes("Advanced", VpcManager.class, String.class, "blacklisted.routes", null, "Routes that are blacklisted, can not be used for Static Routes creation for the VPC Private Gateway", "routes", ConfigurationParameterScope.zone.toString()), diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index f0b6899c155..9005ee5a588 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -40,7 +40,6 @@ import javax.naming.directory.DirContext; import javax.naming.directory.InitialDirContext; import com.cloud.event.UsageEventUtils; -import com.cloud.utils.db.*; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.ApiConstants.LDAPParams; import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd; @@ -70,7 +69,13 @@ import org.apache.cloudstack.api.command.admin.zone.UpdateZoneCmd; import org.apache.cloudstack.api.command.user.network.ListNetworkOfferingsCmd; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.region.*; +import org.apache.cloudstack.region.PortableIp; +import org.apache.cloudstack.region.PortableIpDao; +import org.apache.cloudstack.region.PortableIpRange; +import org.apache.cloudstack.region.PortableIpRangeDao; +import org.apache.cloudstack.region.PortableIpRangeVO; +import org.apache.cloudstack.region.PortableIpVO; +import org.apache.cloudstack.region.Region; import org.apache.cloudstack.region.dao.RegionDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; @@ -94,6 +99,7 @@ import com.cloud.dc.DataCenterIpAddressVO; import com.cloud.dc.DataCenterLinkLocalIpAddressVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.DcDetailVO; +import com.cloud.dc.DedicatedResourceVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.Pod; import com.cloud.dc.PodVlanMapVO; @@ -106,6 +112,7 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterIpAddressDao; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; import com.cloud.dc.dao.DcDetailsDao; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; @@ -146,8 +153,8 @@ import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeVO; import 
com.cloud.network.dao.PhysicalNetworkVO; -import com.cloud.network.rules.LoadBalancerContainer.Scheme; import com.cloud.network.element.DhcpServiceProvider; +import com.cloud.network.rules.LoadBalancerContainer.Scheme; import com.cloud.network.vpc.VpcManager; import com.cloud.offering.DiskOffering; import com.cloud.offering.NetworkOffering; @@ -188,6 +195,11 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.StringUtils; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.NicIpAlias; @@ -196,6 +208,7 @@ import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.NicIpAliasDao; import com.cloud.vm.dao.NicIpAliasVO; import com.cloud.vm.dao.NicSecondaryIpDao; + import edu.emory.mathcs.backport.java.util.Arrays; @Component @@ -299,9 +312,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati NicSecondaryIpDao _nicSecondaryIpDao; @Inject NicIpAliasDao _nicIpAliasDao; - @Inject public ManagementService _mgr; + @Inject + DedicatedResourceDao _dedicatedDao; // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? @Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao; @@ -882,10 +896,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati */ } - Grouping.AllocationState allocationState = null; if (allocationStateStr != null && !allocationStateStr.isEmpty()) { try { - allocationState = Grouping.AllocationState.valueOf(allocationStateStr); + Grouping.AllocationState.valueOf(allocationStateStr); } catch (IllegalArgumentException ex) { throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationStateStr + "' to a supported state"); } @@ -942,6 +955,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new CloudRuntimeException("Failed to delete pod " + podId); } + // remove from dedicated resources + DedicatedResourceVO dr = _dedicatedDao.findByPodId(podId); + if (dr != null) { + _dedicatedDao.remove(dr.getId()); + } txn.commit(); return true; @@ -1307,10 +1325,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please enter a valid IPv6 address for IP6 DNS2"); } - Grouping.AllocationState allocationState = null; if (allocationStateStr != null && !allocationStateStr.isEmpty()) { try { - allocationState = Grouping.AllocationState.valueOf(allocationStateStr); + Grouping.AllocationState.valueOf(allocationStateStr); } catch (IllegalArgumentException ex) { throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationStateStr + "' to a supported state"); } @@ -1403,6 +1420,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (success) { // delete all capacity records for the zone _capacityDao.removeBy(null, zoneId, null, null, null); + // remove from dedicated resources + DedicatedResourceVO dr = _dedicatedDao.findByZoneId(zoneId); + if (dr != null) { + _dedicatedDao.remove(dr.getId()); + } } txn.commit(); @@ -1731,13 +1753,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // check if zone has necessary trafficTypes before 
enabling try { PhysicalNetwork mgmtPhyNetwork; - if (NetworkType.Advanced == zone.getNetworkType()) { - // zone should have a physical network with public and management traffiType - _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Public); - mgmtPhyNetwork = _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Management); - } else { // zone should have a physical network with management traffiType mgmtPhyNetwork = _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Management); + if (NetworkType.Advanced == zone.getNetworkType() && ! zone.isSecurityGroupEnabled() ) { + // advanced zone without SG should have a physical network with public Thpe + _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(zoneId, TrafficType.Public); } try { @@ -1797,15 +1817,20 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati try { txn.start(); // Create the new zone in the database - DataCenterVO zone = new DataCenterVO(zoneName, null, dns1, dns2, internalDns1, internalDns2, guestCidr, domain, domainId, zoneType, zoneToken, networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2); + DataCenterVO zone = new DataCenterVO(zoneName, null, dns1, dns2, internalDns1, internalDns2, guestCidr, null, null, zoneType, zoneToken, networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2); if (allocationStateStr != null && !allocationStateStr.isEmpty()) { Grouping.AllocationState allocationState = Grouping.AllocationState.valueOf(allocationStateStr); zone.setAllocationState(allocationState); } else { - // Zone will be disabled since 3.0. Admin shoul enable it after physical network and providers setup. + // Zone will be disabled since 3.0. Admin should enable it after physical network and providers setup. zone.setAllocationState(Grouping.AllocationState.Disabled); } zone = _zoneDao.persist(zone); + if (domainId != null) { + //zone is explicitly dedicated to this domain + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(zone.getId(), null, null, null, domainId, null); + _dedicatedDao.persist(dedicatedResource); + } // Create default system networks createDefaultSystemNetworks(zone.getId()); @@ -2292,7 +2317,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String endIP = cmd.getEndIp(); String newVlanGateway = cmd.getGateway(); String newVlanNetmask = cmd.getNetmask(); - Long userId = UserContext.current().getCallerUserId(); String vlanId = cmd.getVlan(); Boolean forVirtualNetwork = cmd.isForVirtualNetwork(); Long networkId = cmd.getNetworkID(); @@ -3014,9 +3038,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } + } else { + // when there is no dhcp support in the network. + if (!deletePublicIPRange(vlanDbId)) { + return false; } + _vlanDao.expunge(vlanDbId); + return true; } - throw new InvalidParameterValueException("One of the ips in the range is used to provide Dhcp service to this subnet. 
cannot delete this range as "); + } + return false; } @@ -3290,42 +3321,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - private void checkPrivateIpRangeErrors(Long podId, String startIP, String endIP) { - HostPodVO pod = _podDao.findById(podId); - if (pod == null) { - throw new InvalidParameterValueException("Please specify a valid pod."); - } - - // Check that the start and end IPs are valid - if (!NetUtils.isValidIp(startIP)) { - throw new InvalidParameterValueException("Please specify a valid start IP"); - } - - if (endIP != null && !NetUtils.isValidIp(endIP)) { - throw new InvalidParameterValueException("Please specify a valid end IP"); - } - - if (endIP != null && !NetUtils.validIpRange(startIP, endIP)) { - throw new InvalidParameterValueException("Please specify a valid IP range."); - } - - // Check that the IPs that are being added are compatible with the pod's - // CIDR - String cidrAddress = getCidrAddress(podId); - long cidrSize = getCidrSize(podId); - - if (endIP != null && !NetUtils.sameSubnetCIDR(startIP, endIP, cidrSize)) { - throw new InvalidParameterValueException("Please ensure that your start IP and end IP are in the same subnet, as per the pod's CIDR size."); - } - - if (!NetUtils.sameSubnetCIDR(startIP, cidrAddress, cidrSize)) { - throw new InvalidParameterValueException("Please ensure that your start IP is in the same subnet as the pod's CIDR address."); - } - - if (endIP != null && !NetUtils.sameSubnetCIDR(endIP, cidrAddress, cidrSize)) { - throw new InvalidParameterValueException("Please ensure that your end IP is in the same subnet as the pod's CIDR address."); - } - } private String getCidrAddress(String cidr) { String[] cidrPair = cidr.split("\\/"); @@ -3337,15 +3332,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return Integer.parseInt(cidrPair[1]); } - private String getCidrAddress(long podId) { - HostPodVO pod = _podDao.findById(podId); - return pod.getCidrAddress(); - } - - private long getCidrSize(long podId) { - HostPodVO pod = _podDao.findById(podId); - return pod.getCidrSize(); - } @Override public void checkPodCidrSubnets(long dcId, Long podIdToBeSkipped, String cidr) { @@ -4297,7 +4283,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati public boolean isOfferingForVpc(NetworkOffering offering) { boolean vpcProvider = _ntwkOffServiceMapDao.isProviderForNetworkOffering(offering.getId(), Provider.VPCVirtualRouter); - boolean internalLb = offering.getInternalLb(); return vpcProvider; } @@ -4454,6 +4439,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Note: This method will be used for entity name validations in the coming // releases (place holder for now) + @SuppressWarnings("unused") private void validateEntityName(String str) { String forbidden = "~!@#$%^&*()+="; char[] searchChars = forbidden.toCharArray(); @@ -4679,7 +4665,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String endIP = cmd.getEndIp(); String gateway = cmd.getGateway(); String netmask = cmd.getNetmask(); - Long userId = UserContext.current().getCallerUserId(); String vlanId = cmd.getVlan(); Region region = _regionDao.findById(regionId); diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index e7828ea6d54..5983aa7ad7c 100755 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ 
b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -682,19 +682,28 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + NetworkVO defaultNetwork = null; + if (dc.getNetworkType() == NetworkType.Advanced && dc.isSecurityGroupEnabled()) { + List networks = _networkDao.listByZoneSecurityGroup(dataCenterId); + if (networks == null || networks.size() == 0) { + throw new CloudRuntimeException("Cannot find a security group enabled network in SG Zone " + dc); + } + defaultNetwork = networks.get(0); + } else { TrafficType defaultTrafficType = TrafficType.Public; if (dc.getNetworkType() == NetworkType.Basic || dc.isSecurityGroupEnabled()) { defaultTrafficType = TrafficType.Guest; } - List defaultNetworks = _networkDao.listByZoneAndTrafficType(dataCenterId, defaultTrafficType); + // api should never allow this situation to happen if (defaultNetworks.size() != 1) { - throw new CloudRuntimeException("Found " + defaultNetworks.size() + " networks of type " + defaultTrafficType + " when expect to find 1"); + throw new CloudRuntimeException("Found " + defaultNetworks.size() + " networks of type " + + defaultTrafficType + " when expect to find 1"); + } + defaultNetwork = defaultNetworks.get(0); } - NetworkVO defaultNetwork = defaultNetworks.get(0); - List offerings = _networkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork, NetworkOffering.SystemManagementNetwork); List> networks = new ArrayList>(offerings.size() + 1); NicProfile defaultNic = new NicProfile(); diff --git a/server/src/com/cloud/dc/DedicatedResourceVO.java b/server/src/com/cloud/dc/DedicatedResourceVO.java new file mode 100644 index 00000000000..a4c88f57e02 --- /dev/null +++ b/server/src/com/cloud/dc/DedicatedResourceVO.java @@ -0,0 +1,136 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc; + +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; + +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +@Entity +@Table(name="dedicated_resources") +public class DedicatedResourceVO implements DedicatedResources{ + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name="id") + long id; + + @Column(name="data_center_id") + Long dataCenterId; + + @Column(name="pod_id") + Long podId; + + @Column(name="cluster_id") + Long clusterId; + + @Column(name="host_id") + Long hostId; + + @Column(name="uuid") + String uuid; + + @Column(name = "domain_id") + private Long domainId; + + @Column(name = "account_id") + private Long accountId; + + public DedicatedResourceVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public DedicatedResourceVO(Long dataCenterId, Long podId, Long clusterId, Long hostId, Long domainId, Long accountId) { + this.dataCenterId = dataCenterId; + this.podId = podId; + this.clusterId = clusterId; + this.hostId = hostId; + this.domainId = domainId; + this.accountId = accountId; + this.uuid = UUID.randomUUID().toString(); + } + + public long getId() { + return id; + } + + public Long getDataCenterId() { + return dataCenterId; + } + + public void setDataCenterId(long dataCenterId) { + this.dataCenterId = dataCenterId; + } + + public Long getPodId() { + return podId; + } + + public void setPodId(long podId) { + this.podId = podId; + } + + public Long getClusterId() { + return clusterId; + } + + public void setClusterId(long clusterId) { + this.clusterId = clusterId; + } + + public Long getHostId() { + return hostId; + } + + public void setHostId(long hostId) { + this.hostId = hostId; + } + + public DedicatedResourceVO(long dedicatedResourceId) { + this.id = dedicatedResourceId; + } + + public Long getDomainId() { + return domainId; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + public Long getAccountId() { + return accountId; + } + + public void setAccountId(Long accountId) { + this.accountId = accountId; + } + + public String getUuid() { + return this.uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } +} diff --git a/server/src/com/cloud/dc/dao/DedicatedResourceDao.java b/server/src/com/cloud/dc/dao/DedicatedResourceDao.java new file mode 100644 index 00000000000..a5d65d46c8e --- /dev/null +++ b/server/src/com/cloud/dc/dao/DedicatedResourceDao.java @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
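A dedication row is expected to set exactly one of the zone, pod, cluster or host columns, together with the owning domain and, optionally, a specific account. A minimal illustrative sketch using only the constructor defined above (the wrapper class and the ids are invented for the example):

import com.cloud.dc.DedicatedResourceVO;

public class DedicationRecordSketch {
    // Zone 1 dedicated to domain 2 (any account in that domain): only the zone column is set.
    static DedicatedResourceVO zoneDedicatedToDomain() {
        return new DedicatedResourceVO(1L, null, null, null, 2L, null);
    }

    // Host 10 dedicated to account 5 of domain 2: only the host column is set.
    static DedicatedResourceVO hostDedicatedToAccount() {
        return new DedicatedResourceVO(null, null, null, 10L, 2L, 5L);
    }
}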
+package com.cloud.dc.dao; + +import java.util.List; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GenericDao; + + +public interface DedicatedResourceDao extends GenericDao { + + DedicatedResourceVO findByZoneId(Long zoneId); + + DedicatedResourceVO findByPodId(Long podId); + + DedicatedResourceVO findByClusterId(Long clusterId); + + DedicatedResourceVO findByHostId(Long hostId); + + Pair, Integer> searchDedicatedHosts(Long hostId, Long domainId, Long accountId); + + Pair, Integer> searchDedicatedClusters(Long clusterId, Long domainId, Long accountId); + + Pair, Integer> searchDedicatedPods(Long podId, Long domainId, Long accountId); + + Pair, Integer> searchDedicatedZones(Long dataCenterId, Long domainId, Long accountId); + + List listByAccountId(Long accountId); + + List listByDomainId(Long domainId); + + List listZonesNotInDomainIds(List domainIds); +} \ No newline at end of file diff --git a/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java b/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java new file mode 100644 index 00000000000..2a3b4690a0c --- /dev/null +++ b/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java @@ -0,0 +1,304 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
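A short sketch of how this DAO is meant to be driven, mirroring the createZone, deleteZone and deletePod changes earlier in this patch; the wrapper class is hypothetical and the _dedicatedDao field assumes the same @Inject wiring added to ConfigurationManagerImpl:

import javax.inject.Inject;

import com.cloud.dc.DedicatedResourceVO;
import com.cloud.dc.dao.DedicatedResourceDao;

public class ZoneDedicationSketch {
    @Inject
    DedicatedResourceDao _dedicatedDao;

    // Record the dedication when a zone is created for a specific domain.
    public void dedicateZoneToDomain(long zoneId, long domainId) {
        DedicatedResourceVO dr = new DedicatedResourceVO(zoneId, null, null, null, domainId, null);
        _dedicatedDao.persist(dr);
    }

    // Drop the dedication record when the zone is deleted.
    public void cleanupZoneDedication(long zoneId) {
        DedicatedResourceVO dr = _dedicatedDao.findByZoneId(zoneId);
        if (dr != null) {
            _dedicatedDao.remove(dr.getId());
        }
    }
}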
+package com.cloud.dc.dao; + +import java.util.List; + +import javax.ejb.Local; + +import org.springframework.stereotype.Component; + +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.utils.Pair; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; + +@Component +@Local(value={DedicatedResourceDao.class}) @DB(txn = false) +public class DedicatedResourceDaoImpl extends GenericDaoBase implements DedicatedResourceDao { + protected final SearchBuilder ZoneSearch; + protected final SearchBuilder PodSearch; + protected final SearchBuilder ClusterSearch; + protected final SearchBuilder HostSearch; + + protected SearchBuilder ListZonesByDomainIdSearch; + protected SearchBuilder ListPodsByDomainIdSearch; + protected SearchBuilder ListClustersByDomainIdSearch; + protected SearchBuilder ListHostsByDomainIdSearch; + + protected SearchBuilder ListZonesByAccountIdSearch; + protected SearchBuilder ListPodsByAccountIdSearch; + protected SearchBuilder ListClustersByAccountIdSearch; + protected SearchBuilder ListHostsByAccountIdSearch; + + protected SearchBuilder ListAllZonesSearch; + protected SearchBuilder ListAllPodsSearch; + protected SearchBuilder ListAllClustersSearch; + protected SearchBuilder ListAllHostsSearch; + + protected SearchBuilder ListByAccountId; + protected SearchBuilder ListByDomainId; + + protected SearchBuilder ZoneByDomainIdsSearch; + + protected DedicatedResourceDaoImpl() { + PodSearch = createSearchBuilder(); + PodSearch.and("podId", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ); + PodSearch.done(); + + ZoneSearch = createSearchBuilder(); + ZoneSearch.and("zoneId", ZoneSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneSearch.done(); + + ClusterSearch = createSearchBuilder(); + ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + ClusterSearch.done(); + + HostSearch = createSearchBuilder(); + HostSearch.and("hostId", HostSearch.entity().getHostId(), SearchCriteria.Op.EQ); + HostSearch.done(); + + ListZonesByDomainIdSearch = createSearchBuilder(); + ListZonesByDomainIdSearch.and("zoneId", ListZonesByDomainIdSearch.entity().getDataCenterId(), SearchCriteria.Op.NNULL); + ListZonesByDomainIdSearch.and("domainId", ListZonesByDomainIdSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + ListZonesByDomainIdSearch.and("accountId", ListZonesByDomainIdSearch.entity().getAccountId(), SearchCriteria.Op.NULL); + ListZonesByDomainIdSearch.done(); + + ListZonesByAccountIdSearch = createSearchBuilder(); + ListZonesByAccountIdSearch.and("zoneId", ListZonesByAccountIdSearch.entity().getDataCenterId(), SearchCriteria.Op.NNULL); + ListZonesByAccountIdSearch.and("accountId", ListZonesByAccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + ListZonesByAccountIdSearch.done(); + + ListPodsByDomainIdSearch = createSearchBuilder(); + ListPodsByDomainIdSearch.and("podId", ListPodsByDomainIdSearch.entity().getPodId(), SearchCriteria.Op.NNULL); + ListPodsByDomainIdSearch.and("domainId", ListPodsByDomainIdSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + ListPodsByDomainIdSearch.and("accountId", ListPodsByDomainIdSearch.entity().getAccountId(), SearchCriteria.Op.NULL); + ListPodsByDomainIdSearch.done(); + + ListPodsByAccountIdSearch = createSearchBuilder(); + ListPodsByAccountIdSearch.and("podId", 
ListPodsByAccountIdSearch.entity().getPodId(), SearchCriteria.Op.NNULL); + ListPodsByAccountIdSearch.and("accountId", ListPodsByAccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + ListPodsByAccountIdSearch.done(); + + ListClustersByDomainIdSearch = createSearchBuilder(); + ListClustersByDomainIdSearch.and("clusterId", ListClustersByDomainIdSearch.entity().getClusterId(), SearchCriteria.Op.NNULL); + ListClustersByDomainIdSearch.and("domainId", ListClustersByDomainIdSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + ListClustersByDomainIdSearch.and("accountId", ListClustersByDomainIdSearch.entity().getAccountId(), SearchCriteria.Op.NULL); + ListClustersByDomainIdSearch.done(); + + ListClustersByAccountIdSearch = createSearchBuilder(); + ListClustersByAccountIdSearch.and("clusterId", ListClustersByAccountIdSearch.entity().getClusterId(), SearchCriteria.Op.NNULL); + ListClustersByAccountIdSearch.and("accountId", ListClustersByAccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + ListClustersByAccountIdSearch.done(); + + ListHostsByDomainIdSearch = createSearchBuilder(); + ListHostsByDomainIdSearch.and("hostId", ListHostsByDomainIdSearch.entity().getHostId(), SearchCriteria.Op.NNULL); + ListHostsByDomainIdSearch.and("domainId", ListHostsByDomainIdSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + ListHostsByDomainIdSearch.and("accountId", ListHostsByDomainIdSearch.entity().getAccountId(), SearchCriteria.Op.NULL); + ListHostsByDomainIdSearch.done(); + + ListHostsByAccountIdSearch = createSearchBuilder(); + ListHostsByAccountIdSearch.and("hostId", ListHostsByAccountIdSearch.entity().getHostId(), SearchCriteria.Op.NNULL); + ListHostsByAccountIdSearch.and("accountId", ListHostsByAccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + ListHostsByAccountIdSearch.done(); + + ListAllZonesSearch = createSearchBuilder(); + ListAllZonesSearch.and("zoneId", ListAllZonesSearch.entity().getDataCenterId(), Op.EQ); + ListAllZonesSearch.and("podId", ListAllZonesSearch.entity().getPodId(), Op.NULL); + ListAllZonesSearch.and("clusterId", ListAllZonesSearch.entity().getClusterId(), Op.NULL); + ListAllZonesSearch.and("hostId", ListAllZonesSearch.entity().getHostId(), Op.NULL); + ListAllZonesSearch.and("accountId", ListAllZonesSearch.entity().getAccountId(), Op.EQ); + ListAllZonesSearch.and("domainId", ListAllZonesSearch.entity().getDomainId(), Op.EQ); + ListAllZonesSearch.done(); + + ListAllPodsSearch = createSearchBuilder(); + ListAllPodsSearch.and("zoneId", ListAllPodsSearch.entity().getDataCenterId(), Op.NULL); + ListAllPodsSearch.and("podId", ListAllPodsSearch.entity().getPodId(), Op.EQ); + ListAllPodsSearch.and("clusterId", ListAllPodsSearch.entity().getClusterId(), Op.NULL); + ListAllPodsSearch.and("hostId", ListAllPodsSearch.entity().getHostId(), Op.NULL); + ListAllPodsSearch.and("accountId", ListAllPodsSearch.entity().getAccountId(), Op.EQ); + ListAllPodsSearch.and("domainId", ListAllPodsSearch.entity().getDomainId(), Op.EQ); + ListAllPodsSearch.done(); + + ListAllClustersSearch = createSearchBuilder(); + ListAllClustersSearch.and("zoneId", ListAllClustersSearch.entity().getDataCenterId(), Op.NULL); + ListAllClustersSearch.and("podId", ListAllClustersSearch.entity().getPodId(), Op.NULL); + ListAllClustersSearch.and("clusterId", ListAllClustersSearch.entity().getClusterId(), Op.EQ); + ListAllClustersSearch.and("hostId", ListAllClustersSearch.entity().getHostId(), Op.NULL); + ListAllClustersSearch.and("accountId", ListAllClustersSearch.entity().getAccountId(), 
Op.EQ); + ListAllClustersSearch.and("domainId", ListAllClustersSearch.entity().getDomainId(), Op.EQ); + ListAllClustersSearch.done(); + + ListAllHostsSearch = createSearchBuilder(); + ListAllHostsSearch.and("zoneId", ListAllHostsSearch.entity().getDataCenterId(), Op.NULL); + ListAllHostsSearch.and("podId", ListAllHostsSearch.entity().getPodId(), Op.NULL); + ListAllHostsSearch.and("clusterId", ListAllHostsSearch.entity().getClusterId(), Op.NULL); + ListAllHostsSearch.and("hostId", ListAllHostsSearch.entity().getHostId(), Op.EQ); + ListAllHostsSearch.and("accountId", ListAllHostsSearch.entity().getAccountId(), Op.EQ); + ListAllHostsSearch.and("domainId", ListAllHostsSearch.entity().getDomainId(), Op.EQ); + ListAllHostsSearch.done(); + + ListByAccountId = createSearchBuilder(); + ListByAccountId.and("accountId", ListByAccountId.entity().getAccountId(), SearchCriteria.Op.EQ); + ListByAccountId.done(); + + ListByDomainId = createSearchBuilder(); + ListByDomainId.and("accountId", ListByDomainId.entity().getAccountId(), SearchCriteria.Op.NULL); + ListByDomainId.and("domainId", ListByDomainId.entity().getDomainId(), SearchCriteria.Op.EQ); + ListByDomainId.done(); + + ZoneByDomainIdsSearch = createSearchBuilder(); + ZoneByDomainIdsSearch.and("zoneId", ZoneByDomainIdsSearch.entity().getDataCenterId(), SearchCriteria.Op.NNULL); + ZoneByDomainIdsSearch.and("domainId", ZoneByDomainIdsSearch.entity().getDomainId(), SearchCriteria.Op.NIN); + ZoneByDomainIdsSearch.done(); + } + + @Override + public DedicatedResourceVO findByZoneId(Long zoneId) { + SearchCriteria sc = ZoneSearch.create(); + sc.setParameters("zoneId", zoneId); + return findOneBy(sc); + } + + @Override + public DedicatedResourceVO findByPodId(Long podId) { + SearchCriteria sc = PodSearch.create(); + sc.setParameters("podId", podId); + + return findOneBy(sc); + } + + @Override + public DedicatedResourceVO findByClusterId(Long clusterId) { + SearchCriteria sc = ClusterSearch.create(); + sc.setParameters("clusterId", clusterId); + + return findOneBy(sc); + } + + @Override + public DedicatedResourceVO findByHostId(Long hostId) { + SearchCriteria sc = HostSearch.create(); + sc.setParameters("hostId", hostId); + + return findOneBy(sc); + } + + @Override + public Pair, Integer> searchDedicatedZones(Long dataCenterId, Long domainId, Long accountId){ + SearchCriteria sc = ListAllZonesSearch.create(); + if (dataCenterId != null) { + sc.setParameters("dataCenterId", dataCenterId); + } + if(domainId != null) { + sc.setParameters("domainId", domainId); + if(accountId != null) { + sc.setParameters("accountId", accountId); + } else { + sc.setParameters("accountId", (Object)null); + } + } + return searchAndCount(sc, null); + } + @Override + public Pair, Integer> searchDedicatedPods(Long podId, Long domainId, Long accountId){ + SearchCriteria sc = ListAllPodsSearch.create(); + if (podId != null) { + sc.setParameters("podId", podId); + } + if(domainId != null) { + sc.setParameters("domainId", domainId); + if(accountId != null) { + sc.setParameters("accountId", accountId); + } else { + sc.setParameters("accountId", (Object)null); + } + } + return searchAndCount(sc, null); + } + + @Override + public Pair, Integer> searchDedicatedClusters(Long clusterId, Long domainId, Long accountId){ + SearchCriteria sc = ListAllClustersSearch.create(); + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + if(domainId != null) { + sc.setParameters("domainId", domainId); + if(accountId != null) { + sc.setParameters("accountId", accountId); + } else { + 
sc.setParameters("accountId", (Object)null); + } + } + return searchAndCount(sc, null); + } + + @Override + public Pair, Integer> searchDedicatedHosts(Long hostId, Long domainId, Long accountId){ + SearchCriteria sc = ListAllHostsSearch.create(); + if (hostId != null) { + sc.setParameters("hostId", hostId); + } + if(domainId != null) { + sc.setParameters("domainId", domainId); + if(accountId != null) { + sc.setParameters("accountId", accountId); + } else { + sc.setParameters("accountId", (Object)null); + } + } + return searchAndCount(sc, null); + } + + @Override + public List listByAccountId(Long accountId){ + SearchCriteria sc = ListByAccountId.create(); + sc.setParameters("accountId", accountId); + return listBy(sc); + } + + @Override + public List listByDomainId(Long domainId){ + SearchCriteria sc = ListByDomainId.create(); + sc.setParameters("domainId", domainId); + return listBy(sc); + } + + @Override + public List listZonesNotInDomainIds(List domainIds) { + SearchCriteria sc = ZoneByDomainIdsSearch.create(); + sc.setParameters("domainId", domainIds.toArray(new Object[domainIds.size()])); + return listBy(sc); + } + + @Override + public boolean remove(Long id) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + DedicatedResourceVO resource = createForUpdate(); + update(id, resource); + + boolean result = super.remove(id); + txn.commit(); + return result; + } +} diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 0f611681110..4ef2152dd7b 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -30,6 +30,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.affinity.AffinityGroupProcessor; +import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -53,9 +54,12 @@ import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.HostPodVO; import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; @@ -91,6 +95,7 @@ import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; @@ -157,6 +162,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy @Inject protected HostDao _hostDao; @Inject protected HostPodDao _podDao; @Inject protected ClusterDao _clusterDao; + @Inject protected DedicatedResourceDao _dedicatedDao; @Inject protected GuestOSDao _guestOSDao = null; @Inject protected GuestOSCategoryDao _guestOSCategoryDao = null; @Inject protected DiskOfferingDao _diskOfferingDao; @@ -196,6 +202,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy // call affinitygroup chain VirtualMachine vm = vmProfile.getVirtualMachine(); long 
vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); + DataCenter dc = _dcDao.findById(vm.getDataCenterId()); if (vmGroupCount > 0) { for (AffinityGroupProcessor processor : _affinityProcessors) { @@ -203,13 +210,14 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy } } + checkForNonDedicatedResources(vmProfile, dc, avoids); if (s_logger.isDebugEnabled()) { s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); } // call planners - DataCenter dc = _dcDao.findById(vm.getDataCenterId()); + //DataCenter dc = _dcDao.findById(vm.getDataCenterId()); // check if datacenter is in avoid set if (avoids.shouldAvoid(dc)) { if (s_logger.isDebugEnabled()) { @@ -283,9 +291,8 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy if (!suitableVolumeStoragePools.isEmpty()) { List suitableHosts = new ArrayList(); suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner)); + suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner,vmProfile, plan ,avoids)); if (potentialResources != null) { Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); @@ -339,13 +346,13 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); Map> suitableVolumeStoragePools = result.first(); List readyAndReusedVolumes = result.second(); + // choose the potential pool for this VM for this host if (!suitableVolumeStoragePools.isEmpty()) { List suitableHosts = new ArrayList(); suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner)); + suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner,vmProfile, plan ,avoids)); if (potentialResources != null) { Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); @@ -395,7 +402,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy resetAvoidSet(plannerAvoidOutput, plannerAvoidInput); dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, - getPlannerUsage(planner), plannerAvoidOutput); + getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput); if (dest != null) { return dest; } @@ -430,6 +437,60 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy return dest; } + private void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) { + boolean isExplicit = false; + VirtualMachine vm = vmProfile.getVirtualMachine(); + // check affinity group of type Explicit dedication exists + List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), "ExplicitDedication"); + + if (vmGroupMappings != null && !vmGroupMappings.isEmpty()){ + isExplicit = true; + } + + if (!isExplicit && vm.getType() == VirtualMachine.Type.User) { + //add explicitly dedicated resources in avoidList + DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(dc.getId()); + if (dedicatedZone != null) { + long accountDomainId = vmProfile.getOwner().getDomainId(); + if (dedicatedZone.getDomainId() != null && 
!dedicatedZone.getDomainId().equals(accountDomainId)) { + throw new CloudRuntimeException("Failed to deploy VM. Zone " + dc.getName() + " is dedicated."); + } + } + + List podsInDc = _podDao.listByDataCenterId(dc.getId()); + for (HostPodVO pod : podsInDc) { + DedicatedResourceVO dedicatedPod = _dedicatedDao.findByPodId(pod.getId()); + if (dedicatedPod != null) { + avoids.addPod(dedicatedPod.getPodId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot use this dedicated pod " + pod.getName() + "."); + } + } + } + + List clusterInDc = _clusterDao.listClustersByDcId(dc.getId()); + for (ClusterVO cluster : clusterInDc) { + DedicatedResourceVO dedicatedCluster = _dedicatedDao.findByClusterId(cluster.getId()); + if (dedicatedCluster != null) { + avoids.addCluster(dedicatedCluster.getClusterId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot use this dedicated Cluster " + cluster.getName() + "."); + } + } + } + List hostInDc = _hostDao.listByDataCenterId(dc.getId()); + for (HostVO host : hostInDc) { + DedicatedResourceVO dedicatedHost = _dedicatedDao.findByHostId(host.getId()); + if (dedicatedHost != null) { + avoids.addHost(dedicatedHost.getHostId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot use this dedicated host " + host.getName() + "."); + } + } + } + } + } + private void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) { if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) { avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid()); @@ -448,9 +509,9 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy } } - private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner) { + private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException { if (planner != null && planner instanceof DeploymentClusterPlanner) { - return ((DeploymentClusterPlanner) planner).getResourceUsage(); + return ((DeploymentClusterPlanner) planner).getResourceUsage(vmProfile, plan, avoids); } else { return DeploymentPlanner.PlannerResourceUsage.Shared; } diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index caf8c6e92db..7124de28d7b 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -517,7 +517,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPla } @Override - public PlannerResourceUsage getResourceUsage() { + public PlannerResourceUsage getResourceUsage(VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { return PlannerResourceUsage.Shared; } } diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java index befd8c10113..5d4a5803da4 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java @@ -27,7 +27,9 @@ import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; +import com.cloud.configuration.Config; import com.cloud.offering.ServiceOffering; +import com.cloud.server.ConfigurationServer; import com.cloud.storage.dao.VMTemplateDetailsDao; import 
com.cloud.utils.component.AdapterBase; import com.cloud.vm.NicProfile; @@ -45,6 +47,8 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis @Inject NicDao _nicDao; @Inject VMInstanceDao _virtualMachineDao; @Inject NicSecondaryIpDao _nicSecIpDao; + @Inject ConfigurationServer _configServer; + protected HypervisorGuruBase() { super(); @@ -124,6 +128,9 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis VMInstanceVO vmInstance = _virtualMachineDao.findById(to.getId()); to.setUuid(vmInstance.getUuid()); + // + to.setEnableDynamicallyScaleVm(Boolean.parseBoolean(_configServer.getConfigValue(Config.EnableDynamicallyScaleVm.key(), Config.ConfigurationParameterScope.zone.toString(), vm.getDataCenterId()))); + return to; } diff --git a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index 4a90a77f428..9b190aa6b12 100644 --- a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -485,7 +485,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl if (add && (!reservedIpAddressesForGuestNetwork.contains(network.getGateway()))) { // Insert a new NIC for this guest network to reserve the gateway address - _networkMgr.savePlaceholderNic(network, network.getGateway(), null); + _networkMgr.savePlaceholderNic(network, network.getGateway(), null, null); } // Delete any mappings used for inline external load balancers in this network @@ -550,7 +550,9 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl ruleTO = new FirewallRuleTO(rule, guestVlanTag, rule.getTrafficType()); } else { IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); - ruleTO = new FirewallRuleTO(rule, null, sourceIp.getAddress().addr()); + Vlan vlan = _vlanDao.findById(sourceIp.getVlanId()); + + ruleTO = new FirewallRuleTO(rule, vlan.getVlanTag(), sourceIp.getAddress().addr()); } rulesTO.add(ruleTO); } diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index f93bf7ae9b5..829ad3fdfe6 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -210,21 +210,21 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase zoneId = pNetwork.getDataCenterId(); PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); + ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); + if (ntwkSvcProvider == null) { + throw new CloudRuntimeException("Network Service Provider: " + ntwkDevice.getNetworkServiceProvder() + + " is not enabled in the physical network: " + physicalNetworkId + "to add this device"); + } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { + throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + + " is in shutdown state in the physical network: " + physicalNetworkId + "to add this device"); + } + if (gslbProvider) { ExternalLoadBalancerDeviceVO zoneGslbProvider = _externalLoadBalancerDeviceDao.findGslbServiceProvider( 
physicalNetworkId, ntwkDevice.getNetworkServiceProvder()); if (zoneGslbProvider != null) { throw new CloudRuntimeException("There is a GSLB service provider configured in the zone alredy."); } - } else { - ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); - if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkDevice.getNetworkServiceProvder() + - " is not enabled in the physical network: " + physicalNetworkId + "to add this device"); - } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + - " is in shutdown state in the physical network: " + physicalNetworkId + "to add this device"); - } } URI uri; @@ -774,7 +774,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // If a NIC doesn't exist for the load balancing IP address, create one loadBalancingIpNic = _nicDao.findByIp4AddressAndNetworkId(loadBalancingIpAddress, network.getId()); if (loadBalancingIpNic == null) { - loadBalancingIpNic = _networkMgr.savePlaceholderNic(network, loadBalancingIpAddress, null); + loadBalancingIpNic = _networkMgr.savePlaceholderNic(network, loadBalancingIpAddress, null, null); } // Save a mapping between the source IP address and the load balancing IP address NIC @@ -1019,7 +1019,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (add) { // Insert a new NIC for this guest network to reserve the self IP - _networkMgr.savePlaceholderNic(guestConfig, selfIp, null); + _networkMgr.savePlaceholderNic(guestConfig, selfIp, null, null); } else { // release the self-ip obtained from guest network Nic selfipNic = getPlaceholderNic(guestConfig); diff --git a/server/src/com/cloud/network/NetworkManager.java b/server/src/com/cloud/network/NetworkManager.java index 05bc26ee5c2..bc43daa9975 100755 --- a/server/src/com/cloud/network/NetworkManager.java +++ b/server/src/com/cloud/network/NetworkManager.java @@ -19,7 +19,6 @@ package com.cloud.network; import java.util.List; import java.util.Map; -import com.cloud.network.element.DhcpServiceProvider; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import com.cloud.dc.DataCenter; @@ -39,6 +38,7 @@ import com.cloud.network.Network.Service; import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.element.DhcpServiceProvider; import com.cloud.network.element.LoadBalancingServiceProvider; import com.cloud.network.element.StaticNatServiceProvider; import com.cloud.network.element.UserDataServiceProvider; @@ -59,7 +59,6 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.VirtualMachineProfile; -import org.apache.cloudstack.region.PortableIp; /** * NetworkManager manages the network for the different end users. 
@@ -268,6 +267,11 @@ public interface NetworkManager { IPAddressVO associateIPToGuestNetwork(long ipAddrId, long networkId, boolean releaseOnFailure) throws ResourceAllocationException, ResourceUnavailableException, InsufficientAddressCapacityException, ConcurrentOperationException; + IpAddress allocatePortableIp(Account ipOwner, Account caller, long dcId, Long networkId, Long vpcID) + throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; + + boolean releasePortableIpAddress(long addrId); + IPAddressVO associatePortableIPToGuestNetwork(long ipAddrId, long networkId, boolean releaseOnFailure) throws ResourceAllocationException, ResourceUnavailableException, InsufficientAddressCapacityException, ConcurrentOperationException; @@ -314,9 +318,6 @@ public interface NetworkManager { InsufficientAddressCapacityException, ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException; - PublicIp assignVpnGatewayIpAddress(long dcId, Account owner, long vpcId) throws InsufficientAddressCapacityException, ConcurrentOperationException; - - /** * @param addr */ @@ -341,10 +342,7 @@ public interface NetworkManager { * @return */ int getNetworkLockTimeout(); - - - boolean cleanupIpResources(long addrId, long userId, Account caller); - + boolean restartNetwork(Long networkId, Account callerAccount, User callerUser, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; @@ -362,35 +360,28 @@ public interface NetworkManager { IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone) throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; - - IpAddress allocatePortableIp(Account ipOwner, Account caller, long dcId, Long networkId, Long vpcID) - throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; - Map finalizeServicesAndProvidersForNetwork(NetworkOffering offering, Long physicalNetworkId); - List getProvidersForServiceInNetwork(Network network, Service service); StaticNatServiceProvider getStaticNatProviderForNetwork(Network network); + boolean isNetworkInlineMode(Network network); int getRuleCountForIp(Long addressId, FirewallRule.Purpose purpose, FirewallRule.State state); LoadBalancingServiceProvider getLoadBalancingProviderForNetwork(Network network, Scheme lbScheme); - boolean isSecondaryIpSetForNic(long nicId); - public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp) - throws InsufficientAddressCapacityException; - + public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp) throws InsufficientAddressCapacityException; List listVmNics(Long vmId, Long nicId); + String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, Pod pod, Account caller, String requestedIp) throws InsufficientAddressCapacityException; - boolean removeVmSecondaryIpsOfNic(long nicId); - NicVO savePlaceholderNic(Network network, String ip4Address, Type vmType); + NicVO savePlaceholderNic(Network network, String ip4Address, String ip6Address, Type vmType); DhcpServiceProvider getDhcpServiceProvider(Network network); diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index e8801801d72..2fd9bd06c3f 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ 
b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -16,9 +16,46 @@ // under the License. package com.cloud.network; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.acl.ControlledEntity.ACLType; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.region.PortableIp; +import org.apache.cloudstack.region.PortableIpDao; +import org.apache.cloudstack.region.PortableIpVO; +import org.apache.cloudstack.region.Region; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; -import com.cloud.agent.api.*; +import com.cloud.agent.api.AgentControlAnswer; +import com.cloud.agent.api.AgentControlCommand; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckNetworkAnswer; +import com.cloud.agent.api.CheckNetworkCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.to.NicTO; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -26,9 +63,16 @@ import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.*; +import com.cloud.dc.AccountVlanMapVO; +import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DataCenterVnetVO; +import com.cloud.dc.Pod; +import com.cloud.dc.PodVlanMapVO; +import com.cloud.dc.Vlan; import com.cloud.dc.Vlan.VlanType; +import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterVnetDao; @@ -39,27 +83,64 @@ import com.cloud.deploy.DeployDestination; import com.cloud.deploy.DeploymentPlan; import com.cloud.domain.Domain; import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.UsageEventDao; -import com.cloud.exception.*; +import com.cloud.exception.AccountLimitException; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.ConnectionException; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientVirtualNetworkCapcityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.UnsupportedServiceException; import com.cloud.host.Host; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; -import com.cloud.server.ConfigurationServer; import 
com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.IpAddress.State; -import com.cloud.network.Network.*; +import com.cloud.network.Network.Capability; +import com.cloud.network.Network.Event; +import com.cloud.network.Network.GuestType; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; import com.cloud.network.Networks.AddressFormat; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.IsolationType; import com.cloud.network.Networks.TrafficType; import com.cloud.network.addr.PublicIp; -import com.cloud.network.dao.*; -import com.cloud.network.element.*; +import com.cloud.network.dao.AccountGuestVlanMapDao; +import com.cloud.network.dao.AccountGuestVlanMapVO; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.dao.IPAddressDao; +import com.cloud.network.dao.IPAddressVO; +import com.cloud.network.dao.LoadBalancerDao; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkDomainDao; +import com.cloud.network.dao.NetworkServiceMapDao; +import com.cloud.network.dao.NetworkServiceMapVO; +import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.PhysicalNetworkDao; +import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; +import com.cloud.network.dao.PhysicalNetworkTrafficTypeDao; +import com.cloud.network.dao.PhysicalNetworkTrafficTypeVO; +import com.cloud.network.dao.PhysicalNetworkVO; +import com.cloud.network.dao.UserIpv6AddressDao; +import com.cloud.network.element.DhcpServiceProvider; +import com.cloud.network.element.IpDeployer; +import com.cloud.network.element.IpDeployingRequester; +import com.cloud.network.element.LoadBalancingServiceProvider; +import com.cloud.network.element.NetworkElement; +import com.cloud.network.element.StaticNatServiceProvider; +import com.cloud.network.element.UserDataServiceProvider; import com.cloud.network.guru.NetworkGuru; import com.cloud.network.lb.LoadBalancingRulesManager; -import com.cloud.network.rules.*; +import com.cloud.network.rules.FirewallManager; +import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.LoadBalancerContainer.Scheme; @@ -81,7 +162,13 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDetailsDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.org.Grouping; -import com.cloud.user.*; +import com.cloud.server.ConfigurationServer; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Journal; @@ -90,35 +177,35 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.db.*; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder.JoinType; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; -import com.cloud.vm.*; +import com.cloud.vm.Nic; import com.cloud.vm.Nic.ReservationStrategy; +import com.cloud.vm.NicProfile; +import com.cloud.vm.NicVO; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.ReservationContextImpl; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; -import com.cloud.vm.dao.*; -import org.apache.cloudstack.acl.ControlledEntity.ACLType; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.region.PortableIp; -import org.apache.cloudstack.region.PortableIpDao; -import org.apache.cloudstack.region.PortableIpVO; -import org.apache.cloudstack.region.Region; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import java.net.URI; -import java.util.*; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.NicSecondaryIpVO; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; /** * NetworkManagerImpl implements NetworkManager. @@ -258,9 +345,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L PortableIpDao _portableIpDao; protected StateMachine2 _stateMachine; - private final HashMap _systemNetworks = new HashMap(5); - private static Long _privateOfferingId = null; - ScheduledExecutorService _executor; SearchBuilder AssignIpAddressSearch; @@ -486,11 +570,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return ipToReturn; } - @Override - public PublicIp assignVpnGatewayIpAddress(long dcId, Account owner, long vpcId) throws InsufficientAddressCapacityException, ConcurrentOperationException { - return assignDedicateIpAddress(owner, null, vpcId, dcId, false); - } - @DB @Override @@ -510,6 +589,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L // this ownerId comes from owner or type Account. See the class "AccountVO" and the annotations in that class // to get the table name and field name that is queried to fill this ownerid. ConcurrentOperationException ex = new ConcurrentOperationException("Unable to lock account"); + throw ex; } if (s_logger.isDebugEnabled()) { s_logger.debug("lock account " + ownerId + " is acquired"); @@ -656,7 +736,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L // zone is of type DataCenter. See DataCenterVO.java. 
PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, " + "Zone is currently disabled"); - ex.addProxyObject("data_center", zone.getId(), "zoneId"); + ex.addProxyObject(zone.getUuid(), "zoneId"); throw ex; } @@ -1062,6 +1142,9 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L _ipAddressDao.update(ipAddrId, ip); txn.commit(); + ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, + EventTypes.EVENT_PORTABLE_IP_TRANSFER, "Portable IP associated is transferred from network " + + currentNetworkId + " to " + newNetworkId); } @Override @@ -1113,7 +1196,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } @DB - private void releasePortableIpAddress(long addrId) { + @Override + public boolean releasePortableIpAddress(long addrId) { Transaction txn = Transaction.currentTxn(); GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange"); @@ -1128,12 +1212,13 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L // removed the provisioned vlan VlanVO vlan = _vlanDao.findById(ip.getVlanId()); - _vlanDao.expunge(vlan.getId()); + _vlanDao.remove(vlan.getId()); // remove the provisioned public ip address - _ipAddressDao.expunge(ip.getId()); + _ipAddressDao.remove(ip.getId()); txn.commit(); + return true; } finally { portableIpLock.releaseRef(); } @@ -1397,25 +1482,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (errorIfAlreadySetup) { InvalidParameterValueException ex = new InvalidParameterValueException("Found existing network configuration (with specified id) for offering (with specified id)"); - ex.addProxyObject(offering, offering.getId(), "offeringId"); - ex.addProxyObject(configs.get(0), configs.get(0).getId(), "networkConfigId"); - throw ex; - } else { - return configs; - } - } - } else if (predefined != null && predefined.getCidr() != null && predefined.getBroadcastUri() == null && vpcId == null) { - // don't allow to have 2 networks with the same cidr in the same zone for the account - List configs = _networksDao.listBy(owner.getId(), plan.getDataCenterId(), predefined.getCidr(), true); - if (configs.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); - } - - if (errorIfAlreadySetup) { - InvalidParameterValueException ex = new InvalidParameterValueException("Found existing network configuration (with specified id) for offering (with specified id)"); - ex.addProxyObject(offering, offering.getId(), "offeringId"); - ex.addProxyObject(configs.get(0), configs.get(0).getId(), "networkConfigId"); + ex.addProxyObject(offering.getUuid(), "offeringId"); + ex.addProxyObject(configs.get(0).getUuid(), "networkConfigId"); throw ex; } else { return configs; @@ -1458,6 +1526,9 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L finalizeServicesAndProvidersForNetwork(offering, plan.getPhysicalNetworkId()))); if (domainId != null && aclType == ACLType.Domain) { + if (subdomainAccess == null ) { + subdomainAccess = true; + } _networksDao.addDomainToNetwork(id, domainId, subdomainAccess); } @@ -1467,7 +1538,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (networks.size() < 1) { // see networkOfferingVO.java CloudRuntimeException ex = new CloudRuntimeException("Unable to convert network offering with 
specified id to network profile"); - ex.addProxyObject(offering, offering.getId(), "offeringId"); + ex.addProxyObject(offering.getUuid(), "offeringId"); throw ex; } @@ -1846,7 +1917,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (!element.implement(network, offering, dest, context)) { CloudRuntimeException ex = new CloudRuntimeException("Failed to implement provider " + element.getProvider().getName() + " for network with specified id"); - ex.addProxyObject(network, network.getId(), "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } @@ -2166,11 +2237,13 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } } + @Override public void removeNic(VirtualMachineProfile vm, Nic nic) { removeNic(vm, _nicDao.findById(nic.getId())); } + protected void removeNic(VirtualMachineProfile vm, NicVO nic) { nic.setState(Nic.State.Deallocating); _nicDao.update(nic.getId(), nic); @@ -2242,7 +2315,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (ntwkOff.getState() != NetworkOffering.State.Enabled) { // see NetworkOfferingVO InvalidParameterValueException ex = new InvalidParameterValueException("Can't use specified network offering id as its stat is not " + NetworkOffering.State.Enabled); - ex.addProxyObject(ntwkOff, ntwkOff.getId(), "networkOfferingId"); + ex.addProxyObject(ntwkOff.getUuid(), "networkOfferingId"); throw ex; } @@ -2251,7 +2324,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L // see PhysicalNetworkVO.java InvalidParameterValueException ex = new InvalidParameterValueException("Specified physical network id is" + " in incorrect state:" + pNtwk.getState()); - ex.addProxyObject("physical_network", pNtwk.getId(), "physicalNetworkId"); + ex.addProxyObject(pNtwk.getUuid(), "physicalNetworkId"); throw ex; } @@ -2526,7 +2599,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L public boolean shutdownNetwork(long networkId, ReservationContext context, boolean cleanupElements) { boolean result = false; Transaction txn = Transaction.currentTxn(); - + txn.start(); NetworkVO network = _networksDao.lockRow(networkId, true); if (network == null) { s_logger.debug("Unable to find network with id: " + networkId); @@ -2537,7 +2610,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return false; } - txn.start(); if (isSharedNetworkWithServices(network)) { network.setState(Network.State.Shutdown); _networksDao.update(network.getId(), network); @@ -2904,7 +2976,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L NetworkVO network = _networksDao.findById(networkId); if (network == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Network with specified id doesn't exist"); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(String.valueOf(networkId), "networkId"); throw ex; } @@ -3153,7 +3225,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L " network provision due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id)" + " elements and resources as a part of network provision for persistent network"); - e.addProxyObject(guestNetwork, guestNetwork.getId(), "networkId"); + e.addProxyObject(guestNetwork.getUuid(), "networkId"); throw e; } } @@ -3232,8 +3304,8 @@ public class NetworkManagerImpl extends 
ManagerBase implements NetworkManager, L return false; } - @Override - public boolean cleanupIpResources(long ipId, long userId, Account caller) { + + protected boolean cleanupIpResources(long ipId, long userId, Account caller) { boolean success = true; // Revoke all firewall rules for the ip @@ -3341,9 +3413,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L throws InsufficientAddressCapacityException { String ipaddr = null; Account caller = UserContext.current().getCaller(); - long callerUserId = UserContext.current().getCallerUserId(); // check permissions - DataCenter zone = _configMgr.getZone(zoneId); Network network = _networksDao.findById(networkId); _accountMgr.checkAccess(caller, null, false, network); @@ -3532,8 +3602,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L List ipsToRelease = _ipAddressDao.listByAssociatedNetwork(networkId, null); for (IPAddressVO ipToRelease : ipsToRelease) { if (ipToRelease.getVpcId() == null) { + if (!ipToRelease.isPortable()) { IPAddressVO ip = markIpAsUnavailable(ipToRelease.getId()); assert (ip != null) : "Unable to mark the ip address id=" + ipToRelease.getId() + " as unavailable."; + } else { + // portable IP address are associated with owner, until explicitly requested to be disassociated + // so as part of network clean up just break IP association with guest network + ipToRelease.setAssociatedWithNetworkId(null); + _ipAddressDao.update(ipToRelease.getId(), ipToRelease); + s_logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network"); + } } else { _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); } @@ -3940,7 +4018,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L String requestedIpv4, String requestedIpv6) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException { //This method allocates direct ip for the Shared network in Advance zones - boolean ipv4 = false, ipv6 = false; + boolean ipv4 = false; Transaction txn = Transaction.currentTxn(); txn.start(); @@ -3983,7 +4061,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L //FIXME - get ipv6 address from the placeholder if it's stored there if (network.getIp6Gateway() != null) { if (nic.getIp6Address() == null) { - ipv6 = true; UserIpv6Address ip = _ipv6Mgr.assignDirectIp6Address(dc.getId(), vm.getOwner(), network.getId(), requestedIpv6); Vlan vlan = _vlanDao.findById(ip.getVlanId()); nic.setIp6Address(ip.getAddress().toString()); @@ -4073,6 +4150,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return nic; } + + @Override public List getNicProfiles(VirtualMachine vm) { List nics = _nicDao.listByVmId(vm.getId()); @@ -4122,6 +4201,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return map; } + @Override public List getProvidersForServiceInNetwork(Network network, Service service) { Map> service2ProviderMap = getServiceProvidersMap(network.getId()); @@ -4132,6 +4212,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return null; } + protected List getElementForServiceInNetwork(Network network, Service service) { List elements = new ArrayList(); List providers = getProvidersForServiceInNetwork(network, service); @@ -4154,6 +4235,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return elements; } + @Override public 
StaticNatServiceProvider getStaticNatProviderForNetwork(Network network) { //only one provider per Static nat service is supoprted @@ -4162,6 +4244,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return (StaticNatServiceProvider)element; } + @Override public LoadBalancingServiceProvider getLoadBalancingProviderForNetwork(Network network, Scheme lbScheme) { List lbElements = getElementForServiceInNetwork(network, Service.Lb); @@ -4188,6 +4271,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return (LoadBalancingServiceProvider)lbElement; } + @Override public boolean isNetworkInlineMode(Network network) { NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -4203,14 +4287,15 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return rules.size(); } + @Override public boolean isSecondaryIpSetForNic(long nicId) { NicVO nic = _nicDao.findById(nicId); return nic.getSecondaryIp(); } - @Override - public boolean removeVmSecondaryIpsOfNic(long nicId) { + + private boolean removeVmSecondaryIpsOfNic(long nicId) { Transaction txn = Transaction.currentTxn(); txn.start(); List ipList = _nicSecondaryIpDao.listByNicId(nicId); @@ -4224,6 +4309,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return true; } + @Override public String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, Pod pod,Account owner, String requestedIp) throws InsufficientAddressCapacityException { @@ -4236,10 +4322,12 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return ipAddr.addr(); } + @Override - public NicVO savePlaceholderNic(Network network, String ip4Address, Type vmType) { + public NicVO savePlaceholderNic(Network network, String ip4Address, String ip6Address, Type vmType) { NicVO nic = new NicVO(null, null, network.getId(), null); nic.setIp4Address(ip4Address); + nic.setIp6Address(ip6Address); nic.setReservationStrategy(ReservationStrategy.PlaceHolder); nic.setState(Nic.State.Reserved); nic.setVmType(vmType); diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index 8971f8c163b..21917f76351 100755 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -32,13 +32,16 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.DataCenter; import com.cloud.dc.PodVlanMapVO; import com.cloud.dc.Vlan; import com.cloud.dc.Vlan.VlanType; @@ -282,7 +285,12 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } else { CloudRuntimeException ex = new CloudRuntimeException("Multiple generic soure NAT IPs provided for network"); // see the IPAddressVO.java class. 
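The hunks before and after this point repeat one pattern many times: `addProxyObject` no longer takes an entity plus a table name, it takes the entity's UUID directly, and where the row may be missing the code falls back to the raw id. A compact sketch of that lookup-with-fallback (the helper name `uuidOrId` is invented here for illustration; `ApiDBUtils.findIpAddressById` is the lookup the patch itself uses):

```java
// Illustrative helper: prefer the entity's UUID for API error responses,
// fall back to the numeric id when the entity can no longer be loaded.
private static String uuidOrId(IPAddressVO addr, Long id) {
    return (addr != null) ? addr.getUuid() : String.valueOf(id);
}

// Usage mirroring the source-NAT hunk that follows:
// ex.addProxyObject(uuidOrId(ApiDBUtils.findIpAddressById(networkId), networkId), "networkId");
```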
- ex.addProxyObject("user_ip_address", ip.getAssociatedWithNetworkId(), "networkId"); + IPAddressVO ipAddr = ApiDBUtils.findIpAddressById(ip.getAssociatedWithNetworkId()); + String ipAddrUuid = ip.getAssociatedWithNetworkId().toString(); + if ( ipAddr != null){ + ipAddrUuid = ipAddr.getUuid(); + } + ex.addProxyObject(ipAddrUuid, "networkId"); throw ex; } } @@ -1128,17 +1136,21 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { public PhysicalNetwork getDefaultPhysicalNetworkByZoneAndTrafficType(long zoneId, TrafficType trafficType) { List networkList = _physicalNetworkDao.listByZoneAndTrafficType(zoneId, trafficType); + DataCenter dc = ApiDBUtils.findZoneById(zoneId); + String dcUuid = String.valueOf(zoneId); + if ( dc != null ){ + dcUuid = dc.getUuid(); + } if (networkList.isEmpty()) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the default physical network with traffic=" + trafficType + " in the specified zone id"); - // Since we don't have a DataCenterVO object at our disposal, we just set the table name that the zoneId's corresponding uuid is looked up from, manually. - ex.addProxyObject("data_center", zoneId, "zoneId"); + ex.addProxyObject(dcUuid, "zoneId"); throw ex; } if (networkList.size() > 1) { InvalidParameterValueException ex = new InvalidParameterValueException("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType); - ex.addProxyObject("data_center", zoneId, "zoneId"); + ex.addProxyObject(dcUuid, "zoneId"); throw ex; } @@ -1488,24 +1500,25 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { @Override public void checkNetworkPermissions(Account owner, Network network) { // Perform account permission check - if (network.getGuestType() != Network.GuestType.Shared) { + if (network.getGuestType() != Network.GuestType.Shared + || (network.getGuestType() == Network.GuestType.Shared && network.getAclType() == ACLType.Account)) { AccountVO networkOwner = _accountDao.findById(network.getAccountId()); if(networkOwner == null) - throw new PermissionDeniedException("Unable to use network with id= " + network.getId() + ", network does not have an owner"); + throw new PermissionDeniedException("Unable to use network with id= " + ((network != null)? ((NetworkVO)network).getUuid() : "") + ", network does not have an owner"); if(owner.getType() != Account.ACCOUNT_TYPE_PROJECT && networkOwner.getType() == Account.ACCOUNT_TYPE_PROJECT){ if(!_projectAccountDao.canAccessProjectAccount(owner.getAccountId(), network.getAccountId())){ - throw new PermissionDeniedException("Unable to use network with id= " + network.getId() + ", permission denied"); + throw new PermissionDeniedException("Unable to use network with id= " + ((network != null)? ((NetworkVO)network).getUuid() : "") + ", permission denied"); } }else{ List networkMap = _networksDao.listBy(owner.getId(), network.getId()); if (networkMap == null || networkMap.isEmpty()) { - throw new PermissionDeniedException("Unable to use network with id= " + network.getId() + ", permission denied"); + throw new PermissionDeniedException("Unable to use network with id= " + ((network != null)? 
((NetworkVO)network).getUuid() : "") + ", permission denied"); } } } else { if (!isNetworkAvailableInDomain(network.getId(), owner.getDomainId())) { - throw new PermissionDeniedException("Shared network id=" + network.getUuid() + " is not available in domain id=" + owner.getDomainId()); + throw new PermissionDeniedException("Shared network id=" + ((network != null)? ((NetworkVO)network).getUuid() : "") + " is not available in domain id=" + owner.getDomainId()); } } } @@ -1660,7 +1673,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { if (networkDomainMap.subdomainAccess) { Set parentDomains = _domainMgr.getDomainParentIds(domainId); - if (parentDomains.contains(domainId)) { + if (parentDomains.contains(networkDomainId)) { return true; } } @@ -2052,17 +2065,24 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { public NicVO getPlaceholderNicForRouter(Network network, Long podId) { List nics = _nicDao.listPlaceholderNicsByNetworkIdAndVmType(network.getId(), VirtualMachine.Type.DomainRouter); for (NicVO nic : nics) { - if (nic.getReserver() == null && nic.getIp4Address() != null) { + if (nic.getReserver() == null && (nic.getIp4Address() != null || nic.getIp6Address() != null)) { if (podId == null) { return nic; } else { //return nic only when its ip address belong to the pod range (for the Basic zone case) List vlans = _vlanDao.listVlansForPod(podId); for (Vlan vlan : vlans) { - IpAddress ip = _ipAddressDao.findByIpAndNetworkId(network.getId(), nic.getIp4Address()); - if (ip != null && ip.getVlanId() == vlan.getId()) { - return nic; - } + if (nic.getIp4Address() != null) { + IpAddress ip = _ipAddressDao.findByIpAndNetworkId(network.getId(), nic.getIp4Address()); + if (ip != null && ip.getVlanId() == vlan.getId()) { + return nic; + } + } else { + UserIpv6AddressVO ipv6 = _ipv6Dao.findByNetworkIdAndIp(network.getId(), nic.getIp6Address()); + if (ipv6 != null && ipv6.getVlanId() == vlan.getId()) { + return nic; + } + } } } } diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 1533ca9bc4f..c2af8e8fe60 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -65,6 +65,7 @@ import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.bouncycastle.util.IPAddress; +import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; @@ -317,7 +318,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } else { CloudRuntimeException ex = new CloudRuntimeException("Multiple generic soure NAT IPs provided for network"); // see the IPAddressVO.java class. 
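One subtle fix above is in the subdomain-access check: the code collected the parent ids of the requesting domain and then tested whether that set contains the requesting domain's own id, rather than the id of the domain the shared network belongs to. A self-contained sketch of the corrected containment test (plain `long` ids stand in for the domain hierarchy; the real code obtains the set via `_domainMgr.getDomainParentIds`):

```java
import java.util.Set;

final class SubdomainAccess {
    /**
     * A network shared with subdomain access is usable from a domain when the
     * network's own domain appears among that domain's ancestors.
     */
    static boolean isAvailable(long networkDomainId, Set<Long> parentIdsOfRequestingDomain,
                               boolean subdomainAccess) {
        // The bug was testing contains(domainId) - the requester's own id -
        // instead of contains(networkDomainId).
        return subdomainAccess && parentIdsOfRequestingDomain.contains(networkDomainId);
    }
}
```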
- ex.addProxyObject("user_ip_address", ip.getAssociatedWithNetworkId(), "networkId"); + IPAddressVO ipAddr = ApiDBUtils.findIpAddressById(ip.getAssociatedWithNetworkId()); + String ipAddrUuid = ip.getAssociatedWithNetworkId().toString(); + if ( ipAddr != null ){ + ipAddrUuid = ipAddr.getUuid(); + } + ex.addProxyObject(ipAddrUuid, "networkId"); throw ex; } } @@ -591,8 +597,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override @ActionEvent(eventType = EventTypes.EVENT_PORTABLE_IP_RELEASE, eventDescription = "disassociating portable Ip", async = true) - public boolean releasePortableIpAddress(long ipAddressId) throws InsufficientAddressCapacityException { - return releaseIpAddressInternal(ipAddressId); + public boolean releasePortableIpAddress(long ipAddressId) { + try { + return releaseIpAddressInternal(ipAddressId); + } catch (Exception e) { + return false; + } } @Override @@ -873,14 +883,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // don't allow releasing system ip address if (ipVO.getSystem()) { InvalidParameterValueException ex = new InvalidParameterValueException("Can't release system IP address with specified id"); - ex.addProxyObject(ipVO, ipVO.getId(), "systemIpAddrId"); + ex.addProxyObject(ipVO.getUuid(), "systemIpAddrId"); throw ex; } boolean success = _networkMgr.disassociatePublicIpAddress(ipAddressId, userId, caller); if (success) { - if (!ipVO.isPortable()) { + if (ipVO.isPortable()) { return success; } Long networkId = ipVO.getAssociatedWithNetworkId(); @@ -990,12 +1000,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (ntwkOff == null || ntwkOff.isSystemOnly()) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find network offering by specified id"); if (ntwkOff != null) { - ex.addProxyObject(ntwkOff, networkOfferingId, "networkOfferingId"); - // Get the VO object's table name. 
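Several hunks above adjust the lifecycle of portable public IPs: during network cleanup a portable IP only has its network association cleared (it stays allocated to the owner), while an explicit `releasePortableIpAddress` removes the provisioned VLAN and IP rows. A condensed sketch of that split, using the DAO and field names visible in the hunks; these are not the actual methods:

```java
// Illustrative only: portable IPs survive network teardown but not an explicit release.
void onNetworkCleanup(IPAddressVO ip) {
    if (ip.isPortable()) {
        // Keep the IP with its owner; just break the association with the guest network.
        ip.setAssociatedWithNetworkId(null);
        _ipAddressDao.update(ip.getId(), ip);
    } else {
        markIpAsUnavailable(ip.getId());   // a normal public IP goes back to the pool
    }
}

boolean onExplicitRelease(IPAddressVO ip) {
    _vlanDao.remove(ip.getVlanId());       // drop the VLAN row provisioned for this IP
    _ipAddressDao.remove(ip.getId());      // and the IP row itself
    return true;
}
```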
- String tablename = AnnotationHelper.getTableName(ntwkOff); - if (tablename != null) { - ex.addProxyObject(tablename, networkOfferingId, "networkOfferingId"); - } + ex.addProxyObject(ntwkOff.getUuid(), "networkOfferingId"); } throw ex; } @@ -1029,7 +1034,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { // See DataCenterVO.java PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation since specified Zone is currently disabled"); - ex.addProxyObject(zone, zoneId, "zoneId"); + ex.addProxyObject(zone.getUuid(), "zoneId"); throw ex; } @@ -1251,13 +1256,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // Can add vlan range only to the network which allows it if (createVlan && !ntwkOff.getSpecifyIpRanges()) { InvalidParameterValueException ex = new InvalidParameterValueException("Network offering with specified id doesn't support adding multiple ip ranges"); - ex.addProxyObject(ntwkOff, ntwkOff.getId(), "networkOfferingId"); - String tablename = AnnotationHelper.getTableName(ntwkOff); - if (tablename != null) { - ex.addProxyObject(tablename, ntwkOff.getId(), "networkOfferingId"); - } else { - s_logger.info("\nCould not retrieve table name (annotation) from " + tablename + " VO proxy object\n"); - } + ex.addProxyObject(ntwkOff.getUuid(), "networkOfferingId"); throw ex; } @@ -1286,10 +1285,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Network offering can't be used for VPC networks"); } - if(aclId == null){ - //Use default deny all ACL, when aclId is not specified - aclId = NetworkACL.DEFAULT_DENY; - } else { + if(aclId != null){ NetworkACL acl = _networkACLDao.findById(aclId); if(acl == null){ throw new InvalidParameterValueException("Unable to find specified NetworkACL"); @@ -1342,7 +1338,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } catch (ResourceUnavailableException ex) { s_logger.warn("Failed to implement persistent guest network " + network + "due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement persistent guest network"); - e.addProxyObject(network, network.getId(), "networkId"); + e.addProxyObject(network.getUuid(), "networkId"); throw e; } } @@ -1428,7 +1424,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (!_projectMgr.canAccessProjectAccount(caller, project.getProjectAccountId())) { // getProject() returns type ProjectVO. 
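Two later hunks reason about IPv4 CIDR arithmetic: `updateGuestNetwork` below rejects a `guestVmCidr` whose IP range is identical to the network CIDR even though the strings differ, and `VirtualNetworkApplianceManagerImpl` further on derives the start IP of the router subnet from the NIC's gateway and netmask for the new `DnsmasqTO` field. A self-contained, JDK-only sketch of that arithmetic (the real code uses `com.cloud.utils.net.NetUtils`, and its exact start-of-range convention may differ):

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

public class CidrMath {
    /** Pack a dotted-quad IPv4 address into the low 32 bits of a long. */
    static long toLong(String ip) throws UnknownHostException {
        byte[] b = InetAddress.getByName(ip).getAddress();
        return ((b[0] & 0xFFL) << 24) | ((b[1] & 0xFFL) << 16) | ((b[2] & 0xFFL) << 8) | (b[3] & 0xFFL);
    }

    static String toDotted(long ip) {
        return ((ip >> 24) & 0xFF) + "." + ((ip >> 16) & 0xFF) + "." + ((ip >> 8) & 0xFF) + "." + (ip & 0xFF);
    }

    public static void main(String[] args) throws UnknownHostException {
        String gateway = "10.1.1.1", netmask = "255.255.255.0";   // sample values only
        long mask = toLong(netmask);
        long network = toLong(gateway) & mask;                    // 10.1.1.0
        long broadcast = network | (~mask & 0xFFFFFFFFL);         // 10.1.1.255
        int prefix = Long.bitCount(mask);                         // 24
        System.out.println("cidr=" + toDotted(network) + "/" + prefix
                + " rangeStart=" + toDotted(network + 1)          // one common "start of subnet" convention
                + " rangeEnd=" + toDotted(broadcast - 1));
        // Two CIDR strings that differ textually (e.g. host bits set in one of them)
        // can still map to the same [rangeStart, rangeEnd]; that equality is what the
        // guestVmCidr guard below refuses to treat as a new IP reservation.
    }
}
```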
InvalidParameterValueException ex = new InvalidParameterValueException("Account " + caller + " cannot access specified project id"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(project.getUuid(), "projectId"); throw ex; } permittedAccounts.add(project.getProjectAccountId()); @@ -1753,14 +1749,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // see NetworkVO.java InvalidParameterValueException ex = new InvalidParameterValueException("unable to find network with specified id"); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(String.valueOf(networkId), "networkId"); throw ex; } // don't allow to delete system network if (isNetworkSystem(network)) { InvalidParameterValueException ex = new InvalidParameterValueException("Network with specified id is system and can't be removed"); - ex.addProxyObject(network, network.getId(), "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } @@ -1789,7 +1785,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { NetworkVO network = _networksDao.findById(networkId); if (network == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Network with specified id doesn't exist"); - ex.addProxyObject("networks", networkId, "networkId"); + ex.addProxyObject(networkId.toString(), "networkId"); throw ex; } @@ -1932,14 +1928,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (network == null) { // see NetworkVO.java InvalidParameterValueException ex = new InvalidParameterValueException("Specified network id doesn't exist in the system"); - ex.addProxyObject("networks", networkId, "networkId"); + ex.addProxyObject(String.valueOf(networkId), "networkId"); throw ex; } //perform below validation if the network is vpc network if (network.getVpcId() != null && networkOfferingId != null) { Vpc vpc = _vpcMgr.getVpc(network.getVpcId()); - _vpcMgr.validateNtwkOffForNtwkInVpc(networkId, networkOfferingId, null, null, vpc, null, _accountMgr.getAccount(network.getAccountId())); + _vpcMgr.validateNtwkOffForNtwkInVpc(networkId, networkOfferingId, null, null, vpc, null, _accountMgr.getAccount(network.getAccountId()), null); } // don't allow to update network in Destroy state @@ -1988,14 +1984,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (networkOfferingId != null) { if (networkOffering == null || networkOffering.isSystemOnly()) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find network offering with specified id"); - ex.addProxyObject(networkOffering, networkOfferingId, "networkOfferingId"); + ex.addProxyObject(networkOfferingId.toString(), "networkOfferingId"); throw ex; } // network offering should be in Enabled state if (networkOffering.getState() != NetworkOffering.State.Enabled) { InvalidParameterValueException ex = new InvalidParameterValueException("Network offering with specified id is not in " + NetworkOffering.State.Enabled + " state, can't upgrade to it"); - ex.addProxyObject(networkOffering, networkOfferingId, "networkOfferingId"); + ex.addProxyObject(networkOffering.getUuid(), "networkOfferingId"); throw ex; } //can't update from vpc to non-vpc network offering @@ -2017,7 +2013,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (changeCidr) { if (!checkForNonStoppedVmInNetwork(network.getId())) { InvalidParameterValueException ex = new 
InvalidParameterValueException("All user vm of network of specified id should be stopped before changing CIDR!"); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } @@ -2120,6 +2116,21 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } + // In some scenarios even though guesVmCidr and network CIDR do not appear similar but + // the IP ranges exactly matches, in these special cases make sure no Reservation gets applied + if (network.getNetworkCidr() == null) { + if (NetUtils.isSameIpRange(guestVmCidr, network.getCidr()) && !guestVmCidr.equals(network.getCidr())) { + throw new InvalidParameterValueException("The Start IP and End IP of guestvmcidr: "+ guestVmCidr + " and CIDR: " + network.getCidr() + " are same, " + + "even though both the cidrs appear to be different. As a precaution no IP Reservation will be applied."); + } + } else { + if(NetUtils.isSameIpRange(guestVmCidr, network.getNetworkCidr()) && !guestVmCidr.equals(network.getNetworkCidr())) { + throw new InvalidParameterValueException("The Start IP and End IP of guestvmcidr: "+ guestVmCidr + " and Network CIDR: " + network.getNetworkCidr() + " are same, " + + "even though both the cidrs appear to be different. As a precaution IP Reservation will not be affected. If you want to reset IP Reservation, " + + "specify guestVmCidr to be: " + network.getNetworkCidr()); + } + } + // When reservation is applied for the first time, network_cidr will be null // Populate it with the actual network cidr if (network.getNetworkCidr() == null) { @@ -2150,7 +2161,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { s_logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network elements and resources as a part of update to network of specified id"); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } else { @@ -2169,13 +2180,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (!_networkMgr.shutdownNetwork(network.getId(), context, true)) { s_logger.warn("Failed to shutdown the network as a part of update to network with specified id"); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network as a part of update of specified network id"); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } } else { CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network elements and resources as a part of update to network with specified id; network is in wrong state: " + network.getState()); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } @@ -2186,7 +2197,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { boolean validStateToImplement = (networkState == Network.State.Implemented || networkState == Network.State.Setup || networkState == Network.State.Allocated); if (restartNetwork && !validStateToImplement) { CloudRuntimeException ex = new CloudRuntimeException("Failed to implement the network elements and resources as a part of update to network with specified id; network is in wrong 
state: " + networkState); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } @@ -2237,7 +2248,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } catch (Exception ex) { s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id) elements and resources as a part of network update"); - e.addProxyObject(network, networkId, "networkId"); + e.addProxyObject(network.getUuid(), "networkId"); throw e; } } @@ -2255,7 +2266,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { "f network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified" + " id) elements and resources as a part of network update"); - e.addProxyObject(network, networkId, "networkId"); + e.addProxyObject(network.getUuid(), "networkId"); throw e; } } @@ -2523,7 +2534,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkVO network = _physicalNetworkDao.findById(id); if (network == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Physical Network with specified id doesn't exist in the system"); - ex.addProxyObject(network, id, "physicalNetworkId"); + ex.addProxyObject(id.toString(), "physicalNetworkId"); throw ex; } @@ -2531,7 +2542,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { DataCenter zone = _dcDao.findById(network.getDataCenterId()); if (zone == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Zone with id=" + network.getDataCenterId() + " doesn't exist in the system"); - ex.addProxyObject(zone, network.getDataCenterId(), "dataCenterId"); + ex.addProxyObject(String.valueOf(network.getDataCenterId()), "dataCenterId"); throw ex; } if (newVnetRangeString != null) { @@ -2545,7 +2556,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (removeVlan != null){ List tokens = processVlanRange(network,removeVlan); boolean result = removeVlanRange(network, tokens.get(0), tokens.get(1)); - } if (tags != null && tags.size() > 1) { @@ -2791,9 +2801,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { return true; } - private boolean physicalNetworkHasAllocatedVnets(long zoneId, long physicalNetworkId) { - return !_dcDao.listAllocatedVnets(physicalNetworkId).isEmpty(); - } @Override @ActionEvent(eventType = EventTypes.EVENT_PHYSICAL_NETWORK_DELETE, eventDescription = "deleting physical network", async = true) @@ -2804,7 +2811,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkVO pNetwork = _physicalNetworkDao.findById(physicalNetworkId); if (pNetwork == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Physical Network with specified id doesn't exist in the system"); - ex.addProxyObject(pNetwork, physicalNetworkId, "physicalNetworkId"); + ex.addProxyObject(physicalNetworkId.toString(), "physicalNetworkId"); throw ex; } @@ -3052,6 +3059,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _accountGuestVlanMapDao.update(guestVlanMapId, accountGuestVlanMapVO); } else { Transaction txn = Transaction.currentTxn(); + txn.start(); accountGuestVlanMapVO = new 
AccountGuestVlanMapVO(vlanOwner.getAccountId(), physicalNetworkId); accountGuestVlanMapVO.setGuestVlanRange(startVlan + "-" + endVlan); _accountGuestVlanMapDao.persist(accountGuestVlanMapVO); @@ -3100,7 +3108,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { Account account = _accountDao.findActiveAccount(accountName, domainId); if (account == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find account " + accountName); - ex.addProxyObject("domain", domainId, "domainId"); + DomainVO domain = ApiDBUtils.findDomainById(domainId); + String domainUuid = domainId.toString(); + if (domain != null ){ + domainUuid = domain.getUuid(); + } + ex.addProxyObject(domainUuid, "domainId"); throw ex; } else { accountId = account.getId(); @@ -3112,7 +3125,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { Project project = _projectMgr.getProject(projectId); if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project by id " + projectId); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(projectId.toString(), "projectId"); throw ex; } accountId = project.getProjectAccountId(); @@ -3210,7 +3223,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkVO network = _physicalNetworkDao.findById(physicalNetworkId); if (network == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Physical Network with specified id doesn't exist in the system"); - ex.addProxyObject(network, physicalNetworkId, "physicalNetworkId"); + ex.addProxyObject(physicalNetworkId.toString(), "physicalNetworkId"); throw ex; } @@ -3219,7 +3232,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkVO destNetwork = _physicalNetworkDao.findById(destinationPhysicalNetworkId); if (destNetwork == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Destination Physical Network with specified id doesn't exist in the system"); - ex.addProxyObject(destNetwork, destinationPhysicalNetworkId, "destinationPhysicalNetworkId"); + ex.addProxyObject(destinationPhysicalNetworkId.toString(), "destinationPhysicalNetworkId"); throw ex; } } @@ -3646,7 +3659,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkVO network = _physicalNetworkDao.findById(physicalNetworkId); if (network == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Physical Network with specified id doesn't exist in the system"); - ex.addProxyObject(network, physicalNetworkId, "physicalNetworkId"); + ex.addProxyObject(physicalNetworkId.toString(), "physicalNetworkId"); throw ex; } @@ -3754,14 +3767,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } - - + private boolean getAllowSubdomainAccessGlobal() { return _allowSubdomainNetworkAccess; } - - + @Override public List> listTrafficTypeImplementor(ListTrafficTypeImplementorsCmd cmd) { String type = cmd.getTrafficType(); @@ -3820,7 +3831,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (pNtwk == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find a physical network" + " having the given id"); - ex.addProxyObject("physical_network", physicalNetworkId, "physicalNetworkId"); + 
ex.addProxyObject(String.valueOf(physicalNetworkId), "physicalNetworkId"); throw ex; } @@ -3914,7 +3925,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (userVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Virtual mahine id does not exist"); - ex.addProxyObject(userVm, vmId, "vmId"); + ex.addProxyObject(vmId.toString(), "vmId"); throw ex; } _accountMgr.checkAccess(caller, null, true, userVm); diff --git a/server/src/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/com/cloud/network/firewall/FirewallManagerImpl.java index 334a5a108e6..f7275b0e237 100644 --- a/server/src/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/com/cloud/network/firewall/FirewallManagerImpl.java @@ -159,6 +159,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, rule.getIcmpType(), null, rule.getType(), rule.getNetworkId(), rule.getTrafficType()); } + @Override public FirewallRule createIngressFirewallRule(FirewallRule rule) throws NetworkRuleConflictException { Account caller = UserContext.current().getCaller(); Long sourceIpAddressId = rule.getSourceIpAddressId(); diff --git a/server/src/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/com/cloud/network/guru/DirectNetworkGuru.java index 84008c0f050..b4577ac096a 100755 --- a/server/src/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectNetworkGuru.java @@ -221,7 +221,6 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException { - //FIXME - save ipv6 informaiton in the placeholder nic Transaction txn = Transaction.currentTxn(); txn.start(); _networkMgr.allocateDirectIp(nic, dc, vm, network, requestedIp4Addr, requestedIp6Addr); @@ -229,8 +228,8 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { if (vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " and ipv6 address " + requestedIp6Addr + " for the network " + network); - _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), VirtualMachine.Type.DomainRouter); + s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " and ipv6 address " + nic.getIp6Address() + " for the network " + network); + _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), nic.getIp6Address(), VirtualMachine.Type.DomainRouter); } } txn.commit(); diff --git a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index cf27986a69d..f21e352ff76 100755 --- a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -203,7 +203,7 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); if (placeholderNic == null) { s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " for the network " + network); - _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), VirtualMachine.Type.DomainRouter); + _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), null, VirtualMachine.Type.DomainRouter); } } 
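The `savePlaceholderNic` signature change above adds an IPv6 address to the router's placeholder NIC, and `VirtualNetworkApplianceManagerImpl` below reads it back when the router is re-created, so a re-deployed router keeps the same addresses. A simplified sketch of that round trip, using the method names from these hunks rather than the actual control flow:

```java
// Illustrative only: persist the router's addresses on first deployment, reuse them later.
Nic placeholder = _networkModel.getPlaceholderNicForRouter(guestNetwork, podId);
if (placeholder == null) {
    // First router on this network: remember both address families.
    _networkMgr.savePlaceholderNic(guestNetwork, nic.getIp4Address(), nic.getIp6Address(),
            VirtualMachine.Type.DomainRouter);
} else {
    // Re-deployment: request whichever addresses the placeholder recorded.
    String requestedIp4 = placeholder.getIp4Address();  // may be null on an IPv6-only network
    String requestedIp6 = placeholder.getIp6Address();  // persisted only as of this patch
    // ... feed these into the NicProfile used to create the new router NIC ...
}
```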
txn.commit(); diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 0118ca534e1..67d31ab3a4e 100755 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -947,7 +947,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements UserVm vm = _vmDao.findById(instanceId); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { InvalidParameterValueException ex = new InvalidParameterValueException("Invalid instance id specified"); - ex.addProxyObject(vm, instanceId, "instanceId"); + if (vm == null) { + ex.addProxyObject(instanceId.toString(), "instanceId"); + } else { + ex.addProxyObject(vm.getUuid(), "instanceId"); + } throw ex; } @@ -969,9 +973,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (nicInSameNetwork == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("VM " + instanceId - + " cannot be added because it doesn't belong in the same network."); - ex.addProxyObject(vm, instanceId, "instanceId"); + InvalidParameterValueException ex = + new InvalidParameterValueException("VM with id specified cannot be added because it doesn't belong in the same network."); + ex.addProxyObject(vm.getUuid(), "instanceId"); throw ex; } @@ -1024,7 +1028,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!success) { CloudRuntimeException ex = new CloudRuntimeException("Failed to add specified loadbalancerruleid for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); // TBD: Also pack in the instanceIds in the exception using the // right VO object or table name. 
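The `assignToLoadBalancerRule` hunks below tighten the error reporting when a VM has no NIC on the rule's network; the check itself sits outside the shown hunks and walks the VM's NICs looking for one on that network. A short sketch of that validation, assuming `_nicDao.listByVmId` as used elsewhere in this patch (the real implementation may query by network id directly):

```java
// Illustrative only: a VM can be assigned to an LB rule only if one of its
// NICs sits on the load balancer's network.
Nic nicInSameNetwork = null;
for (Nic candidate : _nicDao.listByVmId(vm.getId())) {
    if (candidate.getNetworkId() == loadBalancer.getNetworkId()) {
        nicInSameNetwork = candidate;
        break;
    }
}
if (nicInSameNetwork == null) {
    InvalidParameterValueException ex = new InvalidParameterValueException(
            "VM with id specified cannot be added because it doesn't belong in the same network.");
    ex.addProxyObject(vm.getUuid(), "instanceId");
    throw ex;
}
```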
throw ex; @@ -1075,7 +1079,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); CloudRuntimeException ex = new CloudRuntimeException( "Failed to remove specified load balancer rule id for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; } success = true; @@ -1098,7 +1102,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!success) { CloudRuntimeException ex = new CloudRuntimeException( "Failed to remove specified load balancer rule id for vms " + instanceIds); - ex.addProxyObject(loadBalancer, loadBalancerId, "loadBalancerId"); + ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; } return success; @@ -1368,12 +1372,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (ipAddr == null || !ipAddr.readyToUse()) { InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to create load balancer rule, invalid IP address id specified"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + if (ipAddr == null){ + ex.addProxyObject(String.valueOf(sourceIpId), "sourceIpId"); + } + else{ + ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); + } throw ex; } else if (ipAddr.isOneToOneNat()) { InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to create load balancer rule; specified sourceip id has static nat enabled"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); throw ex; } @@ -1384,7 +1393,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (networkId == null) { InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to create load balancer rule ; specified sourceip id is not associated with any network"); - ex.addProxyObject(ipAddr, sourceIpId, "sourceIpId"); + ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); throw ex; } @@ -1395,7 +1404,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements Purpose.LoadBalancing, FirewallRuleType.User, networkId, null); LoadBalancerVO newRule = new LoadBalancerVO(xId, name, description, - sourceIpId, srcPort, srcPort, algorithm, + sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); // verify rule is supported by Lb provider of the network @@ -1826,9 +1835,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } - IPAddressVO addr = _ipAddressDao.findById(loadBalancer.getSourceIpAddressId()); - List userVms = _vmDao.listVirtualNetworkInstancesByAcctAndZone(loadBalancer.getAccountId(), - addr.getDataCenterId(), loadBalancer.getNetworkId()); + List userVms = _vmDao.listVirtualNetworkInstancesByAcctAndNetwork(loadBalancer.getAccountId(), + loadBalancer.getNetworkId()); for (UserVmVO userVm : userVms) { // if the VM is destroyed, being expunged, in an error state, or in @@ -1924,6 +1932,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); sb.and("sourceIpAddress", sb.entity().getSourceIpAddressId(), SearchCriteria.Op.EQ); sb.and("networkId", sb.entity().getNetworkId(), SearchCriteria.Op.EQ); + sb.and("scheme", sb.entity().getScheme(), SearchCriteria.Op.EQ); if (instanceId != 
null) { SearchBuilder lbVMSearch = _lb2VmMapDao.createSearchBuilder(); @@ -2083,7 +2092,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Lb)) { InvalidParameterValueException ex = new InvalidParameterValueException( "LB service is not supported in specified network id"); - ex.addProxyObject(network, network.getId(), "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 4e3b90383df..40db31ff6a5 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1180,7 +1180,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V _alertMgr.sendAlert(AlertManager.ALERT_TYPE_DOMAIN_ROUTER, backupRouter.getDataCenterId(), backupRouter.getPodIdToDeployIn(), title, title); try { - rebootRouter(backupRouter.getId(), false); + rebootRouter(backupRouter.getId(), true); } catch (ConcurrentOperationException e) { s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } catch (ResourceUnavailableException e) { @@ -1439,9 +1439,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V assert guestNetwork.getTrafficType() == TrafficType.Guest; // 1) Get deployment plan and find out the list of routers - boolean isPodBased = (dest.getDataCenter().getNetworkType() == NetworkType.Basic || - _networkModel.areServicesSupportedInNetwork(guestNetwork.getId(), Service.SecurityGroup)) - && guestNetwork.getTrafficType() == TrafficType.Guest; + boolean isPodBased = (dest.getDataCenter().getNetworkType() == NetworkType.Basic); // dest has pod=null, for Basic Zone findOrDeployVRs for all Pods List destinations = new ArrayList(); @@ -1743,24 +1741,27 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V s_logger.debug("Adding nic for Virtual Router in Guest network " + guestNetwork); String defaultNetworkStartIp = null, defaultNetworkStartIpv6 = null; if (!setupPublicNetwork) { - if (guestNetwork.getCidr() != null) { Nic placeholder = _networkModel.getPlaceholderNicForRouter(guestNetwork, plan.getPodId()); - if (placeholder != null) { - s_logger.debug("Requesting ip address " + placeholder.getIp4Address() + " stored in placeholder nic for the network " + guestNetwork); + if (guestNetwork.getCidr() != null) { + if (placeholder != null && placeholder.getIp4Address() != null) { + s_logger.debug("Requesting ipv4 address " + placeholder.getIp4Address() + " stored in placeholder nic for the network " + guestNetwork); defaultNetworkStartIp = placeholder.getIp4Address(); } else { String startIp = _networkModel.getStartIpAddress(guestNetwork.getId()); if (startIp != null && _ipAddressDao.findByIpAndSourceNetworkId(guestNetwork.getId(), startIp).getAllocatedTime() == null) { defaultNetworkStartIp = startIp; } else if (s_logger.isDebugEnabled()){ - s_logger.debug("First ip " + startIp + " in network id=" + guestNetwork.getId() + + s_logger.debug("First ipv4 " + startIp + " in network id=" + guestNetwork.getId() + " is already allocated, can't use it for domain router; will get random ip address from the range"); } } } - //FIXME - get ipv6 stored in the placeholder if (guestNetwork.getIp6Cidr() != null) { + if (placeholder != 
null && placeholder.getIp6Address() != null) { + s_logger.debug("Requesting ipv6 address " + placeholder.getIp6Address() + " stored in placeholder nic for the network " + guestNetwork); + defaultNetworkStartIpv6 = placeholder.getIp6Address(); + } else { String startIpv6 = _networkModel.getStartIpv6Address(guestNetwork.getId()); if (startIpv6 != null && _ipv6Dao.findByNetworkIdAndIp(guestNetwork.getId(), startIpv6) == null) { defaultNetworkStartIpv6 = startIpv6; @@ -1770,6 +1771,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } } } + } NicProfile gatewayNic = new NicProfile(defaultNetworkStartIp, defaultNetworkStartIpv6); if (setupPublicNetwork) { @@ -2476,18 +2478,21 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V //Reapply dhcp and dns configuration. if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Dhcp, provider)) { List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(guestNetworkId, NicIpAlias.state.revoked); - s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to apply on the router as a part of dhco configuration"); + s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); List revokedIpAliasTOs = new ArrayList(); for (NicIpAliasVO revokedAliasVO : revokedIpAliasVOs) { revokedIpAliasTOs.add(new IpAliasTO(revokedAliasVO.getIp4Address(), revokedAliasVO.getNetmask(), revokedAliasVO.getAliasCount().toString())); } List aliasVOs = _nicIpAliasDao.listByNetworkIdAndState(guestNetworkId, NicIpAlias.state.active); - s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhco configuration"); + s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); List activeIpAliasTOs = new ArrayList(); for (NicIpAliasVO aliasVO : aliasVOs) { activeIpAliasTOs.add(new IpAliasTO(aliasVO.getIp4Address(), aliasVO.getNetmask(), aliasVO.getAliasCount().toString())); } + if (revokedIpAliasTOs.size() != 0 || activeIpAliasTOs.size() != 0){ createDeleteIpAliasCommand(router, revokedIpAliasTOs, activeIpAliasTOs, guestNetworkId, cmds); + configDnsMasq(router, _networkDao.findById(guestNetworkId), cmds); + } } } @@ -2796,7 +2801,13 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V for (VlanVO vlan : vlanList) { vlanDbIdList.add(vlan.getId()); } + if (dc.getNetworkType() == NetworkType.Basic) { routerPublicIP = _networkMgr.assignPublicIpAddressFromVlans(router.getDataCenterId(), vm.getPodIdToDeployIn(), caller, Vlan.VlanType.DirectAttached, vlanDbIdList, nic.getNetworkId(), null, false); + } + else { + routerPublicIP = _networkMgr.assignPublicIpAddressFromVlans(router.getDataCenterId(), null, caller, Vlan.VlanType.DirectAttached, vlanDbIdList, nic.getNetworkId(), null, false); + } + routerAliasIp = routerPublicIP.getAddress().addr(); } } @@ -2847,13 +2858,13 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V Commands cmds = new Commands(OnError.Continue); List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.state.revoked); - s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to apply on the router as a part of dhco configuration"); + s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); List revokedIpAliasTOs = new ArrayList(); for (NicIpAliasVO 
revokedAliasVO : revokedIpAliasVOs) { revokedIpAliasTOs.add(new IpAliasTO(revokedAliasVO.getIp4Address(), revokedAliasVO.getNetmask(), revokedAliasVO.getAliasCount().toString())); } List aliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.state.active); - s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhco configuration"); + s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); List activeIpAliasTOs = new ArrayList(); for (NicIpAliasVO aliasVO : aliasVOs) { activeIpAliasTOs.add(new IpAliasTO(aliasVO.getIp4Address(), aliasVO.getNetmask(), aliasVO.getAliasCount().toString())); @@ -3371,20 +3382,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V DataCenterVO dcVo = _dcDao.findById(router.getDataCenterId()); Nic defaultNic = findGatewayIp(vm.getId()); String gatewayIp = defaultNic.getGateway(); - boolean needGateway = true; if (gatewayIp != null && !gatewayIp.equals(nic.getGateway())) { - needGateway = false; - GuestOSVO guestOS = _guestOSDao.findById(vm.getGuestOSId()); - // Do set dhcp:router option for non-default nic on certain OS(including Windows), and leave other OS unset. - // Because some OS(e.g. CentOS) would set routing on wrong interface - for (String name : _guestOSNeedGatewayOnNonDefaultNetwork) { - if (guestOS.getDisplayName().startsWith(name)) { - needGateway = true; - break; - } - } - } - if (!needGateway) { gatewayIp = "0.0.0.0"; } dhcpCommand.setDefaultRouter(gatewayIp); @@ -3411,10 +3409,16 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V List ipAliasVOList = _nicIpAliasDao.getAliasIpForVm(router.getId()); List ipList = new ArrayList(); - NicVO router_guest_ip = _nicDao.findByNtwkIdAndInstanceId(network.getId(), router.getId()); - ipList.add(new DnsmasqTO(router_guest_ip.getIp4Address(),router_guest_ip.getGateway(),router_guest_ip.getNetmask())); + NicVO router_guest_nic = _nicDao.findByNtwkIdAndInstanceId(network.getId(), router.getId()); + String cidr = NetUtils.getCidrFromGatewayAndNetmask(router_guest_nic.getGateway(), router_guest_nic.getNetmask()); + String[] cidrPair = cidr.split("\\/"); + String cidrAddress = cidrPair[0]; + long cidrSize = Long.parseLong(cidrPair[1]); + String startIpOfSubnet = NetUtils.getIpRangeStartIpFromCidr(cidrAddress, cidrSize); + + ipList.add(new DnsmasqTO(router_guest_nic.getIp4Address(),router_guest_nic.getGateway(),router_guest_nic.getNetmask(), startIpOfSubnet)); for (NicIpAliasVO ipAliasVO : ipAliasVOList) { - DnsmasqTO dnsmasqTO = new DnsmasqTO(ipAliasVO.getStartIpOfSubnet(), ipAliasVO.getGateway(), ipAliasVO.getNetmask()); + DnsmasqTO dnsmasqTO = new DnsmasqTO(ipAliasVO.getIp4Address(), ipAliasVO.getGateway(), ipAliasVO.getNetmask(), ipAliasVO.getStartIpOfSubnet()); ipList.add(dnsmasqTO); } DataCenterVO dcvo = _dcDao.findById(router.getDataCenterId()); diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index 9992b7ca01e..711549903fa 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -949,9 +949,9 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (router.getVpcId() != null) { if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, 
Service.NetworkACL, Provider.VPCVirtualRouter)) { List networkACLs = _networkACLMgr.listNetworkACLItems(guestNetworkId); - s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + router - + " start for guest network id=" + guestNetworkId); - if (!networkACLs.isEmpty()) { + if ((networkACLs != null) && !networkACLs.isEmpty()) { + s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + router + + " start for guest network id=" + guestNetworkId); createNetworkACLsCommands(networkACLs, router, cmds, guestNetworkId, false); } } diff --git a/server/src/com/cloud/network/rules/RulesManager.java b/server/src/com/cloud/network/rules/RulesManager.java index cede987280d..201d79db9c6 100644 --- a/server/src/com/cloud/network/rules/RulesManager.java +++ b/server/src/com/cloud/network/rules/RulesManager.java @@ -32,47 +32,20 @@ import com.cloud.vm.VirtualMachine; */ public interface RulesManager extends RulesService { - boolean applyPortForwardingRules(long ipAddressId, boolean continueOnError, Account caller); - - boolean applyStaticNatRulesForIp(long sourceIpId, boolean continueOnError, Account caller, boolean forRevoke); - boolean applyPortForwardingRulesForNetwork(long networkId, boolean continueOnError, Account caller); boolean applyStaticNatRulesForNetwork(long networkId, boolean continueOnError, Account caller); - void checkIpAndUserVm(IpAddress ipAddress, UserVm userVm, Account caller); - void checkRuleAndUserVm(FirewallRule rule, UserVm userVm, Account caller); boolean revokeAllPFAndStaticNatRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException; boolean revokeAllPFStaticNatRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException; - List listFirewallRulesByIp(long ipAddressId); - - /** - * Returns a list of port forwarding rules that are ready for application - * to the network elements for this ip. - * - * @param ip - * @return List of PortForwardingRule - */ - List listPortForwardingRulesForApplication(long ipId); - - List gatherPortForwardingRulesForApplication(List addrs); - boolean revokePortForwardingRulesForVm(long vmId); - boolean revokeStaticNatRulesForVm(long vmId); - FirewallRule[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose purpose, boolean openFirewall, Account caller, int... ports) throws NetworkRuleConflictException; - boolean releasePorts(long ipId, String protocol, FirewallRule.Purpose purpose, int... 
ports); - - List listByNetworkId(long networkId); - - boolean applyStaticNatForIp(long sourceIpId, boolean continueOnError, Account caller, boolean forRevoke); - boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller); void getSystemIpAndEnableStaticNatForVm(VirtualMachine vm, boolean getNewIp) throws InsufficientAddressCapacityException; diff --git a/server/src/com/cloud/network/rules/RulesManagerImpl.java b/server/src/com/cloud/network/rules/RulesManagerImpl.java index 883455377f4..41bf2b3af65 100755 --- a/server/src/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/com/cloud/network/rules/RulesManagerImpl.java @@ -24,7 +24,6 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.user.firewall.ListPortForwardingRulesCmd; import org.apache.log4j.Logger; @@ -54,7 +53,6 @@ import com.cloud.network.dao.LoadBalancerVMMapDao; import com.cloud.network.dao.LoadBalancerVMMapVO; import com.cloud.network.rules.FirewallRule.FirewallRuleType; import com.cloud.network.rules.FirewallRule.Purpose; -import com.cloud.network.rules.FirewallRule.TrafficType; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.vpc.VpcManager; import com.cloud.offering.NetworkOffering; @@ -69,7 +67,6 @@ import com.cloud.user.UserContext; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -139,8 +136,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Inject LoadBalancerVMMapDao _loadBalancerVMMapDao; - @Override - public void checkIpAndUserVm(IpAddress ipAddress, UserVm userVm, Account caller) { + + protected void checkIpAndUserVm(IpAddress ipAddress, UserVm userVm, Account caller) { if (ipAddress == null || ipAddress.getAllocatedTime() == null || ipAddress.getAllocatedToAccountId() == null) { throw new InvalidParameterValueException("Unable to create ip forwarding rule on address " + ipAddress + ", invalid IP address specified."); } @@ -490,10 +487,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules "a part of enable static nat"); return false; } - performedIpAssoc = true; } else if (ipAddress.isPortable()) { - s_logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network, so" + - "associate IP with the network " + networkId); + s_logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + + " so associate IP with the network " + networkId); try { // check if StaticNat service is enabled in the network _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); @@ -504,13 +500,12 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } // associate portable IP with guest network - _networkMgr.associatePortableIPToGuestNetwork(ipId, networkId, false); + ipAddress = _networkMgr.associatePortableIPToGuestNetwork(ipId, networkId, false); } catch (Exception e) { s_logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); return false; } - performedIpAssoc = true; } } else if (ipAddress.getAssociatedWithNetworkId() != networkId) { if (ipAddress.isPortable()) { @@ -520,14 +515,16 @@ public class 
RulesManagerImpl extends ManagerBase implements RulesManager, Rules // check if portable IP can be transferred across the networks if (_networkMgr.isPortableIpTransferableFromNetwork(ipId, ipAddress.getAssociatedWithNetworkId() )) { try { + // transfer the portable IP and refresh IP details _networkMgr.transferPortableIP(ipId, ipAddress.getAssociatedWithNetworkId(), networkId); + ipAddress = _ipAddressDao.findById(ipId); } catch (Exception e) { s_logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); return false; } } else { - throw new InvalidParameterValueException("Portable IP: " + ipId + " has associated services" + + throw new InvalidParameterValueException("Portable IP: " + ipId + " has associated services " + "in network " + ipAddress.getAssociatedWithNetworkId() + " so can not be transferred to " + " network " + networkId); } @@ -706,6 +703,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return true; } + private boolean revokeStaticNatRuleInternal(long ruleId, Account caller, long userId, boolean apply) { FirewallRuleVO rule = _firewallDao.findById(ruleId); @@ -756,45 +754,6 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return success; } - @Override - public boolean revokeStaticNatRulesForVm(long vmId) { - boolean success = true; - - UserVmVO vm = _vmDao.findByIdIncludingRemoved(vmId); - if (vm == null) { - return false; - } - - List rules = _firewallDao.listStaticNatByVmId(vm.getId()); - Set ipsToReprogram = new HashSet(); - - if (rules == null || rules.isEmpty()) { - s_logger.debug("No static nat rules are found for vm id=" + vmId); - return true; - } - - for (FirewallRuleVO rule : rules) { - // mark static nat as Revoked, but don't revoke it yet (apply = false) - revokeStaticNatRuleInternal(rule.getId(), _accountMgr.getSystemAccount(), Account.ACCOUNT_ID_SYSTEM, false); - ipsToReprogram.add(rule.getSourceIpAddressId()); - } - - // apply rules for all ip addresses - for (Long ipId : ipsToReprogram) { - s_logger.debug("Applying static nat rules for ip address id=" + ipId + " as a part of vm expunge"); - if (!applyStaticNatRulesForIp(ipId, true, _accountMgr.getSystemAccount(), true)) { - success = false; - s_logger.warn("Failed to apply static nat rules for ip id=" + ipId); - } - } - - return success; - } - - @Override - public List listPortForwardingRulesForApplication(long ipId) { - return _portForwardingDao.listForApplication(ipId); - } @Override public Pair, Integer> listPortForwardingRules(ListPortForwardingRulesCmd cmd) { @@ -872,8 +831,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return _firewallCidrsDao.getSourceCidrs(ruleId); } - @Override - public boolean applyPortForwardingRules(long ipId, boolean continueOnError, Account caller) { + + protected boolean applyPortForwardingRules(long ipId, boolean continueOnError, Account caller) { List rules = _portForwardingDao.listForApplication(ipId); if (rules.size() == 0) { @@ -897,8 +856,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return true; } - @Override - public boolean applyStaticNatRulesForIp(long sourceIpId, boolean continueOnError, Account caller, boolean forRevoke) { + + protected boolean applyStaticNatRulesForIp(long sourceIpId, boolean continueOnError, Account caller, boolean forRevoke) { List rules = _firewallDao.listByIpAndPurpose(sourceIpId, Purpose.StaticNat); List staticNatRules = new 
ArrayList(); @@ -1172,15 +1131,6 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return success && rules.size() == 0; } - @Override - public List listFirewallRulesByIp(long ipId) { - return null; - } - - @Override - public boolean releasePorts(long ipId, String protocol, FirewallRule.Purpose purpose, int... ports) { - return _firewallDao.releasePorts(ipId, protocol, purpose, ports); - } @Override @DB @@ -1221,29 +1171,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } } - @Override - public List gatherPortForwardingRulesForApplication(List addrs) { - List allRules = new ArrayList(); - for (IpAddress addr : addrs) { - if (!addr.readyToUse()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skipping " + addr + " because it is not ready for propation yet."); - } - continue; - } - allRules.addAll(_portForwardingDao.listForApplication(addr.getId())); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + allRules.size() + " rules to apply for the addresses."); - } - - return allRules; - } - - @Override - public List listByNetworkId(long networkId) { + private List listByNetworkId(long networkId) { return _portForwardingDao.listByNetwork(networkId); } @@ -1257,14 +1186,14 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (ipAddress.getSystem()) { InvalidParameterValueException ex = new InvalidParameterValueException("Can't disable static nat for system IP address with specified id"); - ex.addProxyObject(ipAddress, ipId, "ipId"); + ex.addProxyObject(ipAddress.getUuid(), "ipId"); throw ex; } Long vmId = ipAddress.getAssociatedWithVmId(); if (vmId == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Specified IP address id is not associated with any vm Id"); - ex.addProxyObject(ipAddress, ipId, "ipId"); + ex.addProxyObject(ipAddress.getUuid(), "ipId"); throw ex; } @@ -1292,7 +1221,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (!ipAddress.isOneToOneNat()) { InvalidParameterValueException ex = new InvalidParameterValueException("One to one nat is not enabled for the specified ip id"); - ex.addProxyObject(ipAddress, ipId, "ipId"); + ex.addProxyObject(ipAddress.getUuid(), "ipId"); throw ex; } @@ -1353,22 +1282,22 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (ip == null || !ip.isOneToOneNat() || ip.getAssociatedWithVmId() == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Source ip address of the specified firewall rule id is not static nat enabled"); - ex.addProxyObject(ruleVO, rule.getId(), "ruleId"); + ex.addProxyObject(ruleVO.getUuid(), "ruleId"); throw ex; } String dstIp = ip.getVmIp(); if (dstIp == null) { InvalidParameterValueException ex = new InvalidParameterValueException("VM ip address of the specified public ip is not set "); - ex.addProxyObject(ruleVO, rule.getId(), "ruleId"); + ex.addProxyObject(ruleVO.getUuid(), "ruleId"); throw ex; } return new StaticNatRuleImpl(ruleVO, dstIp); } - @Override - public boolean applyStaticNatForIp(long sourceIpId, boolean continueOnError, Account caller, boolean forRevoke) { + + protected boolean applyStaticNatForIp(long sourceIpId, boolean continueOnError, Account caller, boolean forRevoke) { IpAddress sourceIp = _ipAddressDao.findById(sourceIpId); List staticNats = createStaticNatForIp(sourceIp, caller, forRevoke); @@ -1432,7 +1361,7 @@ public class RulesManagerImpl extends 
ManagerBase implements RulesManager, Rules Network network = _networkModel.getNetwork(networkId); if (network == null) { CloudRuntimeException ex = new CloudRuntimeException("Unable to find an ip address to map to specified vm id"); - ex.addProxyObject(vm, vm.getId(), "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } diff --git a/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java index e26dad98f60..bf6b859f619 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -16,8 +16,10 @@ // under the License. package com.cloud.network.vpc; +import com.cloud.configuration.ConfigurationManager; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.Network.Service; @@ -29,6 +31,7 @@ import com.cloud.network.element.VpcProvider; import com.cloud.network.vpc.NetworkACLItem.State; import com.cloud.network.vpc.dao.NetworkACLDao; import com.cloud.network.vpc.dao.VpcGatewayDao; +import com.cloud.offering.NetworkOffering; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -73,6 +76,8 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana VpcGatewayDao _vpcGatewayDao; @Inject NetworkModel _ntwkModel; + @Inject + ConfigurationManager _configMgr; @Override public NetworkACL createNetworkACL(String name, String description, long vpcId) { @@ -133,9 +138,22 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana @Override public boolean replaceNetworkACL(NetworkACL acl, NetworkVO network) throws ResourceUnavailableException { + + NetworkOffering guestNtwkOff = _configMgr.getNetworkOffering(network.getNetworkOfferingId()); + + if (guestNtwkOff == null) { + throw new InvalidParameterValueException("Can't find network offering associated with network: "+network.getUuid()); + } + + //verify that ACLProvider is supported by network offering + if(!_ntwkModel.areServicesSupportedByNetworkOffering(guestNtwkOff.getId(), Service.NetworkACL)){ + throw new InvalidParameterValueException("Cannot apply NetworkACL. 
Network Offering does not support NetworkACL service"); + } + network.setNetworkACLId(acl.getId()); //Update Network ACL if(_networkDao.update(network.getId(), network)){ + s_logger.debug("Updated network: "+network.getId()+ " with Network ACL Id: "+acl.getId()+", Applying ACL items"); //Apply ACL to network return applyACLToNetwork(network.getId()); } @@ -276,6 +294,9 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana @Override public List listNetworkACLItems(long guestNtwkId) { Network network = _networkMgr.getNetwork(guestNtwkId); + if(network.getNetworkACLId() == null){ + return null; + } return _networkACLItemDao.listByACL(network.getNetworkACLId()); } @@ -376,16 +397,22 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana public boolean applyACLItemsToNetwork(long networkId, List rules) throws ResourceUnavailableException { Network network = _networkDao.findById(networkId); boolean handled = false; + boolean foundProvider = false; for (NetworkACLServiceProvider element: _networkAclElements) { Network.Provider provider = element.getProvider(); boolean isAclProvider = _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.NetworkACL, provider); if (!isAclProvider) { continue; } + foundProvider = true; + s_logger.debug("Applying NetworkACL for network: "+network.getId()+" with Network ACL service provider"); handled = element.applyNetworkACLs(network, rules); if (handled) break; } + if(!foundProvider){ + s_logger.debug("Unable to find NetworkACL service provider for network: "+network.getId()); + } return handled; } diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index 2b02a888de9..4ad22d90770 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -121,7 +121,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ } if(vpcId != null){ - sc.setParameters("vpcId", name); + sc.setParameters("vpcId", vpcId); } if(networkId != null){ @@ -303,28 +303,30 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ } //Validate Protocol - //Check if protocol is a number - if(StringUtils.isNumeric(protocol)){ - int protoNumber = Integer.parseInt(protocol); - if(protoNumber < 0 || protoNumber > 255){ - throw new InvalidParameterValueException("Invalid protocol number: " + protoNumber); + if(protocol != null){ + //Check if protocol is a number + if(StringUtils.isNumeric(protocol)){ + int protoNumber = Integer.parseInt(protocol); + if(protoNumber < 0 || protoNumber > 255){ + throw new InvalidParameterValueException("Invalid protocol number: " + protoNumber); + } + } else { + //Protocol is not a number + //Check for valid protocol strings + String supportedProtocols = "tcp,udp,icmp,all"; + if(!supportedProtocols.contains(protocol.toLowerCase())){ + throw new InvalidParameterValueException("Invalid protocol: " + protocol); + } } - } else { - //Protocol is not number - //Check for valid protocol strings - String supportedProtocols = "tcp,udp,icmp,all"; - if(!supportedProtocols.contains(protocol.toLowerCase())){ - throw new InvalidParameterValueException("Invalid protocol: " + protocol); + + // icmp code and icmp type can't be passed in for any protocol other than icmp + if (!protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (icmpCode != null || icmpType != null)) { + throw new
InvalidParameterValueException("Can specify icmpCode and icmpType for ICMP protocol only"); } - } - // icmp code and icmp type can't be passed in for any other protocol rather than icmp - if (!protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (icmpCode != null || icmpType != null)) { - throw new InvalidParameterValueException("Can specify icmpCode and icmpType for ICMP protocol only"); - } - - if (protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (portStart != null || portEnd != null)) { - throw new InvalidParameterValueException("Can't specify start/end port when protocol is ICMP"); + if (protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (portStart != null || portEnd != null)) { + throw new InvalidParameterValueException("Can't specify start/end port when protocol is ICMP"); + } } //validate icmp code and type diff --git a/server/src/com/cloud/network/vpc/VpcManager.java b/server/src/com/cloud/network/vpc/VpcManager.java index f22e7e4bf83..e01413f78f3 100644 --- a/server/src/com/cloud/network/vpc/VpcManager.java +++ b/server/src/com/cloud/network/vpc/VpcManager.java @@ -164,7 +164,7 @@ public interface VpcManager extends VpcService{ * @param gateway * @param networkOwner TODO */ - void validateNtwkOffForNtwkInVpc(Long networkId, long newNtwkOffId, String newCidr, String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner); + void validateNtwkOffForNtwkInVpc(Long networkId, long newNtwkOffId, String newCidr, String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner, Long aclId); List getVpcPrivateGateways(long vpcId); } diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 1aab7320fb4..b41003a6105 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -16,27 +16,6 @@ // under the License. 
package com.cloud.network.vpc; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.acl.ControlledEntity.ACLType; -import org.apache.cloudstack.api.command.user.vpc.ListPrivateGatewaysCmd; -import org.apache.cloudstack.api.command.user.vpc.ListStaticRoutesCmd; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource.ResourceType; @@ -80,6 +59,7 @@ import com.cloud.network.dao.Site2SiteVpnGatewayDao; import com.cloud.network.element.StaticNatServiceProvider; import com.cloud.network.element.VpcProvider; import com.cloud.network.vpc.VpcOffering.State; +import com.cloud.network.vpc.dao.NetworkACLDao; import com.cloud.network.vpc.dao.PrivateIpDao; import com.cloud.network.vpc.dao.StaticRouteDao; import com.cloud.network.vpc.dao.VpcDao; @@ -87,7 +67,6 @@ import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.network.vpc.dao.VpcOfferingServiceMapDao; import com.cloud.network.vpc.dao.VpcServiceMapDao; -import com.cloud.network.vpc.dao.NetworkACLDao; import com.cloud.network.vpn.Site2SiteVpnManager; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingServiceMapVO; @@ -121,6 +100,25 @@ import com.cloud.utils.net.NetUtils; import com.cloud.vm.ReservationContext; import com.cloud.vm.ReservationContextImpl; import com.cloud.vm.dao.DomainRouterDao; +import org.apache.cloudstack.acl.ControlledEntity.ACLType; +import org.apache.cloudstack.api.command.user.vpc.ListPrivateGatewaysCmd; +import org.apache.cloudstack.api.command.user.vpc.ListStaticRoutesCmd; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; @Component @@ -587,7 +585,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (vpcOff == null || vpcOff.getState() != State.Enabled) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find vpc offering in " + State.Enabled + " state by specified id"); - ex.addProxyObject("vpc_offerings", vpcOffId, "vpcOfferingId"); + if (vpcOff == null) { + ex.addProxyObject(String.valueOf(vpcOffId), "vpcOfferingId"); + } else { + ex.addProxyObject(vpcOff.getUuid(), "vpcOfferingId"); + } throw ex; } @@ -596,7 +598,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { // See DataCenterVO.java PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation since specified Zone is currently disabled"); - 
ex.addProxyObject("data_center", zone.getId(), "zoneId"); + ex.addProxyObject(zone.getUuid(), "zoneId"); throw ex; } @@ -943,7 +945,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis Vpc vpc = getActiveVpc(vpcId); if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find Enabled VPC by id specified"); - ex.addProxyObject("vpc", vpcId, "VPC"); + ex.addProxyObject(String.valueOf(vpcId), "VPC"); throw ex; } @@ -1035,7 +1037,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB @Override public void validateNtwkOffForNtwkInVpc(Long networkId, long newNtwkOffId, String newCidr, - String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner) { + String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner, Long aclId) { NetworkOffering guestNtwkOff = _configMgr.getNetworkOffering(newNtwkOffId); @@ -1080,6 +1082,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } } + + //5) When aclId is provided, verify that ACLProvider is supported by network offering + if(aclId != null && (!_ntwkModel.areServicesSupportedByNetworkOffering(guestNtwkOff.getId(), Service.NetworkACL))){ + throw new InvalidParameterValueException("Cannot apply NetworkACL. Network Offering does not support NetworkACL service"); + } + } @Override @@ -1217,9 +1225,18 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpcId, null); s_logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); for (IPAddressVO ipToRelease : ipsToRelease) { - success = success && _ntwkMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller); - if (!success) { - s_logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); + if (ipToRelease.isPortable()) { + // portable IP address are associated with owner, until explicitly requested to be disassociated. 
+ // so as part of VPC clean up just break IP association with VPC + ipToRelease.setVpcId(null); + ipToRelease.setAssociatedWithNetworkId(null); + _ipAddressDao.update(ipToRelease.getId(), ipToRelease); + s_logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); + } else { + success = success && _ntwkMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller); + if (!success) { + s_logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); + } } } @@ -1266,7 +1283,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis Vpc vpc = getActiveVpc(vpcId); if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find Enabled VPC by id specified"); - ex.addProxyObject("vpc", vpcId, "VPC"); + ex.addProxyObject(String.valueOf(vpcId), "VPC"); throw ex; } @@ -1345,7 +1362,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis Vpc vpc = getActiveVpc(vpcId); if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find Enabled VPC by id specified"); - ex.addProxyObject("vpc", vpcId, "VPC"); + ex.addProxyObject(String.valueOf(vpcId), "VPC"); throw ex; } @@ -2007,7 +2024,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find Enabled VPC "); - ex.addProxyObject("vpc", vpcId, "VPC"); + ex.addProxyObject(String.valueOf(vpcId), "VPC"); throw ex; } _accountMgr.checkAccess(caller, null, false, vpc); @@ -2021,7 +2038,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } //1) Validate if network can be created for VPC - validateNtwkOffForNtwkInVpc(null, ntwkOffId, cidr, networkDomain, vpc, gateway, owner); + validateNtwkOffForNtwkInVpc(null, ntwkOffId, cidr, networkDomain, vpc, gateway, owner, aclId); //2) Create network Network guestNetwork = _ntwkMgr.createGuestNetwork(ntwkOffId, name, displayText, gateway, cidr, vlanId, @@ -2096,6 +2113,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis hTypes.add(HypervisorType.XenServer); hTypes.add(HypervisorType.VMware); hTypes.add(HypervisorType.KVM); + hTypes.add(HypervisorType.Simulator); return hTypes; } diff --git a/server/src/com/cloud/projects/ProjectManagerImpl.java b/server/src/com/cloud/projects/ProjectManagerImpl.java index de4f3ccd11b..7ab385f2de4 100755 --- a/server/src/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/com/cloud/projects/ProjectManagerImpl.java @@ -43,6 +43,7 @@ import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.ProjectAccountJoinDao; import com.cloud.api.query.dao.ProjectInvitationJoinDao; import com.cloud.api.query.dao.ProjectJoinDao; @@ -50,6 +51,7 @@ import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; @@ -508,14 +510,14 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (project == null) { 
InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project with specified id"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(String.valueOf(projectId), "projectId"); throw ex; } //User can be added to Active project only if (project.getState() != Project.State.Active) { InvalidParameterValueException ex = new InvalidParameterValueException("Can't add account to the specified project id in state=" + project.getState() + " as it's no longer active"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(project.getUuid(),"projectId"); throw ex; } @@ -525,8 +527,12 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { account = _accountMgr.getActiveAccountByName(accountName, project.getDomainId()); if (account == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find account name=" + accountName + " in specified domain id"); - // We don't have a DomainVO object with us, so just pass the tablename "domain" manually. - ex.addProxyObject("domain", project.getDomainId(), "domainId"); + DomainVO domain = ApiDBUtils.findDomainById(project.getDomainId()); + String domainUuid = String.valueOf(project.getDomainId()); + if ( domain != null ){ + domainUuid = domain.getUuid(); + } + ex.addProxyObject(domainUuid, "domainId"); throw ex; } @@ -590,7 +596,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project with specified id"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(String.valueOf(projectId), "projectId"); throw ex; } @@ -598,8 +604,12 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { Account account = _accountMgr.getActiveAccountByName(accountName, project.getDomainId()); if (account == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find account name=" + accountName + " in domain id=" + project.getDomainId()); - // Since we don't have a domainVO object, pass the table name manually. - ex.addProxyObject("domain", project.getDomainId(), "domainId"); + DomainVO domain = ApiDBUtils.findDomainById(project.getDomainId()); + String domainUuid = String.valueOf(project.getDomainId()); + if ( domain != null ){ + domainUuid = domain.getUuid(); + } + ex.addProxyObject(domainUuid, "domainId"); } //verify permissions @@ -610,14 +620,14 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (projectAccount == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Account " + accountName + " is not assigned to the project with specified id"); // Use the projectVO object and not the projectAccount object to inject the projectId. 
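// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): the VPC-cleanup branching added in
// the VpcManagerImpl hunk a little further up, shown in one place. Portable
// addresses stay allocated to the account and are only detached from the VPC;
// ordinary public IPs are disassociated as before.
for (IPAddressVO ipToRelease : _ipAddressDao.listByAssociatedVpc(vpcId, null)) {
    if (ipToRelease.isPortable()) {
        ipToRelease.setVpcId(null);
        ipToRelease.setAssociatedWithNetworkId(null);
        _ipAddressDao.update(ipToRelease.getId(), ipToRelease);
    } else {
        success = success && _ntwkMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller);
    }
}
// ---------------------------------------------------------------------------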
- ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(project.getUuid(), "projectId"); throw ex; } //can't remove the owner of the project if (projectAccount.getAccountRole() == Role.Admin) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to delete account " + accountName + " from the project with specified id as the account is the owner of the project"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(project.getUuid(), "projectId"); throw ex; } @@ -791,7 +801,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project with specified id"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(String.valueOf(projectId), "projectId"); throw ex; } @@ -833,7 +843,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { //verify input parameters if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project with specified id"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(String.valueOf(projectId), "projectId"); throw ex; } @@ -844,7 +854,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { return _projectDao.findById(projectId); } else { CloudRuntimeException ex = new CloudRuntimeException("Failed to suspend project with specified id"); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(project.getUuid(), "projectId"); throw ex; } diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 837fdfb6477..fe91cb337d0 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -30,6 +30,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.*; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd; @@ -40,16 +41,10 @@ import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; -import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.region.dao.RegionDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -91,6 +86,7 @@ import com.cloud.dc.dao.ClusterDao; 
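// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): the exception-proxy pattern applied
// throughout this change set (VpcManagerImpl, ProjectManagerImpl,
// ResourceManagerImpl, ManagementServerImpl) — addProxyObject() now takes a
// uuid string instead of the old (entity, id, name) triple. When the lookup may
// have returned null, fall back to the raw id so the error still carries an
// identifier the caller can act on.
DataCenterVO zone = _dcDao.findById(dcId);
if (zone == null) {
    InvalidParameterValueException ex = new InvalidParameterValueException("Can't find zone by the id specified");
    ex.addProxyObject(String.valueOf(dcId), "dcId"); // no row, so only the raw id is available
    throw ex;
}
if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) {
    PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled");
    ex.addProxyObject(zone.getUuid(), "dcId"); // entity exists here, so expose its uuid
    throw ex;
}
// ---------------------------------------------------------------------------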
import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterIpAddressDao; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.PlannerHostReservationVO; import com.cloud.deploy.dao.PlannerHostReservationDao; @@ -123,12 +119,9 @@ import com.cloud.org.Grouping; import com.cloud.org.Grouping.AllocationState; import com.cloud.org.Managed; import com.cloud.service.ServiceOfferingVO; -import com.cloud.storage.DataStoreRole; import com.cloud.storage.GuestOSCategoryVO; -import com.cloud.storage.ImageStore; import com.cloud.storage.S3; import com.cloud.storage.S3VO; -import com.cloud.storage.ScopeType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; @@ -171,6 +164,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.dc.DataCenter.NetworkType; @Component @Local({ ResourceManager.class, ResourceService.class }) @@ -227,6 +221,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, protected StorageService _storageSvr; @Inject PlannerHostReservationDao _plannerHostReserveDao; + @Inject + protected DedicatedResourceDao _dedicatedDao; protected List _discoverers; @@ -385,8 +381,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // Check if the zone exists in the system DataCenterVO zone = _dcDao.findById(dcId); if (zone == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("Can't find zone by the id specified"); - ex.addProxyObject(zone, dcId, "dcId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Can't find zone by the id specified"); + ex.addProxyObject(String.valueOf(dcId), "dcId"); throw ex; } @@ -394,7 +391,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) { PermissionDeniedException ex = new PermissionDeniedException( "Cannot perform this operation, Zone with specified id is currently disabled"); - ex.addProxyObject(zone, dcId, "dcId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } @@ -409,9 +406,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } // check if pod belongs to the zone if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) { - InvalidParameterValueException ex = new InvalidParameterValueException("Pod with specified id doesn't belong to the zone " + dcId); - ex.addProxyObject(pod, podId, "podId"); - ex.addProxyObject(zone, dcId, "dcId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Pod with specified id doesn't belong to the zone " + dcId); + ex.addProxyObject(pod.getUuid(), "podId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } @@ -430,6 +428,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new InvalidParameterValueException("Unable to resolve " + cmd.getHypervisor() + " to a supported "); } + if (zone.isSecurityGroupEnabled() && zone.getNetworkType().equals(NetworkType.Advanced)) { + if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer + && hypervisorType != HypervisorType.Simulator) { + throw new InvalidParameterValueException("Don't 
support hypervisor type " + hypervisorType + " in advanced security enabled zone"); + } + } + Cluster.ClusterType clusterType = null; if (cmd.getClusterType() != null && !cmd.getClusterType().isEmpty()) { clusterType = Cluster.ClusterType.valueOf(cmd.getClusterType()); @@ -476,8 +481,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, CloudRuntimeException ex = new CloudRuntimeException("Unable to create cluster " + clusterName + " in pod and data center with specified ids", e); // Get the pod VO object's table name. - ex.addProxyObject(pod, podId, "podId"); - ex.addProxyObject(zone, dcId, "dcId"); + ex.addProxyObject(pod.getUuid(), "podId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } clusterId = cluster.getId(); @@ -593,8 +598,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (clusterId != null) { ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("can not find cluster for specified clusterId"); - ex.addProxyObject(cluster, clusterId, "clusterId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "can not find cluster for specified clusterId"); + ex.addProxyObject(clusterId.toString(), "clusterId"); throw ex; } else { if (cluster.getGuid() == null) { @@ -602,7 +608,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (!hosts.isEmpty()) { CloudRuntimeException ex = new CloudRuntimeException( "Guid is not updated for cluster with specified cluster id; need to wait for hosts in this cluster to come up"); - ex.addProxyObject(cluster, clusterId, "clusterId"); + ex.addProxyObject(cluster.getUuid(), "clusterId"); throw ex; } } @@ -658,7 +664,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) { PermissionDeniedException ex = new PermissionDeniedException( "Cannot perform this operation, Zone with specified id is currently disabled"); - ex.addProxyObject(zone, dcId, "dcId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } @@ -670,10 +676,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } // check if pod belongs to the zone if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) { - InvalidParameterValueException ex = new InvalidParameterValueException("Pod with specified podId" + podId - + " doesn't belong to the zone with specified zoneId" + dcId); - ex.addProxyObject(pod, podId, "podId"); - ex.addProxyObject(zone, dcId, "dcId"); + InvalidParameterValueException ex = new InvalidParameterValueException( + "Pod with specified podId" + + podId + + " doesn't belong to the zone with specified zoneId" + + dcId); + ex.addProxyObject(pod.getUuid(), "podId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } } @@ -730,10 +739,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } catch (Exception e) { cluster = _clusterDao.findBy(clusterName, podId); if (cluster == null) { - CloudRuntimeException ex = new CloudRuntimeException("Unable to create cluster " + clusterName - + " in pod with specified podId and data center with specified dcID", e); - ex.addProxyObject(pod, podId, "podId"); - ex.addProxyObject(zone, dcId, "dcId"); + CloudRuntimeException ex = new CloudRuntimeException( + "Unable to create cluster " + + clusterName + + " 
in pod with specified podId and data center with specified dcID", + e); + ex.addProxyObject(pod.getUuid(), "podId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } } @@ -932,6 +944,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, hostCapacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId); hostCapacitySC.addAnd("capacityType", SearchCriteria.Op.IN, capacityTypes); _capacityDao.remove(hostCapacitySC); + // remove from dedicated resources + DedicatedResourceVO dr = _dedicatedDao.findByHostId(hostId); + if (dr != null) { + _dedicatedDao.remove(dr.getId()); + } txn.commit(); return true; } @@ -995,6 +1012,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (hypervisorType == HypervisorType.VMware && Boolean.parseBoolean(_configDao.getValue(Config.VmwareUseNexusVSwitch.toString()))) { _clusterVSMMapDao.removeByClusterId(cmd.getId()); } + // remove from dedicated resources + DedicatedResourceVO dr = _dedicatedDao.findByClusterId(cluster.getId()); + if (dr != null) { + _dedicatedDao.remove(dr.getId()); + } } txn.commit(); diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 98f1c964408..45892aafa12 100755 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -36,33 +36,40 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; import java.util.regex.Pattern; -import java.util.StringTokenizer; import javax.crypto.KeyGenerator; import javax.crypto.SecretKey; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.configuration.*; -import com.cloud.dc.*; -import com.cloud.dc.dao.DcDetailsDao; -import com.cloud.user.*; -import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.codec.binary.Base64; +import org.apache.commons.io.FileUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.configuration.Config; +import com.cloud.configuration.ConfigurationVO; +import com.cloud.configuration.Resource; import com.cloud.configuration.Resource.ResourceOwnerType; import com.cloud.configuration.Resource.ResourceType; +import com.cloud.configuration.ResourceCountVO; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.configuration.dao.ResourceCountDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DcDetailVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.VlanVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DcDetailsDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; import com.cloud.domain.DomainVO; @@ -95,6 +102,11 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.test.IPRangeConfig; +import com.cloud.user.Account; +import com.cloud.user.AccountDetailVO; +import com.cloud.user.AccountDetailsDao; +import 
com.cloud.user.AccountVO; +import com.cloud.user.User; import com.cloud.user.dao.AccountDao; import com.cloud.utils.PasswordGenerator; import com.cloud.utils.PropertiesUtil; @@ -106,7 +118,6 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.Script; -import com.cloud.uuididentity.dao.IdentityDao; @Component @@ -124,12 +135,10 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio @Inject private DataCenterDao _dataCenterDao; @Inject private NetworkDao _networkDao; @Inject private VlanDao _vlanDao; - private String _domainSuffix; @Inject private DomainDao _domainDao; @Inject private AccountDao _accountDao; @Inject private ResourceCountDao _resourceCountDao; @Inject private NetworkOfferingServiceMapDao _ntwkOfferingServiceMapDao; - @Inject private IdentityDao _identityDao; @Inject private DcDetailsDao _dcDetailsDao; @Inject private ClusterDetailsDao _clusterDetailsDao; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @@ -162,9 +171,6 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // Get init String init = _configDao.getValue("init"); - // Get domain suffix - needed for network creation - _domainSuffix = _configDao.getValue("guest.domain.suffix"); - if (init == null || init.equals("false")) { s_logger.debug("ConfigurationServer is saving default values to the database."); @@ -443,23 +449,13 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } - private String getBase64Keystore(String keystorePath) throws IOException { - byte[] storeBytes = new byte[4094]; - int len = 0; - try { - len = new FileInputStream(keystorePath).read(storeBytes); - } catch (EOFException e) { - } catch (Exception e) { - throw new IOException("Cannot read the generated keystore file"); - } - if (len > 3000) { // Base64 codec would enlarge data by 1/3, and we have 4094 bytes in database entry at most - throw new IOException("KeyStore is too big for database! Length " + len); + static String getBase64Keystore(String keystorePath) throws IOException { + byte[] storeBytes = FileUtils.readFileToByteArray(new File(keystorePath)); + if (storeBytes.length > 3000) { // Base64 codec would enlarge data by 1/3, and we have 4094 bytes in database entry at most + throw new IOException("KeyStore is too big for database! Length " + storeBytes.length); } - byte[] encodeBytes = new byte[len]; - System.arraycopy(storeBytes, 0, encodeBytes, 0, len); - - return new String(Base64.encodeBase64(encodeBytes)); + return new String(Base64.encodeBase64(storeBytes)); } private void generateDefaultKeystore(String keystorePath) throws IOException { diff --git a/server/src/com/cloud/server/ManagementServer.java b/server/src/com/cloud/server/ManagementServer.java index 969bc6557e1..f60ce488e10 100755 --- a/server/src/com/cloud/server/ManagementServer.java +++ b/server/src/com/cloud/server/ManagementServer.java @@ -16,19 +16,11 @@ // under the License. 
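// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): why getBase64Keystore() above can
// keep a 3000-byte cut-off for the 4094-character database column. Standard
// Base64 emits 4 output characters for every 3 input bytes (rounded up):
//   encodedLength(n) = 4 * ceil(n / 3)
//   encodedLength(3000) = 4000  -> fits in 4094
//   encodedLength(3072) = 4096  -> would already overflow
static int base64EncodedLength(int rawBytes) {
    return 4 * ((rawBytes + 2) / 3);
}
// ---------------------------------------------------------------------------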
package com.cloud.server; -import java.util.Date; import java.util.List; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.ManagementServerException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.exception.VirtualMachineMigrationException; -import org.apache.cloudstack.api.command.admin.systemvm.ScaleSystemVMCmd; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import com.cloud.event.EventVO; import com.cloud.host.HostVO; -import com.cloud.info.ConsoleProxyInfo; import com.cloud.storage.GuestOSVO; import com.cloud.utils.Pair; import com.cloud.utils.component.PluggableService; @@ -59,30 +51,6 @@ public interface ManagementServer extends ManagementService, PluggableService { */ HostVO getHostBy(long hostId); - /** - * Retrieves all Events between the start and end date specified - * - * @param userId - * unique id of the user, pass in -1 to retrieve events for all users - * @param accountId - * unique id of the account (which could be shared by many users), pass in -1 to retrieve events for all accounts - * @param domainId - * the id of the domain in which to search for users (useful when -1 is passed in for userId) - * @param type - * the type of the event. - * @param level - * INFO, WARN, or ERROR - * @param startDate - * inclusive. - * @param endDate - * inclusive. If date specified is greater than the current time, the system will use the current time. - * @return List of events - */ - List getEvents(long userId, long accountId, Long domainId, String type, String level, Date startDate, Date endDate); - - //FIXME - move all console proxy related commands to corresponding managers - ConsoleProxyInfo getConsoleProxyForVm(long dataCenterId, long userVmId); - String getConsoleAccessUrlRoot(long vmId); GuestOSVO getGuestOs(Long guestOsId); @@ -103,7 +71,5 @@ public interface ManagementServer extends ManagementService, PluggableService { String getEncryptionKey(); String getEncryptionIV(); void resetEncryptionKeyIV(); - - public void enableAdminUser(String password); } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 17444ebdb33..5c2917f78f7 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -24,6 +24,7 @@ import java.util.Calendar; import java.util.Comparator; import java.util.Date; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TimeZone; @@ -825,38 +826,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - @Override - public List getEvents(long userId, long accountId, Long domainId, String type, String level, Date startDate, Date endDate) { - SearchCriteria sc = _eventDao.createSearchCriteria(); - if (userId > 0) { - sc.addAnd("userId", SearchCriteria.Op.EQ, userId); - } - if (accountId > 0) { - sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); - } - if (domainId != null) { - sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); - } - if (type != null) { - sc.addAnd("type", SearchCriteria.Op.EQ, type); - } - if (level != null) { - sc.addAnd("level", SearchCriteria.Op.EQ, level); - } - if (startDate != null && endDate != null) { - startDate = massageDate(startDate, 0, 0, 0); - endDate = massageDate(endDate, 23, 59, 59); - sc.addAnd("createDate", SearchCriteria.Op.BETWEEN, startDate, endDate); - } else if (startDate != null) { - startDate = 
massageDate(startDate, 0, 0, 0); - sc.addAnd("createDate", SearchCriteria.Op.GTEQ, startDate); - } else if (endDate != null) { - endDate = massageDate(endDate, 23, 59, 59); - sc.addAnd("createDate", SearchCriteria.Op.LTEQ, endDate); - } - - return _eventDao.search(sc, null); - } @Override public boolean archiveEvents(ArchiveEventsCmd cmd) { @@ -1056,7 +1025,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -1077,8 +1046,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to find the host (with specified id) of VM with specified id"); - ex.addProxyObject(srcHost, srcHostId, "hostId"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(String.valueOf(srcHostId), "hostId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -1119,10 +1088,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe allHosts.remove(srcHost); // Check if the host has storage pools for all the volumes of the vm to be migrated. - for (Host host : allHosts) { + for (Iterator iterator = allHosts.iterator(); iterator.hasNext();) { + Host host = iterator.next(); Map> volumePools = findSuitablePoolsForVolumes(vmProfile, host); if (volumePools.isEmpty()) { - allHosts.remove(host); + iterator.remove(); } else { if (!host.getClusterId().equals(srcHost.getClusterId()) || usesLocal) { requiresStorageMotion.put(host, true); @@ -1234,7 +1204,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (volume == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find volume with" + " specified id."); - ex.addProxyObject(volume, volumeId, "volumeId"); + ex.addProxyObject(volumeId.toString(), "volumeId"); throw ex; } @@ -1488,8 +1458,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe + " in specified domain"); // Since we don't have a DomainVO object here, we directly set // tablename to "domain". 
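// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): the iteration fix in the
// ManagementServerImpl host-filtering hunk above. Removing from a list inside
// a for-each loop throws ConcurrentModificationException; going through an
// explicit Iterator and calling iterator.remove() is the safe equivalent.
for (Iterator<Host> it = allHosts.iterator(); it.hasNext();) {
    Host host = it.next();
    if (findSuitablePoolsForVolumes(vmProfile, host).isEmpty()) {
        it.remove(); // mutate through the iterator, never through the list itself
    }
}
// ---------------------------------------------------------------------------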
- String tablename = "domain"; - ex.addProxyObject(tablename, domainId, "domainId"); + DomainVO domain = ApiDBUtils.findDomainById(domainId); + String domainUuid = domainId.toString(); + if ( domain != null ){ + domainUuid = domain.getUuid(); + } + ex.addProxyObject(domainUuid, "domainId"); throw ex; } else { accountId = account.getId(); @@ -1509,7 +1483,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Project project = _projectMgr.getProject(projectId); if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project by id " + projectId); - ex.addProxyObject(project, projectId, "projectId"); + ex.addProxyObject(projectId.toString(), "projectId"); throw ex; } accountId = project.getProjectAccountId(); @@ -1683,14 +1657,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (isIso && template.getFormat() != ImageFormat.ISO) { s_logger.error("Template Id " + templateId + " is not an ISO"); InvalidParameterValueException ex = new InvalidParameterValueException("Specified Template Id is not an ISO"); - ex.addProxyObject(template, templateId, "templateId"); + ex.addProxyObject(template.getUuid(), "templateId"); throw ex; }// If ISO not requested then it shouldn't be an ISO. if (!isIso && template.getFormat() == ImageFormat.ISO) { s_logger.error("Incorrect format of the template id " + templateId); InvalidParameterValueException ex = new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the specified template id"); - ex.addProxyObject(template, templateId, "templateId"); + ex.addProxyObject(template.getUuid(), "templateId"); throw ex; } } @@ -1785,14 +1759,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe VMTemplateVO template = _templateDao.findById(id); if (template == null || template.getRemoved() != null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find template/iso with specified id"); - ex.addProxyObject(template, id, "templateId"); + ex.addProxyObject(id.toString(), "templateId"); throw ex; } // Don't allow to modify system template if (id.equals(Long.valueOf(1))) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to update template/iso of specified id"); - ex.addProxyObject(template, id, "templateId"); + ex.addProxyObject(template.getUuid(), "templateId"); throw ex; } @@ -2059,8 +2033,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return new Pair, Integer>(result.first(), result.second()); } - @Override - public ConsoleProxyInfo getConsoleProxyForVm(long dataCenterId, long userVmId) { + + protected ConsoleProxyInfo getConsoleProxyForVm(long dataCenterId, long userVmId) { return _consoleProxyMgr.assignProxy(dataCenterId, userVmId); } @@ -2140,7 +2114,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe DomainVO domain = _domainDao.findById(domainId); if (domain == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id"); - ex.addProxyObject(domain, domainId, "domainId"); + ex.addProxyObject(domainId.toString(), "domainId"); throw ex; } else if (domain.getParent() == null && domainName != null) { // check if domain is ROOT domain - and deny to edit it with the new @@ -2164,7 +2138,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (!domains.isEmpty() && 
!sameDomain) { InvalidParameterValueException ex = new InvalidParameterValueException("Failed to update specified domain id with name '" + domainName + "' since it already exists in the system"); - ex.addProxyObject(domain, domainId, "domainId"); + ex.addProxyObject(domain.getUuid(), "domainId"); throw ex; } } @@ -3074,7 +3048,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe VMInstanceVO systemVm = _vmInstanceDao.findByIdTypes(instanceId, VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm); if (systemVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find a system vm of specified instanceId"); - ex.addProxyObject(systemVm, instanceId, "instanceId"); + ex.addProxyObject(String.valueOf(instanceId), "instanceId"); throw ex; } return systemVm.getType(); @@ -3086,7 +3060,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe VMInstanceVO systemVm = _vmInstanceDao.findByIdTypes(vmId, VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm); if (systemVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find a system vm with specified vmId"); - ex.addProxyObject(systemVm, vmId, "vmId"); + ex.addProxyObject(String.valueOf(vmId), "vmId"); throw ex; } @@ -3096,7 +3070,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return startSecondaryStorageVm(vmId); } else { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find a system vm with specified vmId"); - ex.addProxyObject(systemVm, vmId, "vmId"); + ex.addProxyObject(systemVm.getUuid(), "vmId"); throw ex; } } @@ -3109,7 +3083,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe VMInstanceVO systemVm = _vmInstanceDao.findByIdTypes(id, VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm); if (systemVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find a system vm with specified vmId"); - ex.addProxyObject(systemVm, id, "vmId"); + ex.addProxyObject(id.toString(), "vmId"); throw ex; } @@ -3131,7 +3105,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (systemVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find a system vm with specified vmId"); - ex.addProxyObject(systemVm, cmd.getId(), "vmId"); + ex.addProxyObject(cmd.getId().toString(), "vmId"); throw ex; } @@ -3148,7 +3122,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (systemVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find a system vm with specified vmId"); - ex.addProxyObject(systemVm, cmd.getId(), "vmId"); + ex.addProxyObject(cmd.getId().toString(), "vmId"); throw ex; } @@ -3186,7 +3160,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe User user = _accountMgr.getUserIncludingRemoved(userId); if ((user == null) || (user.getRemoved() != null)) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find active user of specified id"); - ex.addProxyObject(user, userId, "userId"); + ex.addProxyObject(String.valueOf(userId), "userId"); throw ex; } @@ -3276,7 +3250,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe InstanceGroupVO group = 
_vmGroupDao.findById(groupId.longValue()); if (group == null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find a vm group with specified groupId"); - ex.addProxyObject(group, groupId, "groupId"); + ex.addProxyObject(groupId.toString(), "groupId"); throw ex; } @@ -3482,7 +3456,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (s == null) { InvalidParameterValueException ex = new InvalidParameterValueException("A key pair with name '" + cmd.getName() + "' does not exist for account " + owner.getAccountName() + " in specified domain id"); - ex.addProxyObject(owner, owner.getDomainId(), "domainId"); + DomainVO domain = ApiDBUtils.findDomainById(owner.getDomainId()); + String domainUuid = String.valueOf(owner.getDomainId()); + if (domain != null){ + domainUuid = domain.getUuid(); + } + ex.addProxyObject(domainUuid, "domainId"); throw ex; } @@ -3569,7 +3548,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe UserVmVO vm = _userVmDao.findById(cmd.getId()); if (vm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("No VM with specified id found."); - ex.addProxyObject(vm, cmd.getId(), "vmId"); + ex.addProxyObject(cmd.getId().toString(), "vmId"); throw ex; } @@ -3580,7 +3559,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe String password = vm.getDetail("Encrypted.Password"); if (password == null || password.equals("")) { InvalidParameterValueException ex = new InvalidParameterValueException("No password for VM with specified id found."); - ex.addProxyObject(vm, cmd.getId(), "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -3690,7 +3669,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (hpvCapabilities == null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find the hypervisor capabilities for specified id"); - ex.addProxyObject(hpvCapabilities, id, "Id"); + ex.addProxyObject(id.toString(), "Id"); throw ex; } @@ -3762,8 +3741,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } - @Override - public void enableAdminUser(String password) { + + private void enableAdminUser(String password) { String encodedPassword = null; UserVO adminUser = _userDao.getUser(2); diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index 3feb10b3da8..e5104358503 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -17,11 +17,14 @@ package com.cloud.server; import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TimeZone; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -50,8 +53,11 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetFileStatsCommand; import com.cloud.agent.api.GetStorageStatsCommand; import com.cloud.agent.api.HostStatsEntry; +import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.manager.Commands; +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.exception.AgentUnavailableException; 
import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; @@ -67,14 +73,21 @@ import com.cloud.storage.VolumeStats; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.VmDiskStatisticsVO; +import com.cloud.user.dao.VmDiskStatisticsDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ComponentMethodInterceptable; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.net.MacAddress; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VmStats; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.UserVmDao; /** @@ -101,6 +114,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc @Inject private ResourceManager _resourceMgr; @Inject private ConfigurationDao _configDao; @Inject private EndPointSelector _epSelector; + @Inject private VmDiskStatisticsDao _vmDiskStatsDao; + @Inject private ManagementServerHostDao _msHostDao; private ConcurrentHashMap _hostStats = new ConcurrentHashMap(); private final ConcurrentHashMap _VmStats = new ConcurrentHashMap(); @@ -112,6 +127,15 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc long hostAndVmStatsInterval = -1L; long storageStatsInterval = -1L; long volumeStatsInterval = -1L; + int vmDiskStatsInterval = 0; + + private ScheduledExecutorService _diskStatsUpdateExecutor; + private int _usageAggregationRange = 1440; + private String _usageTimeZone = "GMT"; + private final long mgmtSrvrId = MacAddress.getMacAddress().toLong(); + private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds + private static final int USAGE_AGGREGATION_RANGE_MIN = 10; // 10 minutes, same to com.cloud.usage.UsageManagerImpl.USAGE_AGGREGATION_RANGE_MIN + private boolean _dailyOrHourly = false; //private final GlobalLock m_capacityCheckLock = GlobalLock.getInternLock("capacity.check"); @@ -141,6 +165,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc hostAndVmStatsInterval = NumbersUtil.parseLong(configs.get("vm.stats.interval"), 60000L); storageStatsInterval = NumbersUtil.parseLong(configs.get("storage.stats.interval"), 60000L); volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), -1L); + vmDiskStatsInterval = NumbersUtil.parseInt(configs.get("vm.disk.stats.interval"), 0); if (hostStatsInterval > 0) { _executor.scheduleWithFixedDelay(new HostCollector(), 15000L, hostStatsInterval, TimeUnit.MILLISECONDS); @@ -154,12 +179,61 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc _executor.scheduleWithFixedDelay(new StorageCollector(), 15000L, storageStatsInterval, TimeUnit.MILLISECONDS); } + if (vmDiskStatsInterval > 0) { + if (vmDiskStatsInterval < 300) + vmDiskStatsInterval = 300; + _executor.scheduleAtFixedRate(new VmDiskStatsTask(), vmDiskStatsInterval, vmDiskStatsInterval, TimeUnit.SECONDS); + } + // -1 means we don't even start this thread to pick up any data. 
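// Sketch of the scheduling added to StatsCollector for disk statistics: a configured
// vm.disk.stats.interval below five minutes is clamped to 300 seconds before the task is
// scheduled at a fixed rate. Only JDK types are used; the real task body is VmDiskStatsTask.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DiskStatsScheduleSketch {
    public static void main(String[] args) {
        int vmDiskStatsInterval = 60; // pretend value read from configuration

        ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
        if (vmDiskStatsInterval > 0) {
            if (vmDiskStatsInterval < 300) {
                vmDiskStatsInterval = 300; // enforce the 5-minute floor used by the patch
            }
            executor.scheduleAtFixedRate(new Runnable() {
                public void run() {
                    System.out.println("collect vm disk stats");
                }
            }, vmDiskStatsInterval, vmDiskStatsInterval, TimeUnit.SECONDS);
        }
        executor.shutdown(); // demo only; the real collector keeps running
    }
}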
if (volumeStatsInterval > 0) { _executor.scheduleWithFixedDelay(new VolumeCollector(), 15000L, volumeStatsInterval, TimeUnit.MILLISECONDS); } else { s_logger.info("Disabling volume stats collector"); } + + //Schedule disk stats update task + _diskStatsUpdateExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("DiskStatsUpdater")); + String aggregationRange = configs.get("usage.stats.job.aggregation.range"); + _usageAggregationRange = NumbersUtil.parseInt(aggregationRange, 1440); + _usageTimeZone = configs.get("usage.aggregation.timezone"); + if(_usageTimeZone == null){ + _usageTimeZone = "GMT"; + } + TimeZone usageTimezone = TimeZone.getTimeZone(_usageTimeZone); + Calendar cal = Calendar.getInstance(usageTimezone); + cal.setTime(new Date()); + long endDate = 0; + int HOURLY_TIME = 60; + final int DAILY_TIME = 60 * 24; + if (_usageAggregationRange == DAILY_TIME) { + cal.set(Calendar.HOUR_OF_DAY, 0); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + cal.roll(Calendar.DAY_OF_YEAR, true); + cal.add(Calendar.MILLISECOND, -1); + endDate = cal.getTime().getTime(); + _dailyOrHourly = true; + } else if (_usageAggregationRange == HOURLY_TIME) { + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + cal.roll(Calendar.HOUR_OF_DAY, true); + cal.add(Calendar.MILLISECOND, -1); + endDate = cal.getTime().getTime(); + _dailyOrHourly = true; + } else { + endDate = cal.getTime().getTime(); + _dailyOrHourly = false; + } + if (_usageAggregationRange < USAGE_AGGREGATION_RANGE_MIN) { + s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + USAGE_AGGREGATION_RANGE_MIN); + _usageAggregationRange = USAGE_AGGREGATION_RANGE_MIN; + } + _diskStatsUpdateExecutor.scheduleAtFixedRate(new VmDiskStatsUpdaterTask(), (endDate - System.currentTimeMillis()), + (_usageAggregationRange * 60 * 1000), TimeUnit.MILLISECONDS); + } class HostCollector implements Runnable { @@ -254,6 +328,10 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc statsInMemory.setNumCPUs(statsForCurrentIteration.getNumCPUs()); statsInMemory.setNetworkReadKBs(statsInMemory.getNetworkReadKBs() + statsForCurrentIteration.getNetworkReadKBs()); statsInMemory.setNetworkWriteKBs(statsInMemory.getNetworkWriteKBs() + statsForCurrentIteration.getNetworkWriteKBs()); + statsInMemory.setDiskWriteKBs(statsInMemory.getDiskWriteKBs() + statsForCurrentIteration.getDiskWriteKBs()); + statsInMemory.setDiskReadIOs(statsInMemory.getDiskReadIOs() + statsForCurrentIteration.getDiskReadIOs()); + statsInMemory.setDiskWriteIOs(statsInMemory.getDiskWriteIOs() + statsForCurrentIteration.getDiskWriteIOs()); + statsInMemory.setDiskReadKBs(statsInMemory.getDiskReadKBs() + statsForCurrentIteration.getDiskReadKBs()); _VmStats.put(vmId, statsInMemory); } @@ -276,6 +354,175 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc return _VmStats.get(id); } + class VmDiskStatsUpdaterTask implements Runnable { + @Override + public void run() { + GlobalLock scanLock = GlobalLock.getInternLock("vm.disk.stats"); + try { + if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { + //Check for ownership + //msHost in UP state with min id should run the job + ManagementServerHostVO msHost = _msHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", true, 0L, 1L)); + if(msHost == null || (msHost.getMsid() != mgmtSrvrId)){ + s_logger.debug("Skipping aggregate disk stats 
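// Sketch of the initial-delay computation for the disk-stats aggregation job: the first run
// is aligned to the next hourly or daily boundary in the configured usage time zone, and any
// other aggregation range starts immediately. It mirrors the Calendar arithmetic in the patch
// (the patch uses roll(); add() is used here for the same stepping effect). "GMT" is only the
// default fallback, as in the patch.
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;

public class AggregationBoundarySketch {
    static long nextBoundaryMillis(int aggregationRangeMinutes, String timeZoneId) {
        Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId));
        cal.setTime(new Date());
        final int HOURLY = 60;
        final int DAILY = 60 * 24;
        if (aggregationRangeMinutes == DAILY) {
            cal.set(Calendar.HOUR_OF_DAY, 0);
            cal.set(Calendar.MINUTE, 0);
            cal.set(Calendar.SECOND, 0);
            cal.set(Calendar.MILLISECOND, 0);
            cal.add(Calendar.DAY_OF_YEAR, 1);  // step to the next midnight
            cal.add(Calendar.MILLISECOND, -1); // last instant of the current day, as in the patch
        } else if (aggregationRangeMinutes == HOURLY) {
            cal.set(Calendar.MINUTE, 0);
            cal.set(Calendar.SECOND, 0);
            cal.set(Calendar.MILLISECOND, 0);
            cal.add(Calendar.HOUR_OF_DAY, 1);  // step to the next full hour
            cal.add(Calendar.MILLISECOND, -1);
        }
        return cal.getTimeInMillis();          // any other range: first run happens right away
    }

    public static void main(String[] args) {
        long delay = nextBoundaryMillis(60, "GMT") - System.currentTimeMillis();
        System.out.println("first aggregation run in ~" + (delay / 1000) + "s");
    }
}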
update"); + scanLock.unlock(); + return; + } + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + try { + txn.start(); + //get all stats with delta > 0 + List updatedVmNetStats = _vmDiskStatsDao.listUpdatedStats(); + for(VmDiskStatisticsVO stat : updatedVmNetStats){ + if (_dailyOrHourly) { + //update agg bytes + stat.setAggBytesRead(stat.getCurrentBytesRead() + stat.getNetBytesRead()); + stat.setAggBytesWrite(stat.getCurrentBytesWrite() + stat.getNetBytesWrite()); + stat.setAggIORead(stat.getCurrentIORead() + stat.getNetIORead()); + stat.setAggIOWrite(stat.getCurrentIOWrite() + stat.getNetIOWrite()); + _vmDiskStatsDao.update(stat.getId(), stat); + } + } + s_logger.debug("Successfully updated aggregate vm disk stats"); + txn.commit(); + } catch (Exception e){ + txn.rollback(); + s_logger.debug("Failed to update aggregate disk stats", e); + } finally { + scanLock.unlock(); + txn.close(); + } + } + } catch (Exception e){ + s_logger.debug("Exception while trying to acquire disk stats lock", e); + } finally { + scanLock.releaseRef(); + } + } + } + + class VmDiskStatsTask implements Runnable { + @Override + public void run() { + // collect the vm disk statistics(total) from hypervisor. added by weizhou, 2013.03. + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + try { + txn.start(); + SearchCriteria sc = _hostDao.createSearchCriteria(); + sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString()); + sc.addAnd("resourceState", SearchCriteria.Op.NIN, ResourceState.Maintenance, ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance); + sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Routing.toString()); + List hosts = _hostDao.search(sc, null); + + for (HostVO host : hosts) { + List vms = _userVmDao.listRunningByHostId(host.getId()); + List vmIds = new ArrayList(); + + for (UserVmVO vm : vms) { + if (vm.getType() == VirtualMachine.Type.User) // user vm + vmIds.add(vm.getId()); + } + + HashMap> vmDiskStatsById = _userVmMgr.getVmDiskStatistics(host.getId(), host.getName(), vmIds); + if (vmDiskStatsById == null) + continue; + + Set vmIdSet = vmDiskStatsById.keySet(); + for(Long vmId : vmIdSet) + { + List vmDiskStats = vmDiskStatsById.get(vmId); + if (vmDiskStats == null) + continue; + UserVmVO userVm = _userVmDao.findById(vmId); + for (VmDiskStatsEntry vmDiskStat:vmDiskStats) { + SearchCriteria sc_volume = _volsDao.createSearchCriteria(); + sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath()); + VolumeVO volume = _volsDao.search(sc_volume, null).get(0); + VmDiskStatisticsVO previousVmDiskStats = _vmDiskStatsDao.findBy(userVm.getAccountId(), userVm.getDataCenterId(), vmId, volume.getId()); + VmDiskStatisticsVO vmDiskStat_lock = _vmDiskStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), vmId, volume.getId()); + + if ((vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0) + && (vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0)) { + s_logger.debug("IO/bytes read and write are all 0. 
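// Sketch of the "single runner" guard used by VmDiskStatsUpdaterTask: among all management
// servers, only the one whose id matches the lowest-id server in Up state performs the
// aggregate update; everyone else skips. The surrounding GlobalLock and Transaction handling
// is CloudStack-specific and omitted here; the list below stands in for the DAO query.
import java.util.Arrays;
import java.util.List;

public class SingleRunnerSketch {
    static boolean shouldRun(long myServerId, List<Long> upServerIdsAscending) {
        // the list is assumed ordered by id ascending, as the Filter in the patch requests
        return !upServerIdsAscending.isEmpty() && upServerIdsAscending.get(0) == myServerId;
    }

    public static void main(String[] args) {
        System.out.println(shouldRun(101L, Arrays.asList(101L, 205L))); // true  -> run the job
        System.out.println(shouldRun(205L, Arrays.asList(101L, 205L))); // false -> skip
    }
}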
Not updating vm_disk_statistics"); + continue; + } + + if (vmDiskStat_lock == null) { + s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and volumeId:" + volume.getId()); + continue; + } + + if (previousVmDiskStats != null + && ((previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) + || (previousVmDiskStats.getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite()) + || (previousVmDiskStats.getCurrentIORead() != vmDiskStat_lock.getCurrentIORead()) + || (previousVmDiskStats.getCurrentIOWrite() != vmDiskStat_lock.getCurrentIOWrite()))) { + s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Read(Bytes): " + vmDiskStat.getBytesRead() + " write(Bytes): " + vmDiskStat.getBytesWrite() + + " Read(IO): " + vmDiskStat.getIORead() + " write(IO): " + vmDiskStat.getIOWrite()); + continue; + } + + if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Read # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead()); + } + vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); + } + vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); + if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Write # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: " + vmDiskStat_lock.getCurrentBytesWrite()); + } + vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); + } + vmDiskStat_lock.setCurrentBytesWrite(vmDiskStat.getBytesWrite()); + if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Read # of IO that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); + } + vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); + } + vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); + if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Write # of IO that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); + } + vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); + } + vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); + + if (! 
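// Sketch of the wrap/reset handling in VmDiskStatsTask: hypervisor disk counters are totals
// since VM start, so when a newly reported value is lower than the stored "current" value the
// VM has probably restarted; the old current is folded into the running "net" accumulator
// before the new current is stored. Field names loosely mirror VmDiskStatisticsVO; this is a
// stand-in, not the real entity class.
public class DiskCounterSketch {
    long netBytesRead;      // bytes accumulated across counter resets
    long currentBytesRead;  // last raw counter value reported by the hypervisor

    void onReport(long reportedBytesRead) {
        if (currentBytesRead > reportedBytesRead) {
            // counter went backwards: assume a reset and preserve what was counted so far
            netBytesRead += currentBytesRead;
        }
        currentBytesRead = reportedBytesRead;
    }

    long total() {
        return netBytesRead + currentBytesRead;
    }

    public static void main(String[] args) {
        DiskCounterSketch s = new DiskCounterSketch();
        s.onReport(1000);
        s.onReport(4000);
        s.onReport(500);               // VM restarted, counter reset
        System.out.println(s.total()); // 4500
    }
}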
_dailyOrHourly) { + //update agg bytes + vmDiskStat_lock.setAggBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); + vmDiskStat_lock.setAggBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); + vmDiskStat_lock.setAggIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); + vmDiskStat_lock.setAggIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); + } + + _vmDiskStatsDao.update(vmDiskStat_lock.getId(), vmDiskStat_lock); + } + } + } + txn.commit(); + } catch (Exception e) { + s_logger.warn("Error while collecting vm disk stats from hosts", e); + } finally { + txn.close(); + } + + } + } + class StorageCollector implements Runnable { @Override public void run() { diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 11461794021..687c4da3331 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -667,6 +667,25 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new InvalidParameterValueException("zone id can't be null, if scope is zone"); } + HypervisorType hypervisorType = HypervisorType.KVM; + if (scopeType == ScopeType.ZONE) { + String hypervisor = cmd.getHypervisor(); + if (hypervisor != null) { + try { + hypervisorType = HypervisorType.getType(hypervisor); + } catch (Exception e) { + throw new InvalidParameterValueException("invalid hypervisor type" + hypervisor); + } + } else { + throw new InvalidParameterValueException( + "Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage."); + } + if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware) { + throw new InvalidParameterValueException( + "zone wide storage pool is not suported for hypervisor type " + hypervisor); + } + } + Map ds = cmd.getDetails(); Map details = new HashMap(); if (ds != null) { @@ -712,7 +731,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C lifeCycle.attachCluster(store, clusterScope); } else if (scopeType == ScopeType.ZONE) { ZoneScope zoneScope = new ZoneScope(zoneId); - lifeCycle.attachZone(store, zoneScope); + lifeCycle.attachZone(store, zoneScope, hypervisorType); } } catch (Exception e) { s_logger.debug("Failed to add data store", e); @@ -1666,7 +1685,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) { PermissionDeniedException ex = new PermissionDeniedException( "Cannot perform this operation, Zone with specified id is currently disabled"); - ex.addProxyObject(zone, dcId, "dcId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } } @@ -1820,7 +1839,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) { PermissionDeniedException ex = new PermissionDeniedException( "Cannot perform this operation, Zone with specified id is currently disabled"); - ex.addProxyObject(zone, dcId, "dcId"); + ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java index 5f533ca3c5b..ffa20d306c5 100644 --- 
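// Sketch of the new validation in StorageManagerImpl for zone-wide primary storage: the
// hypervisor parameter is mandatory for ZONE scope and only KVM and VMware are accepted.
// The enum and exception type here are illustrative stand-ins, not the CloudStack classes.
public class ZoneWideStorageValidationSketch {
    enum HypervisorType { KVM, VMware, XenServer }

    static HypervisorType validateForZoneScope(String hypervisor) {
        if (hypervisor == null) {
            throw new IllegalArgumentException(
                "Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage.");
        }
        HypervisorType type;
        try {
            type = HypervisorType.valueOf(hypervisor);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("invalid hypervisor type " + hypervisor);
        }
        if (type != HypervisorType.KVM && type != HypervisorType.VMware) {
            throw new IllegalArgumentException(
                "zone wide storage pool is not supported for hypervisor type " + hypervisor);
        }
        return type;
    }

    public static void main(String[] args) {
        System.out.println(validateForZoneScope("KVM")); // accepted
        try {
            validateForZoneScope("XenServer");            // rejected for zone-wide scope
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}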
a/server/src/com/cloud/storage/VolumeManager.java +++ b/server/src/com/cloud/storage/VolumeManager.java @@ -115,4 +115,7 @@ public interface VolumeManager extends VolumeApiService { Account owner); + String getVmNameFromVolumeId(long volumeId); + + String getStoragePoolOfVolume(long volumeId); } diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index 5abd4323114..eca842800a5 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -152,9 +152,11 @@ import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.ResourceLimitService; +import com.cloud.user.VmDiskStatisticsVO; import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; +import com.cloud.user.dao.VmDiskStatisticsDao; import com.cloud.uservm.UserVm; import com.cloud.utils.EnumUtils; import com.cloud.utils.NumbersUtil; @@ -299,6 +301,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { @Inject protected ResourceTagDao _resourceTagDao; @Inject + protected VmDiskStatisticsDao _vmDiskStatsDao; + @Inject protected VMSnapshotDao _vmSnapshotDao; @Inject protected List _storagePoolAllocators; @@ -383,6 +387,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { throws ResourceAllocationException { Account caller = UserContext.current().getCaller(); long ownerId = cmd.getEntityOwnerId(); + Account owner = _accountDao.findById(ownerId); Long zoneId = cmd.getZoneId(); String volumeName = cmd.getVolumeName(); String url = cmd.getUrl(); @@ -392,7 +397,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { validateVolume(caller, ownerId, zoneId, volumeName, url, format); - VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, + VolumeVO volume = persistVolume(owner, zoneId, volumeName, url, cmd.getFormat()); VolumeInfo vol = volFactory.getVolume(volume.getId()); @@ -720,7 +725,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { return UUID.randomUUID().toString(); } - private VolumeVO persistVolume(Account caller, long ownerId, Long zoneId, + private VolumeVO persistVolume(Account owner, Long zoneId, String volumeName, String url, String format) { Transaction txn = Transaction.currentTxn(); @@ -728,12 +733,11 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { VolumeVO volume = new VolumeVO(volumeName, zoneId, -1L, -1L, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); - Account owner = (caller.getId() == ownerId) ? 
caller : _accountMgr - .getActiveAccountById(ownerId); volume.setPoolId(null); volume.setDataCenterId(zoneId); volume.setPodId(null); - volume.setAccountId(ownerId); + volume.setAccountId(owner.getAccountId()); + volume.setDomainId(owner.getDomainId()); long diskOfferingId = _diskOfferingDao.findByUniqueName( "Cloud.com-Custom").getId(); volume.setDiskOfferingId(diskOfferingId); @@ -1573,6 +1577,13 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } else { _volsDao.attachVolume(volume.getId(), vm.getId(), deviceId); } + // insert record for disk I/O statistics + VmDiskStatisticsVO diskstats = _vmDiskStatsDao.findBy(vm.getAccountId(), vm.getDataCenterId(),vm.getId(), volume.getId()); + if (diskstats == null) { + diskstats = new VmDiskStatisticsVO(vm.getAccountId(), vm.getDataCenterId(),vm.getId(), volume.getId()); + _vmDiskStatsDao.persist(diskstats); + } + return _volsDao.findById(volume.getId()); } else { if (answer != null) { @@ -1740,6 +1751,8 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } } + // reload the volume from db + volumeOnPrimaryStorage = volFactory.getVolume(volumeOnPrimaryStorage.getId()); boolean moveVolumeNeeded = needMoveVolume(rootVolumeOfVm, volumeOnPrimaryStorage); if (moveVolumeNeeded) { @@ -2547,8 +2560,6 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } - - @Override public Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account) throws ResourceAllocationException { VolumeInfo volume = volFactory.getVolume(volumeId); @@ -2620,7 +2631,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { VolumeVO volume = _volumeDao.findById(volumeId); if (volume == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find volume with specified volumeId"); - ex.addProxyObject(volume, volumeId, "volumeId"); + ex.addProxyObject(volumeId.toString(), "volumeId"); throw ex; } @@ -2640,7 +2651,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { + ". It should be either detached or the VM should be in stopped state."); PermissionDeniedException ex = new PermissionDeniedException( "Invalid state of the volume with specified ID. 
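// Sketch of the find-or-create step added when a volume is attached: a disk-statistics row
// keyed by (account, zone, vm, volume) is created only if none exists yet, so repeated
// attaches do not duplicate it. The map-backed store is a stand-in for
// VmDiskStatisticsDao.findBy/persist.
import java.util.HashMap;
import java.util.Map;

public class AttachVolumeStatsSketch {
    static final Map<String, long[]> STATS = new HashMap<String, long[]>();

    static String key(long accountId, long zoneId, long vmId, long volumeId) {
        return accountId + ":" + zoneId + ":" + vmId + ":" + volumeId;
    }

    static void ensureStatsRow(long accountId, long zoneId, long vmId, long volumeId) {
        String k = key(accountId, zoneId, vmId, volumeId);
        if (!STATS.containsKey(k)) {
            STATS.put(k, new long[4]); // bytesRead, bytesWrite, ioRead, ioWrite all start at 0
        }
    }

    public static void main(String[] args) {
        ensureStatsRow(2, 1, 10, 77);
        ensureStatsRow(2, 1, 10, 77);     // second attach of the same volume: no duplicate row
        System.out.println(STATS.size()); // 1
    }
}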
It should be either detached or the VM should be in stopped state."); - ex.addProxyObject(volume, volumeId, "volumeId"); + ex.addProxyObject(volume.getUuid(), "volumeId"); throw ex; } @@ -2655,7 +2666,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { if (!isExtractable && account != null && account.getType() != Account.ACCOUNT_TYPE_ADMIN) { // Global admins are always allowed to extract PermissionDeniedException ex = new PermissionDeniedException("The volume with specified volumeId is not allowed to be extracted"); - ex.addProxyObject(volume, volumeId, "volumeId"); + ex.addProxyObject(volume.getUuid(), "volumeId"); throw ex; } } @@ -2730,4 +2741,17 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { return null; } } + + @Override + public String getVmNameFromVolumeId(long volumeId) { + Long instanceId; + VolumeVO volume = _volsDao.findById(volumeId); + return getVmNameOnVolume(volume); + } + + @Override + public String getStoragePoolOfVolume(long volumeId) { + VolumeVO vol = _volsDao.findById(volumeId); + return dataStoreMgr.getPrimaryDataStore(vol.getPoolId()).getUuid(); + } } diff --git a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java index 8c894402c17..2be0c65fb4c 100755 --- a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java @@ -77,7 +77,10 @@ public class StoragePoolMonitor implements Listener { if (scCmd.getHypervisorType() == HypervisorType.XenServer || scCmd.getHypervisorType() == HypervisorType.KVM || scCmd.getHypervisorType() == HypervisorType.VMware || scCmd.getHypervisorType() == HypervisorType.Simulator || scCmd.getHypervisorType() == HypervisorType.Ovm) { List pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER); - pools.addAll(_poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null)); + List zoneStoragePoolsByTags = _poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null); + List zoneStoragePoolsByHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), scCmd.getHypervisorType()); + zoneStoragePoolsByTags.retainAll(zoneStoragePoolsByHypervisor); + pools.addAll(zoneStoragePoolsByTags); for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { continue; diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java index 586f770c182..954c7e970f0 100755 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java @@ -526,20 +526,28 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); + NetworkVO defaultNetwork = null; + if (dc.getNetworkType() == NetworkType.Advanced && dc.isSecurityGroupEnabled()) { + List networks = _networkDao.listByZoneSecurityGroup(dataCenterId); + if (networks == null || networks.size() == 0) { + throw new CloudRuntimeException("Can not found security enabled network in SG Zone " + dc); + } + defaultNetwork = networks.get(0); + } else { TrafficType defaultTrafficType = TrafficType.Public; + if (dc.getNetworkType() == NetworkType.Basic || dc.isSecurityGroupEnabled()) { 
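// Sketch of the StoragePoolMonitor change: the zone-wide pools offered to a connecting host
// are now the intersection of the tag-matched pools and the pools whose hypervisor matches
// the host, computed with retainAll. Plain strings stand in for StoragePoolVO instances.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ZoneWidePoolFilterSketch {
    public static void main(String[] args) {
        List<String> byTags = new ArrayList<String>(Arrays.asList("pool-kvm-1", "pool-vmware-1"));
        List<String> byHypervisor = Arrays.asList("pool-kvm-1");

        byTags.retainAll(byHypervisor); // keep only pools valid for this host's hypervisor
        System.out.println(byTags);     // [pool-kvm-1]
    }
}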
defaultTrafficType = TrafficType.Guest; } - List defaultNetworks = _networkDao.listByZoneAndTrafficType(dataCenterId, defaultTrafficType); - - //api should never allow this situation to happen + // api should never allow this situation to happen if (defaultNetworks.size() != 1) { - throw new CloudRuntimeException("Found " + defaultNetworks.size() + " networks of type " + defaultTrafficType + " when expect to find 1"); + throw new CloudRuntimeException("Found " + defaultNetworks.size() + " networks of type " + + defaultTrafficType + " when expect to find 1"); + } + defaultNetwork = defaultNetworks.get(0); } - NetworkVO defaultNetwork = defaultNetworks.get(0); - List offerings = _networkModel.getSystemAccountNetworkOfferings(NetworkOfferingVO.SystemControlNetwork, NetworkOfferingVO.SystemManagementNetwork, NetworkOfferingVO.SystemStorageNetwork); List> networks = new ArrayList>(offerings.size() + 1); NicProfile defaultNic = new NicProfile(); diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index a7e61374dfb..a1a4c90572a 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -946,23 +946,27 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if (volume.getInstanceId() != null) { UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); if (userVm != null) { - if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { - throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + " is associated with vm:" + userVm.getInstanceName() + " is in " - + userVm.getState().toString() + " state"); - } + if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { + throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + + " is associated with vm:" + userVm.getInstanceName() + " is in " + + userVm.getState().toString() + " state"); + } - if(userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) { - List activeSnapshots = _snapshotDao.listByInstanceId(volume.getInstanceId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); - if(activeSnapshots.size() > 1) - throw new CloudRuntimeException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); - } + if (userVm.getHypervisorType() == HypervisorType.VMware + || userVm.getHypervisorType() == HypervisorType.KVM) { + List activeSnapshots = _snapshotDao.listByInstanceId(volume.getInstanceId(), + Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); + if (activeSnapshots.size() > 1) + throw new CloudRuntimeException( + "There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); + } - List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), - VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); - if (activeVMSnapshots.size() > 0) { - throw new CloudRuntimeException( - "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); - } + List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), + VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); + 
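// Sketch of the reworked default-network selection for secondary storage VMs, as far as the
// hunk shows it: in an advanced zone with security groups enabled the first SG-enabled guest
// network is used; otherwise exactly one network of the default traffic type (Public, or
// Guest for basic / SG-enabled zones) is expected. Strings and booleans stand in for the
// DAO lookups and zone object.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SsvmDefaultNetworkSketch {
    static String pickDefaultNetwork(boolean advancedZone, boolean securityGroupEnabled,
                                     List<String> sgNetworks, List<String> trafficTypeNetworks) {
        if (advancedZone && securityGroupEnabled) {
            if (sgNetworks == null || sgNetworks.isEmpty()) {
                throw new IllegalStateException("Cannot find a security group enabled network in the SG zone");
            }
            return sgNetworks.get(0);
        }
        if (trafficTypeNetworks.size() != 1) {
            throw new IllegalStateException("Found " + trafficTypeNetworks.size()
                    + " default networks when expecting exactly 1");
        }
        return trafficTypeNetworks.get(0);
    }

    public static void main(String[] args) {
        System.out.println(pickDefaultNetwork(true, true,
                Arrays.asList("sg-guest-net"), Collections.<String>emptyList()));
    }
}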
if (activeVMSnapshots.size() > 0) { + throw new CloudRuntimeException( + "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); + } } } diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index c15bd6b4b7f..273614ca8e0 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -1697,14 +1697,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, VMTemplateVO template = _tmpltDao.findById(id); if (template == null || template.getRemoved() != null) { InvalidParameterValueException ex = new InvalidParameterValueException("unable to find template/iso with specified id"); - ex.addProxyObject(template, id, "templateId"); + ex.addProxyObject(String.valueOf(id), "templateId"); throw ex; } // Don't allow to modify system template if (id == Long.valueOf(1)) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to update template/iso of specified id"); - ex.addProxyObject(template, id, "templateId"); + ex.addProxyObject(String.valueOf(id), "templateId"); throw ex; } diff --git a/server/src/com/cloud/user/AccountManager.java b/server/src/com/cloud/user/AccountManager.java index 6ba1f6a7f96..2e909c8e042 100755 --- a/server/src/com/cloud/user/AccountManager.java +++ b/server/src/com/cloud/user/AccountManager.java @@ -47,21 +47,15 @@ public interface AccountManager extends AccountService { boolean deleteAccount(AccountVO account, long callerUserId, Account caller); - boolean cleanupAccount(AccountVO account, long callerUserId, Account caller); - Long checkAccessAndSpecifyAuthority(Account caller, Long zoneId); - Account createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid); - - UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID); - + Account createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid); + /** * Logs out a user * @param userId */ - void logoutUser(Long userId); - - UserAccount getUserAccount(String username, Long domainId); + void logoutUser(long userId); /** * Authenticates a user when s/he logs in. 
@@ -87,9 +81,7 @@ public interface AccountManager extends AccountService { * @return the user/account pair if one exact match was found, null otherwise */ Pair findUserByApiKey(String apiKey); - - boolean lockAccount(long accountId); - + boolean enableAccount(long accountId); void buildACLSearchBuilder(SearchBuilder sb, Long domainId, diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 7421422d294..6b4bf0ed6b8 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -46,23 +46,25 @@ import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; import org.apache.cloudstack.api.command.admin.user.RegisterCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; +import org.apache.cloudstack.region.gslb.GlobalLoadBalancerRuleDao; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; -import com.cloud.api.query.dao.UserAccountJoinDao; import com.cloud.api.query.vo.ControlledViewEntity; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.Resource.ResourceOwnerType; import com.cloud.configuration.ResourceCountVO; import com.cloud.configuration.ResourceLimit; -import com.cloud.configuration.Resource.ResourceOwnerType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.configuration.dao.ResourceCountDao; import com.cloud.configuration.dao.ResourceLimitDao; import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DedicatedResourceVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterVnetDao; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -102,8 +104,8 @@ import com.cloud.projects.ProjectManager; import com.cloud.projects.ProjectVO; import com.cloud.projects.dao.ProjectAccountDao; import com.cloud.projects.dao.ProjectDao; +import com.cloud.region.ha.GlobalLoadBalancingRulesService; import com.cloud.server.auth.UserAuthenticator; -import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeManager; @@ -162,8 +164,6 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Inject private UserAccountDao _userAccountDao; @Inject - private UserAccountJoinDao _userAccountJoinDao; - @Inject private VolumeDao _volumeDao; @Inject private UserVmDao _userVmDao; @@ -188,8 +188,6 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Inject private UserVmManager _vmMgr; @Inject - private StorageManager _storageMgr; - @Inject private TemplateManager _tmpltMgr; @Inject private ConfigurationManager _configMgr; @@ -229,6 +227,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Inject private AffinityGroupDao _affinityGroupDao; @Inject + private AccountGuestVlanMapDao _accountGuestVlanMapDao; @Inject private DataCenterVnetDao _dataCenterVnetDao; @@ -236,6 +235,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M private ResourceLimitService _resourceLimitMgr; @Inject private ResourceLimitDao _resourceLimitDao; + @Inject + private DedicatedResourceDao _dedicatedDao; + @Inject + private GlobalLoadBalancerRuleDao 
_gslbRuleDao; + @Inject + public com.cloud.region.ha.GlobalLoadBalancingRulesService _gslbService; private List _userAuthenticators; List _userPasswordEncoders; @@ -500,8 +505,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return success; } - @Override - public boolean lockAccount(long accountId) { + + protected boolean lockAccount(long accountId) { boolean success = false; Account account = _accountDao.findById(accountId); if (account != null) { @@ -539,8 +544,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return cleanupAccount(account, callerUserId, caller); } - @Override - public boolean cleanupAccount(AccountVO account, long callerUserId, Account caller) { + + protected boolean cleanupAccount(AccountVO account, long callerUserId, Account caller) { long accountId = account.getId(); boolean accountCleanupNeeded = false; @@ -554,6 +559,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } + // delete global load balancer rules for the account. + List gslbRules = _gslbRuleDao.listByAccount(accountId); + if (gslbRules != null && !gslbRules.isEmpty()) { + _gslbService.revokeAllGslbRulesForAccount(caller, accountId); + } + //delete the account from project accounts _projectAccountDao.removeAccountFromProjects(accountId); @@ -731,6 +742,23 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _resourceCountDao.removeEntriesByOwner(accountId, ResourceOwnerType.Account); _resourceLimitDao.removeEntriesByOwner(accountId, ResourceOwnerType.Account); + // release account specific acquired portable IP's. Since all the portable IP's must have been already + // disassociated with VPC/guest network (due to deletion), so just mark portable IP as free. 
+ List portableIpsToRelease = _ipAddressDao.listByAccount(accountId); + for (IpAddress ip : portableIpsToRelease) { + s_logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); + _networkMgr.releasePortableIpAddress(ip.getId()); + } + //release dedication if any + List dedicatedResources = _dedicatedDao.listByAccountId(accountId); + if (dedicatedResources != null && !dedicatedResources.isEmpty()) { + s_logger.debug("Releasing dedicated resources for account " + accountId); + for (DedicatedResourceVO dr : dedicatedResources){ + if (!_dedicatedDao.remove(dr.getId())) { + s_logger.warn("Fail to release dedicated resources for account " + accountId); + } + } + } return true; } catch (Exception ex) { s_logger.warn("Failed to cleanup account " + account + " due to ", ex); @@ -1480,6 +1508,16 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M try { List accountsForCleanupInDomain = _accountDao.findCleanupsForRemovedAccounts(domainId); if (accountsForCleanupInDomain.isEmpty()) { + //release dedication if any, before deleting the domain + List dedicatedResources = _dedicatedDao.listByDomainId(domainId); + if (dedicatedResources != null && !dedicatedResources.isEmpty()) { + s_logger.debug("Releasing dedicated resources for domain" + domainId); + for (DedicatedResourceVO dr : dedicatedResources){ + if (!_dedicatedDao.remove(dr.getId())) { + s_logger.warn("Fail to release dedicated resources for domain " + domainId); + } + } + } s_logger.debug("Removing inactive domain id=" + domainId); _domainMgr.removeDomain(domainId); } else { @@ -1585,21 +1623,13 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } @Override - public Account getActiveAccountById(Long accountId) { - if (accountId == null) { - throw new InvalidParameterValueException("AccountId is required by account search"); - } else { - return _accountDao.findById(accountId); - } + public Account getActiveAccountById(long accountId) { + return _accountDao.findById(accountId); } @Override - public Account getAccount(Long accountId) { - if (accountId == null) { - throw new InvalidParameterValueException("AccountId is required by account search"); - } else { - return _accountDao.findByIdIncludingRemoved(accountId); - } + public Account getAccount(long accountId) { + return _accountDao.findByIdIncludingRemoved(accountId); } @Override @@ -1637,62 +1667,6 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return _userDao.findByIdIncludingRemoved(userId); } - @Override - public Pair, Long> finalizeAccountDomainForList(Account caller, String accountName, Long domainId, Long projectId) { - List permittedAccounts = new ArrayList(); - - if (isAdmin(caller.getType())) { - if (domainId == null && accountName != null) { - throw new InvalidParameterValueException("accountName and domainId might be specified together"); - } else if (domainId != null) { - Domain domain = _domainMgr.getDomain(domainId); - if (domain == null) { - throw new InvalidParameterValueException("Unable to find the domain by id=" + domainId); - } - - checkAccess(caller, domain); - - if (accountName != null) { - Account owner = getActiveAccountByName(accountName, domainId); - if (owner == null) { - throw new InvalidParameterValueException("Unable to find account with name " + accountName + " in domain id=" + domainId); - } - - permittedAccounts.add(owner.getId()); - } - } - } else if (accountName != null && domainId != null) { - if 
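// Sketch of the cleanup loop added for dedicated resources (used for both account and domain
// removal): every dedication owned by the account or domain is removed, and failures are only
// logged so the rest of the cleanup can proceed. The DAO interface below is a minimal
// stand-in, not the CloudStack DedicatedResourceDao.
import java.util.Arrays;
import java.util.List;

public class DedicationCleanupSketch {
    interface DedicatedResourceDao {
        boolean remove(long id);
    }

    static void releaseDedications(List<Long> dedicationIds, DedicatedResourceDao dao, long ownerId) {
        for (long id : dedicationIds) {
            if (!dao.remove(id)) {
                // warn and continue, mirroring the patch's behaviour on a failed removal
                System.out.println("WARN: failed to release dedicated resource " + id + " for owner " + ownerId);
            }
        }
    }

    public static void main(String[] args) {
        DedicatedResourceDao dao = new DedicatedResourceDao() {
            public boolean remove(long id) { return id != 3L; } // pretend id 3 cannot be removed
        };
        releaseDedications(Arrays.asList(1L, 3L), dao, 42L);
    }
}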
(!accountName.equals(caller.getAccountName()) || domainId.longValue() != caller.getDomainId()) { - throw new PermissionDeniedException("Can't list port forwarding rules for account " + accountName + " in domain " + domainId + ", permission denied"); - } - permittedAccounts.add(getActiveAccountByName(accountName, domainId).getId()); - } else { - permittedAccounts.add(caller.getAccountId()); - } - - if (domainId == null && caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) { - domainId = caller.getDomainId(); - } - - // set project information - if (projectId != null) { - if (projectId.longValue() == -1) { - permittedAccounts.addAll(_projectMgr.listPermittedProjectAccounts(caller.getId())); - } else { - permittedAccounts.clear(); - Project project = _projectMgr.getProject(projectId); - if (project == null) { - throw new InvalidParameterValueException("Unable to find project by id " + projectId); - } - if (!_projectMgr.canAccessProjectAccount(caller, project.getProjectAccountId())) { - throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + projectId); - } - permittedAccounts.add(project.getProjectAccountId()); - } - } - - return new Pair, Long>(permittedAccounts, domainId); - } @Override public User getActiveUserByRegistrationToken(String registrationToken) { @@ -1708,7 +1682,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Override @DB - public AccountVO createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid) { + public AccountVO createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid) { // Validate domain Domain domain = _domainMgr.getDomain(domainId); if (domain == null) { @@ -1774,9 +1748,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return account; } - @Override @ActionEvent(eventType = EventTypes.EVENT_USER_CREATE, eventDescription = "creating User") - public UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID) { + protected UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID) { if (s_logger.isDebugEnabled()) { s_logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone); } @@ -1801,29 +1774,13 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } @Override - public void logoutUser(Long userId) { + public void logoutUser(long userId) { UserAccount userAcct = _userAccountDao.findById(userId); if (userAcct != null) { ActionEventUtils.onActionEvent(userId, userAcct.getAccountId(), userAcct.getDomainId(), EventTypes.EVENT_USER_LOGOUT, "user has logged out"); } // else log some kind of error event? This likely means the user doesn't exist, or has been deleted... 
} - @Override - public UserAccount getUserAccount(String username, Long domainId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieiving user: " + username + " in domain " + domainId); - } - - UserAccount userAccount = _userAccountDao.getUserAccount(username, domainId); - if (userAccount == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find user with name " + username + " in domain " + domainId); - } - return null; - } - - return userAccount; - } @Override public UserAccount authenticateUser(String username, String password, Long domainId, String loginIpAddress, Map requestParameters) { @@ -2251,6 +2208,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M domainIdRecursiveListProject.second(true); } } + } else if (domainId != null) { + if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) { + permittedAccounts.add(caller.getId()); + } } } diff --git a/server/src/com/cloud/user/DomainManagerImpl.java b/server/src/com/cloud/user/DomainManagerImpl.java index c451041d951..20537bae926 100644 --- a/server/src/com/cloud/user/DomainManagerImpl.java +++ b/server/src/com/cloud/user/DomainManagerImpl.java @@ -35,6 +35,8 @@ import com.cloud.configuration.Resource.ResourceOwnerType; import com.cloud.configuration.ResourceLimit; import com.cloud.configuration.dao.ResourceCountDao; import com.cloud.configuration.dao.ResourceLimitDao; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -87,6 +89,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom private RegionManager _regionMgr; @Inject private ResourceLimitDao _resourceLimitDao; + @Inject + private DedicatedResourceDao _dedicatedDao; @Override public Domain getDomain(long domainId) { @@ -226,7 +230,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom if ((cleanup != null) && cleanup.booleanValue()) { if (!cleanupDomain(domain.getId(), ownerId)) { CloudRuntimeException e = new CloudRuntimeException("Failed to clean up domain resources and sub domains, delete failed on domain " + domain.getName() + " (id: " + domain.getId() + ")."); - e.addProxyObject(domain, domain.getId(), "domainId"); + e.addProxyObject(domain.getUuid(), "domainId"); throw e; } } else { @@ -235,13 +239,24 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom if (!_domainDao.remove(domain.getId())) { rollBackState = true; CloudRuntimeException e = new CloudRuntimeException("Delete failed on domain " + domain.getName() + " (id: " + domain.getId() + "); Please make sure all users and sub domains have been removed from the domain before deleting"); - e.addProxyObject(domain, domain.getId(), "domainId"); + e.addProxyObject(domain.getUuid(), "domainId"); throw e; + } else { + //release dedication if any, before deleting the domain + List dedicatedResources = _dedicatedDao.listByDomainId(domain.getId()); + if (dedicatedResources != null && !dedicatedResources.isEmpty()) { + s_logger.debug("Releasing dedicated resources for domain" + domain.getId()); + for (DedicatedResourceVO dr : dedicatedResources){ + if (!_dedicatedDao.remove(dr.getId())) { + s_logger.warn("Fail to release dedicated resources for domain " + domain.getId()); + } + } + } } } else { rollBackState = true; CloudRuntimeException e = new CloudRuntimeException("Can't delete the domain yet because it has " + 
accountsForCleanup.size() + "accounts that need a cleanup"); - e.addProxyObject(domain, domain.getId(), "domainId"); + e.addProxyObject(domain.getUuid(), "domainId"); throw e; } } @@ -333,6 +348,17 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom boolean deleteDomainSuccess = true; List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domainId); if (accountsForCleanup.isEmpty()) { + //release dedication if any, before deleting the domain + List dedicatedResources = _dedicatedDao.listByDomainId(domainId); + if (dedicatedResources != null && !dedicatedResources.isEmpty()) { + s_logger.debug("Releasing dedicated resources for domain" + domainId); + for (DedicatedResourceVO dr : dedicatedResources){ + if (!_dedicatedDao.remove(dr.getId())) { + s_logger.warn("Fail to release dedicated resources for domain " + domainId); + } + } + } + //delete domain deleteDomainSuccess = _domainDao.remove(domainId); // Delete resource count and resource limits entries set for this domain (if there are any). @@ -480,7 +506,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom DomainVO domain = _domainDao.findById(domainId); if (domain == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id"); - ex.addProxyObject(domain, domainId, "domainId"); + ex.addProxyObject(domainId.toString(), "domainId"); throw ex; } else if (domain.getParent() == null && domainName != null) { // check if domain is ROOT domain - and deny to edit it with the new name @@ -501,7 +527,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom if (!domains.isEmpty() && !sameDomain) { InvalidParameterValueException ex = new InvalidParameterValueException("Failed to update specified domain id with name '" + domainName + "' since it already exists in the system"); - ex.addProxyObject(domain, domainId, "domainId"); + ex.addProxyObject(domain.getUuid(), "domainId"); throw ex; } } diff --git a/server/src/com/cloud/vm/UserVmManager.java b/server/src/com/cloud/vm/UserVmManager.java index 4dcfb73e2a1..348017a0a44 100755 --- a/server/src/com/cloud/vm/UserVmManager.java +++ b/server/src/com/cloud/vm/UserVmManager.java @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.exception.*; @@ -65,6 +66,8 @@ public interface UserVmManager extends VirtualMachineGuru, UserVmServi */ HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds); + HashMap> getVmDiskStatistics(long hostId, String hostName, List vmIds); + boolean deleteVmGroup(long groupId); boolean addInstanceToGroup(long userVmId, String group); @@ -95,4 +98,6 @@ public interface UserVmManager extends VirtualMachineGuru, UserVmServi boolean upgradeVirtualMachine(Long id, Long serviceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException; boolean setupVmForPvlan(boolean add, Long hostId, NicProfile nic); + + void collectVmDiskStatistics (UserVmVO userVm); } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index cefaee597ff..6cfb0780533 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -35,6 +35,7 @@ import 
javax.naming.ConfigurationException; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; +import com.cloud.server.ConfigurationServer; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.affinity.AffinityGroupVO; @@ -69,6 +70,8 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO; import com.cloud.agent.AgentManager; import com.cloud.agent.AgentManager.OnError; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.GetVmDiskStatsAnswer; +import com.cloud.agent.api.GetVmDiskStatsCommand; import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; import com.cloud.agent.api.PlugNicAnswer; @@ -78,6 +81,7 @@ import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NicTO; @@ -96,9 +100,11 @@ import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DedicatedResourceVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; @@ -173,7 +179,6 @@ import com.cloud.resource.ResourceState; import com.cloud.server.Criteria; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; -import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; @@ -212,9 +217,11 @@ import com.cloud.user.SSHKeyPairVO; import com.cloud.user.User; import com.cloud.user.UserContext; import com.cloud.user.UserVO; +import com.cloud.user.VmDiskStatisticsVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.user.dao.UserDao; +import com.cloud.user.dao.VmDiskStatisticsDao; import com.cloud.uservm.UserVm; import com.cloud.utils.Journal; import com.cloud.utils.NumbersUtil; @@ -238,6 +245,7 @@ import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.InstanceGroupDao; import com.cloud.vm.dao.InstanceGroupVMMapDao; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; @@ -389,6 +397,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use protected GuestOSCategoryDao _guestOSCategoryDao; @Inject UsageEventDao _usageEventDao; + + @Inject + SecondaryStorageVmDao _secondaryDao; + @Inject + VmDiskStatisticsDao _vmDiskStatsDao; + @Inject protected VMSnapshotDao _vmSnapshotDao; @Inject @@ -400,10 +414,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use AffinityGroupDao _affinityGroupDao; @Inject TemplateDataFactory templateFactory; + @Inject + DedicatedResourceDao _dedicatedDao; + @Inject + ConfigurationServer _configServer; protected ScheduledExecutorService _executor = null; protected int _expungeInterval; protected int _expungeDelay; + protected boolean _dailyOrHourly = 
false; protected String _name; protected String _instance; @@ -735,6 +754,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } if (vm.getState() == State.Running && vm.getHostId() != null) { + collectVmDiskStatistics(vm); return _itMgr.reboot(vm, null, caller, owner); } else { s_logger.error("Vm id=" + vmId @@ -819,6 +839,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } + @Override public UserVm addNicToVirtualMachine(AddNicToVMCmd cmd) throws InvalidParameterValueException, PermissionDeniedException, CloudRuntimeException { Long vmId = cmd.getVmId(); @@ -1022,6 +1043,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use Network oldDefaultNetwork = null; oldDefaultNetwork = _networkModel.getDefaultNetworkForVm(vmId); + String oldNicIdString = Long.toString(_networkModel.getDefaultNic(vmId).getId()); long oldNetworkOfferingId = -1L; if(oldDefaultNetwork!=null) { @@ -1061,13 +1083,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use String nicIdString = Long.toString(nic.getId()); long newNetworkOfferingId = network.getNetworkOfferingId(); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vmInstance.getAccountId(), vmInstance.getDataCenterId(), - vmInstance.getId(), nicIdString, oldNetworkOfferingId, null, 1L, VirtualMachine.class.getName(), vmInstance.getUuid()); + vmInstance.getId(), oldNicIdString, oldNetworkOfferingId, null, 1L, VirtualMachine.class.getName(), vmInstance.getUuid()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmInstance.getAccountId(), vmInstance.getDataCenterId(), vmInstance.getId(), nicIdString, newNetworkOfferingId, null, 1L, VirtualMachine.class.getName(), vmInstance.getUuid()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vmInstance.getAccountId(), vmInstance.getDataCenterId(), vmInstance.getId(), nicIdString, newNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmInstance.getAccountId(), vmInstance.getDataCenterId(), - vmInstance.getId(), nicIdString, oldNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid()); + vmInstance.getId(), oldNicIdString, oldNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid()); return _vmDao.findById(vmInstance.getId()); } @@ -1090,6 +1112,41 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } + @Override + public HashMap> getVmDiskStatistics(long hostId, String hostName, List vmIds) throws CloudRuntimeException { + HashMap> vmDiskStatsById = new HashMap>(); + + if (vmIds.isEmpty()) { + return vmDiskStatsById; + } + + List vmNames = new ArrayList(); + + for (Long vmId : vmIds) { + UserVmVO vm = _vmDao.findById(vmId); + vmNames.add(vm.getInstanceName()); + } + + Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(vmNames, _hostDao.findById(hostId).getGuid(), hostName)); + if (answer == null || !answer.getResult()) { + s_logger.warn("Unable to obtain VM disk statistics."); + return null; + } else { + HashMap> vmDiskStatsByName = ((GetVmDiskStatsAnswer)answer).getVmDiskStatsMap(); + + if (vmDiskStatsByName == null) { + s_logger.warn("Unable to obtain VM disk statistics."); + return null; + } + + for (String vmName : vmDiskStatsByName.keySet()) { + vmDiskStatsById.put(vmIds.get(vmNames.indexOf(vmName)), 
vmDiskStatsByName.get(vmName)); + } + } + + return vmDiskStatsById; + } + @Override public boolean upgradeVirtualMachine(Long vmId, Long newServiceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException{ Account caller = UserContext.current().getCaller(); @@ -1112,21 +1169,52 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use //Check if its a scale "up" ServiceOffering newServiceOffering = _configMgr.getServiceOffering(newServiceOfferingId); - ServiceOffering oldServiceOffering = _configMgr.getServiceOffering(vmInstance.getServiceOfferingId()); - if(newServiceOffering.getSpeed() <= oldServiceOffering.getSpeed() - && newServiceOffering.getRamSize() <= oldServiceOffering.getRamSize()){ + ServiceOffering currentServiceOffering = _configMgr.getServiceOffering(vmInstance.getServiceOfferingId()); + int newCpu = newServiceOffering.getCpu(); + int newMemory = newServiceOffering.getRamSize(); + int newSpeed = newServiceOffering.getSpeed(); + int currentCpu = currentServiceOffering.getCpu(); + int currentMemory = currentServiceOffering.getRamSize(); + int currentSpeed = currentServiceOffering.getSpeed(); + + if(newSpeed <= currentSpeed + && newMemory <= currentMemory + && newCpu <= currentCpu){ throw new InvalidParameterValueException("Only scaling up the vm is supported, new service offering should have both cpu and memory greater than the old values"); } + // Check resource limits + if (newCpu > currentCpu) { + _resourceLimitMgr.checkResourceLimit(caller, ResourceType.cpu, + newCpu - currentCpu); + } + if (newMemory > currentMemory) { + _resourceLimitMgr.checkResourceLimit(caller, ResourceType.memory, + newMemory - currentMemory); + } + // Dynamically upgrade the running vms boolean success = false; if(vmInstance.getState().equals(State.Running)){ int retry = _scaleRetry; + boolean enableDynamicallyScaleVm = Boolean.parseBoolean(_configServer.getConfigValue(Config.EnableDynamicallyScaleVm.key(), Config.ConfigurationParameterScope.zone.toString(), vmInstance.getDataCenterId())); + if(!enableDynamicallyScaleVm){ + throw new PermissionDeniedException("Dynamically scaling virtual machines is disabled for this zone, please contact your admin"); + } + + // Increment CPU and Memory count accordingly. + if (newCpu > currentCpu) { + _resourceLimitMgr.incrementResourceCount(caller.getAccountId(), ResourceType.cpu, new Long (newCpu - currentCpu)); + } + if (newMemory > currentMemory) { + _resourceLimitMgr.incrementResourceCount(caller.getAccountId(), ResourceType.memory, new Long (newMemory - currentMemory)); + } + while (retry-- != 0) { // It's != so that it can match -1. try{ // #1 Check existing host has capacity - boolean existingHostHasCapacity = _capacityMgr.checkIfHostHasCapacity(vmInstance.getHostId(), newServiceOffering.getSpeed() - oldServiceOffering.getSpeed(), - (newServiceOffering.getRamSize() - oldServiceOffering.getRamSize()) * 1024L * 1024L, false, ApiDBUtils.getCpuOverprovisioningFactor(), 1f, false); // TO DO fill it with mem. + boolean existingHostHasCapacity = _capacityMgr.checkIfHostHasCapacity(vmInstance.getHostId(), newServiceOffering.getSpeed() - currentServiceOffering.getSpeed(), + (newServiceOffering.getRamSize() - currentServiceOffering.getRamSize()) * 1024L * 1024L, false, ApiDBUtils.getCpuOverprovisioningFactor(), 1f, false); // TO DO fill it with mem. 
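/*
 * Editorial sketch, not part of the patch: a minimal, self-contained illustration of the
 * scale-up validation introduced in the hunk above. OfferingSpec and ScaleDelta are
 * hypothetical stand-ins for CloudStack's ServiceOffering; only the comparison and the
 * delta arithmetic used for the resource-limit checks mirrors the patch.
 */
class ScaleUpCheck {
    static final class OfferingSpec {
        final int cpu;
        final int ramMb;
        final int speedMhz;
        OfferingSpec(int cpu, int ramMb, int speedMhz) {
            this.cpu = cpu;
            this.ramMb = ramMb;
            this.speedMhz = speedMhz;
        }
    }

    static final class ScaleDelta {
        final int extraCpu;
        final int extraRamMb;
        ScaleDelta(int extraCpu, int extraRamMb) {
            this.extraCpu = extraCpu;
            this.extraRamMb = extraRamMb;
        }
    }

    // Rejects the request when nothing grows; otherwise returns the extra cpu/ram that the
    // patch checks against, and later adds to, the account's resource limits.
    static ScaleDelta validateScaleUp(OfferingSpec current, OfferingSpec requested) {
        if (requested.speedMhz <= current.speedMhz
                && requested.ramMb <= current.ramMb
                && requested.cpu <= current.cpu) {
            throw new IllegalArgumentException(
                    "Only scaling up is supported: the new offering must increase cpu, memory or speed");
        }
        return new ScaleDelta(Math.max(0, requested.cpu - current.cpu),
                Math.max(0, requested.ramMb - current.ramMb));
    }

    public static void main(String[] args) {
        ScaleDelta d = validateScaleUp(new OfferingSpec(1, 512, 500), new OfferingSpec(2, 1024, 500));
        System.out.println("reserve cpu=" + d.extraCpu + " ramMb=" + d.extraRamMb);
    }
}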
// #2 migrate the vm if host doesn't have capacity if (!existingHostHasCapacity){ @@ -1136,7 +1224,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use // #3 scale the vm now _itMgr.upgradeVmDb(vmId, newServiceOfferingId); vmInstance = _vmInstanceDao.findById(vmId); - vmInstance = _itMgr.reConfigureVm(vmInstance, oldServiceOffering, existingHostHasCapacity); + vmInstance = _itMgr.reConfigureVm(vmInstance, currentServiceOffering, existingHostHasCapacity); success = true; return success; }catch(InsufficientCapacityException e ){ @@ -1151,8 +1239,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use s_logger.warn("Received exception while scaling ",e); }finally{ if(!success){ - _itMgr.upgradeVmDb(vmId, oldServiceOffering.getId()); // rollback + _itMgr.upgradeVmDb(vmId, currentServiceOffering.getId()); // rollback + // Decrement CPU and Memory count accordingly. + if (newCpu > currentCpu) { + _resourceLimitMgr.decrementResourceCount(caller.getAccountId(), ResourceType.cpu, new Long (newCpu - currentCpu)); } + if (newMemory > currentMemory) { + _resourceLimitMgr.decrementResourceCount(caller.getAccountId(), ResourceType.memory, new Long (newMemory - currentMemory)); + } + } + + } } } @@ -1348,6 +1445,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("UserVm-Scavenger")); + String aggregationRange = configs.get("usage.stats.job.aggregation.range"); + int _usageAggregationRange = NumbersUtil.parseInt(aggregationRange, 1440); + int HOURLY_TIME = 60; + final int DAILY_TIME = 60 * 24; + if (_usageAggregationRange == DAILY_TIME) { + _dailyOrHourly = true; + } else if (_usageAggregationRange == HOURLY_TIME) { + _dailyOrHourly = true; + } else { + _dailyOrHourly = false; + } + _itMgr.registerGuru(VirtualMachine.Type.User, this); VirtualMachine.State.getStateMachine().registerListener( @@ -2317,8 +2426,21 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + zone.getId()); } - if (zone.getDomainId() != null) { - DomainVO domain = _domainDao.findById(zone.getDomainId()); + boolean isExplicit = false; + // check affinity group type Explicit dedication + if (affinityGroupIdList != null) { + for (Long affinityGroupId : affinityGroupIdList) { + AffinityGroupVO ag = _affinityGroupDao.findById(affinityGroupId); + String agType = ag.getType(); + if (agType.equals("ExplicitDedication")) { + isExplicit = true; + } + } + } + // check if zone is dedicated + DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(zone.getId()); + if (isExplicit && dedicatedZone != null) { + DomainVO domain = _domainDao.findById(dedicatedZone.getDomainId()); if (domain == null) { throw new CloudRuntimeException("Unable to find the domain " + zone.getDomainId() + " for the zone: " + zone); @@ -2845,6 +2967,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use userVm.setPrivateMacAddress(nic.getMacAddress()); } } + + List volumes = _volsDao.findByInstance(userVm.getId()); + VmDiskStatisticsVO diskstats = null; + for (VolumeVO volume : volumes) { + diskstats = _vmDiskStatsDao.findBy(userVm.getAccountId(), userVm.getDataCenterId(),userVm.getId(), volume.getId()); + if (diskstats == null) { + diskstats = new VmDiskStatisticsVO(userVm.getAccountId(), userVm.getDataCenterId(),userVm.getId(), volume.getId()); + _vmDiskStatsDao.persist(diskstats); + } + } + return true; } @@ -3230,7 +3363,7 @@ 
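/*
 * Editorial sketch, not part of the patch: how the new _dailyOrHourly flag set in configure()
 * above follows from the usage.stats.job.aggregation.range setting. The helper below is a
 * hypothetical standalone rendering of that decision; in the patch the raw value comes from
 * the configs map and NumbersUtil.parseInt with a 1440-minute default.
 */
class UsageAggregationWindow {
    static final int HOURLY_MINUTES = 60;
    static final int DAILY_MINUTES = 60 * 24;

    // True only when the aggregation range is exactly hourly or daily. Any other range keeps
    // the per-collection agg* counter updates enabled (the "if (!_dailyOrHourly)" branch).
    static boolean isDailyOrHourly(String configuredRange) {
        int range;
        try {
            range = Integer.parseInt(configuredRange.trim());
        } catch (RuntimeException e) {
            range = DAILY_MINUTES; // same fallback as the 1440 default used in the patch
        }
        return range == DAILY_MINUTES || range == HOURLY_MINUTES;
    }

    public static void main(String[] args) {
        System.out.println(isDailyOrHourly("1440")); // true
        System.out.println(isDailyOrHourly("30"));   // false
    }
}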
public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } catch (CloudException e) { CloudRuntimeException ex = new CloudRuntimeException( "Unable to destroy with specified vmId", e); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -3257,9 +3390,125 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } else { CloudRuntimeException ex = new CloudRuntimeException( "Failed to destroy vm with specified vmId"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } + + } + + @Override + public void collectVmDiskStatistics (UserVmVO userVm) { + // Collect vm disk statistics from host before stopping Vm + long hostId = userVm.getHostId(); + List vmNames = new ArrayList(); + vmNames.add(userVm.getInstanceName()); + HostVO host = _hostDao.findById(hostId); + + GetVmDiskStatsAnswer diskStatsAnswer = null; + try { + diskStatsAnswer = (GetVmDiskStatsAnswer) _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(vmNames, host.getGuid(), host.getName())); + } catch (Exception e) { + s_logger.warn("Error while collecting disk stats for vm: " + userVm.getHostName() + " from host: " + host.getName(), e); + return; + } + if (diskStatsAnswer != null) { + if (!diskStatsAnswer.getResult()) { + s_logger.warn("Error while collecting disk stats vm: " + userVm.getHostName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); + return; + } + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + try { + txn.start(); + HashMap> vmDiskStatsByName = diskStatsAnswer.getVmDiskStatsMap(); + List vmDiskStats = vmDiskStatsByName.get(userVm.getInstanceName()); + + if (vmDiskStats == null) + return; + + for (VmDiskStatsEntry vmDiskStat:vmDiskStats) { + SearchCriteria sc_volume = _volsDao.createSearchCriteria(); + sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath()); + VolumeVO volume = _volsDao.search(sc_volume, null).get(0); + VmDiskStatisticsVO previousVmDiskStats = _vmDiskStatsDao.findBy(userVm.getAccountId(), userVm.getDataCenterId(), userVm.getId(), volume.getId()); + VmDiskStatisticsVO vmDiskStat_lock = _vmDiskStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), userVm.getId(), volume.getId()); + + if ((vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0) && (vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0)) { + s_logger.debug("Read/Write of IO and Bytes are both 0. Not updating vm_disk_statistics"); + continue; + } + + if (vmDiskStat_lock == null) { + s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and volumeId:" + volume.getId()); + continue; + } + + if (previousVmDiskStats != null + && ((previousVmDiskStats.getCurrentIORead() != vmDiskStat_lock.getCurrentIORead()) + || ((previousVmDiskStats.getCurrentIOWrite() != vmDiskStat_lock.getCurrentIOWrite()) + || (previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) + || (previousVmDiskStats.getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite())))) { + s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + + "Ignoring current answer. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + + " IO Read: " + vmDiskStat.getIORead() + " IO Write: " + vmDiskStat.getIOWrite() + + " Bytes Read: " + vmDiskStat.getBytesRead() + " Bytes Write: " + vmDiskStat.getBytesWrite()); + continue; + } + + if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Read # of IO that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); + } + vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); + } + vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); + if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Write # of IO that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); + } + vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); + } + vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); + if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Read # of Bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead()); + } + vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); + } + vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); + if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Write # of Bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: " + vmDiskStat_lock.getCurrentBytesWrite()); + } + vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); + } + vmDiskStat_lock.setCurrentBytesWrite(vmDiskStat.getBytesWrite()); + + if (! 
_dailyOrHourly) { + //update agg bytes + vmDiskStat_lock.setAggIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); + vmDiskStat_lock.setAggIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); + vmDiskStat_lock.setAggBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); + vmDiskStat_lock.setAggBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); + } + + _vmDiskStatsDao.update(vmDiskStat_lock.getId(), vmDiskStat_lock); + } + txn.commit(); + } catch (Exception e) { + txn.rollback(); + s_logger.warn("Unable to update vm disk statistics for vm: " + userVm.getId() + " from host: " + hostId, e); + } finally { + txn.close(); + } + } } @@ -3450,7 +3699,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (userVm == null) { InvalidParameterValueException ex = new InvalidParameterValueException( "unable to find a virtual machine with specified id"); - ex.addProxyObject(userVm, vmId, "vmId"); + ex.addProxyObject(String.valueOf(vmId), "vmId"); throw ex; } @@ -3492,7 +3741,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (vm.getState() != State.Stopped) { InvalidParameterValueException ex = new InvalidParameterValueException( "VM is not Stopped, unable to migrate the vm having the specified id"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -3556,7 +3805,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use "No permission to migrate VM, Only Root Admin can migrate a VM!"); } - VMInstanceVO vm = _vmInstanceDao.findById(vmId); + UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { throw new InvalidParameterValueException( "Unable to find the VM by id=" + vmId); @@ -3569,7 +3818,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } InvalidParameterValueException ex = new InvalidParameterValueException( "VM is not Running, unable to migrate the vm with specified id"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } if (!vm.getHypervisorType().equals(HypervisorType.XenServer) @@ -3609,6 +3858,21 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + destinationHost.getResourceState()); } + HostVO srcHost = _hostDao.findById(srcHostId); + HostVO destHost = _hostDao.findById(destinationHost.getId()); + //if srcHost is dedicated and destination Host is not + if (checkIfHostIsDedicated(srcHost) && !checkIfHostIsDedicated(destHost)) { + //raise an alert + String msg = "VM is migrated on a non-dedicated host " + destinationHost.getName(); + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); + } + //if srcHost is non dedicated but destination Host is. 
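/*
 * Editorial sketch, not part of the patch: the counter-rollover rule that the
 * vm_disk_statistics update loop above applies to each of the four disk counters
 * (IO read/write, bytes read/write). The field and method names here are hypothetical;
 * only the "fold the stored value into the running total when the reported counter goes
 * backwards" behaviour mirrors the patch.
 */
class DiskCounter {
    private long net;      // accumulated total carried across hypervisor counter resets
    private long current;  // last raw value reported by the hypervisor

    // Called with each newly reported raw counter value.
    void update(long reported) {
        if (current > reported) {
            // Counter went backwards (VM restart or counter reset): bank the old value so the
            // running total is not lost, as the patch does with setNetIORead/setNetBytesRead etc.
            net += current;
        }
        current = reported;
    }

    // What the patch writes into the agg* columns when the aggregation range is neither
    // hourly nor daily: the banked total plus the latest raw reading.
    long total() {
        return net + current;
    }

    public static void main(String[] args) {
        DiskCounter c = new DiskCounter();
        c.update(100);
        c.update(250);
        c.update(40); // a reset happened between 250 and 40
        System.out.println(c.total()); // 290
    }
}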
+ if (!checkIfHostIsDedicated(srcHost) && checkIfHostIsDedicated(destHost)) { + //raise an alert + String msg = "VM is migrated on a dedicated host " + destinationHost.getName(); + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); + } + // call to core process DataCenterVO dcVO = _dcDao.findById(destinationHost.getDataCenterId()); HostPodVO pod = _podDao.findById(destinationHost.getPodId()); @@ -3632,10 +3896,23 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); } + collectVmDiskStatistics(vm); VMInstanceVO migratedVm = _itMgr.migrate(vm, srcHostId, dest); return migratedVm; } + private boolean checkIfHostIsDedicated(HostVO host) { + long hostId = host.getId(); + DedicatedResourceVO dedicatedHost = _dedicatedDao.findByHostId(hostId); + DedicatedResourceVO dedicatedClusterOfHost = _dedicatedDao.findByClusterId(host.getClusterId()); + DedicatedResourceVO dedicatedPodOfHost = _dedicatedDao.findByPodId(host.getPodId()); + if(dedicatedHost != null || dedicatedClusterOfHost != null || dedicatedPodOfHost != null) { + return true; + } else { + return false; + } + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_MIGRATE, eventDescription = "migrating VM", async = true) public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost, @@ -3661,7 +3938,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } CloudRuntimeException ex = new CloudRuntimeException("VM is not Running, unable to migrate the vm with" + " specified id"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -3755,7 +4032,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use VMInstanceVO migratedVm = _itMgr.migrateWithStorage(vm, srcHostId, destinationHost.getId(), volToPoolObjectMap); return migratedVm; - } +} @DB @Override @@ -3791,7 +4068,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } InvalidParameterValueException ex = new InvalidParameterValueException( "VM is Running, unable to move the vm with specified vmId"); - ex.addProxyObject(vm, cmd.getVmId(), "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -3805,7 +4082,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (oldAccount.getType() == Account.ACCOUNT_TYPE_PROJECT) { InvalidParameterValueException ex = new InvalidParameterValueException( "Specified Vm id belongs to the project and can't be moved"); - ex.addProxyObject(vm, cmd.getVmId(), "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } Account newAccount = _accountService.getActiveAccountByName( @@ -4058,7 +4335,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (network == null) { InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to find specified network id"); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(networkId.toString(), "networkId"); throw ex; } @@ -4071,7 +4348,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (networkOffering.isSystemOnly()) { InvalidParameterValueException ex = new InvalidParameterValueException( "Specified Network id is system only and can't be used for vm deployment"); - ex.addProxyObject(network, networkId, "networkId"); + 
ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } applicableNetworks.add(network); @@ -4122,7 +4399,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use " resources as a part of network provision for persistent network due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network" + " (with specified id) elements and resources as a part of network provision"); - e.addProxyObject(newNetwork, newNetwork.getId(), "networkId"); + e.addProxyObject(newNetwork.getUuid(), "networkId"); throw e; } } @@ -4179,10 +4456,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use long vmId = cmd.getVmId(); Long newTemplateId = cmd.getTemplateId(); + UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Cannot find VM with ID " + vmId); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(String.valueOf(vmId), "vmId"); throw ex; } @@ -4228,29 +4506,43 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (rootVols.isEmpty()) { InvalidParameterValueException ex = new InvalidParameterValueException( "Can not find root volume for VM " + vm.getUuid()); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } VolumeVO root = rootVols.get(0); Long templateId = root.getTemplateId(); + boolean isISO = false; if(templateId == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("Currently there is no support to reset a vm that is deployed using ISO " + vm.getUuid()); - ex.addProxyObject(vm, vmId, "vmId"); - throw ex; + // Assuming that for a vm deployed using ISO, template ID is set to NULL + isISO = true; + templateId = vm.getIsoId(); } VMTemplateVO template = null; + //newTemplateId can be either template or ISO id. 
In the following snippet based on the vm deployment (from template or ISO) it is handled accordingly if(newTemplateId != null) { template = _templateDao.findById(newTemplateId); _accountMgr.checkAccess(caller, null, true, template); + if (isISO) { + if (!template.getFormat().equals(ImageFormat.ISO)) { + throw new InvalidParameterValueException("Invalid ISO id provided to restore the VM "); + } + } else { + if (template.getFormat().equals(ImageFormat.ISO)) { + throw new InvalidParameterValueException("Invalid template id provided to restore the VM "); + } + } } else { + if (isISO && templateId == null) { + throw new CloudRuntimeException("Cannot restore the VM since there is no ISO attached to VM"); + } template = _templateDao.findById(templateId); if (template == null) { InvalidParameterValueException ex = new InvalidParameterValueException( - "Cannot find template for specified volumeid and vmId"); - ex.addProxyObject(vm, vmId, "vmId"); - ex.addProxyObject(root, root.getId(), "volumeId"); + "Cannot find template/ISO for specified volumeid and vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); + ex.addProxyObject(root.getUuid(), "volumeId"); throw ex; } } @@ -4262,18 +4554,26 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use s_logger.debug("Stop vm " + vm.getUuid() + " failed", e); CloudRuntimeException ex = new CloudRuntimeException( "Stop vm failed for specified vmId"); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } } - /* If new template is provided allocate a new volume from new template otherwise allocate new volume from original template */ + /* If new template/ISO is provided allocate a new volume from new template/ISO otherwise allocate new volume from original template/ISO */ VolumeVO newVol = null; - if (newTemplateId != null){ + if (newTemplateId != null) { + if (isISO) { + newVol = volumeMgr.allocateDuplicateVolume(root, null); + vm.setIsoId(newTemplateId); + vm.setGuestOSId(template.getGuestOSId()); + vm.setTemplateId(newTemplateId); + _vmDao.update(vmId, vm); + } else { newVol = volumeMgr.allocateDuplicateVolume(root, newTemplateId); vm.setGuestOSId(template.getGuestOSId()); vm.setTemplateId(newTemplateId); _vmDao.update(vmId, vm); + } } else { newVol = volumeMgr.allocateDuplicateVolume(root, null); } @@ -4315,7 +4615,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use s_logger.debug("Unable to start VM " + vm.getUuid(), e); CloudRuntimeException ex = new CloudRuntimeException( "Unable to start VM with specified id" + e.getMessage()); - ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } } @@ -4386,6 +4686,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Override public void prepareStop(VirtualMachineProfile profile) { + UserVmVO vm = _vmDao.findById(profile.getId()); + if (vm.getState() == State.Running) + collectVmDiskStatistics(vm); } } diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java index 5dde7113906..c938e782e80 100755 --- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2836,7 +2836,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long isDefault = (nic.isDefaultNic()) ? 
1 : 0; // insert nic's Id into DB as resource_name UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmVO.getAccountId(), - vmVO.getDataCenterId(), vmVO.getId(), Long.toString(nic.getId()), nic.getNetworkId(), + vmVO.getDataCenterId(), vmVO.getId(), Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vmVO.getUuid()); return nic; } else { diff --git a/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index efe18c3b375..52112792d14 100644 --- a/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -125,8 +125,7 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro @DB @Override @ActionEvent(eventType = EventTypes.EVENT_AFFINITY_GROUP_DELETE, eventDescription = "Deleting affinity group") - public boolean deleteAffinityGroup(Long affinityGroupId, String account, Long domainId, String affinityGroupName) - throws ResourceInUseException { + public boolean deleteAffinityGroup(Long affinityGroupId, String account, Long domainId, String affinityGroupName) { Account caller = UserContext.current().getCaller(); Account owner = _accountMgr.finalizeOwner(caller, account, domainId, null); @@ -164,7 +163,15 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro List affinityGroupVmMap = _affinityGroupVMMapDao.listByAffinityGroup(affinityGroupId); if (!affinityGroupVmMap.isEmpty()) { - throw new ResourceInUseException("Cannot delete affinity group when it's in use by virtual machines"); + SearchBuilder listByAffinityGroup = _affinityGroupVMMapDao.createSearchBuilder(); + listByAffinityGroup.and("affinityGroupId", listByAffinityGroup.entity().getAffinityGroupId(), + SearchCriteria.Op.EQ); + listByAffinityGroup.done(); + SearchCriteria sc = listByAffinityGroup.create(); + sc.setParameters("affinityGroupId", affinityGroupId); + + _affinityGroupVMMapDao.lockRows(sc, null, true); + _affinityGroupVMMapDao.remove(sc); } _affinityGroupDao.expunge(affinityGroupId); diff --git a/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java b/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java index ec0be8c9d96..ac3b8f56e61 100644 --- a/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java +++ b/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java @@ -201,7 +201,7 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Lb)) { InvalidParameterValueException ex = new InvalidParameterValueException( "LB service is not supported in specified network id"); - ex.addProxyObject(network, network.getId(), "networkId"); + ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } diff --git a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index a1865c64af7..483c19af431 100644 --- a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -126,7 +126,8 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR 
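/*
 * Editorial sketch, not part of the patch: the deleteAffinityGroup change above no longer
 * throws ResourceInUseException when VMs are still mapped to the group; it locks the mapping
 * rows, removes them, and then expunges the group. The in-memory model below is a hypothetical
 * illustration of that "lock, clear the mappings, then delete the parent" ordering; the real
 * code does this with SearchBuilder/SearchCriteria and DAO row locks inside a transaction.
 */
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class AffinityGroupStore {
    private final Map<Long, Set<Long>> vmMapByGroup = new HashMap<>();
    private final Set<Long> groups = new HashSet<>();

    synchronized void addGroup(long groupId) {
        groups.add(groupId);
    }

    synchronized void mapVm(long groupId, long vmId) {
        vmMapByGroup.computeIfAbsent(groupId, k -> new HashSet<>()).add(vmId);
    }

    // Deleting a group no longer requires it to be unused: its VM mappings are cleared first,
    // then the group itself is removed, with the store locked so no new mapping can be added
    // in between.
    synchronized boolean deleteGroup(long groupId) {
        vmMapByGroup.remove(groupId);   // analogous to lockRows(sc) + remove(sc) on the map table
        return groups.remove(groupId);  // analogous to _affinityGroupDao.expunge(groupId)
    }

    public static void main(String[] args) {
        AffinityGroupStore store = new AffinityGroupStore();
        store.addGroup(7L);
        store.mapVm(7L, 101L);
        System.out.println(store.deleteGroup(7L)); // true, even though a VM was still mapped
    }
}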
throw new InvalidParameterValueException("Invalid region ID: " + regionId); } - if (!region.checkIfServiceEnabled(Region.Service.Gslb)) { + String providerDnsName = _globalConfigDao.getValue(Config.CloudDnsName.key()); + if (!region.checkIfServiceEnabled(Region.Service.Gslb) || (providerDnsName == null)) { throw new CloudRuntimeException("GSLB service is not enabled in region : " + region.getName()); } @@ -203,6 +204,10 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR _accountMgr.checkAccess(caller, null, true, loadBalancer); + if (gslbRule.getAccountId() != loadBalancer.getAccountId()) { + throw new InvalidParameterValueException("GSLB rule and load balancer rule does not belong to same account"); + } + if (loadBalancer.getState() == LoadBalancer.State.Revoke) { throw new InvalidParameterValueException("Load balancer ID " + loadBalancer.getUuid() + " is in revoke state"); } @@ -255,7 +260,13 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR s_logger.debug("Configuring gslb rule configuration on the gslb service providers in the participating zones"); // apply the gslb rule on to the back end gslb service providers on zones participating in gslb - applyGlobalLoadBalancerRuleConfig(gslbRuleId, false); + if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { + s_logger.warn("Failed to add load balancer rules " + newLbRuleIds + " to global load balancer rule id " + + gslbRuleId); + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to add load balancer rules to GSLB rule "); + throw ex; + } // on success set state to Active gslbRule.setState(GlobalLoadBalancerRule.State.Active); @@ -264,7 +275,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR success = true; } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException("Failed to apply gslb config"); + throw new CloudRuntimeException("Failed to apply new GSLB configuration while assigning new LB rules to GSLB rule."); } return success; @@ -354,11 +365,28 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR s_logger.debug("Attempting to configure global load balancer rule configuration on the gslb service providers "); // apply the gslb rule on to the back end gslb service providers - applyGlobalLoadBalancerRuleConfig(gslbRuleId, false); + if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { + s_logger.warn("Failed to remove load balancer rules " + lbRuleIdsToremove + " from global load balancer rule id " + + gslbRuleId); + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to remove load balancer rule ids from GSLB rule "); + throw ex; + } - // on success set state to Active + txn.start(); + + // remove the mappings of gslb rule to Lb rule that are in revoked state + for (Long lbRuleId : lbRuleIdsToremove) { + GlobalLoadBalancerLbRuleMapVO removeGslbLbMap = _gslbLbMapDao.findByGslbRuleIdAndLbRuleId(gslbRuleId, lbRuleId); + _gslbLbMapDao.remove(removeGslbLbMap.getId()); + } + + // on success set state back to Active gslbRule.setState(GlobalLoadBalancerRule.State.Active); _gslbRuleDao.update(gslbRule.getId(), gslbRule); + + txn.commit(); + success = true; } catch (ResourceUnavailableException e) { throw new CloudRuntimeException("Failed to update removed load balancer details from gloabal load balancer"); @@ -368,24 +396,45 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } @Override - @DB @ActionEvent(eventType = 
EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE, eventDescription = "Delete global load balancer rule") public boolean deleteGlobalLoadBalancerRule(DeleteGlobalLoadBalancerRuleCmd deleteGslbCmd) { UserContext ctx = UserContext.current(); Account caller = ctx.getCaller(); - long gslbRuleId = deleteGslbCmd.getGlobalLoadBalancerId(); + + try { + revokeGslbRule(gslbRuleId, caller); + } catch (Exception e) { + s_logger.warn("Failed to delete GSLB rule due to" + e.getMessage()); + return false; + } + + return true; + } + + @DB + private void revokeGslbRule(long gslbRuleId, Account caller) { + GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); + if (gslbRule == null) { throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } _accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule); - if (gslbRule.getState() == GlobalLoadBalancerRule.State.Revoke) { - throw new InvalidParameterValueException("global load balancer rule id: " + gslbRuleId + " is already in revoked state"); + if (gslbRule.getState() == com.cloud.region.ha.GlobalLoadBalancerRule.State.Staged) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Rule Id: " + gslbRuleId + " is still in Staged state so just removing it."); + } + _gslbRuleDao.remove(gslbRuleId); + return; + } else if (gslbRule.getState() == GlobalLoadBalancerRule.State.Add || gslbRule.getState() == GlobalLoadBalancerRule.State.Active) { + //mark the GSlb rule to be in revoke state + gslbRule.setState(GlobalLoadBalancerRule.State.Revoke); + _gslbRuleDao.update(gslbRuleId, gslbRule); } Transaction txn = Transaction.currentTxn(); @@ -400,10 +449,6 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } } - //mark the GSlb rule to be in revoke state - gslbRule.setState(GlobalLoadBalancerRule.State.Revoke); - _gslbRuleDao.update(gslbRuleId, gslbRule); - txn.commit(); boolean success = false; @@ -423,10 +468,11 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR _gslbLbMapDao.remove(gslbLbMap.getId()); } } + //remove the GSLB rule itself _gslbRuleDao.remove(gslbRuleId); + txn.commit(); - return success; } @Override @@ -482,6 +528,10 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR @Override public List listGlobalLoadBalancerRule(ListGlobalLoadBalancerRuleCmd listGslbCmd) { + + UserContext ctx = UserContext.current(); + Account caller = ctx.getCaller(); + Integer regionId = listGslbCmd.getRegionId(); Long ruleId = listGslbCmd.getId(); List response = new ArrayList(); @@ -500,12 +550,14 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR if (gslbRule == null) { throw new InvalidParameterValueException("Invalid gslb rule id specified"); } + _accountMgr.checkAccess(caller, org.apache.cloudstack.acl.SecurityChecker.AccessType.ListEntry, false, gslbRule); + response.add(gslbRule); return response; } if (regionId != null) { - List gslbRules = _gslbRuleDao.listByRegionId(regionId); + List gslbRules = _gslbRuleDao.listByAccount(caller.getAccountId()); if (gslbRules != null) { response.addAll(gslbRules); } @@ -604,6 +656,19 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR return true; } + @Override + public boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) + throws com.cloud.exception.ResourceUnavailableException { + List gslbRules = _gslbRuleDao.listByAccount(accountId); + if (gslbRules != null && 
!gslbRules.isEmpty()) { + for (GlobalLoadBalancerRule gslbRule : gslbRules) { + revokeGslbRule(gslbRule.getId(), caller); + } + } + s_logger.debug("Successfully cleaned up GSLB rules for account id=" + accountId); + return true; + } + private boolean checkGslbServiceEnabledInZone(long zoneId, long physicalNetworkId) { if (_gslbProvider == null) { diff --git a/server/test/async-job-component.xml b/server/test/async-job-component.xml index 46982523a23..55f47cc5b50 100644 --- a/server/test/async-job-component.xml +++ b/server/test/async-job-component.xml @@ -74,6 +74,7 @@ under the License. 300 + 50 -1 diff --git a/server/test/com/cloud/network/MockNetworkManagerImpl.java b/server/test/com/cloud/network/MockNetworkManagerImpl.java index e5d34fbacc7..077395fca0e 100755 --- a/server/test/com/cloud/network/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/network/MockNetworkManagerImpl.java @@ -66,15 +66,8 @@ import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; -import com.cloud.vm.Nic; -import com.cloud.vm.NicProfile; -import com.cloud.vm.NicVO; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.*; import com.cloud.vm.VirtualMachine.Type; -import com.cloud.vm.VirtualMachineProfile; import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd; import org.apache.cloudstack.api.command.admin.network.ListDedicatedGuestVlanRangesCmd; import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypeImplementorsCmd; @@ -518,14 +511,6 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage return false; } - /* (non-Javadoc) - * @see com.cloud.network.NetworkManager#cleanupIpResources(long, long, com.cloud.user.Account) - */ - @Override - public boolean cleanupIpResources(long addrId, long userId, Account caller) { - // TODO Auto-generated method stub - return false; - } /* (non-Javadoc) * @see com.cloud.network.NetworkManager#restartNetwork(java.lang.Long, com.cloud.user.Account, com.cloud.user.User, boolean) @@ -795,15 +780,6 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage return null; } - /* (non-Javadoc) - * @see com.cloud.network.NetworkManager#assignVpnGatewayIpAddress(long, com.cloud.user.Account, long) - */ - @Override - public PublicIp assignVpnGatewayIpAddress(long dcId, Account owner, long vpcId) - throws InsufficientAddressCapacityException, ConcurrentOperationException { - // TODO Auto-generated method stub - return null; - } /* (non-Javadoc) * @see com.cloud.network.NetworkManager#markPublicIpAsAllocated(com.cloud.network.IPAddressVO) @@ -887,7 +863,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage } @Override - public boolean releasePortableIpAddress(long ipAddressId) throws InsufficientAddressCapacityException { + public boolean releasePortableIpAddress(long ipAddressId) { return false;// TODO Auto-generated method stub } @@ -939,14 +915,9 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage return null; } - @Override - public boolean removeVmSecondaryIpsOfNic(long nicId) { - // TODO Auto-generated method stub - return false; - } @Override - public NicVO savePlaceholderNic(Network network, String ip4Address, Type vmType) { + public NicVO savePlaceholderNic(Network network, String ip4Address, String ip6Address, Type vmType) { // TODO Auto-generated method stub return 
null; } diff --git a/server/test/com/cloud/network/MockRulesManagerImpl.java b/server/test/com/cloud/network/MockRulesManagerImpl.java index 82a3e9346e3..331a47ffca6 100644 --- a/server/test/com/cloud/network/MockRulesManagerImpl.java +++ b/server/test/com/cloud/network/MockRulesManagerImpl.java @@ -135,19 +135,6 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return false; } - @Override - public boolean applyPortForwardingRules(long ipAddressId, - boolean continueOnError, Account caller) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean applyStaticNatRulesForIp(long sourceIpId, - boolean continueOnError, Account caller, boolean forRevoke) { - // TODO Auto-generated method stub - return false; - } @Override public boolean applyPortForwardingRulesForNetwork(long networkId, @@ -163,13 +150,6 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return false; } - @Override - public void checkIpAndUserVm(IpAddress ipAddress, UserVm userVm, - Account caller) { - // TODO Auto-generated method stub - - } - @Override public void checkRuleAndUserVm(FirewallRule rule, UserVm userVm, Account caller) { @@ -191,25 +171,6 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return false; } - @Override - public List listFirewallRulesByIp(long ipAddressId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public List listPortForwardingRulesForApplication( - long ipId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public List gatherPortForwardingRulesForApplication( - List addrs) { - // TODO Auto-generated method stub - return null; - } @Override public boolean revokePortForwardingRulesForVm(long vmId) { @@ -217,11 +178,6 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return false; } - @Override - public boolean revokeStaticNatRulesForVm(long vmId) { - // TODO Auto-generated method stub - return false; - } @Override public FirewallRule[] reservePorts(IpAddress ip, String protocol, @@ -231,25 +187,6 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return null; } - @Override - public boolean releasePorts(long ipId, String protocol, Purpose purpose, - int... ports) { - // TODO Auto-generated method stub - return false; - } - - @Override - public List listByNetworkId(long networkId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean applyStaticNatForIp(long sourceIpId, - boolean continueOnError, Account caller, boolean forRevoke) { - // TODO Auto-generated method stub - return false; - } @Override public boolean applyStaticNatsForNetwork(long networkId, diff --git a/server/test/com/cloud/server/ConfigurationServerImplTest.java b/server/test/com/cloud/server/ConfigurationServerImplTest.java new file mode 100644 index 00000000000..6e1f4f90b37 --- /dev/null +++ b/server/test/com/cloud/server/ConfigurationServerImplTest.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.server; + +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.io.FileUtils; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; + +public class ConfigurationServerImplTest { + final static String TEST = "the quick brown fox jumped over the lazy dog"; + + @Test(expected = IOException.class) + public void testGetBase64KeystoreNoSuchFile() throws IOException { + ConfigurationServerImpl.getBase64Keystore("notexisting" + System.currentTimeMillis()); + } + + @Test(expected = IOException.class) + public void testGetBase64KeystoreTooBigFile() throws IOException { + File temp = File.createTempFile("keystore", ""); + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < 1000; i++) { + builder.append("way too long...\n"); + } + FileUtils.writeStringToFile(temp, builder.toString()); + try { + ConfigurationServerImpl.getBase64Keystore(temp.getPath()); + } finally { + temp.delete(); + } + } + + @Test + public void testGetBase64Keystore() throws IOException { + File temp = File.createTempFile("keystore", ""); + try { + FileUtils.writeStringToFile(temp, Base64.encodeBase64String(TEST.getBytes())); + final String keystore = ConfigurationServerImpl.getBase64Keystore(temp.getPath()); + // let's decode it to make sure it makes sense + Base64.decodeBase64(keystore); + } finally { + temp.delete(); + } + } +} diff --git a/server/test/com/cloud/user/MockAccountManagerImpl.java b/server/test/com/cloud/user/MockAccountManagerImpl.java index 64919afa74f..38cc1a84a55 100644 --- a/server/test/com/cloud/user/MockAccountManagerImpl.java +++ b/server/test/com/cloud/user/MockAccountManagerImpl.java @@ -132,12 +132,6 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco return null; } - @Override - public Pair,Long> finalizeAccountDomainForList(Account caller, String accountName, Long domainId, Long projectId) { - // TODO Auto-generated method stub - return null; - } - @Override public Account getActiveAccountByName(String accountName, Long domainId) { // TODO Auto-generated method stub @@ -145,13 +139,13 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco } @Override - public Account getActiveAccountById(Long accountId) { + public Account getActiveAccountById(long accountId) { // TODO Auto-generated method stub return null; } @Override - public Account getAccount(Long accountId) { + public Account getAccount(long accountId) { // TODO Auto-generated method stub return null; } @@ -192,24 +186,12 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco return false; } - @Override - public boolean deleteAccount(AccountVO account, long callerUserId, Account caller) { - // TODO Auto-generated method stub - return false; - } - @Override public void checkAccess(Account account, Domain domain) throws PermissionDeniedException { // TODO Auto-generated method stub } - - @Override - public boolean cleanupAccount(AccountVO account, long callerUserId, Account caller) { - // TODO Auto-generated method stub - return false; - } - + @Override public Long 
checkAccessAndSpecifyAuthority(Account caller, Long zoneId) { // TODO Auto-generated method stub @@ -244,14 +226,10 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco } @Override - public void logoutUser(Long userId) { + public void logoutUser(long userId) { // TODO Auto-generated method stub } - @Override - public UserAccount getUserAccount(String username, Long domainId) { - return null; - } @Override public UserAccount authenticateUser(String username, String password, Long domainId, String loginIpAddress, Map requestParameters) { @@ -263,21 +241,12 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco return null; } - @Override - public UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID) { - return null; - } @Override public String[] createApiKeyAndSecretKey(RegisterCmd cmd) { return null; } - @Override - public boolean lockAccount(long accountId) { - return true; - } - @Override public boolean enableAccount(long accountId) { // TODO Auto-generated method stub @@ -341,15 +310,22 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco return null; } - @Override - public Account createAccount(String accountName, short accountType, - Long domainId, String networkDomain, Map details, String uuid) { - // TODO Auto-generated method stub - return null; - } + @Override public RoleType getRoleType(Account account) { return null; } + @Override + public boolean deleteAccount(AccountVO account, long callerUserId, Account caller) { + // TODO Auto-generated method stub + return false; + } + + @Override + public Account createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java index e3b7d311ba7..442c2be3969 100644 --- a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java +++ b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java @@ -40,6 +40,7 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; @@ -113,6 +114,9 @@ public class DeploymentPlanningManagerImplTest { @Inject ClusterDao _clusterDao; + @Inject + DedicatedResourceDao _dedicatedDao; + private static long domainId = 5L; private static long dataCenterId = 1L; @@ -252,6 +256,11 @@ public class DeploymentPlanningManagerImplTest { } @Bean + public DedicatedResourceDao dedicatedResourceDao() { + return Mockito.mock(DedicatedResourceDao.class); + } + + @Bean public GuestOSDao guestOSDao() { return Mockito.mock(GuestOSDao.class); } diff --git a/server/test/com/cloud/vm/MockUserVmManagerImpl.java b/server/test/com/cloud/vm/MockUserVmManagerImpl.java index 448a5dd9a21..40c49d4430a 100644 --- a/server/test/com/cloud/vm/MockUserVmManagerImpl.java +++ b/server/test/com/cloud/vm/MockUserVmManagerImpl.java @@ -47,6 +47,7 @@ import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; import org.springframework.stereotype.Component; import com.cloud.agent.api.StopAnswer; +import com.cloud.agent.api.VmDiskStatsEntry; import 
com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; @@ -168,6 +169,12 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } + @Override + public HashMap> getVmDiskStatistics(long hostId, String hostName, List vmIds) { + // TODO Auto-generated method stub + return null; + } + @Override public boolean deleteVmGroup(long groupId) { // TODO Auto-generated method stub @@ -461,4 +468,9 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, // TODO Auto-generated method stub return false; } + + @Override + public void collectVmDiskStatistics (UserVmVO userVm) { + // TODO Auto-generated method stub + } } diff --git a/server/test/com/cloud/vm/UserVmManagerTest.java b/server/test/com/cloud/vm/UserVmManagerTest.java index 6a9711401c9..5eedfa5d815 100755 --- a/server/test/com/cloud/vm/UserVmManagerTest.java +++ b/server/test/com/cloud/vm/UserVmManagerTest.java @@ -24,10 +24,8 @@ import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; +import static org.mockito.Mockito.times; import java.lang.reflect.Field; import java.util.List; @@ -64,6 +62,7 @@ import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.Storage.ImageFormat; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountService; @@ -200,6 +199,7 @@ public class UserVmManagerTest { doReturn(false).when(_rootVols).isEmpty(); when(_rootVols.get(eq(0))).thenReturn(_volumeMock); doReturn(3L).when(_volumeMock).getTemplateId(); + doReturn(ImageFormat.VHD).when(_templateMock).getFormat(); when(_templateDao.findById(anyLong())).thenReturn(_templateMock); doNothing().when(_accountMgr).checkAccess(_account, null, true, _templateMock); when(_itMgr.stop(_vmMock, _userMock, _account)).thenReturn(true); @@ -220,6 +220,40 @@ public class UserVmManagerTest { } + // Test restoreVM on providing new ISO Id, when VM(deployed using ISO) is in running state + @Test + public void testRestoreVMF5() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException { + doReturn(VirtualMachine.State.Running).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstanceAndType(314L, Volume.Type.ROOT)).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(null).when(_volumeMock).getTemplateId(); + doReturn(3L).when(_vmMock).getIsoId(); + doReturn(ImageFormat.ISO).when(_templateMock).getFormat(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + doNothing().when(_accountMgr).checkAccess(_account, null, true, _templateMock); + when(_itMgr.stop(_vmMock, _userMock, _account)).thenReturn(true); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); + doNothing().when(_vmMock).setIsoId(14L); + when(_templateMock.getGuestOSId()).thenReturn(5L); + 
doNothing().when(_vmMock).setGuestOSId(anyLong()); + doNothing().when(_vmMock).setTemplateId(3L); + when(_vmDao.update(314L, _vmMock)).thenReturn(true); + when(_itMgr.start(_vmMock, null, _userMock, _account)).thenReturn(_vmMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + when(_volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + + when(_templateMock.getUuid()).thenReturn("b1a3626e-72e0-4697-8c7c-a110940cc55d"); + + _userVmMgr.restoreVMInternal(_account, _vmMock, 14L); + + verify(_vmMock, times(1)).setIsoId(14L); + + } // Test scaleVm on incompatible HV. @Test(expected=InvalidParameterValueException.class) public void testScaleVMF1() throws Exception { diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index 7129273a50c..b609022ff26 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -210,7 +210,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage } @Override - public boolean releasePortableIpAddress(long ipAddressId) throws InsufficientAddressCapacityException { + public boolean releasePortableIpAddress(long ipAddressId) { return false;// TODO Auto-generated method stub } @@ -1142,10 +1142,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage return false; } - - - - + /* (non-Javadoc) * @see com.cloud.network.NetworkManager#releaseNic(com.cloud.vm.VirtualMachineProfile, com.cloud.vm.Nic) */ @@ -1156,10 +1153,6 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage } - - - - /* (non-Javadoc) * @see com.cloud.network.NetworkManager#createNicForVm(com.cloud.network.Network, com.cloud.vm.NicProfile, com.cloud.vm.ReservationContext, com.cloud.vm.VirtualMachineProfileImpl, boolean, boolean) */ @@ -1172,24 +1165,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage return null; } - - - - - /* (non-Javadoc) - * @see com.cloud.network.NetworkManager#assignVpnGatewayIpAddress(long, com.cloud.user.Account, long) - */ - @Override - public PublicIp assignVpnGatewayIpAddress(long dcId, Account owner, long vpcId) - throws InsufficientAddressCapacityException, ConcurrentOperationException { - // TODO Auto-generated method stub - return null; - } - - - - - + /* (non-Javadoc) * @see com.cloud.network.NetworkManager#markPublicIpAsAllocated(com.cloud.network.IPAddressVO) */ @@ -1200,9 +1176,6 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage } - - - /* (non-Javadoc) * @see com.cloud.network.NetworkManager#assignDedicateIpAddress(com.cloud.user.Account, java.lang.Long, java.lang.Long, long, boolean) */ @@ -1240,22 +1213,6 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage } - - - - /* (non-Javadoc) - * @see com.cloud.network.NetworkManager#cleanupIpResources(long, long, com.cloud.user.Account) - */ - @Override - public boolean cleanupIpResources(long addrId, long userId, Account caller) { - // TODO Auto-generated method stub - return false; - } - - - - - /* (non-Javadoc) * @see com.cloud.network.NetworkManager#restartNetwork(java.lang.Long, com.cloud.user.Account, com.cloud.user.User, boolean) */ @@ -1436,14 +1393,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage @Override - 
public boolean removeVmSecondaryIpsOfNic(long nicId) { - // TODO Auto-generated method stub - return false; - } - - - @Override - public NicVO savePlaceholderNic(Network network, String ip4Address, Type vmType) { + public NicVO savePlaceholderNic(Network network, String ip4Address, String ip6Address, Type vmType) { // TODO Auto-generated method stub return null; } diff --git a/server/test/com/cloud/vpc/MockVpcManagerImpl.java b/server/test/com/cloud/vpc/MockVpcManagerImpl.java index 921321f52da..7e40083c8bd 100644 --- a/server/test/com/cloud/vpc/MockVpcManagerImpl.java +++ b/server/test/com/cloud/vpc/MockVpcManagerImpl.java @@ -373,7 +373,7 @@ public class MockVpcManagerImpl extends ManagerBase implements VpcManager { } @Override - public void validateNtwkOffForNtwkInVpc(Long networkId, long newNtwkOffId, String newCidr, String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner) { + public void validateNtwkOffForNtwkInVpc(Long networkId, long newNtwkOffId, String newCidr, String newNetworkDomain, Vpc vpc, String gateway, Account networkOwner, Long aclId) { // TODO Auto-generated method stub } diff --git a/server/test/com/cloud/vpc/NetworkACLManagerTest.java b/server/test/com/cloud/vpc/NetworkACLManagerTest.java index 76b811f8685..ddcfe7fabb7 100644 --- a/server/test/com/cloud/vpc/NetworkACLManagerTest.java +++ b/server/test/com/cloud/vpc/NetworkACLManagerTest.java @@ -15,6 +15,7 @@ package com.cloud.vpc; +import com.cloud.configuration.ConfigurationManager; import com.cloud.network.Network; import com.cloud.network.NetworkManager; import com.cloud.network.NetworkModel; @@ -78,6 +79,8 @@ public class NetworkACLManagerTest extends TestCase{ @Inject NetworkDao _networkDao; @Inject + ConfigurationManager _configMgr; + @Inject NetworkModel _networkModel; @Inject List _networkAclElements; @@ -178,6 +181,11 @@ public class NetworkACLManagerTest extends TestCase{ return Mockito.mock(NetworkDao.class); } + @Bean + public ConfigurationManager configMgr() { + return Mockito.mock(ConfigurationManager.class); + } + @Bean public NetworkACLServiceProvider networkElements() { return Mockito.mock(NetworkACLServiceProvider.class); diff --git a/server/test/com/cloud/vpc/NetworkACLServiceTest.java b/server/test/com/cloud/vpc/NetworkACLServiceTest.java index 9a368b94ae4..e71fabfef2d 100644 --- a/server/test/com/cloud/vpc/NetworkACLServiceTest.java +++ b/server/test/com/cloud/vpc/NetworkACLServiceTest.java @@ -138,6 +138,7 @@ public class NetworkACLServiceTest extends TestCase{ Mockito.when(_networkAclMgr.getNetworkACL(Mockito.anyLong())).thenReturn(acl); Mockito.when(_networkAclMgr.createNetworkACLItem(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyString(), Mockito.anyList(), Mockito.anyInt(), Mockito.anyInt(), Mockito.any(NetworkACLItem.TrafficType.class), Mockito.anyLong(), Mockito.anyString(), Mockito.anyInt())).thenReturn(new NetworkACLItemVO()); + Mockito.when(_networkACLItemDao.findByAclAndNumber(Mockito.anyLong(), Mockito.anyInt())).thenReturn(null); assertNotNull(_aclService.createNetworkACLItem(createACLItemCmd)); } diff --git a/server/test/com/cloud/vpc/VpcApiUnitTest.java b/server/test/com/cloud/vpc/VpcApiUnitTest.java index e141c9658b8..400e00c8f3e 100644 --- a/server/test/com/cloud/vpc/VpcApiUnitTest.java +++ b/server/test/com/cloud/vpc/VpcApiUnitTest.java @@ -87,7 +87,7 @@ public class VpcApiUnitTest extends TestCase{ //1) correct network offering boolean result = false; try { - _vpcService.validateNtwkOffForNtwkInVpc(2L, 1, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new 
AccountVO()); + _vpcService.validateNtwkOffForNtwkInVpc(2L, 1, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO(), null); result = true; } catch (Exception ex) { } finally { @@ -97,7 +97,7 @@ public class VpcApiUnitTest extends TestCase{ //2) invalid offering - source nat is not included result = false; try { - _vpcService.validateNtwkOffForNtwkInVpc(2L, 2, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO()); + _vpcService.validateNtwkOffForNtwkInVpc(2L, 2, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO(), null); result = true; } catch (InvalidParameterValueException ex) { } finally { @@ -107,7 +107,7 @@ public class VpcApiUnitTest extends TestCase{ //3) invalid offering - conserve mode is off result = false; try { - _vpcService.validateNtwkOffForNtwkInVpc(2L, 3, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO()); + _vpcService.validateNtwkOffForNtwkInVpc(2L, 3, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO(), null); result = true; } catch (InvalidParameterValueException ex) { } finally { @@ -117,7 +117,7 @@ public class VpcApiUnitTest extends TestCase{ //4) invalid offering - guest type shared result = false; try { - _vpcService.validateNtwkOffForNtwkInVpc(2L, 4, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO()); + _vpcService.validateNtwkOffForNtwkInVpc(2L, 4, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO(), null); result = true; } catch (InvalidParameterValueException ex) { } finally { @@ -127,7 +127,7 @@ public class VpcApiUnitTest extends TestCase{ //5) Invalid offering - no redundant router support result = false; try { - _vpcService.validateNtwkOffForNtwkInVpc(2L, 5, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO()); + _vpcService.validateNtwkOffForNtwkInVpc(2L, 5, "0.0.0.0", "111-", _vpcService.getVpc(1), "10.1.1.1", new AccountVO(), null); result = true; } catch (InvalidParameterValueException ex) { } finally { diff --git a/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java b/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java index 484b044e28e..5816b2829f2 100644 --- a/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java +++ b/server/test/org/apache/cloudstack/affinity/AffinityApiUnitTest.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.affinity; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyObject; @@ -51,6 +52,7 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.AnnotationConfigContextLoader; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.event.EventUtils; import com.cloud.event.EventVO; import com.cloud.event.dao.EventDao; @@ -64,6 +66,8 @@ import com.cloud.user.AccountVO; import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.UserVmDao; @@ -102,6 +106,10 @@ public class AffinityApiUnitTest { @Inject EventDao _eventDao; + @Inject + DedicatedResourceDao _dedicatedDao; + + private static long domainId = 5L; 
@@ -172,20 +180,6 @@ public class AffinityApiUnitTest { _affinityService.deleteAffinityGroup(null, "user", domainId, null); } - @Test(expected = ResourceInUseException.class) - public void deleteAffinityGroupInUse() throws ResourceInUseException { - List affinityGroupVmMap = new ArrayList(); - AffinityGroupVMMapVO mapVO = new AffinityGroupVMMapVO(20L, 10L); - affinityGroupVmMap.add(mapVO); - when(_affinityGroupVMMapDao.listByAffinityGroup(20L)).thenReturn(affinityGroupVmMap); - - AffinityGroupVO groupVO = new AffinityGroupVO(); - when(_groupDao.findById(20L)).thenReturn(groupVO); - when(_groupDao.lockRow(20L, true)).thenReturn(groupVO); - - _affinityService.deleteAffinityGroup(20L, "user", domainId, null); - } - @Test(expected = InvalidParameterValueException.class) public void updateAffinityGroupVMRunning() throws ResourceInUseException { @@ -230,6 +224,11 @@ public class AffinityApiUnitTest { } @Bean + public DedicatedResourceDao dedicatedResourceDao() { + return Mockito.mock(DedicatedResourceDao.class); + } + + @Bean public AccountManager accountManager() { return Mockito.mock(AccountManager.class); } diff --git a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index df15ceb860a..6398e202b94 100644 --- a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -50,6 +50,7 @@ import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; import com.cloud.dc.dao.DataCenterVnetDaoImpl; import com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDaoImpl; import com.cloud.dc.dao.PodVlanDaoImpl; import com.cloud.dc.dao.PodVlanMapDaoImpl; @@ -174,7 +175,7 @@ import org.apache.cloudstack.region.PortableIpRangeDaoImpl; }, includeFilters={@Filter(value=ChildTestConfiguration.Library.class, type=FilterType.CUSTOM)}, useDefaultFilters=false -) + ) public class ChildTestConfiguration { @@ -333,6 +334,11 @@ public class ChildTestConfiguration { return Mockito.mock(NetworkDao.class); } + @Bean + public DedicatedResourceDao DedicatedResourceDao() { + return Mockito.mock(DedicatedResourceDao.class); + } + @Bean public NetworkOfferingServiceMapDao networkOfferingServiceMapDao() { return Mockito.mock(NetworkOfferingServiceMapDao.class); diff --git a/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java b/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java index 1c281a08bed..ab545342cfa 100644 --- a/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java +++ b/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java @@ -577,8 +577,14 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { LoadBalancerVO lbRule1 = new LoadBalancerVO(); lbRule1.setState(FirewallRule.State.Active); Field networkIdField1 = LoadBalancerVO.class.getSuperclass().getDeclaredField("networkId"); + Field accountIdField1 = LoadBalancerVO.class.getSuperclass().getDeclaredField("accountId"); + Field domainIdField1 = LoadBalancerVO.class.getSuperclass().getDeclaredField("domainId"); networkIdField1.setAccessible(true); + accountIdField1.setAccessible(true); + domainIdField1.setAccessible(true); networkIdField1.set(lbRule1, new Long(1)); + 
accountIdField1.set(lbRule1, new Long(3)); + domainIdField1.set(lbRule1, new Long(1)); Field idField1 = LoadBalancerVO.class.getSuperclass().getDeclaredField("id"); idField1.setAccessible(true); idField1.set(lbRule1, new Long(1)); @@ -586,8 +592,14 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { LoadBalancerVO lbRule2 = new LoadBalancerVO(); lbRule2.setState(FirewallRule.State.Active); Field networkIdField2 = LoadBalancerVO.class.getSuperclass().getDeclaredField("networkId"); + Field accountIdField2 = LoadBalancerVO.class.getSuperclass().getDeclaredField("accountId"); + Field domainIdField2 = LoadBalancerVO.class.getSuperclass().getDeclaredField("domainId"); networkIdField2.setAccessible(true); + accountIdField2.setAccessible(true); + domainIdField2.setAccessible(true); networkIdField2.set(lbRule2, new Long(1)); + accountIdField2.set(lbRule2, new Long(3)); + domainIdField2.set(lbRule2, new Long(1)); Field idField2 = LoadBalancerVO.class.getSuperclass().getDeclaredField("id"); idField2.setAccessible(true); idField2.set(lbRule2, new Long(2)); @@ -611,6 +623,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { try { gslbServiceImpl.assignToGlobalLoadBalancerRule(assignCmd); } catch (InvalidParameterValueException e) { + s_logger.info(e.getMessage()); Assert.assertTrue(e.getMessage().contains("Load balancer rule specified should be in unique zone")); } } diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index b1feb022836..79550aee1bb 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -2453,7 +2453,6 @@ CREATE TABLE `cloud`.`resource_tags` ( CONSTRAINT `uc_resource_tags__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; - CREATE TABLE `cloud`.`external_nicira_nvp_devices` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `uuid` varchar(255) UNIQUE, diff --git a/setup/db/db/schema-302to40.sql b/setup/db/db/schema-302to40.sql index 7fa73483db6..832228cb434 100644 --- a/setup/db/db/schema-302to40.sql +++ b/setup/db/db/schema-302to40.sql @@ -114,9 +114,6 @@ UPDATE `cloud`.`configuration` set component='NetworkManager' where name='router UPDATE `cloud`.`configuration` set component='NetworkManager' where name='router.template.id'; UPDATE `cloud`.`configuration` set category='Advanced' where name='capacity.skipcounting.hours'; UPDATE `cloud`.`configuration` set category='Advanced' where name='use.local.storage'; -UPDATE `cloud`.`configuration` set category='Hidden' where name='router.ram.size'; -UPDATE `cloud`.`configuration` set category='Hidden' where name='secondary.storage.vm'; -UPDATE `cloud`.`configuration` set category='Hidden' where name='security.hash.key'; UPDATE `cloud`.`configuration` set description = 'Percentage (as a value between 0 and 1) of local storage utilization above which alerts will be sent about low local storage available.' 
where name = 'cluster.localStorage.capacity.notificationthreshold'; DELETE FROM `cloud`.`configuration` WHERE name='direct.agent.pool.size'; diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index acc29a2eb7f..67e2048dce2 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -173,8 +173,6 @@ ALTER TABLE upload ADD uuid VARCHAR(40); ALTER TABLE async_job modify job_cmd VARCHAR(255); -ALTER TABLE `cloud`.`alert` ADD INDEX `last_sent` (`last_sent` DESC) ; - ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `is_persistent` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides an ability to create persistent networks'; @@ -1653,3 +1651,7 @@ CREATE TABLE `cloud`.`netscaler_pod_ref` ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'eip.use.multiple.netscalers' , 'false', 'Should be set to true, if there will be multiple NetScaler devices providing EIP service in a zone'); + +UPDATE `cloud`.`configuration` set category='Advanced' where category='Advanced '; +UPDATE `cloud`.`configuration` set category='Hidden' where category='Hidden '; + diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index fd2ac568cad..74d4caeb6a1 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -27,12 +27,15 @@ ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `storage_motion_support UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware'; INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, storage_motion_supported) VALUES (UUID(), 'XenServer', '6.1.0', 50, 1, 13, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES (UUID(), 'VMware', '5.1', 128, 0, 32); +UPDATE `cloud`.`hypervisor_capabilities` SET `storage_motion_supported`=true WHERE id=16; +UPDATE `cloud`.`hypervisor_capabilities` SET `storage_motion_supported`=true WHERE id=11; DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'midonet.apiserver.address', 'http://localhost:8081', 'Specify the address at which the Midonet API server can be contacted (if using Midonet)'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'midonet.providerrouter.id', 'd7c5e6a3-e2f4-426b-b728-b7ce6a0448e5', 'Specifies the UUID of the Midonet provider router (if using Midonet)'); ALTER TABLE `cloud`.`load_balancer_vm_map` ADD state VARCHAR(40) NULL COMMENT 'service status updated by LB healthcheck manager'; +alter table storage_pool add hypervisor varchar(32); alter table storage_pool change storage_provider_id storage_provider_name varchar(255); -- alter table template_host_ref add state varchar(255); -- alter table template_host_ref add update_count bigint unsigned; @@ -309,6 +312,27 @@ CREATE TABLE `cloud`.`affinity_group_vm_map` ( +CREATE TABLE `cloud`.`dedicated_resources` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', + `uuid` 
varchar(40), + `data_center_id` bigint unsigned COMMENT 'data center id', + `pod_id` bigint unsigned COMMENT 'pod id', + `cluster_id` bigint unsigned COMMENT 'cluster id', + `host_id` bigint unsigned COMMENT 'host id', + `domain_id` bigint unsigned COMMENT 'domain id of the domain to which resource is dedicated', + `account_id` bigint unsigned COMMENT 'account id of the account to which resource is dedicated', + PRIMARY KEY (`id`), + CONSTRAINT `fk_dedicated_resources__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `cloud`.`data_center`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_dedicated_resources__pod_id` FOREIGN KEY (`pod_id`) REFERENCES `cloud`.`host_pod_ref`(`id`), + CONSTRAINT `fk_dedicated_resources__cluster_id` FOREIGN KEY (`cluster_id`) REFERENCES `cloud`.`cluster`(`id`), + CONSTRAINT `fk_dedicated_resources__host_id` FOREIGN KEY (`host_id`) REFERENCES `cloud`.`host`(`id`), + CONSTRAINT `fk_dedicated_resources__domain_id` FOREIGN KEY (`domain_id`) REFERENCES `domain`(`id`), + CONSTRAINT `fk_dedicated_resources__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), + INDEX `i_dedicated_resources_domain_id`(`domain_id`), + INDEX `i_dedicated_resources_account_id`(`account_id`), + CONSTRAINT `uc_dedicated_resources__uuid` UNIQUE (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + CREATE TABLE nic_secondary_ips ( `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT, `uuid` varchar(40), @@ -334,6 +358,9 @@ ALTER TABLE `cloud`.`event` ADD COLUMN `archived` tinyint(1) unsigned NOT NULL D INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'alert.purge.interval', '86400', 'The interval (in seconds) to wait before running the alert purge thread'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'alert.purge.delay', '0', 'Alerts older than specified number days will be purged. 
Set this value to 0 to never delete alerts'); +INSERT INTO `cloud`.`dedicated_resources` (`data_center_id`, `domain_id`) SELECT `id`, `domain_id` FROM `cloud`.`data_center` WHERE `domain_id` IS NOT NULL; +UPDATE `cloud`.`data_center` SET `domain_id` = NULL WHERE `domain_id` IS NOT NULL; + DROP VIEW IF EXISTS `cloud`.`event_view`; CREATE VIEW `cloud`.`event_view` AS select @@ -465,6 +492,8 @@ CREATE TABLE `cloud`.`global_load_balancer_lb_rule_map` ( CONSTRAINT `fk_lb_rule_id` FOREIGN KEY(`lb_rule_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'cloud.dns.name', null, 'DNS name of the cloud for the GSLB service'); + INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.memory', '40960', 'The default maximum memory (in MiB) that can be used for an account'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.primary.storage', '200', 'The default maximum primary storage space (in GiB) that can be used for an account'); @@ -935,6 +964,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS storage_pool.removed, storage_pool.capacity_bytes, storage_pool.scope, + storage_pool.hypervisor, cluster.id cluster_id, cluster.uuid cluster_uuid, cluster.name cluster_name, @@ -1112,6 +1142,31 @@ CREATE TABLE `cloud`.`network_asa1000v_map` ( CONSTRAINT `fk_network_asa1000v_map__asa1000v_id` FOREIGN KEY (`asa1000v_id`) REFERENCES `external_cisco_asa1000v_devices`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +CREATE TABLE `cloud`.`vmware_data_center` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(255) UNIQUE, + `name` varchar(255) NOT NULL COMMENT 'Name of VMware datacenter', + `guid` varchar(255) NOT NULL UNIQUE COMMENT 'id of VMware datacenter', + `vcenter_host` varchar(255) NOT NULL COMMENT 'vCenter host containing this VMware datacenter', + `username` varchar(255) NOT NULL COMMENT 'Name of vCenter host user', + `password` varchar(255) NOT NULL COMMENT 'Password of vCenter host user', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`vmware_data_center_zone_map` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `zone_id` bigint unsigned NOT NULL UNIQUE COMMENT 'id of CloudStack zone', + `vmware_data_center_id` bigint unsigned NOT NULL UNIQUE COMMENT 'id of VMware datacenter', + PRIMARY KEY (`id`), + CONSTRAINT `fk_vmware_data_center_zone_map__vmware_data_center_id` FOREIGN KEY (`vmware_data_center_id`) REFERENCES `vmware_data_center`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`legacy_zones` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `zone_id` bigint unsigned NOT NULL UNIQUE COMMENT 'id of CloudStack zone', + PRIMARY KEY (`id`), + CONSTRAINT `fk_legacy_zones__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `eip_associate_public_ip` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if public IP is associated with user VM creation by default when EIP service is 
enabled.' AFTER `elastic_ip_service`; @@ -1159,6 +1214,7 @@ CREATE VIEW `cloud`.`service_offering_view` AS service_offering.default_use, service_offering.vm_type, service_offering.sort_key, + service_offering.is_volatile, service_offering.deployment_planner, domain.id domain_id, domain.uuid domain_uuid, @@ -1173,8 +1229,10 @@ CREATE VIEW `cloud`.`service_offering_view` AS -- Add "default" field to account/user tables ALTER TABLE `cloud`.`account` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if account is default'; +ALTER TABLE `cloud_usage`.`account` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if account is default'; ALTER TABLE `cloud`.`user` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if user is default'; UPDATE `cloud`.`account` SET `cloud`.`account`.`default`=1 WHERE id IN (1,2); +UPDATE `cloud_usage`.`account` SET `default`=1 WHERE id IN (1,2); UPDATE `cloud`.`user` SET `cloud`.`user`.`default`=1 WHERE id IN (1,2); ALTER VIEW `cloud`.`user_view` AS @@ -1943,8 +2001,92 @@ update `cloud`.`vpc_gateways` set network_acl_id = 2; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'VpcManager', 'blacklisted.routes', NULL, 'Routes that are blacklisted, can not be used for Static Routes creation for the VPC Private Gateway'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'enable.dynamic.scale.vm', 'false', 'Enables/Diables dynamically scaling a vm'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'scale.retry', '2', 'Number of times to retry scaling up the vm'); + +UPDATE `cloud`.`snapshots` set swift_id=null where swift_id=0; + +DROP TABLE IF EXISTS `cloud`.`vm_disk_statistics`; +CREATE TABLE `cloud`.`vm_disk_statistics` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `data_center_id` bigint(20) unsigned NOT NULL, + `account_id` bigint(20) unsigned NOT NULL, + `vm_id` bigint(20) unsigned NOT NULL, + `volume_id` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `id` (`id`), + UNIQUE KEY `account_id` (`account_id`,`data_center_id`,`vm_id`,`volume_id`), + KEY `i_vm_disk_statistics__account_id` (`account_id`), + KEY `i_vm_disk_statistics__account_id_data_center_id` (`account_id`,`data_center_id`), + CONSTRAINT `fk_vm_disk_statistics__account_id` FOREIGN KEY (`account_id`) REFERENCES `account` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8; + +insert into `cloud`.`vm_disk_statistics`(data_center_id,account_id,vm_id,volume_id) +select volumes.data_center_id, volumes.account_id, vm_instance.id, volumes.id from volumes,vm_instance where vm_instance.vm_type="User" and vm_instance.state<>"Expunging" and 
volumes.instance_id=vm_instance.id order by vm_instance.id; + +DROP TABLE IF EXISTS `cloud_usage`.`vm_disk_statistics`; +CREATE TABLE `cloud_usage`.`vm_disk_statistics` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `data_center_id` bigint(20) unsigned NOT NULL, + `account_id` bigint(20) unsigned NOT NULL, + `vm_id` bigint(20) unsigned NOT NULL, + `volume_id` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `net_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `current_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `id` (`id`), + UNIQUE KEY `account_id` (`account_id`,`data_center_id`,`vm_id`,`volume_id`) +) ENGINE=InnoDB CHARSET=utf8; + +insert into `cloud_usage`.`vm_disk_statistics` select * from `cloud`.`vm_disk_statistics`; + +DROP TABLE IF EXISTS `cloud_usage`.`usage_vm_disk`; +CREATE TABLE `cloud_usage`.`usage_vm_disk` ( + `account_id` bigint(20) unsigned NOT NULL, + `zone_id` bigint(20) unsigned NOT NULL, + `vm_id` bigint(20) unsigned NOT NULL, + `volume_id` bigint(20) unsigned NOT NULL DEFAULT '0', + `io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_io_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_io_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_bytes_read` bigint(20) unsigned NOT NULL DEFAULT '0', + `agg_bytes_write` bigint(20) unsigned NOT NULL DEFAULT '0', + `event_time_millis` bigint(20) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`account_id`,`zone_id`,`vm_id`,`volume_id`,`event_time_millis`) +) ENGINE=InnoDB CHARSET=utf8; + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.disk.stats.interval', 0, 'Interval (in seconds) to report vm disk statistics.'); + -- Re-enable foreign key checking, at the end of the upgrade path SET foreign_key_checks = 1; -UPDATE `cloud`.`snapshot_policy` set uuid=id WHERE uuid is NULL; +UPDATE `cloud`.`snapshot_policy` set uuid=id WHERE uuid is NULL; +#update shared sg enabled network with not null name in Advance Security Group enabled network +UPDATE `cloud`.`networks` set name='Shared SG enabled network', display_text='Shared SG enabled network' WHERE name IS null AND traffic_type='Guest' AND data_center_id IN (select id from data_center where networktype='Advanced' and is_security_group_enabled=1) AND acl_type='Domain'; diff --git a/setup/dev/advancedsg.cfg b/setup/dev/advancedsg.cfg new file mode 100644 index 00000000000..e6922b639e5 --- /dev/null +++ b/setup/dev/advancedsg.cfg @@ -0,0 +1,185 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +{ + "zones": [ + { + "name": "Sandbox-Simulator", + "dns1": "10.147.28.6", + "physical_networks": [ + { + "name": "Sandbox-pnet", + "tags": [ + "cloud-simulator-pnet" + ], + "broadcastdomainrange": "Zone", + "providers": [ + { + "broadcastdomainrange": "ZONE", + "name": "VirtualRouter" + }, + { + "broadcastdomainrange": "ZONE", + "name": "SecurityGroupProvider" + } + ], + "traffictypes": [ + { + "typ": "Guest" + }, + { + "typ": "Management", + "simulator": "cloud-simulator-mgmt" + } + ], + "isolationmethods": [ + "VLAN" + ] + } + ], + "securitygroupenabled": "true", + "ipranges": [ + { + "startip": "10.147.31.150", + "endip": "10.147.31.159", + "netmask": "255.255.255.0", + "vlan": "31", + "gateway": "10.147.31.1" + } + ], + "networktype": "Advanced", + "pods": [ + { + "endip": "10.147.29.159", + "name": "POD0", + "startip": "10.147.29.150", + "netmask": "255.255.255.0", + "clusters": [ + { + "clustername": "C0", + "hypervisor": "Simulator", + "hosts": [ + { + "username": "root", + "url": "http://simulator0", + "password": "password" + } + ], + "clustertype": "CloudManaged", + "primaryStorages": [ + { + "url": "nfs://10.147.28.6:/export/home/sandbox/primary", + "name": "PS0" + } + ] + } + ], + "gateway": "10.147.29.1" + } + ], + "internaldns1": "10.147.28.6", + "secondaryStorages": [ + { + "url": "nfs://10.147.28.6:/export/home/sandbox/sstor" + } + ] + } + ], + "dbSvr": { + "dbSvr": "localhost", + "passwd": "cloud", + "db": "cloud", + "port": 3306, + "user": "cloud" + }, + "logger": [ + { + "name": "TestClient", + "file": "testclient.log" + }, + { + "name": "TestCase", + "file": "testcase.log" + } + ], + "globalConfig": [ + { + "name": "storage.cleanup.interval", + "value": "300" + }, + { + "name": "direct.agent.load.size", + "value": "1000" + }, + { + "name": "default.page.size", + "value": "10000" + }, + { + "name": "instance.name", + "value": "QA" + }, + { + "name": "workers", + "value": "10" + }, + { + "name": "vm.op.wait.interval", + "value": "5" + }, + { + "name": "account.cleanup.interval", + "value": "600" + }, + { + "name": "guest.domain.suffix", + "value": "sandbox.simulator" + }, + { + "name": "expunge.delay", + "value": "60" + }, + { + "name": "vm.allocation.algorithm", + "value": "random" + }, + { + "name": "expunge.interval", + "value": "60" + }, + { + "name": "expunge.workers", + "value": "3" + }, + { + "name": "secstorage.allowed.internal.sites", + "value": "10.147.28.0/24" + }, + { + "name": "check.pod.cidrs", + "value": "true" + } + ], + "mgtSvr": [ + { + "mgtSvrIp": "localhost", + "passwd": "password", + "user": "root", + "port": 8096 + } + ] +} diff --git a/test/integration/component/test_advancedsg_networks.py b/test/integration/component/test_advancedsg_networks.py new file mode 100644 index 00000000000..e24254d4b90 --- /dev/null +++ b/test/integration/component/test_advancedsg_networks.py @@ -0,0 +1,753 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor 
license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" P1 tests for networks in advanced zone with security groups +""" +#Import Local Modules +import marvin +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +from marvin.remoteSSHClient import remoteSSHClient +import datetime +import netaddr + +class Services: + """ Test networks in advanced zone with security groups""" + + def __init__(self): + self.services = { + "domain": { + "name": "DOM", + }, + "project": { + "name": "Project", + "displaytext": "Test project", + }, + "account": { + "email": "admin-XABU1@test.com", + "firstname": "admin-XABU1", + "lastname": "admin-XABU1", + "username": "admin-XABU1", + # Random characters are appended for unique + # username + "password": "fr3sca", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + "shared_network_offering_sg": { + "name": 'MySharedOffering-sg', + "displaytext": 'MySharedOffering-sg', + "guestiptype": 'Shared', + "supportedservices": 'Dhcp,Dns,UserData,SecurityGroup', + "specifyVlan" : "False", + "specifyIpRanges" : "False", + "traffictype": 'GUEST', + "serviceProviderList" : { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', + "UserData": 'VirtualRouter', + "SecurityGroup": 'SecurityGroupProvider' + }, + }, + "shared_network_offering": { + "name": 'MySharedOffering', + "displaytext": 'MySharedOffering', + "guestiptype": 'Shared', + "supportedservices": 'Dhcp,Dns,UserData', + "specifyVlan" : "False", + "specifyIpRanges" : "False", + "traffictype": 'GUEST', + "serviceProviderList" : { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', + "UserData": 'VirtualRouter' + }, + }, + "shared_network_sg": { + "name": "MyIsolatedNetwork - Test", + "displaytext": "MyIsolatedNetwork", + "networkofferingid":"1", + "vlan" :1200, + "gateway" :"172.16.15.1", + "netmask" :"255.255.255.0", + "startip" :"172.16.15.2", + "endip" :"172.16.15.20", + "acltype" : "Domain", + "scope":"all", + }, + "shared_network": { + "name": "MySharedNetwork - Test", + "displaytext": "MySharedNetwork", + "vlan" :1201, + "gateway" :"172.16.15.1", + "netmask" :"255.255.255.0", + "startip" :"172.16.15.21", + "endip" :"172.16.15.41", + "acltype" : "Domain", + "scope":"all", + }, + "isolated_network_offering": { + "name": 'Network offering-DA services', + "displaytext": 'Network offering-DA services', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat', + "traffictype": 'GUEST', + "availability": 'Optional', + "serviceProviderList": { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', + "SourceNat": 'VirtualRouter', + 
"PortForwarding": 'VirtualRouter', + "Vpn": 'VirtualRouter', + "Firewall": 'VirtualRouter', + "Lb": 'VirtualRouter', + "UserData": 'VirtualRouter', + "StaticNat": 'VirtualRouter', + }, + }, + "isolated_network": { + "name": "Isolated Network", + "displaytext": "Isolated Network", + }, + "virtual_machine": { + "displayname": "Test VM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + # Hypervisor type should be same as + # hypervisor type of cluster + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "ostype": 'CentOS 5.3 (64-bit)', + # Cent OS 5.3 (64 bit) + "sleep": 90, + "timeout": 10, + "mode": 'advanced', + "securitygroupenabled": 'true' + } + +class TestNetworksInAdvancedSG(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.api_client = super( + TestSharedNetworks, + cls + ).getClsTestClient().getApiClient() + + cls.services = Services().services + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = cls.template.id + + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + + cls._cleanup = [ + cls.service_offering, + ] + return + + @classmethod + def tearDownClass(cls): + try: + #Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.api_client = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + self.cleanup_networks = [] + self.cleanup_accounts = [] + self.cleanup_domains = [] + self.cleanup_projects = [] + self.cleanup_vms = [] + return + + def tearDown(self): + try: + #Clean up, terminate the created network offerings + cleanup_resources(self.api_client, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + #below components is not a part of cleanup because to mandate the order and to cleanup network + try: + for vm in self.cleanup_vms: + vm.delete(self.api_client) + except Exception as e: + raise Exception("Warning: Exception during virtual machines cleanup : %s" % e) + + try: + for project in self.cleanup_projects: + project.delete(self.api_client) + except Exception as e: + raise Exception("Warning: Exception during project cleanup : %s" % e) + + try: + for account in self.cleanup_accounts: + account.delete(self.api_client) + except Exception as e: + raise Exception("Warning: Exception during account cleanup : %s" % e) + + try: + for domain in self.cleanup_domains: + domain.delete(self.api_client) + except Exception as e: + raise Exception("Warning: Exception during domain cleanup : %s" % e) + + #Wait till all resources created are cleaned up completely and then attempt to delete Network + time.sleep(self.services["sleep"]) + + try: + for network in self.cleanup_networks: + network.delete(self.api_client) + except Exception as e: + raise Exception("Warning: Exception during network cleanup : %s" % e) + return + + def test_createIsolatedNetwork(self): + """ Test Isolated Network """ + + # Steps, + # 1. create an Admin Account - admin-XABU1 + # 2. listPhysicalNetworks in available zone + # 3. 
createNetworkOffering: + # 4. Enable network offering - updateNetworkOffering - state=Enabled + # 5. createNetwork + # Validations, + # 1. listAccounts name=admin-XABU1, state=enabled returns your account + # 2. listPhysicalNetworks should return at least one active physical network + # 4. listNetworkOfferings - name=myisolatedoffering, should list enabled offering + # 5. network creation should FAIL since isolated network is not supported in advanced zone with security groups. + + #Create admin account + self.admin_account = Account.create( + self.api_client, + self.services["account"], + admin=True, + domainid=self.domain.id + ) + + self.cleanup_accounts.append(self.admin_account) + + #verify that the account got created with state enabled + list_accounts_response = Account.list( + self.api_client, + id=self.admin_account.account.id, + listall=True + ) + self.assertEqual( + isinstance(list_accounts_response, list), + True, + "listAccounts returned invalid object in response." + ) + self.assertNotEqual( + len(list_accounts_response), + 0, + "listAccounts returned empty list." + ) + self.assertEqual( + list_accounts_response[0].state, + "enabled", + "The admin account created is not enabled." + ) + + self.debug("Admin type account created: %s" % self.admin_account.name) + + #Create an user account + self.user_account = Account.create( + self.api_client, + self.services["account"], + admin=False, + domainid=self.domain.id + ) + + self.cleanup_accounts.append(self.user_account) + + #verify that the account got created with state enabled + list_accounts_response = Account.list( + self.api_client, + id=self.user_account.account.id, + listall=True + ) + self.assertEqual( + isinstance(list_accounts_response, list), + True, + "listAccounts returned invalid object in response." + ) + self.assertNotEqual( + len(list_accounts_response), + 0, + "listAccounts returned empty list." + ) + self.assertEqual( + list_accounts_response[0].state, + "enabled", + "The user account created is not enabled." + ) + + self.debug("User type account created: %s" % self.user_account.name) + + #Verify that there should be at least one physical network present in zone. + list_physical_networks_response = PhysicalNetwork.list( + self.api_client, + zoneid=self.zone.id + ) + self.assertEqual( + isinstance(list_physical_networks_response, list), + True, + "listPhysicalNetworks returned invalid object in response." + ) + self.assertNotEqual( + len(list_physical_networks_response), + 0, + "listPhysicalNetworks should return at least one physical network." + ) + + physical_network = list_physical_networks_response[0] + + self.debug("Physical network found: %s" % physical_network.id) + + #Create Network Offering + self.isolated_network_offering = NetworkOffering.create( + self.api_client, + self.services["isolated_network_offering"], + conservemode=False + ) + + self.cleanup.append(self.isolated_network_offering) + + #Verify that the network offering got created + list_network_offerings_response = NetworkOffering.list( + self.api_client, + id=self.isolated_network_offering.id + ) + self.assertEqual( + isinstance(list_network_offerings_response, list), + True, + "listNetworkOfferings returned invalid object in response." + ) + self.assertNotEqual( + len(list_network_offerings_response), + 0, + "listNetworkOfferings returned empty list." + ) + self.assertEqual( + list_network_offerings_response[0].state, + "Disabled", + "The network offering created should be bydefault disabled." 
+ ) + + self.debug("Isolated Network offering created: %s" % self.isolated_network_offering.id) + + #Update network offering state from disabled to enabled. + network_offering_update_response = NetworkOffering.update( + self.isolated_network_offering, + self.api_client, + id=self.isolated_network_offering.id, + state="enabled" + ) + + #Verify that the state of the network offering is updated + list_network_offerings_response = NetworkOffering.list( + self.api_client, + id=self.isolated_network_offering.id + ) + self.assertEqual( + isinstance(list_network_offerings_response, list), + True, + "listNetworkOfferings returned invalid object in response." + ) + self.assertNotEqual( + len(list_network_offerings_response), + 0, + "listNetworkOfferings returned empty list." + ) + self.assertEqual( + list_network_offerings_response[0].state, + "Enabled", + "The network offering state should get updated to Enabled." + ) + + #create network using the isolated network offering created + try: + self.isolated_network = Network.create( + self.api_client, + self.services["isolated_network"], + networkofferingid=self.isolated_network_offering.id, + zoneid=self.zone.id, + ) + self.cleanup_networks.append(self.isolated_network) + self.fail("Create isolated network is invalid in advanced zone with security groups.") + except Exception as e: + self.debug("Network creation failed because create isolated network is invalid in advanced zone with security groups.") + + def test_createSharedNetwork_withoutSG(self): + """ Test Shared Network with used vlan 01 """ + + # Steps, + # 1. create an Admin account + # 2. create a shared NetworkOffering + # 3. enable the network offering + # 4. listPhysicalNetworks + # 5. createNetwork + # Validations, + # 1. listAccounts state=enabled returns your account + # 2. listNetworkOfferings - name=mysharedoffering , should list offering in disabled state + # 3. listNetworkOfferings - name=mysharedoffering, should list enabled offering + # 4. listPhysicalNetworks should return at least one active physical network + # 5. network creation should FAIL since there is no SecurityProvide in the network offering + + #Create admin account + self.admin_account = Account.create( + self.api_client, + self.services["account"], + admin=True, + domainid=self.domain.id + ) + + self.cleanup_accounts.append(self.admin_account) + + #verify that the account got created with state enabled + list_accounts_response = Account.list( + self.api_client, + id=self.admin_account.account.id, + listall=True + ) + self.assertEqual( + isinstance(list_accounts_response, list), + True, + "listAccounts returned invalid object in response." + ) + self.assertNotEqual( + len(list_accounts_response), + 0, + "listAccounts returned empty list." + ) + self.assertEqual( + list_accounts_response[0].state, + "enabled", + "The admin account created is not enabled." + ) + + self.debug("Domain admin account created: %s" % self.admin_account.account.id) + + #Verify that there should be at least one physical network present in zone. + list_physical_networks_response = PhysicalNetwork.list( + self.api_client, + zoneid=self.zone.id + ) + self.assertEqual( + isinstance(list_physical_networks_response, list), + True, + "listPhysicalNetworks returned invalid object in response." + ) + self.assertNotEqual( + len(list_physical_networks_response), + 0, + "listPhysicalNetworks should return at least one physical network." 
+ ) + + physical_network = list_physical_networks_response[0] + + self.debug("Physical Network found: %s" % physical_network.id) + + self.services["shared_network_offering"]["specifyVlan"] = "True" + self.services["shared_network_offering"]["specifyIpRanges"] = "True" + + #Create Network Offering + self.shared_network_offering = NetworkOffering.create( + self.api_client, + self.services["shared_network_offering"], + conservemode=False + ) + + self.cleanup.append(self.shared_network_offering) + + #Verify that the network offering got created + list_network_offerings_response = NetworkOffering.list( + self.api_client, + id=self.shared_network_offering.id + ) + self.assertEqual( + isinstance(list_network_offerings_response, list), + True, + "listNetworkOfferings returned invalid object in response." + ) + self.assertNotEqual( + len(list_network_offerings_response), + 0, + "listNetworkOfferings returned empty list." + ) + self.assertEqual( + list_network_offerings_response[0].state, + "Disabled", + "The network offering created should be bydefault disabled." + ) + + self.debug("Shared Network Offering created: %s" % self.shared_network_offering.id) + + #Update network offering state from disabled to enabled. + network_offering_update_response = NetworkOffering.update( + self.shared_network_offering, + self.api_client, + id=self.shared_network_offering.id, + state="enabled" + ) + + #Verify that the state of the network offering is updated + list_network_offerings_response = NetworkOffering.list( + self.api_client, + id=self.shared_network_offering.id + ) + self.assertEqual( + isinstance(list_network_offerings_response, list), + True, + "listNetworkOfferings returned invalid object in response." + ) + self.assertNotEqual( + len(list_network_offerings_response), + 0, + "listNetworkOfferings returned empty list." + ) + self.assertEqual( + list_network_offerings_response[0].state, + "Enabled", + "The network offering state should get updated to Enabled." + ) + + #create network using the shared network offering created + self.services["shared_network"]["acltype"] = "domain" + self.services["shared_network"]["networkofferingid"] = self.shared_network_offering.id + self.services["shared_network"]["physicalnetworkid"] = physical_network.id + + try: + self.shared_network = Network.create( + self.api_client, + self.services["shared_network"], + networkofferingid=self.shared_network_offering.id, + zoneid=self.zone.id + ) + self.cleanup_networks.append(self.shared_network) + self.fail("Network created without SecurityProvider , which is invalid") + except Exception as e: + self.debug("Network creation failed because there is no SecurityProvider in the network offering.") + + def test_deployVM_SharedwithSG(self): + """ Test VM deployment in shared networks with SecurityProvider """ + + # Steps, + # 0. create a user account + # 1. Create one shared Network (scope=ALL, different IP ranges) + # 2. deployVirtualMachine in the above networkid within the user account + # 3. delete the user account + # Validations, + # 1. shared network should be created successfully + # 2. 
VM should deploy successfully + + #Create admin account + self.admin_account = Account.create( + self.api_client, + self.services["account"], + admin=True, + domainid=self.domain.id + ) + + self.cleanup_accounts.append(self.admin_account) + + #verify that the account got created with state enabled + list_accounts_response = Account.list( + self.api_client, + id=self.admin_account.account.id, + liistall=True + ) + self.assertEqual( + isinstance(list_accounts_response, list), + True, + "listAccounts returned invalid object in response." + ) + self.assertNotEqual( + len(list_accounts_response), + 0, + "listAccounts returned empty list." + ) + self.assertEqual( + list_accounts_response[0].state, + "enabled", + "The admin account created is not enabled." + ) + + self.debug("Admin type account created: %s" % self.admin_account.name) + + self.services["shared_network_offering_sg"]["specifyVlan"] = "True" + self.services["shared_network_offering_sg"]["specifyIpRanges"] = "True" + + #Create Network Offering + self.shared_network_offering_sg = NetworkOffering.create( + self.api_client, + self.services["shared_network_offering_sg"], + conservemode=False + ) + + self.cleanup.append(self.shared_network_offering_sg) + + #Verify that the network offering got created + list_network_offerings_response = NetworkOffering.list( + self.api_client, + id=self.shared_network_offering_sg.id + ) + self.assertEqual( + isinstance(list_network_offerings_response, list), + True, + "listNetworkOfferings returned invalid object in response." + ) + self.assertNotEqual( + len(list_network_offerings_response), + 0, + "listNetworkOfferings returned empty list." + ) + self.assertEqual( + list_network_offerings_response[0].state, + "Disabled", + "The network offering created should be bydefault disabled." + ) + + self.debug("Shared Network offering created: %s" % self.shared_network_offering_sg.id) + + #Update network offering state from disabled to enabled. + network_offering_update_response = NetworkOffering.update( + self.shared_network_offering_sg, + self.api_client, + id=self.shared_network_offering_sg.id, + state="enabled" + ) + + #Verify that the state of the network offering is updated + list_network_offerings_response = NetworkOffering.list( + self.api_client, + id=self.shared_network_offering_sg.id + ) + self.assertEqual( + isinstance(list_network_offerings_response, list), + True, + "listNetworkOfferings returned invalid object in response." + ) + self.assertNotEqual( + len(list_network_offerings_response), + 0, + "listNetworkOfferings returned empty list." + ) + self.assertEqual( + list_network_offerings_response[0].state, + "Enabled", + "The network offering state should get updated to Enabled." 
+ ) + + physical_network = list_physical_networks_response[0] + + #create network using the shared network offering created + self.services["shared_network_sg"]["acltype"] = "domain" + self.services["shared_network_sg"]["networkofferingid"] = self.shared_network_offering_sg.id + self.services["shared_network_sg"]["physicalnetworkid"] = physical_network.id + self.shared_network_sg = Network.create( + self.api_client, + self.services["shared_network_sg"], + domainid=self.admin_account.account.domainid, + networkofferingid=self.shared_network_offering_sg.id, + zoneid=self.zone.id + ) + + self.cleanup_networks.append(self.shared_network_sg) + + list_networks_response = Network.list( + self.api_client, + id=self.shared_network_sg.id + ) + self.assertEqual( + isinstance(list_networks_response, list), + True, + "listNetworks returned invalid object in response." + ) + self.assertNotEqual( + len(list_networks_response), + 0, + "listNetworks returned empty list." + ) + self.assertEqual( + list_networks_response[0].specifyipranges, + True, + "The network is created with ip range but the flag is set to False." + ) + + self.debug("Shared Network created: %s" % self.shared_network_sg.id) + + self.shared_network_admin_account_virtual_machine = VirtualMachine.create( + self.api_client, + self.services["virtual_machine"], + accountid=self.admin_account.name, + domainid=self.admin_account.account.domainid, + networkids=self.shared_network_sg.id, + serviceofferingid=self.service_offering.id + ) + vms = VirtualMachine.list( + self.api_client, + id=self.shared_network_admin_account_virtual_machine.id, + listall=True + ) + self.assertEqual( + isinstance(vms, list), + True, + "listVirtualMachines returned invalid object in response." + ) + self.assertNotEqual( + len(vms), + 0, + "listVirtualMachines returned empty list." + ) + self.debug("Virtual Machine created: %s" % self.shared_network_admin_account_virtual_machine.id) + + ip_range = list(netaddr.iter_iprange(unicode(self.services["shared_network_sg"]["startip"]), unicode(self.services["shared_network_sg"]["endip"]))) + if netaddr.IPAddress(unicode(vms[0].nic[0].ipaddress)) not in ip_range: + self.fail("Virtual machine ip should be from the ip range assigned to network created.") + diff --git a/test/integration/component/test_assign_vm.py b/test/integration/component/test_assign_vm.py new file mode 100644 index 00000000000..1dc93a81417 --- /dev/null +++ b/test/integration/component/test_assign_vm.py @@ -0,0 +1,458 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
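The deployVM test at the end of test_advancedsg_networks.py above finishes by asserting that the address reported for the VM's first NIC falls inside the startip/endip range configured for the SG-enabled shared network, using netaddr. A minimal standalone sketch of that check, with the range taken from the shared_network_sg service data and the VM address purely illustrative:

    import netaddr

    # Range configured for the SG-enabled shared network in the Services data above.
    start_ip, end_ip = u"172.16.15.2", u"172.16.15.20"
    ip_range = list(netaddr.iter_iprange(start_ip, end_ip))

    # Address reported for the VM's first NIC (illustrative value, not from a real deployment).
    vm_ip = u"172.16.15.5"

    if netaddr.IPAddress(vm_ip) not in ip_range:
        raise AssertionError("VM IP should come from the range assigned to the network")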
+ +""" +""" +#Import Local Modules +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.integration.lib.base import (Account, + Domain, + User, + Project, + Volume, + Snapshot, + DiskOffering, + ServiceOffering, + VirtualMachine) +from marvin.integration.lib.common import (get_domain, + get_zone, + get_template, + cleanup_resources, + list_volumes, + update_resource_limit, + list_networks, + list_snapshots, + list_virtual_machines) + +def log_test_exceptions(func): + def _log_test_exceptions(self, *args, **kwargs): + try: + func(self, *args, **kwargs) + except Exception as e: + self.debug('Test %s Failed due to Exception=%s' % (func, e)) + raise e + return _log_test_exceptions + +class Services: + """Test service data for:Change the ownershop of + VM/network/datadisk/snapshot/template/ISO from one account to any other account. + """ + def __init__(self): + self.services = {"domain" : {"name": "Domain",}, + "account" : {"email" : "test@test.com", + "firstname" : "Test", + "lastname" : "User", + "username" : "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password" : "password",}, + "user" : {"email" : "user@test.com", + "firstname": "User", + "lastname" : "User", + "username" : "User", + # Random characters are appended for unique + # username + "password" : "fr3sca",}, + "project" : {"name" : "Project", + "displaytext" : "Test project",}, + "volume" : {"diskname" : "TestDiskServ", + "max" : 6,}, + "disk_offering" : {"displaytext" : "Small", + "name" : "Small", + "disksize" : 1}, + "virtual_machine" : {"displayname" : "testserver", + "username" : "root",# VM creds for SSH + "password" : "password", + "ssh_port" : 22, + "hypervisor" : 'XenServer', + "privateport" : 22, + "publicport" : 22, + "protocol" : 'TCP',}, + "service_offering" : {"name" : "Tiny Instance", + "displaytext" : "Tiny Instance", + "cpunumber" : 1, + "cpuspeed" : 100,# in MHz + "memory" : 128}, + #"storagetype" : "local"}, + "sleep" : 60, + "ostype" : 'CentOS 5.3 (64-bit)',# CentOS 5.3 (64-bit) + } + +class TestVMOwnership(cloudstackTestCase): + @classmethod + def setUpClass(cls): + cls._cleanup = [] + cls.api_client = super(TestVMOwnership, + cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone Domain and create Domains and sub Domains. + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.services['mode'] = cls.zone.networktype + # Get and set template id for VM creation. 
+ cls.template = get_template(cls.api_client, + cls.zone.id, + cls.services["ostype"]) + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = cls.template.id + + def create_domain_account_user(parentDomain=None): + domain = Domain.create(cls.api_client, + cls.services["domain"], + parentdomainid=parentDomain.id if parentDomain else None) + cls._cleanup.append(domain) + # Create an Account associated with domain + account = Account.create(cls.api_client, + cls.services["account"], + domainid=domain.id) + cls._cleanup.append(account) + # Create an User, Project, Volume associated with account + user = User.create(cls.api_client, + cls.services["user"], + account=account.name, + domainid=account.domainid) + cls._cleanup.append(user) + project = Project.create(cls.api_client, + cls.services["project"], + account=account.name, + domainid=account.domainid) + cls._cleanup.append(project) + volume = Volume.create(cls.api_client, + cls.services["volume"], + zoneid=cls.zone.id, + account=account.name, + domainid=account.domainid, + diskofferingid=cls.disk_offering.id) + cls._cleanup.append(volume) + return {'domain':domain, 'account':account, 'user':user, 'project':project, 'volume':volume} + + # Create disk offerings. + try: + cls.disk_offering = DiskOffering.create(cls.api_client, + cls.services["disk_offering"]) + # Create service offerings. + cls.service_offering = ServiceOffering.create(cls.api_client, + cls.services["service_offering"]) + # Cleanup + cls._cleanup = [cls.service_offering] + # Create domain, account, user, project and volumes. + cls.domain_account_user1 = create_domain_account_user() + cls.domain_account_user2 = create_domain_account_user() + cls.sdomain_account_user1 = create_domain_account_user(cls.domain_account_user1['domain']) + cls.sdomain_account_user2 = create_domain_account_user(cls.domain_account_user2['domain']) + cls.ssdomain_account_user2 = create_domain_account_user(cls.sdomain_account_user2['domain']) + except Exception as e: + raise e + return + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.api_client, reversed(cls._cleanup)) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.api_client#self.testClient.getApiClient() + #self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + self.snapshot = None + return + + def create_vm(self, + account, + domain, + isRunning=False, + project =None, + limit =None, + pfrule =False, + lbrule =None, + natrule =None, + volume =None, + snapshot =False): + #TODO: Implemnt pfrule/lbrule/natrule + self.debug("Deploying instance in the account: %s" % account.name) + self.virtual_machine = VirtualMachine.create(self.apiclient, + self.services["virtual_machine"], + accountid=account.name, + domainid=domain.id, + serviceofferingid=self.service_offering.id, + mode=self.zone.networktype if pfrule else 'basic', + projectid=project.id if project else None) + self.debug("Deployed instance in account: %s" % account.name) + list_virtual_machines(self.apiclient, + id=self.virtual_machine.id) + if snapshot: + volumes = list_volumes(self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True) + self.snapshot = Snapshot.create(self.apiclient, + volumes[0].id, + account=account.name, + domainid=account.domainid) + if volume: + self.virtual_machine.attach_volume(self.apiclient, + volume) + if not isRunning: + self.virtual_machine.stop(self.apiclient) 
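+        # The VM is left Stopped by default because assignVirtualMachine only
+        # operates on stopped instances; tests that need the negative case
+        # (e.g. test_10 below) pass isRunning=True and expect the move to fail.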
+ self.cleanup.append(self.virtual_machine) + + def check_vm_is_moved_in_account_domainid(self, account): + list_vm_response = list_virtual_machines(self.api_client, + id=self.virtual_machine.id, + account=account.name, + domainid=account.domainid) + self.debug('VM=%s moved to account=%s and domainid=%s' % (list_vm_response, account.name, account.domainid)) + self.assertNotEqual(len(list_vm_response), 0, 'Unable to move VM to account=%s domainid=%s' % (account.name, account.domainid)) + + def tearDown(self): + try: + self.debug("Cleaning up the resources") + cleanup_resources(self.apiclient, reversed(self.cleanup)) + self.debug("Cleanup complete!") + except Exception as e: + self.debug("Warning! Exception in tearDown: %s" % e) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_01_move_across_different_domains(self): + """Test as root, stop a VM from domain1 and attempt to move it to account in domain2 + """ + # Validate the following: + # 1. deploy VM in domain_1 + # 2. stop VM in domain_1 + # 3. assignVirtualMachine to domain_2 + self.create_vm(self.domain_account_user1['account'], self.domain_account_user1['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.domain_account_user2['account'].name ,self.domain_account_user2['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.domain_account_user2['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_02_move_across_subdomains(self): + """Test as root, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in subdomain_1 + # 2. stop VM in subdomain_1 + # 3. assignVirtualMachine to subdomain_2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.sdomain_account_user2['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_03_move_from_domain_to_subdomain(self): + """Test as root stop a VM from domain1 and attempt to move it to subdomain1 + """ + # Validate the following: + # 1. deploy VM in domain_1 + # 2. stop VM in domain_1 + # 3. assignVirtualMachine to subdomain_1 + self.create_vm(self.domain_account_user1['account'], self.domain_account_user1['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.sdomain_account_user1['account'].name ,self.sdomain_account_user1['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.sdomain_account_user1['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_04_move_from_domain_to_sub_of_subdomain(self): + """Test as root, stop a VM from domain1 and attempt to move it to sub-subdomain1 + """ + # Validate the following: + # 1. deploy VM in domain_2 + # 2. stop VM in domain_2 + # 3. 
assignVirtualMachine to sub subdomain_2 + self.create_vm(self.domain_account_user2['account'], self.domain_account_user2['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.ssdomain_account_user2['account'].name ,self.ssdomain_account_user2['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.ssdomain_account_user2['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_05_move_to_domain_from_sub_of_subdomain(self): + """Test as root, stop a VM from sub-subdomain1 and attempt to move it to domain1 + """ + # Validate the following: + # 1. deploy VM in sub subdomain2 + # 2. stop VM in sub subdomain2 + # 3. assignVirtualMachine to sub domain2 + self.create_vm(self.ssdomain_account_user2['account'], self.ssdomain_account_user2['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.domain_account_user2['account'].name ,self.domain_account_user2['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.domain_account_user2['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_06_move_to_domain_from_subdomain(self): + """Test as root, stop a Vm from subdomain1 and attempt to move it to domain1 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 + # 2. stop VM in sub subdomain1 + # 3. assignVirtualMachine to domain1 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.domain_account_user1['account'].name ,self.domain_account_user1['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.domain_account_user1['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_07_move_across_subdomain(self): + """Test as root, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 + # 2. stop VM in sub subdomain1 + # 3. assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.sdomain_account_user2['account']) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_08_move_across_subdomain_network_create(self): + """Test as root, stop a VM from subdomain1 and attempt to move it to subdomain2, network should get craeted + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 + # 2. stop VM in sub subdomain1 + # 3. 
assignVirtualMachine to subdomain2 network should get created + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain']) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + self.check_vm_is_moved_in_account_domainid(self.sdomain_account_user2['account']) + networks = list_networks(self.apiclient, + account=self.sdomain_account_user2['account'].name, + domainid=self.sdomain_account_user2['domain'].id) + self.assertEqual(isinstance(networks, list), + True, + "Check for list networks response return valid data") + self.assertNotEqual(len(networks), + 0, + "Check list networks response") + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_09_move_across_subdomain(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 + # 2. stop VM in sub subdomain1 + # 3. assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain']) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_10_move_across_subdomain_vm_running(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 + # 3. assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'],isRunning=True) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_11_move_across_subdomain_vm_pfrule(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 with PF rule set. + # 3. assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'],pfrule=True) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_12_move_across_subdomain_vm_volumes(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 with volumes. + # 3. 
assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'],volume=self.sdomain_account_user1['volume']) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + # Check all volumes attached to same VM + list_volume_response = list_volumes(self.apiclient, + virtualmachineid=self.virtual_machine.id, + type='DATADISK', + listall=True) + self.assertEqual(isinstance(list_volume_response, list), + True, + "Check list volumes response for valid list") + + self.assertNotEqual(list_volume_response[0].domainid, self.sdomain_account_user2['domain'].id, "Volume ownership not changed.") + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_13_move_across_subdomain_vm_snapshot(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 with snapshot. + # 3. assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'], snapshot=True) + self.virtual_machine.assign_virtual_machine(self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + snapshots = list_snapshots(self.apiclient, + id=self.snapshot.id) + self.assertEqual(snapshots, + None, + "Snapshots stil present for a vm in domain") + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_14_move_across_subdomain_vm_project(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 with snapshot. + # 3. assignVirtualMachine to subdomain2 + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'], project=self.sdomain_account_user1['project']) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_15_move_across_subdomain_account_limit(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 when limit reached + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 when account limit is reached. + # 3. assignVirtualMachine to subdomain2 + update_resource_limit(self.apiclient, + 0, # VM Instances + account=self.sdomain_account_user2['account'].name, + domainid=self.sdomain_account_user2['domain'].id, + max=0) + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'], snapshot=True) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) + + @attr(tags = ["advanced"]) + @log_test_exceptions + def test_16_move_across_subdomain_volume_and_account_limit(self): + """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 volumes are attached and limit reached + """ + # Validate the following: + # 1. deploy VM in sub subdomain1 when account limit is reached. + # 3. 
assignVirtualMachine to subdomain2 + update_resource_limit( + self.apiclient, + 0, # VM Instances + account=self.sdomain_account_user2['account'].name, + domainid=self.sdomain_account_user2['domain'].id, + max=0) + self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'], snapshot=True, volume=self.sdomain_account_user1['volume']) + self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, self.apiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) diff --git a/test/integration/component/test_custom_hostname.py b/test/integration/component/test_custom_hostname.py index e5452141d9c..a85f619fc00 100644 --- a/test/integration/component/test_custom_hostname.py +++ b/test/integration/component/test_custom_hostname.py @@ -104,7 +104,7 @@ class TestInstanceNameFlagTrue(cloudstackTestCase): cls.services = Services().services # Get Zone, default template cls.zone = get_zone(cls.api_client, cls.services) - cls.services["mode"] = cls.zone.networktype + cls.services["mode"] = cls.zone.networktype cls.template = get_template( cls.api_client, cls.zone.id, diff --git a/test/integration/component/test_egress_rules.py b/test/integration/component/test_egress_rules.py index 872ca2c7b5d..607bac86325 100644 --- a/test/integration/component/test_egress_rules.py +++ b/test/integration/component/test_egress_rules.py @@ -194,7 +194,7 @@ class TestDefaultSecurityGroupEgress(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_deployVM_InDefaultSecurityGroup(self): """Test deploy VM in default security group with no egress rules """ @@ -351,7 +351,7 @@ class TestAuthorizeIngressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_authorizeIngressRule(self): """Test authorize ingress rule """ @@ -509,7 +509,7 @@ class TestDefaultGroupEgress(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_default_group_with_egress(self): """Test default group with egress rule before VM deploy and ping, ssh """ @@ -710,7 +710,7 @@ class TestDefaultGroupEgressAfterDeploy(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_default_group_with_egress(self): """ Test default group with egress rule added after vm deploy and ping, ssh test @@ -893,7 +893,7 @@ class TestRevokeEgressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_revoke_egress_rule(self): """Test revoke security group egress rule """ @@ -1155,7 +1155,7 @@ class TestInvalidAccountAuthroize(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_invalid_account_authroize(self): """Test invalid account authroize """ @@ -1283,7 +1283,7 @@ class TestMultipleAccountsEgressRuleNeg(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_multiple_account_egress_rule_negative(self): """Test multiple account egress rules negative case """ @@ -1531,7 +1531,7 @@ class TestMultipleAccountsEgressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_multiple_account_egress_rule_positive(self): """Test multiple account egress rules positive case """ @@ -1822,7 +1822,7 @@ class 
TestStartStopVMWithEgressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_start_stop_vm_egress(self): """ Test stop start Vm with egress rules """ @@ -2034,7 +2034,7 @@ class TestInvalidParametersForEgress(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_invalid_parameters(self): """ Test invalid parameters for egress rules """ diff --git a/test/integration/component/test_explicit_dedication.py b/test/integration/component/test_explicit_dedication.py new file mode 100644 index 00000000000..21a4904e71b --- /dev/null +++ b/test/integration/component/test_explicit_dedication.py @@ -0,0 +1,231 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" P1 tests for Storage motion +""" +#Import Local Modules +import marvin +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.remoteSSHClient import remoteSSHClient +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +from nose.plugins.attrib import attr +#Import System modules +import time + +_multiprocess_shared_ = True +class Services: + """Test explicit dedication + """ + + def __init__(self): + self.services = { + "disk_offering":{ + "displaytext": "Small", + "name": "Small", + "disksize": 1 + }, + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "testexplicit", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + "virtual_machine" : + { + "affinity": { + "name": "explicit", + "type": "ExplicitDedication", + }, + "hypervisor" : "XenServer", + }, + "small": + # Create a small virtual machine instance with disk offering + { + "displayname": "testserver", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "service_offerings": + { + "for-explicit": + { + # Small service offering ID to for change VM + # service offering from medium to small + "name": "For explicit", + "displaytext": "For explicit", + "cpunumber": 1, + "cpuspeed": 500, + "memory": 512 + } + }, + "template": { + "displaytext": "Cent OS Template", + "name": "Cent OS Template", + "passwordenabled": True, + }, + "diskdevice": '/dev/xvdd', + # Disk device where ISO is attached to instance + "mount_dir": "/mnt/tmp", + "sleep": 60, + "timeout": 10, + "ostype": 'CentOS 5.3 (64-bit)' + } + +class TestExplicitDedication(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.api_client = super(TestExplicitDedication, 
cls).getClsTestClient().getApiClient() + cls.services = Services().services + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.services['mode'] = cls.zone.networktype + + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = cls.template.id + + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) + + cls.small_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offerings"]["for-explicit"] + ) + + #cls.ag = AffinityGroup.create(cls.api_client, cls.services["virtual_machine"]["affinity"], + # account=cls.services["account"], domainid=cls.domain.id) + + cls._cleanup = [ + cls.small_offering, + cls.account + ] + + @classmethod + def tearDownClass(cls): + cls.api_client = super(TestExplicitDedication, cls).getClsTestClient().getApiClient() + cleanup_resources(cls.api_client, cls._cleanup) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + #Clean up, terminate the created ISOs + cleanup_resources(self.apiclient, self.cleanup) + return + + # This test requires multi host and at least one host which is empty (no vms should + # be running on that host). It explicitly dedicates empty host to an account, deploys + # a vm for that account and verifies that the vm gets deployed to the dedicated host. + @attr(tags = ["advanced", "basic", "multihosts", "explicitdedication"]) + def test_01_deploy_vm_with_explicit_dedication(self): + """Test explicit dedication is placing vms of an account on dedicated hosts. + """ + # Validate the following + # 1. Find and dedicate an empty host to an account. + # 2. Create an affinity group for explicit dedication. + # 3. Create a vm deployment by passing the affinity group as a parameter. + # 4. Validate the vm got deployed on the dedicated host. + # 5. Cleanup. + + # list and find an empty hosts + all_hosts = list_hosts( + self.apiclient, + type='Routing', + ) + + empty_host = None + for host in all_hosts: + vms_on_host = list_virtual_machines( + self.api_client, + hostid=host.id) + if not vms_on_host: + empty_host = host + break + + # Create an affinity group for explicit dedication. + agCmd = createAffinityGroup.createAffinityGroupCmd() + agCmd.name = "explicit-affinity" + agCmd.displayText = "explicit-affinity" + agCmd.account = self.account.name + agCmd.domainid = self.account.domainid + agCmd.type = self.services['virtual_machine']['affinity']['type'] + self.apiclient.createAffinityGroup(agCmd) + + # dedicate the empty host to this account. + dedicateCmd = dedicateHost.dedicateHostCmd() + dedicateCmd.hostid = empty_host.id + dedicateCmd.domainid = self.domain.id + self.apiclient.dedicateHost(dedicateCmd) + + # deploy vm on the dedicated resource. 
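+        # The deployment below passes affinitygroupnames=["explicit-affinity"], so
+        # the planner should restrict placement to the host that was just dedicated
+        # to this account; the assertion on vm_response.hostid verifies exactly that.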
+ vm = VirtualMachine.create( + self.api_client, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + affinitygroupnames=["explicit-affinity"], + mode=self.services["mode"] + ) + + list_vm_response = list_virtual_machines( + self.apiclient, + id=vm.id + ) + + vm_response = list_vm_response[0] + + self.assertEqual( + vm_response.hostid, + empty_host.id, + "Check destination hostID of deployed VM" + ) + + # release the dedicated host to this account. + releaseCmd = releaseDedicatedHost.releaseDedicatedHostCmd() + releaseCmd.hostid = empty_host.id + releaseCmd.domainid = self.domain.id + self.apiclient.releaseDedicatedHost(releaseCmd) + + #Deletion of the created VM and affinity group is taken care as part of account clean + + return diff --git a/test/integration/component/test_high_availability.py b/test/integration/component/test_high_availability.py index cd2dfcea559..7b0f78e2446 100644 --- a/test/integration/component/test_high_availability.py +++ b/test/integration/component/test_high_availability.py @@ -169,7 +169,6 @@ class TestHighAvailability(cloudstackTestCase): try: #Clean up, terminate the created accounts, domains etc cleanup_resources(self.apiclient, self.cleanup) - self.testClient.close() except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return diff --git a/test/integration/component/test_host_high_availability.py b/test/integration/component/test_host_high_availability.py index 8c66d175dd7..57eb5edede9 100644 --- a/test/integration/component/test_host_high_availability.py +++ b/test/integration/component/test_host_high_availability.py @@ -18,15 +18,12 @@ """ P1 tests for dedicated Host high availability """ #Import Local Modules -import marvin from nose.plugins.attrib import attr from marvin.cloudstackTestCase import * from marvin.cloudstackAPI import * from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * -from marvin import remoteSSHClient -import datetime class Services: @@ -34,90 +31,90 @@ class Services: def __init__(self): self.services = { - "account": { - "email": "test@test.com", - "firstname": "HA", - "lastname": "HA", - "username": "HA", - # Random characters are appended for unique - # username - "password": "password", - }, - "service_offering_with_ha": { - "name": "Tiny Instance With HA Enabled", - "displaytext": "Tiny Instance", - "cpunumber": 1, - "cpuspeed": 100, # in MHz - "memory": 128, # In MBs - }, - "service_offering_without_ha": { - "name": "Tiny Instance Without HA", - "displaytext": "Tiny Instance", - "cpunumber": 1, - "cpuspeed": 100, # in MHz - "memory": 128, # In MBs - }, - "virtual_machine": { - "displayname": "VM", - "username": "root", - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - # Hypervisor type should be same as - # hypervisor type of cluster - "privateport": 22, - "publicport": 22, - "protocol": 'TCP', - }, - "ostype": 'CentOS 5.3 (64-bit)', - "timeout": 100, - } + "account": { + "email": "test@test.com", + "firstname": "HA", + "lastname": "HA", + "username": "HA", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering_with_ha": { + "name": "Tiny Instance With HA Enabled", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + "service_offering_without_ha": { + "name": "Tiny Instance Without HA", + 
"displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + "virtual_machine": { + "displayname": "VM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + # Hypervisor type should be same as + # hypervisor type of cluster + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "ostype": 'CentOS 5.3 (64-bit)', + "timeout": 100, + } + class TestHostHighAvailability(cloudstackTestCase): """ Dedicated host HA test cases """ - + @classmethod def setUpClass(cls): - cls.api_client = super( - TestHostHighAvailability, - cls - ).getClsTestClient().getApiClient() + TestHostHighAvailability, + cls + ).getClsTestClient().getApiClient() cls.services = Services().services # Get Zone, Domain and templates cls.domain = get_domain( - cls.api_client, - cls.services - ) + cls.api_client, + cls.services + ) cls.zone = get_zone( - cls.api_client, - cls.services - ) + cls.api_client, + cls.services + ) cls.template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id cls.service_offering_with_ha = ServiceOffering.create( - cls.api_client, - cls.services["service_offering_with_ha"], - offerha=True - ) - + cls.api_client, + cls.services["service_offering_with_ha"], + offerha=True + ) + cls.service_offering_without_ha = ServiceOffering.create( - cls.api_client, - cls.services["service_offering_without_ha"], - offerha=False - ) - + cls.api_client, + cls.services["service_offering_without_ha"], + offerha=False + ) + cls._cleanup = [ - cls.service_offering_with_ha, - cls.service_offering_without_ha, - ] + cls.service_offering_with_ha, + cls.service_offering_without_ha, + ] return @classmethod @@ -133,11 +130,11 @@ class TestHostHighAvailability(cloudstackTestCase): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.account = Account.create( - self.apiclient, - self.services["account"], - admin=True, - domainid=self.domain.id - ) + self.apiclient, + self.services["account"], + admin=True, + domainid=self.domain.id + ) self.cleanup = [self.account] return @@ -150,76 +147,76 @@ class TestHostHighAvailability(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(configuration = "ha.tag") - @attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator"]) + @attr(configuration="ha.tag") + @attr(tags=["advanced", "advancedns", "sg", "basic", "eip", "simulator"]) def test_01_vm_deployment_with_compute_offering_with_ha_enabled(self): """ Test VM deployments (Create HA enabled Compute Service Offering and VM) """ - + # Steps, - #1. Create a Compute service offering with the “Offer HA” option selected. + #1. Create a Compute service offering with the 'Offer HA' option selected. #2. Create a Guest VM with the compute service offering created above. # Validations, - #1. Ensure that the offering is created and that in the UI the “Offer HA” field is enabled (Yes) - #The listServiceOffering API should list “offerha” as true. + #1. Ensure that the offering is created and that in the UI the 'Offer HA' field is enabled (Yes) + #The listServiceOffering API should list 'offerha' as true. #2. Select the newly created VM and ensure that the Compute offering field value lists the compute service offering that was selected. 
- # Also, check that the HA Enabled field is enabled “Yes”. - + # Also, check that the HA Enabled field is enabled 'Yes'. + #list and validate above created service offering with Ha enabled list_service_response = list_service_offering( - self.apiclient, - id=self.service_offering_with_ha.id - ) + self.apiclient, + id=self.service_offering_with_ha.id + ) self.assertEqual( isinstance(list_service_response, list), True, "listServiceOfferings returned invalid object in response." - ) + ) self.assertNotEqual( len(list_service_response), 0, "listServiceOfferings returned empty list." - ) + ) self.assertEqual( list_service_response[0].offerha, True, "The service offering is not HA enabled" - ) - + ) + #create virtual machine with the service offering with Ha enabled virtual_machine = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_with_ha.id - ) + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_with_ha.id + ) vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine.id, - listall=True - ) + self.apiclient, + id=virtual_machine.id, + listall=True + ) self.assertEqual( isinstance(vms, list), True, "listVirtualMachines returned invalid object in response." - ) + ) self.assertNotEqual( len(vms), 0, "listVirtualMachines returned empty list." - ) + ) self.debug("Deployed VM on host: %s" % vms[0].hostid) self.assertEqual( vms[0].haenable, True, "VM not created with HA enable tag" - ) + ) - @attr(configuration = "ha.tag") - @attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) + @attr(configuration="ha.tag") + @attr(tags=["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) def test_02_no_vm_creation_on_host_with_haenabled(self): """ Verify you can not create new VMs on hosts with an ha.tag """ - + # Steps, #1. Fresh install CS (Bonita) that supports this feature #2. Create Basic zone, pod, cluster, add 3 hosts to cluster (host1, host2, host3), secondary & primary Storage @@ -229,184 +226,184 @@ class TestHostHighAvailability(cloudstackTestCase): # Validations, #Check to make sure the newly created VM is not on any HA enabled hosts #The VM should be created only on host1 or host2 and never host3 (HA enabled) - + #create and verify virtual machine with HA enabled service offering virtual_machine_with_ha = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_with_ha.id - ) - + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_with_ha.id + ) + vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine_with_ha.id, - listall=True - ) - + self.apiclient, + id=virtual_machine_with_ha.id, + listall=True + ) + self.assertEqual( isinstance(vms, list), True, "listVirtualMachines returned invalid object in response." - ) - + ) + self.assertNotEqual( len(vms), 0, "listVirtualMachines returned empty list." 
- ) - + ) + vm = vms[0] - + self.debug("Deployed VM on host: %s" % vm.hostid) - + #validate the virtual machine created is host Ha enabled list_hosts_response = list_hosts( - self.apiclient, - id=vm.hostid - ) + self.apiclient, + id=vm.hostid + ) self.assertEqual( isinstance(list_hosts_response, list), True, "listHosts returned invalid object in response." - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "listHosts retuned empty list in response." - ) - + ) + self.assertEqual( list_hosts_response[0].hahost, False, "VM created on HA enabled host." - ) - + ) + #create and verify virtual machine with Ha disabled service offering virtual_machine_without_ha = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_without_ha.id - ) - + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_without_ha.id + ) + vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine_without_ha.id, - listall=True - ) - + self.apiclient, + id=virtual_machine_without_ha.id, + listall=True + ) + self.assertEqual( isinstance(vms, list), True, "listVirtualMachines returned invalid object in response." - ) - + ) + self.assertNotEqual( len(vms), 0, "listVirtualMachines returned empty list." - ) - + ) + vm = vms[0] - + self.debug("Deployed VM on host: %s" % vm.hostid) - + #verify that the virtual machine created on the host is Ha disabled list_hosts_response = list_hosts( - self.apiclient, - id=vm.hostid - ) + self.apiclient, + id=vm.hostid + ) self.assertEqual( isinstance(list_hosts_response, list), True, "listHosts returned invalid object in response." - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "listHosts returned empty list." - ) - + ) + host = list_hosts_response[0] - + self.assertEqual( host.hahost, False, "VM migrated to HA enabled host." - ) + ) - @attr(configuration = "ha.tag") - @attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) + @attr(configuration="ha.tag") + @attr(tags=["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) def test_03_cant_migrate_vm_to_host_with_ha_positive(self): """ Verify you can not migrate VMs to hosts with an ha.tag (positive) """ - + # Steps, - #1. Create a Compute service offering with the “Offer HA” option selected. + #1. Create a Compute service offering with the 'Offer HA' option selected. #2. Create a Guest VM with the compute service offering created above. - #3. Select the VM and migrate VM to another host. Choose a “Suitable” host (i.e. host2) + #3. Select the VM and migrate VM to another host. Choose a 'Suitable' host (i.e. host2) # Validations - #The option from the “Migrate instance to another host” dialog box” should list host3 as “Not Suitable” for migration. - #Confirm that the VM is migrated to the “Suitable” host you selected (i.e. host2) - + #The option from the 'Migrate instance to another host' dialog box' should list host3 as 'Not Suitable' for migration. + #Confirm that the VM is migrated to the 'Suitable' host you selected (i.e. 
host2) + #create and verify the virtual machine with HA enabled service offering virtual_machine_with_ha = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_with_ha.id - ) - + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_with_ha.id + ) + vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine_with_ha.id, - listall=True, - ) - + self.apiclient, + id=virtual_machine_with_ha.id, + listall=True, + ) + self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" - ) - + ) + self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" - ) - + ) + vm = vms[0] - + self.debug("Deployed VM on host: %s" % vm.hostid) - + #Find out a Suitable host for VM migration list_hosts_response = list_hosts( - self.apiclient, - ) + self.apiclient, + ) self.assertEqual( isinstance(list_hosts_response, list), True, "The listHosts API returned the invalid list" - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "The listHosts returned nothing." - ) + ) suitableHost = None for host in list_hosts_response: if host.suitableformigration == True and host.hostid != vm.hostid: suitableHost = host break - + self.assertTrue(suitableHost is not None, "suitablehost should not be None") - + #Migration of the VM to a suitable host self.debug("Migrating VM-ID: %s to Host: %s" % (self.vm.id, suitableHost.id)) @@ -417,20 +414,20 @@ class TestHostHighAvailability(cloudstackTestCase): #Verify that the VM migrated to a targeted Suitable host list_vm_response = list_virtual_machines( - self.apiclient, - id=vm.id - ) + self.apiclient, + id=vm.id + ) self.assertEqual( isinstance(list_vm_response, list), True, "The listVirtualMachines returned the invalid list." - ) + ) self.assertNotEqual( list_vm_response, None, "The listVirtualMachines API returned nothing." - ) + ) vm_response = list_vm_response[0] @@ -438,82 +435,82 @@ class TestHostHighAvailability(cloudstackTestCase): vm_response.id, vm.id, "The virtual machine id and the the virtual machine from listVirtualMachines is not matching." - ) + ) self.assertEqual( vm_response.hostid, suitableHost.id, "The VM is not migrated to targeted suitable host." - ) - - @attr(configuration = "ha.tag") - @attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) + ) + + @attr(configuration="ha.tag") + @attr(tags=["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) def test_04_cant_migrate_vm_to_host_with_ha_negative(self): """ Verify you can not migrate VMs to hosts with an ha.tag (negative) """ - + # Steps, - #1. Create a Compute service offering with the “Offer HA” option selected. + #1. Create a Compute service offering with the 'Offer HA' option selected. #2. Create a Guest VM with the compute service offering created above. - #3. Select the VM and migrate VM to another host. Choose a “Not Suitable” host. + #3. Select the VM and migrate VM to another host. Choose a 'Not Suitable' host. # Validations, - #The option from the “Migrate instance to another host” dialog box” should list host3 as “Not Suitable” for migration. + #The option from the 'Migrate instance to another host' dialog box should list host3 as 'Not Suitable' for migration. 
#By design, The Guest VM can STILL can be migrated to host3 if the admin chooses to do so. - + #create and verify virtual machine with HA enabled service offering virtual_machine_with_ha = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_with_ha.id - ) - + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_with_ha.id + ) + vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine_with_ha.id, - listall=True - ) - + self.apiclient, + id=virtual_machine_with_ha.id, + listall=True + ) + self.assertEqual( isinstance(vms, list), True, "The listVirtualMachines returned invalid object in response." - ) - + ) + self.assertNotEqual( len(vms), 0, "The listVirtualMachines returned empty response." - ) - + ) + vm = vms[0] - + self.debug("Deployed VM on host: %s" % vm.hostid) - + #Find out Non-Suitable host for VM migration list_hosts_response = list_hosts( - self.apiclient, - ) + self.apiclient, + ) self.assertEqual( isinstance(list_hosts_response, list), True, "listHosts returned invalid object in response." - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "listHosts returned empty response." - ) - - notSuitableHost = None + ) + + notSuitableHost = None for host in list_hosts_response: if not host.suitableformigration and host.hostid != vm.hostid: notSuitableHost = host - break - + break + self.assertTrue(notSuitableHost is not None, "notsuitablehost should not be None") - + #Migrate VM to Non-Suitable host self.debug("Migrating VM-ID: %s to Host: %s" % (vm.id, notSuitableHost.id)) @@ -524,290 +521,290 @@ class TestHostHighAvailability(cloudstackTestCase): #Verify that the virtual machine got migrated to targeted Non-Suitable host list_vm_response = list_virtual_machines( - self.apiclient, - id=vm.id - ) + self.apiclient, + id=vm.id + ) self.assertEqual( isinstance(list_vm_response, list), True, "listVirtualMachine returned invalid object in response." - ) + ) self.assertNotEqual( len(list_vm_response), 0, "listVirtualMachines returned empty response." - ) + ) self.assertEqual( list_vm_response[0].id, vm.id, "Virtual machine id with the virtual machine from listVirtualMachine is not matching." - ) + ) self.assertEqual( list_vm_response[0].hostid, notSuitableHost.id, "The detination host id of migrated VM is not matching." - ) + ) - @attr(configuration = "ha.tag") - @attr(speed = "slow") - @attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) + @attr(configuration="ha.tag") + @attr(speed="slow") + @attr(tags=["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) def test_05_no_vm_with_ha_gets_migrated_to_ha_host_in_live_migration(self): """ Verify that none of the VMs with HA enabled migrate to an ha tagged host during live migration """ - + # Steps, - #1. Fresh install CS (Bonita) that supports this feature + #1. Fresh install CS that supports this feature #2. Create Basic zone, pod, cluster, add 3 hosts to cluster (host1, host2, host3), secondary & primary Storage #3. When adding host3, assign the HA host tag. #4. Create VMs with and without the Compute Service Offering with the HA tag. - #5. Note the VMs on host1 and whether any of the VMs have their “HA enabled” flags enabled. + #5. Note the VMs on host1 and whether any of the VMs have their 'HA enabled' flags enabled. #6. 
Put host1 into maintenance mode. # Validations, #1. Make sure the VMs are created on either host1 or host2 and not on host3 #2. Putting host1 into maintenance mode should trigger a live migration. Make sure the VMs are not migrated to HA enabled host3. - + # create and verify virtual machine with HA disabled service offering virtual_machine_with_ha = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_with_ha.id - ) - + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_with_ha.id + ) + vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine_with_ha.id, - listall=True - ) - + self.apiclient, + id=virtual_machine_with_ha.id, + listall=True + ) + self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" - ) - + ) + self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" - ) - + ) + vm_with_ha_enabled = vms[0] - + #Verify the virtual machine got created on non HA host list_hosts_response = list_hosts( - self.apiclient, - id=vm_with_ha_enabled.hostid - ) + self.apiclient, + id=vm_with_ha_enabled.hostid + ) self.assertEqual( isinstance(list_hosts_response, list), True, "Check list response returns a valid list" - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "Check Host is available" - ) - + ) + self.assertEqual( list_hosts_response[0].hahost, False, "The virtual machine is not ha enabled so check if VM is created on host which is also not ha enabled" - ) - + ) + #put the Host in maintainance mode self.debug("Enabling maintenance mode for host %s" % vm_with_ha_enabled.hostid) cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = vm_with_ha_enabled.hostid self.apiclient.prepareHostForMaintenance(cmd) - + timeout = self.services["timeout"] - + #verify the VM live migration happened to another running host self.debug("Waiting for VM to come up") wait_for_vm( self.apiclient, virtualmachineid=vm_with_ha_enabled.id, interval=timeout - ) - + ) + vms = VirtualMachine.list( - self.apiclient, - id=vm_with_ha_enabled.id, - listall=True, - ) - + self.apiclient, + id=vm_with_ha_enabled.id, + listall=True, + ) + self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" - ) - + ) + self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" - ) - + ) + vm_with_ha_enabled1 = vms[0] - + list_hosts_response = list_hosts( - self.apiclient, - id=vm_with_ha_enabled1.hostid - ) + self.apiclient, + id=vm_with_ha_enabled1.hostid + ) self.assertEqual( isinstance(list_hosts_response, list), True, "Check list response returns a valid list" - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "Check Host is available" - ) - + ) + self.assertEqual( list_hosts_response[0].hahost, False, "The virtual machine is not ha enabled so check if VM is created on host which is also not ha enabled" - ) - + ) + self.debug("Disabling the maintenance mode for host %s" % vm_with_ha_enabled.hostid) cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() cmd.id = vm_with_ha_enabled.hostid self.apiclient.cancelHostMaintenance(cmd) - - @attr(configuration = "ha.tag") - @attr(speed = "slow") - @attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) + + 
@attr(configuration="ha.tag") + @attr(speed="slow") + @attr(tags=["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"]) def test_06_no_vm_without_ha_gets_migrated_to_ha_host_in_live_migration(self): """ Verify that none of the VMs without HA enabled migrate to an ha tagged host during live migration """ - + # Steps, - #1. Fresh install CS (Bonita) that supports this feature + #1. Fresh install CS that supports this feature #2. Create Basic zone, pod, cluster, add 3 hosts to cluster (host1, host2, host3), secondary & primary Storage #3. When adding host3, assign the HA host tag. #4. Create VMs with and without the Compute Service Offering with the HA tag. - #5. Note the VMs on host1 and whether any of the VMs have their “HA enabled” flags enabled. + #5. Note the VMs on host1 and whether any of the VMs have their 'HA enabled' flags enabled. #6. Put host1 into maintenance mode. # Validations, #1. Make sure the VMs are created on either host1 or host2 and not on host3 #2. Putting host1 into maintenance mode should trigger a live migration. Make sure the VMs are not migrated to HA enabled host3. - + # create and verify virtual machine with HA disabled service offering virtual_machine_without_ha = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering_without_ha.id - ) - + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_without_ha.id + ) + vms = VirtualMachine.list( - self.apiclient, - id=virtual_machine_without_ha.id, - listall=True - ) - + self.apiclient, + id=virtual_machine_without_ha.id, + listall=True + ) + self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" - ) - + ) + self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" - ) - + ) + vm_with_ha_disabled = vms[0] - + #Verify the virtual machine got created on non HA host list_hosts_response = list_hosts( - self.apiclient, - id=vm_with_ha_disabled.hostid - ) + self.apiclient, + id=vm_with_ha_disabled.hostid + ) self.assertEqual( isinstance(list_hosts_response, list), True, "Check list response returns a valid list" - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "Check Host is available" - ) - + ) + self.assertEqual( list_hosts_response[0].hahost, False, "The virtual machine is not ha enabled so check if VM is created on host which is also not ha enabled" - ) - + ) + #put the Host in maintainance mode self.debug("Enabling maintenance mode for host %s" % vm_with_ha_disabled.hostid) cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = vm_with_ha_disabled.hostid self.apiclient.prepareHostForMaintenance(cmd) - + timeout = self.services["timeout"] - + #verify the VM live migration happened to another running host self.debug("Waiting for VM to come up") wait_for_vm( self.apiclient, virtualmachineid=vm_with_ha_disabled.id, interval=timeout - ) - + ) + vms = VirtualMachine.list( - self.apiclient, - id=vm_with_ha_disabled.id, - listall=True - ) - + self.apiclient, + id=vm_with_ha_disabled.id, + listall=True + ) + self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" - ) - + ) + self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" - ) - + ) + list_hosts_response = list_hosts( - 
self.apiclient, - id=vms[0].hostid - ) + self.apiclient, + id=vms[0].hostid + ) self.assertEqual( isinstance(list_hosts_response, list), True, "Check list response returns a valid list" - ) - + ) + self.assertNotEqual( len(list_hosts_response), 0, "Check Host is available" - ) - + ) + self.assertEqual( list_hosts_response[0].hahost, False, "The virtual machine is not ha enabled so check if VM is created on host which is also not ha enabled" - ) - + ) + self.debug("Disabling the maintenance mode for host %s" % vm_with_ha_disabled.hostid) cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() cmd.id = vm_with_ha_disabled.hostid diff --git a/test/integration/component/test_project_limits.py b/test/integration/component/test_project_limits.py index 9184dca5202..af99717ad16 100644 --- a/test/integration/component/test_project_limits.py +++ b/test/integration/component/test_project_limits.py @@ -201,8 +201,8 @@ class TestProjectLimits(cloudstackTestCase): project = Project.create( self.apiclient, self.services["project"], - account=self.admin.account.name, - domainid=self.admin.account.domainid + account=self.admin.name, + domainid=self.admin.domainid ) # Cleanup created project at end of test self.cleanup.append(project) @@ -343,8 +343,8 @@ class TestProjectLimits(cloudstackTestCase): project = Project.create( self.apiclient, self.services["project"], - account=self.admin.account.name, - domainid=self.admin.account.domainid + account=self.admin.name, + domainid=self.admin.domainid ) # Cleanup created project at end of test self.cleanup.append(project) diff --git a/test/integration/component/test_project_resources.py b/test/integration/component/test_project_resources.py index 84141889f3f..378238e70fd 100644 --- a/test/integration/component/test_project_resources.py +++ b/test/integration/component/test_project_resources.py @@ -651,9 +651,9 @@ class TestTemplates(cloudstackTestCase): """Test use of private template in a project """ # 1. Create a project - # 2. Verify that in order to use somebody’s Private template for vm + # 2. Verify that in order to use somebody's Private template for vm # creation in the project, permission to use the template has to - # be granted to the Project (use API “updateTemplatePermissions” + # be granted to the Project (use API 'updateTemplatePermissions' # with project id to achieve that). 
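# A hedged sketch (not part of this patch) of the step the comment above refers to:
# granting a project permission to use a private template through the
# updateTemplatePermissions API. The command-class usage follows marvin's usual
# auto-generated pattern; `template`, `project` and `apiclient` are assumed to
# already exist in the calling test.
from marvin.cloudstackAPI import updateTemplatePermissions

cmd = updateTemplatePermissions.updateTemplatePermissionsCmd()
cmd.id = template.id              # the private template being shared
cmd.op = "add"                    # add (rather than remove/reset) permissions
cmd.projectids = [project.id]     # the project that should be allowed to use it
apiclient.updateTemplatePermissions(cmd)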
self.debug("Deploying VM for with public template: %s" % diff --git a/test/integration/component/test_project_usage.py b/test/integration/component/test_project_usage.py index ab789e1c13d..bb253e1c04a 100644 --- a/test/integration/component/test_project_usage.py +++ b/test/integration/component/test_project_usage.py @@ -124,7 +124,7 @@ class TestVmUsage(cloudstackTestCase): # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client, cls.services) cls.zone = get_zone(cls.api_client, cls.services) - cls.services['mode'] = cls.cls.zone.networktype + cls.services['mode'] = cls.zone.networktype template = get_template( cls.api_client, diff --git a/test/integration/component/test_security_groups.py b/test/integration/component/test_security_groups.py index 2ed27fe0c5d..3c25e25a34f 100644 --- a/test/integration/component/test_security_groups.py +++ b/test/integration/component/test_security_groups.py @@ -164,7 +164,7 @@ class TestDefaultSecurityGroup(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_deployVM_InDefaultSecurityGroup(self): """Test deploy VM in default security group """ @@ -243,7 +243,7 @@ class TestDefaultSecurityGroup(cloudstackTestCase): ) return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_02_listSecurityGroups(self): """Test list security groups for admin account """ @@ -278,7 +278,7 @@ class TestDefaultSecurityGroup(cloudstackTestCase): ) return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_03_accessInDefaultSecurityGroup(self): """Test access in default security group """ @@ -435,7 +435,7 @@ class TestAuthorizeIngressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_authorizeIngressRule(self): """Test authorize ingress rule """ @@ -571,7 +571,7 @@ class TestRevokeIngressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_revokeIngressRule(self): """Test revoke ingress rule """ @@ -868,7 +868,7 @@ class TestdeployVMWithUserData(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_deployVMWithUserData(self): """Test Deploy VM with User data""" @@ -1044,7 +1044,7 @@ class TestDeleteSecurityGroup(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_delete_security_grp_running_vm(self): """Test delete security group with running VM""" @@ -1128,7 +1128,7 @@ class TestDeleteSecurityGroup(cloudstackTestCase): ) return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_02_delete_security_grp_withoout_running_vm(self): """Test delete security group without running VM""" @@ -1290,7 +1290,7 @@ class TestIngressRule(cloudstackTestCase): return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_01_authorizeIngressRule_AfterDeployVM(self): """Test delete security group with running VM""" @@ -1402,7 +1402,7 @@ class TestIngressRule(cloudstackTestCase): % (ingress_rule_2["id"], e)) return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", "advancedsg"]) def test_02_revokeIngressRule_AfterDeployVM(self): """Test Revoke ingress rule after deploy VM""" @@ -1556,7 +1556,7 @@ class TestIngressRule(cloudstackTestCase): % (icmp_rule["ruleid"], e)) return - @attr(tags = ["sg", "eip"]) + @attr(tags = ["sg", "eip", 
"advancedsg"]) def test_03_stopStartVM_verifyIngressAccess(self): """Test Start/Stop VM and Verify ingress rule""" diff --git a/test/integration/component/test_shared_networks.py b/test/integration/component/test_shared_networks.py index 9845826bea6..5f964190356 100644 --- a/test/integration/component/test_shared_networks.py +++ b/test/integration/component/test_shared_networks.py @@ -235,6 +235,7 @@ class TestSharedNetworks(cloudstackTestCase): raise Exception("Warning: Exception during network cleanup : %s" % e) return + @attr(tags=["advanced", "advancedns"]) def test_sharedNetworkOffering_01(self): """ Test shared network Offering 01 """ @@ -372,6 +373,7 @@ class TestSharedNetworks(cloudstackTestCase): ) self.debug("NetworkOffering created and enabled: %s" % self.shared_network_offering.id) + @attr(tags=["advanced", "advancedns"]) def test_sharedNetworkOffering_02(self): """ Test Shared Network Offering 02 """ @@ -459,6 +461,7 @@ class TestSharedNetworks(cloudstackTestCase): except Exception as e: self.debug("Network Offering creation failed with vlan as False in advance mode and shared guest type.") + @attr(tags=["advanced", "advancedns"]) def test_sharedNetworkOffering_03(self): """ Test Shared Network Offering 03 """ @@ -547,6 +550,7 @@ class TestSharedNetworks(cloudstackTestCase): except Exception as e: self.debug("Network Offering creation failed with vlan as true and ip ranges as False in advance mode and with shared guest type.") + @attr(tags=["advanced", "advancedns"]) def test_createSharedNetwork_All(self): """ Test Shared Network ALL """ @@ -829,6 +833,7 @@ class TestSharedNetworks(cloudstackTestCase): if netaddr.IPAddress(unicode(vms[0].nic[0].ipaddress)) not in ip_range: self.fail("Virtual machine ip should be from the ip range assigned to network created.") + @attr(tags=["advanced", "advancedns"]) def test_createSharedNetwork_accountSpecific(self): """ Test Shared Networm with scope account """ @@ -1092,6 +1097,7 @@ class TestSharedNetworks(cloudstackTestCase): if netaddr.IPAddress(unicode(vms[0].nic[0].ipaddress)) not in ip_range: self.fail("Virtual machine ip should be from the ip range assigned to network created.") + @attr(tags=["advanced", "advancedns"]) def test_createSharedNetwork_domainSpecific(self): """ Test Shared Network with scope domain """ @@ -1446,6 +1452,7 @@ class TestSharedNetworks(cloudstackTestCase): if netaddr.IPAddress(unicode(vms[0].nic[0].ipaddress)) not in ip_range: self.fail("Virtual machine ip should be from the ip range assigned to network created.") + @attr(tags=["advanced", "advancedns"]) def test_createSharedNetwork_projectSpecific(self): """ Test Shared Network with scope project """ @@ -1740,6 +1747,7 @@ class TestSharedNetworks(cloudstackTestCase): if netaddr.IPAddress(unicode(vms[0].nic[0].ipaddress)) not in ip_range: self.fail("Virtual machine ip should be from the ip range assigned to network created.") + @attr(tags=["advanced", "advancedns"]) def test_createSharedNetwork_usedVlan(self): """ Test Shared Network with used vlan 01 """ @@ -1898,6 +1906,7 @@ class TestSharedNetworks(cloudstackTestCase): except Exception as e: self.debug("Network creation failed because the valn id being used by another network.") + @attr(tags=["advanced", "advancedns"]) def test_createSharedNetwork_usedVlan2(self): """ Test Shared Network with used vlan 02 """ @@ -2093,6 +2102,7 @@ class TestSharedNetworks(cloudstackTestCase): except Exception as e: self.debug("Network creation failed because the valn id being used by another network.") + 
@attr(tags=["advanced", "advancedns"]) def test_deployVM_multipleSharedNetwork(self): """ Test Vm deployment with multiple shared networks """ @@ -2350,6 +2360,7 @@ class TestSharedNetworks(cloudstackTestCase): self.assertTrue(self.network1_admin_account_virtual_machine.nic[0].ipaddress is not None, "ip should be assigned to running virtual machine") + @attr(tags=["advanced", "advancedns"]) def test_deployVM_isolatedAndShared(self): """ Test VM deployment in shared and isolated networks """ @@ -2697,6 +2708,7 @@ class TestSharedNetworks(cloudstackTestCase): except Exception as e: self.fail("SSH Access failed for %s: %s" % (self.isolated_network_admin_account_virtual_machine.ipaddress, e)) + @attr(tags=["advanced", "advancedns"]) def test_networkWithsubdomainaccessTrue(self): """ Test Shared Network with subdomainaccess=True """ @@ -2841,6 +2853,7 @@ class TestSharedNetworks(cloudstackTestCase): except: self.debug("Network creation failed because subdomainaccess parameter was passed when scope was account.") + @attr(tags=["advanced", "advancedns"]) def test_networkWithsubdomainaccessFalse(self): """ Test shared Network with subdomainaccess=False """ diff --git a/test/integration/component/test_stopped_vm.py b/test/integration/component/test_stopped_vm.py index f1096919824..0527b3d7ebf 100644 --- a/test/integration/component/test_stopped_vm.py +++ b/test/integration/component/test_stopped_vm.py @@ -78,7 +78,7 @@ class Services: { "displaytext": "Test ISO", "name": "testISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "ostype": 'CentOS 5.3 (64-bit)', "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO @@ -219,10 +219,6 @@ class TestDeployVM(cloudstackTestCase): "Running", "VM should be in Running state after deployment" ) - try: - ssh = self.virtual_machine.get_ssh_client() - except Exception as e: - self.fail("SSH to VM instance failed!") return @attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"]) @@ -274,10 +270,6 @@ class TestDeployVM(cloudstackTestCase): "Running", "VM should be in Running state after deployment" ) - try: - ssh = self.virtual_machine.get_ssh_client() - except Exception as e: - self.fail("SSH to VM instance failed!") return @attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"]) diff --git a/test/integration/component/test_storage_motion.py b/test/integration/component/test_storage_motion.py index cf110d34e61..0dcc7f8e8d1 100644 --- a/test/integration/component/test_storage_motion.py +++ b/test/integration/component/test_storage_motion.py @@ -178,8 +178,6 @@ class TestStorageMotion(cloudstackTestCase): # Migrate to a host that requires storage motion hosts[:] = [host for host in hosts if host.requiresStorageMotion] - self.assert_(hosts is not None, msg="No valid hosts for storage motion") - self.assert_(len(hosts)>0, msg="No valid hosts for storage motion. Skipping") if hosts is None or len(hosts) == 0: self.skipTest("No valid hosts for storage motion. 
Skipping") diff --git a/test/integration/component/test_tags.py b/test/integration/component/test_tags.py index 12a586313f1..992ca1daf58 100644 --- a/test/integration/component/test_tags.py +++ b/test/integration/component/test_tags.py @@ -102,9 +102,9 @@ class Services: }, "iso": { - "displaytext": "DSL ISO", - "name": "DSL ISO", - "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", + "displaytext": "Dummy ISO", + "name": "Dummy ISO", + "url": "http://people.apache.org/~tsp/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, @@ -216,7 +216,7 @@ class TestResourceTags(cloudstackTestCase): cls.api_client, cls.services["virtual_machine"], accountid=cls.account.name, - domainid=cls.account.account.domainid, + domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id, mode=cls.zone.networktype ) diff --git a/test/integration/component/test_templates.py b/test/integration/component/test_templates.py index 1a60123b820..65beabfc98f 100644 --- a/test/integration/component/test_templates.py +++ b/test/integration/component/test_templates.py @@ -93,7 +93,6 @@ class Services: "templatefilter": 'self', }, "templatefilter": 'self', - "destzoneid": 2, # For Copy template (Destination zone) "ostype": 'CentOS 5.3 (64-bit)', "sleep": 60, "timeout": 10, @@ -423,94 +422,6 @@ class TestTemplates(cloudstackTestCase): ) return - @attr(tags = ["advanced", "advancedns", "multizone"]) - def test_02_copy_template(self): - """Test for copy template from one zone to another""" - - # Validate the following - # 1. copy template should be successful and - # secondary storage should contain new copied template. - - self.debug( - "Copying template from zone: %s to %s" % ( - self.template.id, - self.services["destzoneid"] - )) - cmd = copyTemplate.copyTemplateCmd() - cmd.id = self.template.id - cmd.destzoneid = self.services["destzoneid"] - cmd.sourcezoneid = self.zone.id - self.apiclient.copyTemplate(cmd) - - # Verify template is copied to another zone using ListTemplates - list_template_response = list_templates( - self.apiclient, - templatefilter=\ - self.services["templatefilter"], - id=self.template.id, - zoneid=self.services["destzoneid"] - ) - self.assertEqual( - isinstance(list_template_response, list), - True, - "Check for list template response return valid list" - ) - - self.assertNotEqual( - len(list_template_response), - 0, - "Check template extracted in List Templates" - ) - - template_response = list_template_response[0] - self.assertEqual( - template_response.id, - self.template.id, - "Check ID of the downloaded template" - ) - self.assertEqual( - template_response.zoneid, - self.services["destzoneid"], - "Check zone ID of the copied template" - ) - - # Cleanup- Delete the copied template - timeout = self.services["timeout"] - while True: - time.sleep(self.services["sleep"]) - list_template_response = list_templates( - self.apiclient, - templatefilter=\ - self.services["templatefilter"], - id=self.template_2.id, - zoneid=self.services["destzoneid"] - ) - self.assertEqual( - isinstance(list_template_response, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - len(list_template_response), - 0, - "Check template extracted in List Templates" - ) - - template_response = list_template_response[0] - if template_response.isready == True: - break - - if timeout == 0: - raise Exception( - "Failed to download copied template(ID: %s)" % template_response.id) - - timeout = timeout - 1 - 
cmd = deleteTemplate.deleteTemplateCmd() - cmd.id = self.template.id - cmd.zoneid = self.services["destzoneid"] - self.apiclient.deleteTemplate(cmd) - return - @attr(tags = ["advanced", "advancedns"]) def test_03_delete_template(self): """Test Delete template diff --git a/test/integration/component/test_vm_passwdenabled.py b/test/integration/component/test_vm_passwdenabled.py index e89253c407a..e22a1a0a75a 100644 --- a/test/integration/component/test_vm_passwdenabled.py +++ b/test/integration/component/test_vm_passwdenabled.py @@ -90,8 +90,8 @@ class TestVMPasswordEnabled(cloudstackTestCase): cls.services["ostype"] ) # Set Zones and disk offerings - cls.services["small"]["zoneid"] = zone.id - cls.services["small"]["template"] = template.id + cls.services["service_offerings"]["small"]["zoneid"] = zone.id + cls.services["service_offerings"]["small"]["template"] = template.id # Create VMs, NAT Rules etc cls.account = Account.create( diff --git a/test/integration/component/test_vpc_host_maintenance.py b/test/integration/component/test_vpc_host_maintenance.py index 1cce2764fe8..d28b7985b9b 100644 --- a/test/integration/component/test_vpc_host_maintenance.py +++ b/test/integration/component/test_vpc_host_maintenance.py @@ -186,7 +186,6 @@ class Services: } -@unittest.skip("No suitable setup available for testing") class TestVMLifeCycleHostmaintenance(cloudstackTestCase): @classmethod @@ -562,7 +561,6 @@ class TestVMLifeCycleHostmaintenance(cloudstackTestCase): return -@unittest.skip("No suitable setup available for testing") class TestVPCNetworkRules(cloudstackTestCase): @classmethod diff --git a/test/integration/component/test_vpc_network_lbrules.py b/test/integration/component/test_vpc_network_lbrules.py index b4a66070d5b..66d6c4d4018 100644 --- a/test/integration/component/test_vpc_network_lbrules.py +++ b/test/integration/component/test_vpc_network_lbrules.py @@ -505,7 +505,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return nwacl_internet_1 @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_01_VPC_LBRulesListing(self): """ Test case no 210 and 227: List Load Balancing Rules belonging to a VPC """ @@ -551,7 +550,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_02_VPC_LBRulesAndVMListing(self): """ Test case no 211 and 228: List only VMs suitable for the Virtual Network on VPC for LB Rule """ @@ -595,7 +593,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_03_VPC_CreateLBRuleInMultipleNetworks(self): """ Test case no 212 : Create LB rules for 1 network which is part of a two/multiple virtual networks of a VPC using a new Public IP Address available with the VPC when the Virtual Router is in Running State @@ -623,7 +620,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_04_VPC_CreateLBRuleInMultipleNetworksVRStoppedState(self): """ Test case no 222 : Create LB rules for a two/multiple virtual networks of a VPC using a new Public IP Address available with the VPC when the Virtual Router is in Stopped State @@ -651,7 +647,6 @@ class 
TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_05_VPC_CreateAndDeleteLBRule(self): """ Test case no 214 : Delete few(not all) LB rules for a single virtual network of a VPC belonging to a single Public IP Address when the Virtual Router is in Running State @@ -683,7 +678,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_06_VPC_CreateAndDeleteLBRuleVRStopppedState(self): """ Test case no 224 : Delete few(not all) LB rules for a single virtual network of a VPC belonging to a single Public IP Address when the Virtual Router is in Stopped State @@ -715,7 +709,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_07_VPC_CreateAndDeleteAllLBRule(self): """ Test case no 215 : Delete all LB rules for a single virtual network of a VPC belonging to a single Public IP Address when the Virtual Router is in Running State @@ -749,7 +742,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_08_VPC_CreateAndDeleteAllLBRuleVRStoppedState(self): """ Test case no 225 and 226 : Delete all LB rules for a single virtual network of a VPC belonging to a single Public IP Address when the Virtual Router is in Stopped State @@ -783,7 +775,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_09_VPC_LBRuleCreateFailMultipleVPC(self): """ Test case no 234 : User should not be allowed to create a LB rule for a VM that belongs to a different VPC. """ @@ -822,7 +813,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_10_VPC_FailedToCreateLBRuleNonVPCNetwork(self): """ Test case no 216 and 235: User should not be allowed to create a LB rule for a VM that does not belong to any VPC. """ @@ -860,7 +850,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_11_VPC_LBRuleCreateNotAllowed(self): """ Test case no 217 and 236: User should not be allowed to create a LB rule for a VM that does not belong to the same network but belongs to the same VPC. @@ -899,7 +888,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_12_VPC_LBRuleCreateFailForRouterIP(self): """ Test case no 218 and 237: User should not be allowed to create a LB rule on an Ipaddress that Source Nat enabled. 
""" @@ -928,7 +916,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_13_VPC_LBRuleCreateFailForPFSourceNATIP(self): """ Test case no 219 : User should not be allowed to create a LB rule on an Ipaddress that already has a PF rule. """ @@ -959,7 +946,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_14_VPC_LBRuleCreateFailForStaticNatRule(self): """ Test case no 220 : User should not be allowed to create a LB rule on an Ipaddress that already has a Static Nat rule. """ @@ -990,7 +976,6 @@ class TestVPCNetworkLBRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_15_VPC_RleaseIPForLBRuleCreated(self): """ Test case no 221 : Release Ip address that has a LB rule assigned to it. """ diff --git a/test/integration/component/test_vpc_network_pfrules.py b/test/integration/component/test_vpc_network_pfrules.py index 56792f49d00..c0c2b86426c 100644 --- a/test/integration/component/test_vpc_network_pfrules.py +++ b/test/integration/component/test_vpc_network_pfrules.py @@ -553,7 +553,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_03_network_services_VPC_StopCreateMultiplePF(self): """ Test case no 205 : Create PF rules for a two/multiple virtual networks of a VPC using a new Public IP Address available with the VPC when Virtual Router is in Stopped State @@ -587,7 +586,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_04_network_services_VPC_CreateMultiplePF(self): """ Test case no 191 : Create PF rules for a two/multiple virtual networks of a VPC using a new Public IP Address available with the VPC when Virtual Router is in Running State @@ -620,7 +618,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_05_network_services_VPC_StopDeletePF(self): """ Test case no 207 : Delete few(not all) PF rules for a single virtual network of a VPC belonging to a single Public IP Address when Virtual Router is in Stopped State @@ -654,7 +651,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_06_network_services_VPC_DeletePF(self): """ Test case no 193 : Delete few(not all) PF rules for a single virtual network of a VPC belonging to a single Public IP Address when Virtual Router is in Running State @@ -684,7 +680,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_07_network_services_VPC_StopDeleteAllPF(self): """ Test case no 208 : Delete all PF rules for a single virtual network of a VPC belonging to a single Public IP 
Address when Virtual Router is in Stopped State @@ -721,7 +716,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_08_network_services_VPC_DeleteAllPF(self): """ Test case no 194 : Delete all PF rules for a single virtual network of a VPC belonging to a single Public IP Address when Virtual Router is in Running State @@ -754,7 +748,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_09_network_services_VPC_StopDeleteAllMultiplePF(self): """ Test case no 209 : Delete all PF rules for two/multiple virtual networks of a VPC. Observe the status of the Public IP Addresses of the rules when Virtual Router is in Stopped State @@ -817,7 +810,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_10_network_services_VPC_DeleteAllMultiplePF(self): """ Test case no 195: Delete all PF rules for two/multiple virtual networks of a VPC. Observe the status of the Public IP Addresses of the rules when Virtual Router is in Running State diff --git a/test/integration/component/test_vpc_network_staticnatrule.py b/test/integration/component/test_vpc_network_staticnatrule.py index aceca62d1fb..bed1b5298b3 100644 --- a/test/integration/component/test_vpc_network_staticnatrule.py +++ b/test/integration/component/test_vpc_network_staticnatrule.py @@ -552,7 +552,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_03_VPC_StopCreateMultipleStaticNatRuleStopppedState(self): """ Test case no extra : Create Static Nat Rule rules for a two/multiple virtual networks of a VPC using a new Public IP Address available with the VPC when Virtual Router is in Stopped State @@ -586,7 +585,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_04_VPC_CreateMultipleStaticNatRule(self): """ Test case no 230 : Create Static NAT Rules for a two/multiple virtual networks of a VPC using a new Public IP Address available with the VPC when the Virtual Router is in Running State @@ -619,7 +617,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_05_network_services_VPC_DeleteAllPF(self): """ Test case no 232: Delete all Static NAT Rules for a single virtual network of a VPC belonging to a single Public IP Address when the Virtual Router is in Running State @@ -651,7 +648,6 @@ class TestVPCNetworkPFRules(cloudstackTestCase): return @attr(tags=["advanced", "intervlan"]) - @unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.") def test_06_network_services_VPC_DeleteAllMultiplePF(self): """ Test case no 233: Delete all Static NAT rules for two/multiple virtual networks of a VPC. Observe the status of the Public IP Addresses of the rules when the Virtual Router is in Running State. 
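For context, a minimal sketch (not part of the patch) of the decorator pattern these hunks toggle: @attr feeds nose's attribute plugin so suites can be selected by tag, while @unittest.skip parks a test; removing the skip, as done above, re-enables the case without touching its tags. Class and test names here are illustrative only:

import unittest
from nose.plugins.attrib import attr

class ExampleVPCRuleTest(unittest.TestCase):

    @attr(tags=["advanced", "intervlan"])
    def test_enabled_case(self):
        """Runs when selected, e.g. with: nosetests -a tags=intervlan"""
        self.assertTrue(True)

    @attr(tags=["advanced", "intervlan"])
    @unittest.skip("Parked: setup cannot provision multiple VPC tiers")
    def test_parked_case(self):
        """Skipped until the environment limitation is resolved"""
        self.assertTrue(True)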
diff --git a/test/integration/smoke/test_internal_lb.py b/test/integration/smoke/test_internal_lb.py index ae64297bf1c..0535d6a5345 100644 --- a/test/integration/smoke/test_internal_lb.py +++ b/test/integration/smoke/test_internal_lb.py @@ -22,229 +22,166 @@ from marvin.cloudstackAPI import * from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * +from nose.plugins.attrib import attr + + +class Services: + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + "password": "password", + }, + "virtual_machine": { + "displayname": "Test VM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "ostype": 'CentOS 5.3 (64-bit)', + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 256, + }, + "network_offering": { + "name": "Network offering for internal lb service", + "displaytext": "Network offering for internal lb service", + "guestiptype": "Isolated", + "traffictype": "Guest", + "supportedservices": "Vpn,Dhcp,Dns,Lb,UserData,SourceNat,StaticNat,PortForwarding,NetworkACL", + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "Dns": "VpcVirtualRouter", + "Vpn": "VpcVirtualRouter", + "UserData": "VpcVirtualRouter", + "Lb": "InternalLbVM", + "SourceNat": "VpcVirtualRouter", + "StaticNat": "VpcVirtualRouter", + "PortForwarding": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter", + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "peraccount"}, + "Lb": {"lbSchemes": "internal", "SupportedLbIsolation": "dedicated"} + } + } + } class TestInternalLb(cloudstackTestCase): - networkOfferingId = None - networkId = None - vmId = None - lbId = None + """Test Internal LB + """ - zoneId = 1 - serviceOfferingId = 1 - templateId = 5 + @classmethod + def setUpClass(cls): + cls.apiclient = super(TestInternalLb, cls).getClsTestClient().getApiClient() + cls.services = Services().services + cls.zone = get_zone(cls.apiclient, cls.services) + cls.domain = get_domain(cls.apiclient) + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offering"] + ) + cls.account = Account.create(cls.apiclient, services=cls.services["account"]) + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"] + ) + cls.debug("Successfully created account: %s, id: \ + %s" % (cls.account.name,\ + cls.account.id)) + cls.cleanup = [cls.account] - - serviceProviderList = [ - { - "provider": "VpcVirtualRouter", - "service": "Vpn" - }, - { - "provider": "VpcVirtualRouter", - "service": "UserData" - }, - { - "provider": "VpcVirtualRouter", - "service": "Dhcp" - }, - { - "provider": "VpcVirtualRouter", - "service": "Dns" - }, - { - "provider": "InternalLbVM", - "service": "Lb" - }, - { - "provider": "VpcVirtualRouter", - "service": "SourceNat" - }, - { - "provider": "VpcVirtualRouter", - "service": "StaticNat" - }, - { - "provider": "VpcVirtualRouter", - "service": "PortForwarding" - }, - { - "provider": "VpcVirtualRouter", - "service": "NetworkACL" - } - ] - - serviceCapsList = [ - { - "service": "SourceNat", - "capabilitytype": "SupportedSourceNatTypes", - "capabilityvalue": "peraccount" - }, - { - "service": "Lb", - "capabilitytype": "SupportedLbIsolation", - "capabilityvalue": "dedicated" - 
}, - { - "service": "Lb", - "capabilitytype": "lbSchemes", - "capabilityvalue": "internal" - } - ] - - def setUp(self): - self.apiClient = self.testClient.getApiClient() - - - + @attr(tags=["smoke", "advanced"]) def test_internallb(self): + """Test create, delete, assign, remove of internal loadbalancer + """ #1) Create and enable network offering with Internal Lb vm service - self.createNetworkOffering() - + self.networkOffering = NetworkOffering.create(self.apiclient, self.services["network_offering"], conservemode=False) + self.networkOffering.update(self.apiclient, state="Enabled") + #2) Create VPC and network in it - self.createNetwork() - - #3) Deploy a vm - self.deployVm() + vpcOffering = VpcOffering.list(self.apiclient) + self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found") + self.services["vpc"] = {} + self.services["vpc"]["name"] = "vpc-internallb" + self.services["vpc"]["displaytext"] = "vpc-internallb" + self.services["vpc"]["cidr"] = "10.1.1.0/24" + vpc = VPC.create( + apiclient=self.apiclient, + services=self.services["vpc"], + networkDomain="vpc.internallb", + vpcofferingid=vpcOffering[0].id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.domain.id + ) + self.assert_(vpc is not None, "VPC creation failed") + self.services["vpcnetwork"] = {} + self.services["vpcnetwork"]["name"] = "vpcntwk" + self.services["vpcnetwork"]["displaytext"] = "vpcntwk" + ntwk = Network.create( + apiclient=self.apiclient, + services=self.services["vpcnetwork"], + accountid=self.account.name, + domainid=self.domain.id, + networkofferingid=self.networkOffering.id, + zoneid=self.zone.id, + vpcid=vpc.id, + gateway="10.1.1.1", + netmask="255.255.255.192" + ) + self.assertIsNotNone(ntwk, "Network failed to create") + self.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id)) + + #3) Deploy a vm + self.services["virtual_machine"]["networkids"] = ntwk.id + vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"], + templateid=self.template.id, + zoneid=self.zone.id, + accountid=self.account.name, + domainid= self.domain.id, + serviceofferingid=self.service_offering.id, + ) + self.assert_(vm is not None, "VM failed to deploy") + self.assert_(vm.state == 'Running', "VM is not running") + self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id)) #4) Create an Internal Load Balancer - self.createInternalLoadBalancer() + applb = ApplicationLoadBalancer.create(self.apiclient, services=self.services, + name="lbrule", + sourceport=22, + instanceport=22, + algorithm="roundrobin", + scheme="internal", + sourcenetworkid=ntwk.id, + networkid=ntwk.id) #5) Assign the VM to the Internal Load Balancer - self.assignToLoadBalancerRule() + applb.assign(self.apiclient, vms=[vm]) #6) Remove the vm from the Interanl Load Balancer - self.removeFromLoadBalancerRule() + applb.remove(self.apiclient, vms=[vm]) #7) Delete the Load Balancer - self.deleteLoadBalancer() + applb.delete(self.apiclient) + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls.cleanup) + except Exception, e: + raise Exception("Cleanup failed with %s" % e) - def deployVm(self): - deployVirtualMachineCmd = deployVirtualMachine.deployVirtualMachineCmd() - deployVirtualMachineCmd.networkids = TestInternalLb.networkId - deployVirtualMachineCmd.serviceofferingid = TestInternalLb.serviceOfferingId - deployVirtualMachineCmd.zoneid = TestInternalLb.zoneId - deployVirtualMachineCmd.templateid = TestInternalLb.templateId - deployVirtualMachineCmd.hypervisor 
= "XenServer" - deployVMResponse = self.apiClient.deployVirtualMachine(deployVirtualMachineCmd) - TestInternalLb.vmId = deployVMResponse.id - - - def createInternalLoadBalancer(self): - createLoadBalancerCmd = createLoadBalancer.createLoadBalancerCmd() - createLoadBalancerCmd.name = "lb rule" - createLoadBalancerCmd.sourceport = 22 - createLoadBalancerCmd.instanceport = 22 - createLoadBalancerCmd.algorithm = "roundrobin" - createLoadBalancerCmd.scheme = "internal" - createLoadBalancerCmd.sourceipaddressnetworkid = TestInternalLb.networkId - createLoadBalancerCmd.networkid = TestInternalLb.networkId - createLoadBalancerResponse = self.apiClient.createLoadBalancer(createLoadBalancerCmd) - TestInternalLb.lbId = createLoadBalancerResponse.id - self.assertIsNotNone(createLoadBalancerResponse.id, "Failed to create a load balancer") - - - def assignToLoadBalancerRule(self): - assignToLoadBalancerRuleCmd = assignToLoadBalancerRule.assignToLoadBalancerRuleCmd() - assignToLoadBalancerRuleCmd.id = TestInternalLb.lbId - assignToLoadBalancerRuleCmd.virtualMachineIds = TestInternalLb.vmId - assignToLoadBalancerRuleResponse = self.apiClient.assignToLoadBalancerRule(assignToLoadBalancerRuleCmd) - self.assertTrue(assignToLoadBalancerRuleResponse.success, "Failed to assign the vm to the load balancer") - - - - def removeFromLoadBalancerRule(self): - removeFromLoadBalancerRuleCmd = removeFromLoadBalancerRule.removeFromLoadBalancerRuleCmd() - removeFromLoadBalancerRuleCmd.id = TestInternalLb.lbId - removeFromLoadBalancerRuleCmd.virtualMachineIds = TestInternalLb.vmId - removeFromLoadBalancerRuleResponse = self.apiClient.removeFromLoadBalancerRule(removeFromLoadBalancerRuleCmd) - self.assertTrue(removeFromLoadBalancerRuleResponse.success, "Failed to remove the vm from the load balancer") - - - - #def removeInternalLoadBalancer(self): - def deleteLoadBalancer(self): - deleteLoadBalancerCmd = deleteLoadBalancer.deleteLoadBalancerCmd() - deleteLoadBalancerCmd.id = TestInternalLb.lbId - deleteLoadBalancerResponse = self.apiClient.deleteLoadBalancer(deleteLoadBalancerCmd) - self.assertTrue(deleteLoadBalancerResponse.success, "Failed to remove the load balancer") - - - - def createNetwork(self): - createVPCCmd = createVPC.createVPCCmd() - createVPCCmd.name = "new vpc" - createVPCCmd.cidr = "10.1.1.0/24" - createVPCCmd.displaytext = "new vpc" - createVPCCmd.vpcofferingid = 1 - createVPCCmd.zoneid = self.zoneId - createVPCResponse = self.apiClient.createVPC(createVPCCmd) - - - createNetworkCmd = createNetwork.createNetworkCmd() - createNetworkCmd.name = "vpc network" - createNetworkCmd.displaytext = "vpc network" - createNetworkCmd.netmask = "255.255.255.0" - createNetworkCmd.gateway = "10.1.1.1" - createNetworkCmd.zoneid = self.zoneId - createNetworkCmd.vpcid = createVPCResponse.id - createNetworkCmd.networkofferingid = TestInternalLb.networkOfferingId - createNetworkResponse = self.apiClient.createNetwork(createNetworkCmd) - TestInternalLb.networkId = createNetworkResponse.id - - self.assertIsNotNone(createNetworkResponse.id, "Network failed to create") - - - def createNetworkOffering(self): - createNetworkOfferingCmd = createNetworkOffering.createNetworkOfferingCmd() - createNetworkOfferingCmd.name = "Network offering for internal lb service - " + str(random.randrange(1,100+1)) - createNetworkOfferingCmd.displaytext = "Network offering for internal lb service" - createNetworkOfferingCmd.guestiptype = "isolated" - createNetworkOfferingCmd.traffictype = "Guest" - createNetworkOfferingCmd.conservemode = "false" - 
createNetworkOfferingCmd.supportedservices = "Vpn,Dhcp,Dns,Lb,UserData,SourceNat,StaticNat,PortForwarding,NetworkACL" - - - createNetworkOfferingCmd.serviceproviderlist = [] - for item in self.serviceProviderList: - createNetworkOfferingCmd.serviceproviderlist.append({ - 'service': item['service'], - 'provider': item['provider'] - }) - - createNetworkOfferingCmd.servicecapabilitylist = [] - for item in self.serviceCapsList: - createNetworkOfferingCmd.servicecapabilitylist.append({ - 'service': item['service'], - 'capabilitytype': item['capabilitytype'], - 'capabilityvalue': item['capabilityvalue'] - }) - - - createNetworkOfferingResponse = self.apiClient.createNetworkOffering(createNetworkOfferingCmd) - TestInternalLb.networkOfferingId = createNetworkOfferingResponse.id - - #enable network offering - updateNetworkOfferingCmd = updateNetworkOffering.updateNetworkOfferingCmd() - updateNetworkOfferingCmd.id = TestInternalLb.networkOfferingId - updateNetworkOfferingCmd.state = "Enabled" - updateNetworkOfferingResponse = self.apiClient.updateNetworkOffering(updateNetworkOfferingCmd) - - - #list network offering to see if its enabled - listNetworkOfferingsCmd = listNetworkOfferings.listNetworkOfferingsCmd() - listNetworkOfferingsCmd.id = TestInternalLb.networkOfferingId - listOffResponse = self.apiClient.listNetworkOfferings(listNetworkOfferingsCmd) - - self.assertNotEqual(len(listOffResponse), 0, "Check if the list network offerings API \ - returns a non-empty response") - - - def tearDown(self): - #destroy the vm - if TestInternalLb.vmId is not None: - destroyVirtualMachineCmd = destroyVirtualMachine.destroyVirtualMachineCmd() - destroyVirtualMachineCmd.id = TestInternalLb.vmId - destroyVirtualMachineResponse = self.apiClient.destroyVirtualMachine(destroyVirtualMachineCmd) diff --git a/test/integration/smoke/test_iso.py b/test/integration/smoke/test_iso.py index c645d3b055d..75289b8fbe3 100644 --- a/test/integration/smoke/test_iso.py +++ b/test/integration/smoke/test_iso.py @@ -485,7 +485,7 @@ class TestISO(cloudstackTestCase): if len(self.zones) <= 1: self.skipTest("Not enough zones available to perform copy template") - self.services["destzoneid"] = filter(lambda z: z.id != self.zone.id, self.zones)[0] + self.services["destzoneid"] = filter(lambda z: z.id != self.zone.id, self.zones)[0].id self.debug("Copy ISO from %s to %s" % ( self.zone.id, diff --git a/test/integration/smoke/test_network.py b/test/integration/smoke/test_network.py index 61ddf46e9ef..a65748d2e97 100644 --- a/test/integration/smoke/test_network.py +++ b/test/integration/smoke/test_network.py @@ -189,8 +189,7 @@ class TestPublicIP(cloudstackTestCase): @attr(tags = ["advanced", "advancedns", "smoke"]) def test_public_ip_admin_account(self): - """Test for Associate/Disassociate - public IP address for admin account""" + """Test for Associate/Disassociate public IP address for admin account""" # Validate the following: # 1. listPubliIpAddresses API returns the list of acquired addresses @@ -240,8 +239,7 @@ class TestPublicIP(cloudstackTestCase): @attr(tags = ["advanced", "advancedns", "smoke"]) def test_public_ip_user_account(self): - """Test for Associate/Disassociate - public IP address for user account""" + """Test for Associate/Disassociate public IP address for user account""" # Validate the following: # 1. 
listPubliIpAddresses API returns the list of acquired addresses @@ -885,102 +883,102 @@ class TestLoadBalancingRule(cloudstackTestCase): # Check if VM is in Running state before creating LB rule vm_response = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) + self.apiclient, + account=self.account.name, + domainid=self.account.domainid + ) self.assertEqual( - isinstance(vm_response, list), - True, - "Check list VM returns a valid list" - ) + isinstance(vm_response, list), + True, + "Check list VM returns a valid list" + ) self.assertNotEqual( - len(vm_response), - 0, - "Check Port Forwarding Rule is created" - ) + len(vm_response), + 0, + "Check Port Forwarding Rule is created" + ) for vm in vm_response: self.assertEqual( - vm.state, - 'Running', - "VM state should be Running before creating a NAT rule." - ) + vm.state, + 'Running', + "VM state should be Running before creating a NAT rule." + ) #Create Load Balancer rule and assign VMs to rule lb_rule = LoadBalancerRule.create( - self.apiclient, - self.services["lbrule"], - self.non_src_nat_ip.ipaddress.id, - accountid=self.account.name - ) + self.apiclient, + self.services["lbrule"], + self.non_src_nat_ip.ipaddress.id, + accountid=self.account.name + ) self.cleanup.append(lb_rule) lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) lb_rules = list_lb_rules( - self.apiclient, - id=lb_rule.id - ) + self.apiclient, + id=lb_rule.id + ) self.assertEqual( - isinstance(lb_rules, list), - True, - "Check list response returns a valid list" - ) + isinstance(lb_rules, list), + True, + "Check list response returns a valid list" + ) #verify listLoadBalancerRules lists the added load balancing rule self.assertNotEqual( - len(lb_rules), - 0, - "Check Load Balancer Rule in its List" - ) + len(lb_rules), + 0, + "Check Load Balancer Rule in its List" + ) self.assertEqual( - lb_rules[0].id, - lb_rule.id, - "Check List Load Balancer Rules returns valid Rule" - ) + lb_rules[0].id, + lb_rule.id, + "Check List Load Balancer Rules returns valid Rule" + ) # listLoadBalancerRuleInstances should list # all instances associated with that LB rule lb_instance_rules = list_lb_instances( - self.apiclient, - id=lb_rule.id - ) + self.apiclient, + id=lb_rule.id + ) self.assertEqual( - isinstance(lb_instance_rules, list), - True, - "Check list response returns a valid list" - ) + isinstance(lb_instance_rules, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - len(lb_instance_rules), - 0, - "Check Load Balancer instances Rule in its List" - ) + len(lb_instance_rules), + 0, + "Check Load Balancer instances Rule in its List" + ) self.assertIn( - lb_instance_rules[0].id, - [self.vm_1.id, self.vm_2.id], - "Check List Load Balancer instances Rules returns valid VM ID" - ) + lb_instance_rules[0].id, + [self.vm_1.id, self.vm_2.id], + "Check List Load Balancer instances Rules returns valid VM ID" + ) self.assertIn( - lb_instance_rules[1].id, - [self.vm_1.id, self.vm_2.id], - "Check List Load Balancer instances Rules returns valid VM ID" - ) + lb_instance_rules[1].id, + [self.vm_1.id, self.vm_2.id], + "Check List Load Balancer instances Rules returns valid VM ID" + ) try: self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" % - ( - self.non_src_nat_ip.ipaddress, - self.vm_1.id, - self.vm_2.id - )) + ( + self.non_src_nat_ip.ipaddress.ipaddress, + self.vm_1.id, + self.vm_2.id + )) ssh_1 = remoteSSHClient( - self.non_src_nat_ip.ipaddress, - 
self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) + self.non_src_nat_ip.ipaddress.ipaddress, + self.services['lbrule']["publicport"], + self.vm_1.username, + self.vm_1.password + ) # If Round Robin Algorithm is chosen, # each ssh command should alternate between VMs @@ -989,71 +987,71 @@ class TestLoadBalancingRule(cloudstackTestCase): time.sleep(self.services["lb_switch_wait"]) self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" % - ( - self.non_src_nat_ip.ipaddress, - self.vm_1.id, - self.vm_2.id - )) + ( + self.non_src_nat_ip.ipaddress.ipaddress, + self.vm_1.id, + self.vm_2.id + )) ssh_2 = remoteSSHClient( - self.non_src_nat_ip.ipaddress, - self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) + self.non_src_nat_ip.ipaddress.ipaddress, + self.services['lbrule']["publicport"], + self.vm_1.username, + self.vm_1.password + ) hostnames.append(ssh_2.execute("hostname")[0]) self.debug("Hostnames after adding 2 VMs to LB rule: %s" % str(hostnames)) self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) self.assertIn( - self.vm_2.name, - hostnames, - "Check if ssh succeeded for server2" - ) + self.vm_2.name, + hostnames, + "Check if ssh succeeded for server2" + ) #SSH should pass till there is a last VM associated with LB rule lb_rule.remove(self.apiclient, [self.vm_2]) self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % - ( - self.non_src_nat_ip.ipaddress, - self.vm_2.id - )) + ( + self.non_src_nat_ip.ipaddress.ipaddress, + self.vm_2.id + )) ssh_1 = remoteSSHClient( - self.non_src_nat_ip.ipaddress, - self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) + self.non_src_nat_ip.ipaddress.ipaddress, + self.services['lbrule']["publicport"], + self.vm_1.username, + self.vm_1.password + ) hostnames.append(ssh_1.execute("hostname")[0]) self.debug("Hostnames after removing VM2: %s" % str(hostnames)) except Exception as e: self.fail("%s: SSH failed for VM with IP Address: %s" % - (e, self.non_src_nat_ip.ipaddress)) + (e, self.non_src_nat_ip.ipaddress.ipaddress)) self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) lb_rule.remove(self.apiclient, [self.vm_1]) with self.assertRaises(Exception): - self.fail("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % - ( - self.non_src_nat_ip.ipaddress, - self.vm_1.id - )) + self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % + ( + self.non_src_nat_ip.ipaddress.ipaddress, + self.vm_1.id + )) ssh_1 = remoteSSHClient( - self.non_src_nat_ip.ipaddress, - self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) + self.non_src_nat_ip.ipaddress.ipaddress, + self.services['lbrule']["publicport"], + self.vm_1.username, + self.vm_1.password + ) ssh_1.execute("hostname")[0] return @@ -1203,7 +1201,7 @@ class TestRebootRouter(cloudstackTestCase): self.debug("SSH into VM (ID : %s ) after reboot" % self.vm_1.id) remoteSSHClient( - self.nat_rule.ipaddress, + self.nat_rule.ipaddress.ipaddress, self.services["natrule"]["publicport"], self.vm_1.username, self.vm_1.password @@ -1211,8 +1209,7 @@ class TestRebootRouter(cloudstackTestCase): except Exception as e: self.fail( "SSH Access failed for %s: %s" % \ - (self.vm_1.ipaddress, e) 
- ) + (self.nat_rule.ipaddress.ipaddress, e)) return def tearDown(self): @@ -1557,7 +1554,7 @@ class TestReleaseIP(cloudstackTestCase): @attr(tags = ["advanced", "advancedns", "smoke"]) def test_releaseIP(self): - """Test for Associate/Disassociate public IP address""" + """Test for release public IP address""" self.debug("Deleting Public IP : %s" % self.ip_addr.id) @@ -1579,7 +1576,6 @@ class TestReleaseIP(cloudstackTestCase): "Check if disassociated IP Address is no longer available" ) - self.debug("List NAT Rule response" + str(list_nat_rule)) # ListPortForwardingRules should not list # associated rules with Public IP address try: @@ -1587,18 +1583,18 @@ class TestReleaseIP(cloudstackTestCase): self.apiclient, id=self.nat_rule.id ) + self.debug("List NAT Rule response" + str(list_nat_rule)) except cloudstackAPIException: self.debug("Port Forwarding Rule is deleted") # listLoadBalancerRules should not list # associated rules with Public IP address - self.debug("List LB Rule response" + str(list_lb_rule)) try: list_lb_rule = list_lb_rules( self.apiclient, id=self.lb_rule.id ) - + self.debug("List LB Rule response" + str(list_lb_rule)) except cloudstackAPIException: self.debug("Port Forwarding Rule is deleted") @@ -1658,7 +1654,6 @@ class TestDeleteAccount(cloudstackTestCase): try: src_nat_ip_addr = src_nat_ip_addrs[0] - except Exception as e: self.fail("SSH failed for VM with IP: %s" % src_nat_ip_addr.ipaddress) @@ -1740,10 +1735,9 @@ class TestDeleteAccount(cloudstackTestCase): "Check routers are properly deleted." ) except Exception as e: - raise Exception( - "Exception raised while fetching routers for account: %s" % - self.account.name) + "Encountered %s raised while fetching routers for account: %s" % (e, + self.account.name)) return def tearDown(self): diff --git a/test/integration/smoke/test_network_acl.py b/test/integration/smoke/test_network_acl.py index 66f0a6fc1c5..4b3c1f70b67 100644 --- a/test/integration/smoke/test_network_acl.py +++ b/test/integration/smoke/test_network_acl.py @@ -22,97 +22,156 @@ from marvin.cloudstackAPI import * from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * +from nose.plugins.attrib import attr + +class Services: + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + "password": "password", + }, + "virtual_machine": { + "displayname": "Test VM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "ostype": 'CentOS 5.3 (64-bit)', + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 256, + }, + "network_offering": { + "name": "Network offering for internal lb service", + "displaytext": "Network offering for internal lb service", + "guestiptype": "Isolated", + "traffictype": "Guest", + "supportedservices": "Vpn,Dhcp,Dns,Lb,UserData,SourceNat,StaticNat,PortForwarding,NetworkACL", + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "Dns": "VpcVirtualRouter", + "Vpn": "VpcVirtualRouter", + "UserData": "VpcVirtualRouter", + "Lb": "InternalLbVM", + "SourceNat": "VpcVirtualRouter", + "StaticNat": "VpcVirtualRouter", + "PortForwarding": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter", + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "peraccount"}, + "Lb": 
{"lbSchemes": "internal", "SupportedLbIsolation": "dedicated"} + } + } + } class TestNetworkACL(cloudstackTestCase): - networkOfferingId = 11 - networkId = None - vmId = None - vpcId = None - aclId = None - zoneId = 1 - serviceOfferingId = 1 - templateId = 5 + @classmethod + def setUpClass(cls): + cls.apiclient = super(TestNetworkACL, cls).getClsTestClient().getApiClient() + cls.services = Services().services + cls.zone = get_zone(cls.apiclient, cls.services) + cls.domain = get_domain(cls.apiclient) + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offering"] + ) + cls.account = Account.create(cls.apiclient, services=cls.services["account"]) + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"] + ) + cls.debug("Successfully created account: %s, id: \ + %s" % (cls.account.name,\ + cls.account.id)) + cls.cleanup = [cls.account] - def setUp(self): - self.apiClient = self.testClient.getApiClient() + @attr(tags=["advanced"]) + def test_network_acl(self): + """Test network ACL lists and items in VPC""" - - - def test_networkAcl(self): + # 0) Get the default network offering for VPC + networkOffering = NetworkOffering.list(self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks") + self.assert_(networkOffering is not None and len(networkOffering) > 0, "No VPC based network offering") # 1) Create VPC - self.createVPC() + vpcOffering = VpcOffering.list(self.apiclient) + self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found") + self.services["vpc"] = {} + self.services["vpc"]["name"] = "vpc-networkacl" + self.services["vpc"]["displaytext"] = "vpc-networkacl" + self.services["vpc"]["cidr"] = "10.1.1.0/24" + vpc = VPC.create( + apiclient=self.apiclient, + services=self.services["vpc"], + networkDomain="vpc.networkacl", + vpcofferingid=vpcOffering[0].id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.domain.id + ) + self.assert_(vpc is not None, "VPC creation failed") - # 2) Create ACl - self.createACL() + # 2) Create ACL + aclgroup = NetworkACLList.create(apiclient=self.apiclient, services={}, name="acl", description="acl", vpcid=vpc.id) + self.assertIsNotNone(aclgroup, "Failed to create NetworkACL list") + self.debug("Created a network ACL list %s" % aclgroup.name) - # 3) Create ACl Item - self.createACLItem() + # 3) Create ACL Item + aclitem = NetworkACL.create(apiclient=self.apiclient, services={}, + protocol="TCP", number="10", action="Deny", aclid=aclgroup.id, cidrlist=["0.0.0.0/0"]) + self.assertIsNotNone(aclitem, "Network failed to aclItem") + self.debug("Added a network ACL %s to ACL list %s" % (aclitem.id, aclgroup.name)) # 4) Create network with ACL - self.createNetwork() + self.services["vpcnetwork"] = {} + self.services["vpcnetwork"]["name"] = "vpcntwk" + self.services["vpcnetwork"]["displaytext"] = "vpcntwk" + ntwk = Network.create( + apiclient=self.apiclient, + services=self.services["vpcnetwork"], + accountid=self.account.name, + domainid=self.domain.id, + networkofferingid=networkOffering[0].id, + zoneid=self.zone.id, + vpcid=vpc.id, + aclid=aclgroup.id, + gateway="10.1.1.1", + netmask="255.255.255.192" + ) + self.assertIsNotNone(ntwk, "Network failed to create") + self.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id)) + # 5) Deploy a vm - self.deployVm() + self.services["virtual_machine"]["networkids"] = ntwk.id + vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"], + templateid=self.template.id, + 
zoneid=self.zone.id, + accountid=self.account.name, + domainid= self.domain.id, + serviceofferingid=self.service_offering.id, + ) + self.assert_(vm is not None, "VM failed to deploy") + self.assert_(vm.state == 'Running', "VM is not running") + self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id)) - def createACL(self): - createAclCmd = createNetworkACLList.createNetworkACLListCmd() - createAclCmd.name = "acl1" - createAclCmd.description = "new acl" - createAclCmd.vpcId = TestNetworkACL.vpcId - createAclResponse = self.apiClient.createNetworkACLList(createAclCmd) - TestNetworkACL.aclId = createAclResponse.id - - def createACLItem(self): - createAclItemCmd = createNetworkACL.createNetworkACLCmd() - createAclItemCmd.cidr = "0.0.0.0/0" - createAclItemCmd.protocol = "TCP" - createAclItemCmd.number = "10" - createAclItemCmd.action = "Deny" - createAclItemCmd.aclId = TestNetworkACL.aclId - createAclItemResponse = self.apiClient.createNetworkACL(createAclItemCmd) - self.assertIsNotNone(createAclItemResponse.id, "Network failed to aclItem") - - def createVPC(self): - createVPCCmd = createVPC.createVPCCmd() - createVPCCmd.name = "new vpc" - createVPCCmd.cidr = "10.1.1.0/24" - createVPCCmd.displaytext = "new vpc" - createVPCCmd.vpcofferingid = 1 - createVPCCmd.zoneid = self.zoneId - createVPCResponse = self.apiClient.createVPC(createVPCCmd) - TestNetworkACL.vpcId = createVPCResponse.id - - - def createNetwork(self): - createNetworkCmd = createNetwork.createNetworkCmd() - createNetworkCmd.name = "vpc network" - createNetworkCmd.displaytext = "vpc network" - createNetworkCmd.netmask = "255.255.255.0" - createNetworkCmd.gateway = "10.1.1.1" - createNetworkCmd.zoneid = self.zoneId - createNetworkCmd.vpcid = TestNetworkACL.vpcId - createNetworkCmd.networkofferingid = TestNetworkACL.networkOfferingId - createNetworkCmd.aclId = TestNetworkACL.aclId - createNetworkResponse = self.apiClient.createNetwork(createNetworkCmd) - TestNetworkACL.networkId = createNetworkResponse.id - - self.assertIsNotNone(createNetworkResponse.id, "Network failed to create") - - def deployVm(self): - deployVirtualMachineCmd = deployVirtualMachine.deployVirtualMachineCmd() - deployVirtualMachineCmd.networkids = TestNetworkACL.networkId - deployVirtualMachineCmd.serviceofferingid = TestNetworkACL.serviceOfferingId - deployVirtualMachineCmd.zoneid = TestNetworkACL.zoneId - deployVirtualMachineCmd.templateid = TestNetworkACL.templateId - deployVirtualMachineCmd.hypervisor = "XenServer" - deployVMResponse = self.apiClient.deployVirtualMachine(deployVirtualMachineCmd) - TestNetworkACL.vmId = deployVMResponse.id - - def tearDown(self): - #destroy the vm - if TestNetworkACL.vmId is not None: - destroyVirtualMachineCmd = destroyVirtualMachine.destroyVirtualMachineCmd() - destroyVirtualMachineCmd.id = TestNetworkACL.vmId - destroyVirtualMachineResponse = self.apiClient.destroyVirtualMachine(destroyVirtualMachineCmd) + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls.cleanup) + except Exception, e: + raise Exception("Cleanup failed with %s" % e) diff --git a/test/integration/smoke/test_nicdetail.py b/test/integration/smoke/test_nicdetail.py deleted file mode 100644 index 3d8b1d62a47..00000000000 --- a/test/integration/smoke/test_nicdetail.py +++ /dev/null @@ -1,224 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -""" P1 tests for Scaling up Vm -""" -#Import Local Modules -import marvin -from marvin.cloudstackTestCase import * -from marvin.cloudstackAPI import * -from marvin.remoteSSHClient import remoteSSHClient -from marvin.integration.lib.utils import * -from marvin.integration.lib.base import * -from marvin.integration.lib.common import * -from nose.plugins.attrib import attr -#Import System modules -import time - -_multiprocess_shared_ = True -class Services: - """Test VM Life Cycle Services - """ - - def __init__(self): - self.services = { - - "account": { - "email": "test@test.com", - "firstname": "Test", - "lastname": "User", - "username": "test", - # Random characters are appended in create account to - # ensure unique username generated each time - "password": "password", - }, - "small": - # Create a small virtual machine instance with disk offering - { - "displayname": "testserver", - "username": "root", # VM creds for SSH - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - "privateport": 22, - "publicport": 22, - "protocol": 'TCP', - }, - "disk_offering": { - "displaytext": "Small", - "name": "Small", - "storagetype": "shared", - "disksize": 1 - }, - "service_offerings": - { - "small": - { - # Small service offering ID to for change VM - # service offering from medium to small - "name": "SmallInstance", - "displaytext": "SmallInstance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 256, - }, - "big": - { - # Big service offering ID to for change VM - "name": "BigInstance", - "displaytext": "BigInstance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 512, - } - }, - #Change this - "template": { - "displaytext": "xs", - "name": "xs", - "passwordenabled": False, - }, - "diskdevice": '/dev/xvdd', - # Disk device where ISO is attached to instance - "mount_dir": "/mnt/tmp", - "sleep": 60, - "timeout": 10, - #Migrate VM to hostid - "ostype": 'CentOS 5.6 (64-bit)', - # CentOS 5.3 (64-bit) - } - -class TestNicDetail(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - cls.api_client = super(TestNicDetail, cls).getClsTestClient().getApiClient() - cls.services = Services().services - - # Get Zone, Domain and templates - domain = get_domain(cls.api_client, cls.services) - zone = get_zone(cls.api_client, cls.services) - cls.services['mode'] = zone.networktype - - # Set Zone - - # Create account, service offerings, vm. 
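
For reference, the reworked tests in this patch converge on one class-level pattern: create the shared fixtures once in setUpClass, push everything created onto a cleanup list, and let tearDownClass tear it down. A minimal sketch of that pattern, with the test data inlined for brevity (the class name and the exact service keys are illustrative assumptions; the library calls mirror the ones used in these tests):

    from marvin.cloudstackTestCase import *
    from marvin.integration.lib.base import *
    from marvin.integration.lib.common import *
    from marvin.integration.lib.utils import *

    class ExampleSetupTest(cloudstackTestCase):
        # Test data inlined here; the real tests keep it in a Services class.
        services = {
            "account": {"email": "test@test.com", "firstname": "Test",
                        "lastname": "User", "username": "test",
                        "password": "password"},
            "off": {"name": "Tiny Instance", "displaytext": "Tiny Instance",
                    "cpunumber": 1, "cpuspeed": 100, "memory": 128},
        }

        @classmethod
        def setUpClass(cls):
            cls.apiclient = super(ExampleSetupTest, cls).getClsTestClient().getApiClient()
            cls.zone = get_zone(cls.apiclient, cls.services)
            cls.domain = get_domain(cls.apiclient)
            # Create the account and offering once for the whole class.
            cls.account = Account.create(cls.apiclient, cls.services["account"],
                                         domainid=cls.domain.id)
            cls.service_offering = ServiceOffering.create(cls.apiclient,
                                                          cls.services["off"])
            cls._cleanup = [cls.account, cls.service_offering]

        @classmethod
        def tearDownClass(cls):
            # Remove everything setUpClass created.
            cleanup_resources(cls.apiclient, cls._cleanup)
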
- cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=domain.id - ) - - cls.nic = "163738c7-ce3a-481d-ac68-4a8337043415"; - #how does it work - cls._cleanup = [ - cls.account - ] - - @classmethod - def tearDownClass(cls): - cls.api_client = super(TestNicDetail, cls).getClsTestClient().getApiClient() - cleanup_resources(cls.api_client, cls._cleanup) - return - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - - def tearDown(self): - #Clean up, terminate the created ISOs - cleanup_resources(self.apiclient, self.cleanup) - return - - @attr(tags = ["advanced", "xenserver"]) - def test_01_updatenicdetail(self): - """Test nic detail - """ - # Validate the following - # Scale up the vm and see if it scales to the new svc offering and is finally in running state - - self.debug("Testing ADD nic detail Nic-ID: %s " % ( - self.nic - )) - - cmd = addNicDetail.addNicDetailCmd() - cmd.name = self.nic - cmd.value = self.nic - cmd.id = self.nic - self.apiclient.addNicDetail(cmd) - - listNicDetailCmd = listNicDetails.listNicDetailsCmd() - listNicDetailCmd.id = self.nic - listNicDetailResponse = self.api_client.listVirtualMachines(listNicDetailCmd) - - self.assertNotEqual(len(listNicDetailResponse), 0, "Check if the list API \ - returns a non-empty response") - - nicdetail = listNicDetailResponse[0] - - #self.assertEqual(nicdetail.id, self.nic, "Check if the Nic returned is the same as the one we asked for") - - - self.assertEqual(nicdetail.name, self.nic, "Check if Nic has right name") - - self.assertEqual(nicdetail.value, self.nic, "Check if Nic has right value") - - #updatenicdetail - self.debug("Testing UPDATE nic detail Nic-ID: %s " % ( - self.nic - )) - cmd = updateNicDetail.updateNicDetailCmd() - cmd.name = self.nic - cmd.value = self.disk_offering.id - cmd.id = self.nic - self.apiclient.addNicDetail(cmd) - - listNicDetailCmd = listNicDetails.listNicDetailsCmd() - listNicDetailCmd.id = self.nic - listNicDetailResponse = self.api_client.listVirtualMachines(listNicDetailCmd) - - self.assertNotEqual(len(listNicDetailResponse), 0, "Check if the list API \ - returns a non-empty response") - - nicdetail = listNicDetailResponse[0] - - #self.assertEqual(nicdetail.id, self.nic, "Check if the Nic returned is the same as the one we asked for") - - - self.assertEqual(nicdetail.name, self.nic, "Check if Nic has right name") - - self.assertEqual(nicdetail.value, self.disk_offering.id, "Check if Nic has right value") - - - #remove detail - self.debug("Testing REMOVE nic detail Nic-ID: %s " % ( - self.nic - )) - cmd = removeNicDetail.removeNicDetailCmd() - cmd.name = self.nic - cmd.id = self.nic - self.apiclient.removeNicDetail(cmd) - - listNicDetailCmd = listNicDetails.listNicDetailsCmd() - listNicDetailCmd.id = self.nic - listNicDetailResponse = self.api_client.listVirtualMachines(listNicDetailCmd) - - self.assertEqual(listNicDetailResponse, None, "Check if the list API \ - returns a non-empty response") - - - return diff --git a/test/integration/smoke/test_primary_storage.py b/test/integration/smoke/test_primary_storage.py index eb747fa6588..598654da112 100644 --- a/test/integration/smoke/test_primary_storage.py +++ b/test/integration/smoke/test_primary_storage.py @@ -35,32 +35,17 @@ class Services: def __init__(self): self.services = { - "nfs": { - 0: { - "url": "nfs://192.168.100.131/testprimary", + "nfs": + { + "url": "nfs://10.147.28.7/export/home/talluri/testprimary", # Format: 
File_System_Type/Location/Path - "name": "Primary XEN", - "hypervisor": 'XEN', + "name": "Primary XEN" }, - 1: { - "url": "nfs://192.168.100.131/Primary", - "name": "Primary KVM", - "hypervisor": 'KVM', - }, - 2: { - "url": "nfs://192.168.100.131/Primary", - "name": "Primary VMWare", - "hypervisor": 'VMWare', - }, - }, "iscsi": { - 0: { "url": "iscsi://192.168.100.21/iqn.2012-01.localdomain.clo-cstack-cos6:iser/1", # Format : iscsi://IP Address/IQN number/LUN# - "name": "Primary iSCSI", - "hypervisor": 'XEN', - }, - }, + "name": "Primary iSCSI" + } } class TestPrimaryStorageServices(cloudstackTestCase): @@ -85,31 +70,27 @@ class TestPrimaryStorageServices(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - @unittest.skip("skipped - will not be adding storage in our environments") - def test_01_primary_storage(self): + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"]) + def test_01_primary_storage_nfs(self): """Test primary storage pools - XEN, KVM, VMWare """ # Validate the following: - # 1. verify hypervisortype returned by api is Xen/KVM/VMWare + # 1. List Clusters # 2. verify that the cluster is in 'Enabled' allocation state # 3. verify that the host is added successfully and # in Up state with listHosts api response #Create NFS storage pools with on XEN/KVM/VMWare clusters - for k, v in self.services["nfs"].items(): - clusters = list_clusters( - self.apiclient, - zoneid=self.zone.id, - hypervisortype=v["hypervisor"] - ) - self.assertEqual( - isinstance(clusters, list), - True, - "Check list response returns a valid list" - ) - cluster = clusters[0] + + clusters = list_clusters( + self.apiclient, + zoneid=self.zone.id + ) + assert isinstance(clusters,list) and len(clusters)>0 + for cluster in clusters: + #Host should be present before adding primary storage list_hosts_response = list_hosts( self.apiclient, @@ -124,11 +105,11 @@ class TestPrimaryStorageServices(cloudstackTestCase): self.assertNotEqual( len(list_hosts_response), 0, - "Check list Hosts for hypervisor: " + v["hypervisor"] + "Check list Hosts in the cluster: " + cluster.name ) storage = StoragePool.create(self.apiclient, - v, + self.services["nfs"], clusterid=cluster.id, zoneid=self.zone.id, podid=self.pod.id @@ -140,13 +121,13 @@ class TestPrimaryStorageServices(cloudstackTestCase): self.assertEqual( storage.state, 'Up', - "Check primary storage state for hypervisor: " + v["hypervisor"] + "Check primary storage state " ) self.assertEqual( storage.type, 'NetworkFilesystem', - "Check storage pool type for hypervisor : " + v["hypervisor"] + "Check storage pool type " ) #Verify List Storage pool Response has newly added storage pool @@ -169,45 +150,76 @@ class TestPrimaryStorageServices(cloudstackTestCase): self.assertEqual( storage_response.id, storage.id, - "Check storage pool ID for hypervisor: " + v["hypervisor"] + "Check storage pool ID" ) self.assertEqual( storage.type, storage_response.type, - "Check storage pool type for hypervisor: " + v["hypervisor"] + "Check storage pool type " ) # Call cleanup for reusing primary storage cleanup_resources(self.apiclient, self.cleanup) self.cleanup = [] + return + + + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"]) + def test_01_primary_storage_iscsi(self): + """Test primary storage pools - XEN, KVM, VMWare + """ + + # Validate the following: + # 1. List Clusters + # 2. verify that the cluster is in 'Enabled' allocation state + # 3. 
verify that the host is added successfully and + # in Up state with listHosts api response # Create iSCSI storage pools with on XEN/KVM clusters - for k, v in self.services["iscsi"].items(): - clusters = list_clusters( - self.apiclient, - zoneid=self.zone.id, - hypervisortype=v["hypervisor"] - ) + clusters = list_clusters( + self.apiclient, + zoneid=self.zone.id + ) + assert isinstance(clusters,list) and len(clusters)>0 + for cluster in clusters: + + #Host should be present before adding primary storage + list_hosts_response = list_hosts( + self.apiclient, + clusterid=cluster.id + ) self.assertEqual( - isinstance(clusters, list), + isinstance(list_hosts_response, list), True, "Check list response returns a valid list" ) - cluster = clusters[0] + + self.assertNotEqual( + len(list_hosts_response), + 0, + "Check list Hosts in the cluster: " + cluster + ) + storage = StoragePool.create(self.apiclient, - v, + self.services["iscsi"], clusterid=cluster.id, zoneid=self.zone.id, podid=self.pod.id ) self.cleanup.append(storage) - self.debug("Created iSCSI storage pool in cluster: %s" % cluster.id) - + self.debug("Created storage pool in cluster: %s" % cluster.id) + self.assertEqual( storage.state, 'Up', - "Check primary storage state for hypervisor: " + v["hypervisor"] + "Check primary storage state " + ) + + self.assertEqual( + storage.type, + 'NetworkFilesystem', + "Check storage pool type " ) #Verify List Storage pool Response has newly added storage pool @@ -221,24 +233,24 @@ class TestPrimaryStorageServices(cloudstackTestCase): "Check list response returns a valid list" ) self.assertNotEqual( - len(storage_pools_response), - 0, - "Check Hosts response for hypervisor: " + v["hypervisor"] + len(storage_pools_response), + 0, + "Check list Hosts response" ) storage_response = storage_pools_response[0] self.assertEqual( storage_response.id, storage.id, - "Check storage pool ID for hypervisor: " + v["hypervisor"] - ) + "Check storage pool ID" + ) self.assertEqual( storage.type, storage_response.type, - "Check storage pool type hypervisor: " + v["hypervisor"] + "Check storage pool type " ) - # Call cleanup for reusing primary storage cleanup_resources(self.apiclient, self.cleanup) self.cleanup = [] + return diff --git a/test/integration/smoke/test_privategw_acl.py b/test/integration/smoke/test_privategw_acl.py index 5daf6ca0a59..9c37e5e7fc0 100644 --- a/test/integration/smoke/test_privategw_acl.py +++ b/test/integration/smoke/test_privategw_acl.py @@ -22,6 +22,7 @@ from marvin.cloudstackAPI import * from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * +from nose.plugins.attrib import attr class TestPrivateGwACL(cloudstackTestCase): @@ -37,6 +38,8 @@ class TestPrivateGwACL(cloudstackTestCase): self.templateId = 5 self.privateGwId = None + + @attr(tags=["advanced"]) def test_privategw_acl(self): # 1) Create VPC diff --git a/test/integration/smoke/test_routers.py b/test/integration/smoke/test_routers.py index f6ca2790069..d89acf93736 100644 --- a/test/integration/smoke/test_routers.py +++ b/test/integration/smoke/test_routers.py @@ -107,7 +107,6 @@ class TestRouterServices(cloudstackTestCase): serviceofferingid=cls.service_offering.id ) cls.cleanup = [ - cls.vm_1, cls.account, cls.service_offering ] diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py index 7f4d130ee80..0213c04cb02 100644 --- a/test/integration/smoke/test_service_offerings.py +++ 
b/test/integration/smoke/test_service_offerings.py @@ -27,24 +27,89 @@ from nose.plugins.attrib import attr _multiprocess_shared_ = True + class Services: """Test Service offerings Services """ def __init__(self): self.services = { - "off": - { - "name": "Service Offering", - "displaytext": "Service Offering", - "cpunumber": 1, - "cpuspeed": 100, # MHz - "memory": 128, # in MBs - }, - } + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + "off": + { + "name": "Service Offering", + "displaytext": "Service Offering", + "cpunumber": 1, + "cpuspeed": 100, # MHz + "memory": 128, # in MBs + }, + "small": + # Create a small virtual machine instance with disk offering + { + "displayname": "testserver", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "medium": # Create a medium virtual machine instance + { + "displayname": "testserver", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "service_offerings": + { + "tiny": + { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + "small": + { + # Small service offering ID to for change VM + # service offering from medium to small + "name": "Small Instance", + "displaytext": "Small Instance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + }, + "medium": + { + # Medium service offering ID to for + # change VM service offering from small to medium + "name": "Medium Instance", + "displaytext": "Medium Instance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 256, + } + }, + "ostype": 'CentOS 5.3 (64-bit)', + } + class TestCreateServiceOffering(cloudstackTestCase): - def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() @@ -60,8 +125,8 @@ class TestCreateServiceOffering(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"]) + + @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"]) def test_01_create_service_offering(self): """Test to create service offering""" @@ -70,67 +135,65 @@ class TestCreateServiceOffering(cloudstackTestCase): # 2. 
The Cloud Database contains the valid information service_offering = ServiceOffering.create( - self.apiclient, - self.services["off"] - ) + self.apiclient, + self.services["off"] + ) self.cleanup.append(service_offering) self.debug("Created service offering with ID: %s" % service_offering.id) list_service_response = list_service_offering( - self.apiclient, - id=service_offering.id - ) + self.apiclient, + id=service_offering.id + ) self.assertEqual( - isinstance(list_service_response, list), - True, - "Check list response returns a valid list" - ) - + isinstance(list_service_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( - len(list_service_response), - 0, - "Check Service offering is created" - ) + len(list_service_response), + 0, + "Check Service offering is created" + ) service_response = list_service_response[0] self.assertEqual( - list_service_response[0].cpunumber, - self.services["off"]["cpunumber"], - "Check server id in createServiceOffering" - ) + list_service_response[0].cpunumber, + self.services["off"]["cpunumber"], + "Check server id in createServiceOffering" + ) self.assertEqual( - list_service_response[0].cpuspeed, - self.services["off"]["cpuspeed"], - "Check cpuspeed in createServiceOffering" - ) + list_service_response[0].cpuspeed, + self.services["off"]["cpuspeed"], + "Check cpuspeed in createServiceOffering" + ) self.assertEqual( - list_service_response[0].displaytext, - self.services["off"]["displaytext"], - "Check server displaytext in createServiceOfferings" - ) + list_service_response[0].displaytext, + self.services["off"]["displaytext"], + "Check server displaytext in createServiceOfferings" + ) self.assertEqual( - list_service_response[0].memory, - self.services["off"]["memory"], - "Check memory in createServiceOffering" - ) + list_service_response[0].memory, + self.services["off"]["memory"], + "Check memory in createServiceOffering" + ) self.assertEqual( - list_service_response[0].name, - self.services["off"]["name"], - "Check name in createServiceOffering" - ) + list_service_response[0].name, + self.services["off"]["name"], + "Check name in createServiceOffering" + ) return class TestServiceOfferings(cloudstackTestCase): - def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() self.cleanup = [] def tearDown(self): - try: #Clean up, terminate the created templates cleanup_resources(self.apiclient, self.cleanup) @@ -142,17 +205,61 @@ class TestServiceOfferings(cloudstackTestCase): @classmethod def setUpClass(cls): - cls.services = Services().services cls.api_client = super(TestServiceOfferings, cls).getClsTestClient().getApiClient() + cls.services = Services().services + domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.services['mode'] = cls.zone.networktype + cls.service_offering_1 = ServiceOffering.create( - cls.api_client, - cls.services["off"] - ) + cls.api_client, + cls.services["off"] + ) cls.service_offering_2 = ServiceOffering.create( - cls.api_client, - cls.services["off"] - ) - cls._cleanup = [cls.service_offering_1] + cls.api_client, + cls.services["off"] + ) + template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["medium"]["zoneid"] = cls.zone.id + cls.services["medium"]["template"] = template.id + + # Create 
VMs, NAT Rules etc + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=domain.id + ) + + cls.small_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offerings"]["small"] + ) + + cls.medium_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offerings"]["medium"] + ) + cls.medium_virtual_machine = VirtualMachine.create( + cls.api_client, + cls.services["medium"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.medium_offering.id, + mode=cls.services["mode"] + ) + cls._cleanup = [ + cls.small_offering, + cls.medium_offering, + cls.account + ] return @classmethod @@ -166,7 +273,7 @@ class TestServiceOfferings(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"]) + @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"]) def test_02_edit_service_offering(self): """Test to update existing service offering""" @@ -178,8 +285,8 @@ class TestServiceOfferings(cloudstackTestCase): random_displaytext = random_gen() random_name = random_gen() - self.debug("Updating service offering with ID: %s" % - self.service_offering_1.id) + self.debug("Updating service offering with ID: %s" % + self.service_offering_1.id) cmd = updateServiceOffering.updateServiceOfferingCmd() #Add parameters for API call @@ -189,35 +296,35 @@ class TestServiceOfferings(cloudstackTestCase): self.apiclient.updateServiceOffering(cmd) list_service_response = list_service_offering( - self.apiclient, - id=self.service_offering_1.id - ) + self.apiclient, + id=self.service_offering_1.id + ) self.assertEqual( - isinstance(list_service_response, list), - True, - "Check list response returns a valid list" - ) - + isinstance(list_service_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( - len(list_service_response), - 0, - "Check Service offering is updated" - ) + len(list_service_response), + 0, + "Check Service offering is updated" + ) self.assertEqual( - list_service_response[0].displaytext, - random_displaytext, - "Check server displaytext in updateServiceOffering" - ) + list_service_response[0].displaytext, + random_displaytext, + "Check server displaytext in updateServiceOffering" + ) self.assertEqual( - list_service_response[0].name, - random_name, - "Check server name in updateServiceOffering" - ) + list_service_response[0].name, + random_name, + "Check server name in updateServiceOffering" + ) return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"]) + @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"]) def test_03_delete_service_offering(self): """Test to delete service offering""" @@ -225,20 +332,108 @@ class TestServiceOfferings(cloudstackTestCase): # 1. 
deleteServiceOffering should return # a valid information for newly created offering - self.debug("Deleting service offering with ID: %s" % - self.service_offering_2.id) + self.debug("Deleting service offering with ID: %s" % + self.service_offering_2.id) self.service_offering_2.delete(self.apiclient) list_service_response = list_service_offering( - self.apiclient, - id=self.service_offering_2.id - ) + self.apiclient, + id=self.service_offering_2.id + ) self.assertEqual( - list_service_response, - None, - "Check if service offering exists in listDiskOfferings" - ) + list_service_response, + None, + "Check if service offering exists in listDiskOfferings" + ) return + + @attr(tags=["advanced", "advancedns", "smoke"]) + def test_04_change_offering_small(self): + """Test to change service to a small capacity + """ + # Validate the following + # 1. Log in to the Vm .We should see that the CPU and memory Info of + # this Vm matches the one specified for "Small" service offering. + # 2. Using listVM command verify that this Vm + # has Small service offering Id. + + self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id) + self.medium_virtual_machine.stop(self.apiclient) + # Ensure that VM is in stopped state + list_vm_response = list_virtual_machines( + self.apiclient, + id=self.medium_virtual_machine.id + ) + if isinstance(list_vm_response, list): + vm = list_vm_response[0] + if vm.state == 'Stopped': + self.debug("VM state: %s" % vm.state) + else: + raise Exception( + "Failed to stop VM (ID: %s) in change service offering" % vm.id) + + self.debug("Change Service offering VM - ID: %s" % + self.medium_virtual_machine.id) + + cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd() + cmd.id = self.medium_virtual_machine.id + cmd.serviceofferingid = self.small_offering.id + self.apiclient.changeServiceForVirtualMachine(cmd) + + self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id) + self.medium_virtual_machine.start(self.apiclient) + # Ensure that VM is in running state + list_vm_response = list_virtual_machines( + self.apiclient, + id=self.medium_virtual_machine.id + ) + + if isinstance(list_vm_response, list): + vm = list_vm_response[0] + if vm.state == 'Running': + self.debug("VM state: %s" % vm.state) + else: + raise Exception( + "Failed to start VM (ID: %s) after changing service offering" % vm.id) + + try: + ssh = self.medium_virtual_machine.get_ssh_client() + except Exception as e: + self.fail( + "SSH Access failed for %s: %s" %\ + (self.medium_virtual_machine.ipaddress, e) + ) + + cpuinfo = ssh.execute("cat /proc/cpuinfo") + cpu_cnt = len([i for i in cpuinfo if "processor" in i]) + #'cpu MHz\t\t: 2660.499' + cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3] + meminfo = ssh.execute("cat /proc/meminfo") + #MemTotal: 1017464 kB + total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1] + + self.debug( + "CPU count: %s, CPU Speed: %s, Mem Info: %s" % ( + cpu_cnt, + cpu_speed, + total_mem + )) + self.assertAlmostEqual( + int(cpu_cnt), + self.small_offering.cpunumber, + "Check CPU Count for small offering" + ) + self.assertAlmostEqual( + list_vm_response[0].cpuspeed, + self.small_offering.cpuspeed, + "Check CPU Speed for small offering" + ) + self.assertAlmostEqual( + int(total_mem) / 1024, # In MBs + int(self.small_offering.memory), + "Check Memory(kb) for small offering" + ) + return diff --git a/test/integration/smoke/test_templates.py b/test/integration/smoke/test_templates.py index 8b83f5e6e43..9478440f77e 100644 --- 
a/test/integration/smoke/test_templates.py +++ b/test/integration/smoke/test_templates.py @@ -665,7 +665,7 @@ class TestTemplates(cloudstackTestCase): if len(self.zones) <= 1: self.skipTest("Not enough zones available to perform copy template") - self.services["destzoneid"] = filter(lambda z: z.id != self.services["sourcezoneid"], self.zones)[0] + self.services["destzoneid"] = filter(lambda z: z.id != self.services["sourcezoneid"], self.zones)[0].id self.debug("Copy template from Zone: %s to %s" % ( self.services["sourcezoneid"], diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index d52ed9b8df6..afe9b8a6331 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -489,245 +489,6 @@ class TestVMLifeCycle(cloudstackTestCase): ) return - @attr(tags = ["advanced", "advancedns", "smoke"]) - def test_04_change_offering_small(self): - """Change Offering to a small capacity - """ - - # Validate the following - # 1. Log in to the Vm .We should see that the CPU and memory Info of - # this Vm matches the one specified for "Small" service offering. - # 2. Using listVM command verify that this Vm - # has Small service offering Id. - - self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id) - - self.medium_virtual_machine.stop(self.apiclient) - - # Poll listVM to ensure VM is stopped properly - timeout = self.services["timeout"] - - while True: - time.sleep(self.services["sleep"]) - - # Ensure that VM is in stopped state - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.medium_virtual_machine.id - ) - - if isinstance(list_vm_response, list): - - vm = list_vm_response[0] - if vm.state == 'Stopped': - self.debug("VM state: %s" % vm.state) - break - - if timeout == 0: - raise Exception( - "Failed to stop VM (ID: %s) in change service offering" % vm.id) - - timeout = timeout - 1 - - self.debug("Change Service offering VM - ID: %s" % - self.medium_virtual_machine.id) - - cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd() - cmd.id = self.medium_virtual_machine.id - cmd.serviceofferingid = self.small_offering.id - self.apiclient.changeServiceForVirtualMachine(cmd) - - self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id) - self.medium_virtual_machine.start(self.apiclient) - - # Poll listVM to ensure VM is started properly - timeout = self.services["timeout"] - - while True: - time.sleep(self.services["sleep"]) - - # Ensure that VM is in running state - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.medium_virtual_machine.id - ) - - if isinstance(list_vm_response, list): - - vm = list_vm_response[0] - if vm.state == 'Running': - self.debug("VM state: %s" % vm.state) - break - - if timeout == 0: - raise Exception( - "Failed to start VM (ID: %s) after changing service offering" % vm.id) - - timeout = timeout - 1 - - try: - ssh = self.medium_virtual_machine.get_ssh_client() - except Exception as e: - self.fail( - "SSH Access failed for %s: %s" % \ - (self.medium_virtual_machine.ipaddress, e) - ) - - cpuinfo = ssh.execute("cat /proc/cpuinfo") - - cpu_cnt = len([i for i in cpuinfo if "processor" in i]) - #'cpu MHz\t\t: 2660.499' - cpu_speed = [i for i in cpuinfo if "cpu MHz" in i ][0].split()[3] - - meminfo = ssh.execute("cat /proc/meminfo") - #MemTotal: 1017464 kB - total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1] - - self.debug( - "CPU count: %s, CPU Speed: %s, Mem Info: %s" % ( - cpu_cnt, - 
cpu_speed, - total_mem - )) - self.assertAlmostEqual( - int(cpu_cnt), - self.small_offering.cpunumber, - "Check CPU Count for small offering" - ) - - self.assertAlmostEqual( - list_vm_response[0].cpuspeed, - self.small_offering.cpuspeed, - "Check CPU Speed for small offering" - ) - self.assertAlmostEqual( - int(total_mem) / 1024, # In MBs - self.small_offering.memory, - "Check Memory(kb) for small offering" - ) - return - - @attr(tags = ["advanced", "advancedns", "smoke"]) - def test_05_change_offering_medium(self): - """Change Offering to a medium capacity - """ - # Validate the following - # 1. Log in to the Vm .We should see that the CPU and memory Info of - # this Vm matches the one specified for "Medium" service offering. - # 2. Using listVM command verify that this Vm - # has Medium service offering Id. - - self.debug("Stopping VM - ID: %s" % self.small_virtual_machine.id) - self.small_virtual_machine.stop(self.apiclient) - - # Poll listVM to ensure VM is stopped properly - timeout = self.services["timeout"] - - while True: - time.sleep(self.services["sleep"]) - - # Ensure that VM is in stopped state - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.small_virtual_machine.id - ) - - if isinstance(list_vm_response, list): - - vm = list_vm_response[0] - if vm.state == 'Stopped': - self.debug("VM state: %s" % vm.state) - break - - if timeout == 0: - raise Exception( - "Failed to stop VM (ID: %s) in change service offering" % vm.id) - - timeout = timeout - 1 - - self.debug("Change service offering VM - ID: %s" % - self.small_virtual_machine.id) - - cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd() - cmd.id = self.small_virtual_machine.id - cmd.serviceofferingid = self.medium_offering.id - self.apiclient.changeServiceForVirtualMachine(cmd) - - self.debug("Starting VM - ID: %s" % self.small_virtual_machine.id) - self.small_virtual_machine.start(self.apiclient) - - # Poll listVM to ensure VM is started properly - timeout = self.services["timeout"] - - while True: - time.sleep(self.services["sleep"]) - - # Ensure that VM is in running state - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.small_virtual_machine.id - ) - - if isinstance(list_vm_response, list): - - vm = list_vm_response[0] - if vm.state == 'Running': - self.debug("VM state: %s" % vm.state) - break - - if timeout == 0: - raise Exception( - "Failed to start VM (ID: %s) after changing service offering" % vm.id) - - timeout = timeout - 1 - - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.small_virtual_machine.id - ) - - try: - ssh_client = self.small_virtual_machine.get_ssh_client() - except Exception as e: - self.fail( - "SSH Access failed for %s: %s" % \ - (self.small_virtual_machine.ipaddress, e) - ) - - cpuinfo = ssh_client.execute("cat /proc/cpuinfo") - - cpu_cnt = len([i for i in cpuinfo if "processor" in i]) - #'cpu MHz\t\t: 2660.499' - cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3] - - meminfo = ssh_client.execute("cat /proc/meminfo") - #MemTotal: 1017464 kB - total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1] - - self.debug( - "CPU count: %s, CPU Speed: %s, Mem Info: %s" % ( - cpu_cnt, - cpu_speed, - total_mem - )) - self.assertAlmostEqual( - int(cpu_cnt), - self.medium_offering.cpunumber, - "Check CPU Count for medium offering" - ) - - self.assertAlmostEqual( - list_vm_response[0].cpuspeed, - self.medium_offering.cpuspeed, - "Check CPU Speed for medium offering" - ) - - self.assertAlmostEqual( - 
int(total_mem) / 1024, # In MBs - self.medium_offering.memory, - "Check Memory(kb) for medium offering" - ) - return @attr(tags = ["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"]) def test_06_destroy_vm(self): @@ -969,6 +730,15 @@ class TestVMLifeCycle(cloudstackTestCase): cmd.virtualmachineid = self.virtual_machine.id self.apiclient.attachIso(cmd) + #determine device type from hypervisor + hosts = Host.list(self.apiclient, id=self.virtual_machine.hostid) + self.assertTrue(isinstance(hosts, list)) + self.assertTrue(len(hosts) > 0) + self.debug("Found %s host" % hosts[0].hypervisor) + + if hosts[0].hypervisor.lower() == "kvm": + self.services["diskdevice"] = "/dev/vda" + try: ssh_client = self.virtual_machine.get_ssh_client() except Exception as e: diff --git a/test/integration/smoke/test_volumedetail.py b/test/integration/smoke/test_volumedetail.py deleted file mode 100644 index f734dbb4de6..00000000000 --- a/test/integration/smoke/test_volumedetail.py +++ /dev/null @@ -1,239 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-""" P1 tests for Scaling up Vm -""" -#Import Local Modules -import marvin -from marvin.cloudstackTestCase import * -from marvin.cloudstackAPI import * -from marvin.remoteSSHClient import remoteSSHClient -from marvin.integration.lib.utils import * -from marvin.integration.lib.base import * -from marvin.integration.lib.common import * -from nose.plugins.attrib import attr -#Import System modules -import time - -_multiprocess_shared_ = True -class Services: - """Test VM Life Cycle Services - """ - - def __init__(self): - self.services = { - - "account": { - "email": "test@test.com", - "firstname": "Test", - "lastname": "User", - "username": "test", - # Random characters are appended in create account to - # ensure unique username generated each time - "password": "password", - }, - "small": - # Create a small virtual machine instance with disk offering - { - "displayname": "testserver", - "username": "root", # VM creds for SSH - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - "privateport": 22, - "publicport": 22, - "protocol": 'TCP', - }, - "disk_offering": { - "displaytext": "Small", - "name": "Small", - "storagetype": "shared", - "disksize": 1 - }, - "service_offerings": - { - "small": - { - # Small service offering ID to for change VM - # service offering from medium to small - "name": "SmallInstance", - "displaytext": "SmallInstance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 256, - }, - "big": - { - # Big service offering ID to for change VM - "name": "BigInstance", - "displaytext": "BigInstance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 512, - } - }, - #Change this - "template": { - "displaytext": "xs", - "name": "xs", - "passwordenabled": False, - }, - "diskdevice": '/dev/xvdd', - # Disk device where ISO is attached to instance - "mount_dir": "/mnt/tmp", - "sleep": 60, - "timeout": 10, - #Migrate VM to hostid - "ostype": 'CentOS 5.6 (64-bit)', - # CentOS 5.3 (64-bit) - } - -class TestVolumeDetail(cloudstackTestCase): - - @classmethod - def setUpClass(cls): - cls.api_client = super(TestVolumeDetail, cls).getClsTestClient().getApiClient() - cls.services = Services().services - - # Get Zone, Domain and templates - domain = get_domain(cls.api_client, cls.services) - zone = get_zone(cls.api_client, cls.services) - cls.services['mode'] = zone.networktype - - # Set Zones and disk offerings ?? - - # Create account, service offerings, vm. - cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=domain.id - ) - - - cls.disk_offering = DiskOffering.create( - cls.api_client, - cls.services["disk_offering"] - ) - - #create a volume - cls.volume = Volume.create( - cls.api_client, - { "diskname" : "ndm"}, - zoneid=zone.id, - account=cls.account.name, - domainid=cls.account.domainid, - diskofferingid=cls.disk_offering.id - ) - #how does it work ?? 
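
The _cleanup lists built in these classes (the "how does it work ??" question above) are handed to cleanup_resources() from tearDownClass together with the API client; conceptually the helper just deletes each object that was created, roughly:

    def cleanup_resources(api_client, resources):
        # Conceptual sketch only, not the library code: every Marvin base
        # object (Account, Volume, DiskOffering, ...) exposes delete().
        for resource in resources:
            resource.delete(api_client)
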
- cls._cleanup = [ - cls.volume, - cls.account - ] - - @classmethod - def tearDownClass(cls): - cls.api_client = super(TestVolumeDetail, cls).getClsTestClient().getApiClient() - cleanup_resources(cls.api_client, cls._cleanup) - return - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - - def tearDown(self): - #Clean up, terminate the created ISOs - cleanup_resources(self.apiclient, self.cleanup) - return - - @attr(tags = ["advanced", "xenserver"]) - def test_01_updatevolumedetail(self): - """Test volume detail - """ - # Validate the following - # Scale up the vm and see if it scales to the new svc offering and is finally in running state - - self.debug("Testing ADD volume detail Volume-ID: %s " % ( - self.volume.id - )) - - cmd = addVolumeDetail.addVolumeDetailCmd() - cmd.name = self.volume.id - cmd.value = self.volume.id - cmd.id = self.volume.id - self.apiclient.addVolumeDetail(cmd) - - listVolumeDetailCmd = listVolumeDetails.listVolumeDetailsCmd() - listVolumeDetailCmd.id = self.volume.id - listVolumeDetailResponse = self.api_client.listVirtualMachines(listVolumeDetailCmd) - - self.assertNotEqual(len(listVolumeDetailResponse), 0, "Check if the list API \ - returns a non-empty response") - - volumedetail = listVolumeDetailResponse[0] - - #self.assertEqual(volumedetail.id, self.volume.id, "Check if the Volume returned is the same as the one we asked for") - - - self.assertEqual(volumedetail.name, self.volume.id, "Check if Volume has right name") - - self.assertEqual(volumedetail.value, self.volume.id, "Check if Volume has right value") - - #updatevolumedetail - self.debug("Testing UPDATE volume detail Volume-ID: %s " % ( - self.volume.id - )) - cmd = updateVolumeDetail.updateVolumeDetailCmd() - cmd.name = self.volume.id - cmd.value = self.disk_offering.id - cmd.id = self.volume.id - self.apiclient.addVolumeDetail(cmd) - - listVolumeDetailCmd = listVolumeDetails.listVolumeDetailsCmd() - listVolumeDetailCmd.id = self.volume.id - listVolumeDetailResponse = self.api_client.listVirtualMachines(listVolumeDetailCmd) - - self.assertNotEqual(len(listVolumeDetailResponse), 0, "Check if the list API \ - returns a non-empty response") - - volumedetail = listVolumeDetailResponse[0] - - #self.assertEqual(volumedetail.id, self.volume.id, "Check if the Volume returned is the same as the one we asked for") - - - self.assertEqual(volumedetail.name, self.volume.id, "Check if Volume has right name") - - self.assertEqual(volumedetail.value, self.disk_offering.id, "Check if Volume has right value") - - - #remove detail - self.debug("Testing REMOVE volume detail Volume-ID: %s " % ( - self.volume.id - )) - cmd = removeVolumeDetail.removeVolumeDetailCmd() - cmd.name = self.volume.id - cmd.id = self.volume.id - self.apiclient.removeVolumeDetail(cmd) - - listVolumeDetailCmd = listVolumeDetails.listVolumeDetailsCmd() - listVolumeDetailCmd.id = self.volume.id - listVolumeDetailResponse = self.api_client.listVirtualMachines(listVolumeDetailCmd) - - self.assertEqual(listVolumeDetailResponse, None, "Check if the list API \ - returns a non-empty response") - - - return diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py index 60fbb80f0e7..1bb9e5dd011 100644 --- a/test/integration/smoke/test_volumes.py +++ b/test/integration/smoke/test_volumes.py @@ -210,8 +210,8 @@ class TestCreateVolume(cloudstackTestCase): ) try: ssh = self.virtual_machine.get_ssh_client() + self.debug("Rebooting VM %s" 
% self.virtual_machine.id) ssh.execute("reboot") - except Exception as e: self.fail("SSH access failed for VM %s - %s" % (self.virtual_machine.ipaddress, e)) @@ -537,7 +537,7 @@ class TestVolumes(cloudstackTestCase): @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_07_resize_fail(self): - """Verify invalid options fail to Resize a volume""" + """Test resize (negative) non-existent volume""" # Verify the size is the new size is what we wanted it to be. self.debug("Fail Resize Volume ID: %s" % self.volume.id) @@ -585,8 +585,15 @@ class TestVolumes(cloudstackTestCase): self.virtual_machine.attach_volume(self.apiClient, self.volume) self.attached = True #stop the vm if it is on xenserver - if self.services['hypervisor'].lower() == "xenserver": + hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid) + self.assertTrue(isinstance(hosts, list)) + self.assertTrue(len(hosts) > 0) + self.debug("Found %s host" % hosts[0].hypervisor) + + if hosts[0].hypervisor == "XenServer": self.virtual_machine.stop(self.apiClient) + elif hosts[0].hypervisor.lower() == "vmware": + self.skipTest("Resize Volume is unsupported on VmWare") self.apiClient.resizeVolume(cmd) count = 0 @@ -611,24 +618,32 @@ class TestVolumes(cloudstackTestCase): True, "Verify the volume did not resize" ) - if self.services['hypervisor'].lower() == "xenserver": + if hosts[0].hypervisor == "XenServer": self.virtual_machine.start(self.apiClient) @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_08_resize_volume(self): - """Resize a volume""" + """Test resize a volume""" # Verify the size is the new size is what we wanted it to be. self.debug( "Attaching volume (ID: %s) to VM (ID: %s)" % ( self.volume.id, self.virtual_machine.id )) + self.virtual_machine.attach_volume(self.apiClient, self.volume) self.attached = True + hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid) + self.assertTrue(isinstance(hosts, list)) + self.assertTrue(len(hosts) > 0) + self.debug("Found %s host" % hosts[0].hypervisor) - if self.services['hypervisor'].lower() == "xenserver": + if hosts[0].hypervisor == "XenServer": self.virtual_machine.stop(self.apiClient) + elif hosts[0].hypervisor.lower() == "vmware": + self.skipTest("Resize Volume is unsupported on VmWare") + self.debug("Resize Volume ID: %s" % self.volume.id) cmd = resizeVolume.resizeVolumeCmd() @@ -660,7 +675,9 @@ class TestVolumes(cloudstackTestCase): "Check if the volume resized appropriately" ) - if self.services['hypervisor'].lower() == "xenserver": + #start the vm if it is on xenserver + + if hosts[0].hypervisor == "XenServer": self.virtual_machine.start(self.apiClient) @attr(tags = ["advanced", "advancedns", "smoke","basic"]) diff --git a/test/setup-test-data.sh b/test/setup-test-data.sh deleted file mode 100755 index 844c275da7c..00000000000 --- a/test/setup-test-data.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -usage() { - printf "Usage: %s:\n - [-t path to tests ] \n - [-m mgmt-server ] \n - [-h hypervisor (xen|kvm) ] \n - [-p hypervisor root password ] \n - [-d db node url ]\n" $(basename $0) >&2 -} - -failed() { - exit $1 -} - -#defaults -TESTDIR="/root/cloudstack/test/" -MGMT_SVR="localhost" -DB_SVR="localhost" -HV_PASSWD="password" - -while getopts 't:d:m:p:h:' OPTION -do - case $OPTION in - d) dflag=1 - DB_SVR="$OPTARG" - ;; - t) tflag=1 - TESTDIR="$OPTARG" - ;; - m) mflag=1 - MGMT_SVR="$OPTARG" - ;; - h) hflag=1 - HV="$OPTARG" - ;; - p) pflag=1 - HV_PASSWD="$OPTARG" - ;; - ?) usage - failed 2 - ;; - esac -done - -#Damn Small Linux ISO type -if [[ $HV == "kvm" ]]; then - ostypeid=$(mysql -ucloud -Dcloud -pcloud -h$DB_SVR -s -N -r -e"select uuid from guest_os where display_name='CentOS 5.5 (64-bit)'") -else - ostypeid=$(mysql -ucloud -Dcloud -pcloud -h$DB_SVR -s -N -r -e"select uuid from guest_os where display_name='CentOS 5.3 (64-bit)'") -fi -if [[ $ostypeid == "" ]]; then - echo "Unable to contact DB server @ $DB_SVR" - exit 2 -fi - -nc -z $MGMT_SVR 8096 -if [[ $? -ne 0 ]]; then - echo "$MGMT_SVR doesn't have port 8096 open" - exit 2 -fi - -if [[ ! -d $TESTDIR ]]; then - echo "No directory $TESTDIR found" - exit 2 -fi -for file in `find $TESTDIR -name *.py -type f` -do - old_ostypeid=$(grep ostypeid $file | head -1 | cut -d: -f2 | tr -d " ,'") - if [[ $old_ostypeid != "" ]] - then - echo "replacing:" $old_ostypeid, "with:" $ostypeid,"in " $file - sed -i "s/$old_ostypeid/$ostypeid/g" $file - #sed -i "s/http:\/\/iso.linuxquestions.org\/download\/504\/1819\/http\/gd4.tuwien.ac.at\/dsl-4.4.10.iso/http:\/\/nfs1.lab.vmops.com\/isos_32bit\/dsl-4.4.10.iso/g" $file - sed -i "s/fr3sca/$HV_PASSWD/g" $file - fi -done - -#Python version check -version_tuple=$(python -c 'import sys; print(sys.version_info[:2])') -if [[ $version_tuple == "(2, 7)" ]] -then - echo "Done" -else - echo "WARN: Python version 2.7 not detected on system." 
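
The heart of the script removed here is the ostypeid substitution loop: look up the guest-OS uuid from the database, then rewrite the hard-coded value in each test file. A rough Python equivalent of that single step (the function name and regex are assumptions for illustration only):

    import re

    def patch_ostypeid(path, new_uuid):
        # Replace the first hard-coded ostypeid in a test file with the uuid
        # read from the guest_os table, as the sed loop above did.
        with open(path) as f:
            text = f.read()
        match = re.search(r"ostypeid\W+([0-9a-fA-F-]{36})", text)
        if match and match.group(1) != new_uuid:
            with open(path, "w") as f:
                f.write(text.replace(match.group(1), new_uuid))
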
-fi diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 5d7e3be4e29..43dad9987cd 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -86,6 +86,7 @@ known_categories = { 'Pod': 'Pod', 'PublicIpRange': 'Network', 'Zone': 'Zone', + 'Vmware' : 'Zone', 'NetworkOffering': 'Network Offering', 'NetworkACL': 'Network ACL', 'Network': 'Network', @@ -147,7 +148,9 @@ known_categories = { 'createCacheStore': 'Image Store', 'InternalLoadBalancer': 'Internal LB', 'DeploymentPlanners': 'Configuration', - 'PortableIp': 'Portable IP' + 'PortableIp': 'Portable IP', + 'dedicateHost': 'Dedicate Resources', + 'releaseDedicatedHost': 'Dedicate Resources' } diff --git a/tools/appliance/definitions/systemvmtemplate/postinstall.sh b/tools/appliance/definitions/systemvmtemplate/postinstall.sh index f532f88537c..203cf54d827 100644 --- a/tools/appliance/definitions/systemvmtemplate/postinstall.sh +++ b/tools/appliance/definitions/systemvmtemplate/postinstall.sh @@ -21,6 +21,12 @@ ROOTPW=password HOSTNAME=systemvm CLOUDSTACK_RELEASE=4.2.0 +add_backports () { + sed -i '/backports/d' /etc/apt/sources.list + echo 'deb http://http.us.debian.org/debian wheezy-backports main' >> /etc/apt/sources.list + apt-get update +} + install_packages() { DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical @@ -42,6 +48,9 @@ install_packages() { apt-get --no-install-recommends -q -y --force-yes install dnsmasq dnsmasq-utils # nfs client apt-get --no-install-recommends -q -y --force-yes install nfs-common + # nfs irqbalance + apt-get --no-install-recommends -q -y --force-yes install irqbalance + # vpn stuff apt-get --no-install-recommends -q -y --force-yes install xl2tpd bcrelay ppp ipsec-tools tdb-tools @@ -78,10 +87,7 @@ install_packages() { # rm -fr /opt/vmware-tools-distrib # apt-get -q -y --force-yes purge build-essential - # haproxy. 
Wheezy doesn't have haproxy, install from backports - #apt-get --no-install-recommends -q -y --force-yes install haproxy - wget http://ftp.us.debian.org/debian/pool/main/h/haproxy/haproxy_1.4.8-1_i386.deb - dpkg -i haproxy_1.4.8-1_i386.deb + apt-get --no-install-recommends -q -y --force-yes install haproxy } setup_accounts() { @@ -110,7 +116,7 @@ fix_nameserver() { # Replace /etc/resolv.conf also cat > /etc/resolv.conf << EOF nameserver 8.8.8.8 -nameserver 4.4.4.4 +nameserver 8.8.4.4 EOF } @@ -220,6 +226,8 @@ do_signature() { begin=$(date +%s) +echo "*************ADDING BACKPORTS********************" +add_backports echo "*************INSTALLING PACKAGES********************" install_packages echo "*************DONE INSTALLING PACKAGES********************" diff --git a/tools/appliance/definitions/systemvmtemplate64/postinstall.sh b/tools/appliance/definitions/systemvmtemplate64/postinstall.sh index 3ccf3cefdef..cbcd282b62e 100644 --- a/tools/appliance/definitions/systemvmtemplate64/postinstall.sh +++ b/tools/appliance/definitions/systemvmtemplate64/postinstall.sh @@ -21,6 +21,13 @@ ROOTPW=password HOSTNAME=systemvm CLOUDSTACK_RELEASE=4.2.0 +add_backports () { + sed -i '/backports/d' /etc/apt/sources.list + echo 'deb http://http.us.debian.org/debian wheezy-backports main' >> /etc/apt/sources.list + apt-get update +} + + install_packages() { DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical @@ -42,6 +49,8 @@ install_packages() { apt-get --no-install-recommends -q -y --force-yes install dnsmasq dnsmasq-utils # nfs client apt-get --no-install-recommends -q -y --force-yes install nfs-common + # nfs irqbalance + apt-get --no-install-recommends -q -y --force-yes install irqbalance # vpn stuff apt-get --no-install-recommends -q -y --force-yes install xl2tpd bcrelay ppp ipsec-tools tdb-tools @@ -78,10 +87,8 @@ install_packages() { # rm -fr /opt/vmware-tools-distrib # apt-get -q -y --force-yes purge build-essential - # haproxy. 
Wheezy doesn't have haproxy temporarily, install from backports - #apt-get --no-install-recommends -q -y --force-yes install haproxy - wget http://ftp.us.debian.org/debian/pool/main/h/haproxy/haproxy_1.4.8-1_amd64.deb - dpkg -i haproxy_1.4.8-1_amd64.deb + apt-get --no-install-recommends -q -y --force-yes install haproxy + } setup_accounts() { @@ -110,7 +117,7 @@ fix_nameserver() { # Replace /etc/resolv.conf also cat > /etc/resolv.conf << EOF nameserver 8.8.8.8 -nameserver 4.4.4.4 +nameserver 8.8.4.4 EOF } @@ -220,6 +227,8 @@ do_signature() { begin=$(date +%s) +echo "*************ADDING BACKPORTS********************" +add_backports echo "*************INSTALLING PACKAGES********************" install_packages echo "*************DONE INSTALLING PACKAGES********************" diff --git a/tools/cli/cloudmonkey/cachemaker.py b/tools/cli/cloudmonkey/cachemaker.py index a625b014d38..47749e5ae74 100644 --- a/tools/cli/cloudmonkey/cachemaker.py +++ b/tools/cli/cloudmonkey/cachemaker.py @@ -101,7 +101,7 @@ def monkeycache(apis): cache['asyncapis'] = [] apilist = getvalue(apis[responsekey], 'api') - if apilist == None: + if apilist is None: print "[monkeycache] Server response issue, no apis found" for api in apilist: diff --git a/tools/devcloud-kvm/devcloud-kvm.cfg b/tools/devcloud-kvm/devcloud-kvm.cfg index b3f048aac97..9f310e32755 100644 --- a/tools/devcloud-kvm/devcloud-kvm.cfg +++ b/tools/devcloud-kvm/devcloud-kvm.cfg @@ -46,7 +46,7 @@ ] } ], - "dns2": "4.4.4.4", + "dns2": "8.8.4.4", "dns1": "8.8.8.8", "securitygroupenabled": "true", "localstorageenabled": "true", diff --git a/tools/devcloud/devcloud-advancedsg.cfg b/tools/devcloud/devcloud-advancedsg.cfg new file mode 100644 index 00000000000..6c26b15f5da --- /dev/null +++ b/tools/devcloud/devcloud-advancedsg.cfg @@ -0,0 +1,119 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# This configuration is meant for running advanced networking with security groups, with management server on the laptop. 
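
The new devcloud-advancedsg.cfg introduced here is a JSON document preceded by '#' comment lines (the license header and the notes around this point). A minimal way to load and sanity-check it (the helper below is illustrative and not part of Marvin):

    import json

    def load_devcloud_cfg(path):
        # The .cfg is JSON under a '#' comment header; drop the comments first.
        with open(path) as f:
            body = "".join(line for line in f
                           if not line.lstrip().startswith("#"))
        return json.loads(body)

    cfg = load_devcloud_cfg("tools/devcloud/devcloud-advancedsg.cfg")
    zone = cfg["zones"][0]
    assert zone["networktype"] == "Advanced"
    assert zone["securitygroupenabled"] == "true"
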
+# It requires that the user run a DNS resolver within devcloud via 'apt-get install dnsmasq' + +{ + "zones": [ + { + "localstorageenabled": "true", + "name": "testzone", + "dns1": "8.8.8.8", + "physical_networks": [ + { + "broadcastdomainrange": "Zone", + "name": "shared", + "traffictypes": [ + { + "typ": "Management" + }, + { + "typ": "Guest" + } + ], + "providers": [ + { + "broadcastdomainrange": "ZONE", + "name": "VirtualRouter" + }, + { + "broadcastdomainrange": "ZONE", + "name": "SecurityGroupProvider" + } + ], + "isolationmethods": [ + "VLAN" + ] + } + ], + "securitygroupenabled": "true", + "ipranges": [ + { + "startip": "10.0.3.100", + "endip": "10.0.3.199", + "netmask": "255.255.255.0", + "vlan": "1003", + "gateway": "10.0.3.2" + } + ], + "networktype": "Advanced", + "pods": [ + { + "endip": "192.168.56.249", + "name": "testpod", + "startip": "192.168.56.200", + "netmask": "255.255.255.0", + "clusters": [ + { + "clustername": "testcluster", + "hypervisor": "XenServer", + "hosts": [ + { + "username": "root", + "url": "http://192.168.56.10/", + "password": "password" + } + ], + "clustertype": "CloudManaged" + } + ], + "gateway": "192.168.56.1" + } + ], + "internaldns1": "192.168.56.10", + "secondaryStorages": [ + { + "url": "nfs://192.168.56.10/opt/storage/secondary" + } + ] + } + ], + "dbSvr": { + "dbSvr": "localhost", + "passwd": "cloud", + "db": "cloud", + "port": 3306, + "user": "cloud" + }, + "logger": [ + { + "name": "TestClient", + "file": "/var/log/testclient.log" + }, + { + "name": "TestCase", + "file": "/var/log/testcase.log" + } + ], + "mgtSvr": [ + { + "mgtSvrIp": "127.0.0.1", + "port": 8096 + } + ] +} diff --git a/tools/devcloud/devcloud.cfg b/tools/devcloud/devcloud.cfg index 4432fb20d77..8ce62051b0b 100644 --- a/tools/devcloud/devcloud.cfg +++ b/tools/devcloud/devcloud.cfg @@ -45,7 +45,7 @@ ] } ], - "dns2": "4.4.4.4", + "dns2": "8.8.4.4", "dns1": "8.8.8.8", "securitygroupenabled": "true", "localstorageenabled": "true", diff --git a/tools/devcloud/devcloud_internal-mgt.cfg b/tools/devcloud/devcloud_internal-mgt.cfg index fe3dd1b41da..beae34547a8 100644 --- a/tools/devcloud/devcloud_internal-mgt.cfg +++ b/tools/devcloud/devcloud_internal-mgt.cfg @@ -44,7 +44,7 @@ ] } ], - "dns2": "4.4.4.4", + "dns2": "8.8.4.4", "dns1": "8.8.8.8", "securitygroupenabled": "true", "localstorageenabled": "true", diff --git a/tools/devcloud/quickcloud.cfg b/tools/devcloud/quickcloud.cfg index a2613d22bdb..77bc114f401 100644 --- a/tools/devcloud/quickcloud.cfg +++ b/tools/devcloud/quickcloud.cfg @@ -49,7 +49,7 @@ ] } ], - "dns2": "4.4.4.4", + "dns2": "8.8.4.4", "dns1": "8.8.8.8", "securitygroupenabled": "true", "localstorageenabled": "true", diff --git a/tools/marvin/marvin/TestCaseExecuteEngine.py b/tools/marvin/marvin/TestCaseExecuteEngine.py index 57438688486..4b64aaee835 100644 --- a/tools/marvin/marvin/TestCaseExecuteEngine.py +++ b/tools/marvin/marvin/TestCaseExecuteEngine.py @@ -72,7 +72,7 @@ class TestCaseExecuteEngine(object): self.injectTestCase(test) else: #logger bears the name of the test class - testcaselogger = logging.getLogger("testclient.testcase.%s"%test.__class__.__name__) + testcaselogger = logging.getLogger("%s" % (test)) fh = logging.FileHandler(self.logfile) fh.setFormatter(self.logformat) testcaselogger.addHandler(fh) diff --git a/tools/marvin/marvin/cloudstackConnection.py b/tools/marvin/marvin/cloudstackConnection.py index e3977dcf7d4..b092ef0c32f 100644 --- a/tools/marvin/marvin/cloudstackConnection.py +++ b/tools/marvin/marvin/cloudstackConnection.py @@ -219,7 
+219,10 @@ class cloudConnection(object): cmdname, self.auth, payload=payload, method=method) self.logging.debug("Request: %s Response: %s" % (response.url, response.text)) - response = jsonHelper.getResultObj(response.json(), response_type) + try: + response = jsonHelper.getResultObj(response.json(), response_type) + except TypeError: + response = jsonHelper.getResultObj(response.json, response_type) if isAsync == "false": return response diff --git a/tools/marvin/marvin/configGenerator.py b/tools/marvin/marvin/configGenerator.py index 11945245ff1..7f6776a3559 100644 --- a/tools/marvin/marvin/configGenerator.py +++ b/tools/marvin/marvin/configGenerator.py @@ -279,7 +279,7 @@ def describe_setup_in_basic_mode(): for l in range(1): z = zone() z.dns1 = "8.8.8.8" - z.dns2 = "4.4.4.4" + z.dns2 = "8.8.4.4" z.internaldns1 = "192.168.110.254" z.internaldns2 = "192.168.110.253" z.name = "test"+str(l) @@ -400,7 +400,7 @@ def describe_setup_in_eip_mode(): for l in range(1): z = zone() z.dns1 = "8.8.8.8" - z.dns2 = "4.4.4.4" + z.dns2 = "8.8.4.4" z.internaldns1 = "192.168.110.254" z.internaldns2 = "192.168.110.253" z.name = "test"+str(l) @@ -529,7 +529,7 @@ def describe_setup_in_advanced_mode(): for l in range(1): z = zone() z.dns1 = "8.8.8.8" - z.dns2 = "4.4.4.4" + z.dns2 = "8.8.4.4" z.internaldns1 = "192.168.110.254" z.internaldns2 = "192.168.110.253" z.name = "test"+str(l) @@ -648,6 +648,126 @@ def describe_setup_in_advanced_mode(): return zs +'''sample code to generate setup configuration file''' +def describe_setup_in_advancedsg_mode(): + zs = cloudstackConfiguration() + + for l in range(1): + z = zone() + z.dns1 = "8.8.8.8" + z.dns2 = "8.8.4.4" + z.internaldns1 = "192.168.110.254" + z.internaldns2 = "192.168.110.253" + z.name = "test"+str(l) + z.networktype = 'Advanced' + z.vlan = "100-2000" + z.securitygroupenabled = "true" + + pn = physical_network() + pn.name = "test-network" + pn.traffictypes = [traffictype("Guest"), traffictype("Management")] + + #If security groups are reqd + sgprovider = provider() + sgprovider.broadcastdomainrange = 'ZONE' + sgprovider.name = 'SecurityGroupProvider' + + pn.providers.append(sgprovider) + z.physical_networks.append(pn) + + '''create 10 pods''' + for i in range(2): + p = pod() + p.name = "test" +str(l) + str(i) + p.gateway = "192.168.%d.1"%i + p.netmask = "255.255.255.0" + p.startip = "192.168.%d.200"%i + p.endip = "192.168.%d.220"%i + + '''add 10 clusters''' + for j in range(2): + c = cluster() + c.clustername = "test"+str(l)+str(i) + str(j) + c.clustertype = "CloudManaged" + c.hypervisor = "Simulator" + + '''add 10 hosts''' + for k in range(2): + h = host() + h.username = "root" + h.password = "password" + memory = 8*1024*1024*1024 + localstorage=1*1024*1024*1024*1024 + #h.url = "http://sim/%d%d%d%d/cpucore=1&cpuspeed=8000&memory=%d&localstorage=%d"%(l,i,j,k,memory,localstorage) + h.url = "http://sim/%d%d%d%d"%(l,i,j,k) + c.hosts.append(h) + + '''add 2 primary storages''' + for m in range(2): + primary = primaryStorage() + primary.name = "primary"+str(l) + str(i) + str(j) + str(m) + #primary.url = "nfs://localhost/path%s/size=%d"%(str(l) + str(i) + str(j) + str(m), size) + primary.url = "nfs://localhost/path%s"%(str(l) + str(i) + str(j) + str(m)) + c.primaryStorages.append(primary) + + p.clusters.append(c) + + z.pods.append(p) + + '''add two secondary''' + for i in range(5): + secondary = secondaryStorage() + secondary.url = "nfs://localhost/path"+str(l) + str(i) + z.secondaryStorages.append(secondary) + + '''add default guest network''' + ips = iprange() 
+ ips.vlan = "26" + ips.startip = "172.16.26.2" + ips.endip = "172.16.26.100" + ips.gateway = "172.16.26.1" + ips.netmask = "255.255.255.0" + z.ipranges.append(ips) + + + zs.zones.append(z) + + '''Add one mgt server''' + mgt = managementServer() + mgt.mgtSvrIp = "localhost" + zs.mgtSvr.append(mgt) + + '''Add a database''' + db = dbServer() + db.dbSvr = "localhost" + + zs.dbSvr = db + + '''add global configuration''' + global_settings = {'expunge.delay': '60', + 'expunge.interval': '60', + 'expunge.workers': '3', + } + for k,v in global_settings.iteritems(): + cfg = configuration() + cfg.name = k + cfg.value = v + zs.globalConfig.append(cfg) + + ''''add loggers''' + testClientLogger = logger() + testClientLogger.name = "TestClient" + testClientLogger.file = "/tmp/testclient.log" + + testCaseLogger = logger() + testCaseLogger.name = "TestCase" + testCaseLogger.file = "/tmp/testcase.log" + + zs.logger.append(testClientLogger) + zs.logger.append(testCaseLogger) + + return zs + def generate_setup_config(config, file=None): describe = config if file is None: @@ -676,6 +796,7 @@ if __name__ == "__main__": parser.add_option("-i", "--input", action="store", default=None , dest="inputfile", help="input file") parser.add_option("-a", "--advanced", action="store_true", default=False, dest="advanced", help="use advanced networking") + parser.add_option("-s", "--advancedsg", action="store_true", default=False, dest="advancedsg", help="use advanced networking with security groups") parser.add_option("-o", "--output", action="store", default="./datacenterCfg", dest="output", help="the path where the json config file generated, by default is ./datacenterCfg") (options, args) = parser.parse_args() @@ -684,6 +805,8 @@ if __name__ == "__main__": config = get_setup_config(options.inputfile) if options.advanced: config = describe_setup_in_advanced_mode() + elif options.advancedsg: + config = describe_setup_in_advancedsg_mode() else: config = describe_setup_in_basic_mode() diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py index e8e3a8fedfb..b42d79d9fa5 100644 --- a/tools/marvin/marvin/deployDataCenter.py +++ b/tools/marvin/marvin/deployDataCenter.py @@ -320,6 +320,7 @@ class deployDataCenters(): createzone.securitygroupenabled = zone.securitygroupenabled createzone.localstorageenabled = zone.localstorageenabled createzone.networktype = zone.networktype + if zone.securitygroupenabled != "true": createzone.guestcidraddress = zone.guestcidraddress zoneresponse = self.apiClient.createZone(createzone) @@ -354,10 +355,37 @@ class deployDataCenters(): self.createVlanIpRanges(zone.networktype, zone.ipranges, \ zoneId, forvirtualnetwork=True) - if zone.networktype == "Advanced": + if zone.networktype == "Advanced" and zone.securitygroupenabled != "true": self.createpods(zone.pods, zoneId) self.createVlanIpRanges(zone.networktype, zone.ipranges, \ zoneId) + elif zone.networktype == "Advanced" and zone.securitygroupenabled == "true": + listnetworkoffering = listNetworkOfferings.listNetworkOfferingsCmd() + listnetworkoffering.name = "DefaultSharedNetworkOfferingWithSGService" + if zone.networkofferingname is not None: + listnetworkoffering.name = zone.networkofferingname + + listnetworkofferingresponse = \ + self.apiClient.listNetworkOfferings(listnetworkoffering) + + networkcmd = createNetwork.createNetworkCmd() + networkcmd.displaytext = "Shared SG enabled network" + networkcmd.name = "Shared SG enabled network" + networkcmd.networkofferingid = listnetworkofferingresponse[0].id 
+ networkcmd.zoneid = zoneId + + ipranges = zone.ipranges + if ipranges: + iprange = ipranges.pop() + networkcmd.startip = iprange.startip + networkcmd.endip = iprange.endip + networkcmd.gateway = iprange.gateway + networkcmd.netmask = iprange.netmask + networkcmd.vlan = iprange.vlan + + networkcmdresponse = self.apiClient.createNetwork(networkcmd) + networkId = networkcmdresponse.id + self.createpods(zone.pods, zoneId, networkId) self.createSecondaryStorages(zone.secondaryStorages, zoneId) self.createCacheStorages(zone.cacheStorages, zoneId) diff --git a/tools/marvin/marvin/integration/lib/base.py b/tools/marvin/marvin/integration/lib/base.py index ec1c34e12c7..503ed6446f5 100755 --- a/tools/marvin/marvin/integration/lib/base.py +++ b/tools/marvin/marvin/integration/lib/base.py @@ -527,6 +527,19 @@ class VirtualMachine: if isinstance(response, list): return response[0].password + def assign_virtual_machine(self, apiclient, account, domainid): + """Move a user VM to another user under same domain.""" + + cmd = assignVirtualMachine.assignVirtualMachineCmd() + cmd.virtualmachineid = self.id + cmd.account = account + cmd.domainid = domainid + try: + response = apiclient.assignVirtualMachine(cmd) + return response + except Exception as e: + raise Exception("assignVirtualMachine failed - %s" %e) + class Volume: """Manage Volume Life cycle @@ -1358,18 +1371,18 @@ class NetworkOffering: if "useVpc" in services: cmd.useVpc = services["useVpc"] - cmd.serviceProviderList = [] + cmd.serviceproviderlist = [] if "serviceProviderList" in services: for service, provider in services["serviceProviderList"].items(): - cmd.serviceProviderList.append({ + cmd.serviceproviderlist.append({ 'service': service, 'provider': provider }) - if "servicecapabilitylist" in services: - cmd.serviceCapabilityList = [] - for service, capability in services["servicecapabilitylist"].items(): + if "serviceCapabilityList" in services: + cmd.servicecapabilitylist = [] + for service, capability in services["serviceCapabilityList"].items(): for ctype, value in capability.items(): - cmd.serviceCapabilityList.append({ + cmd.servicecapabilitylist.append({ 'service': service, 'capabilitytype': ctype, 'capabilityvalue': value @@ -1787,7 +1800,7 @@ class Network: def create(cls, apiclient, services, accountid=None, domainid=None, networkofferingid=None, projectid=None, subdomainaccess=None, zoneid=None, - gateway=None, netmask=None, vpcid=None, guestcidr=None): + gateway=None, netmask=None, vpcid=None, aclid=None, guestcidr=None): """Create Network for account""" cmd = createNetwork.createNetworkCmd() cmd.name = services["name"] @@ -1833,6 +1846,8 @@ class Network: cmd.guestcidr = guestcidr if vpcid: cmd.vpcid = vpcid + if aclid: + cmd.aclid = aclid return Network(apiclient.createNetwork(cmd).__dict__) def delete(self, apiclient): @@ -1875,25 +1890,55 @@ class NetworkACL: self.__dict__.update(items) @classmethod - def create(cls, apiclient, networkid, services, traffictype=None): + def create(cls, apiclient, services, networkid=None, protocol=None, + number=None, aclid=None, action='Allow', traffictype=None, cidrlist=[]): """Create network ACL rules(Ingress/Egress)""" cmd = createNetworkACL.createNetworkACLCmd() - cmd.networkid = networkid + if "networkid" in services: + cmd.networkid = services["networkid"] + elif networkid: + cmd.networkid = networkid + if "protocol" in services: cmd.protocol = services["protocol"] + if services["protocol"] == 'ICMP': + cmd.icmptype = -1 + cmd.icmpcode = -1 + elif protocol: + cmd.protocol = protocol - 
if services["protocol"] == 'ICMP': - cmd.icmptype = -1 - cmd.icmpcode = -1 - else: + if "startport" in services: cmd.startport = services["startport"] + if "endport" in services: cmd.endport = services["endport"] - cmd.cidrlist = services["cidrlist"] - if traffictype: + if "cidrlist" in services: + cmd.cidrlist = services["cidrlist"] + elif cidrlist: + cmd.cidrlist = cidrlist + + if "traffictype" in services: + cmd.traffictype = services["traffictype"] + elif traffictype: cmd.traffictype = traffictype - # Defaulted to Ingress + + if "action" in services: + cmd.action = services["action"] + elif action: + cmd.action = action + + if "number" in services: + cmd.number = services["number"] + elif number: + cmd.number = number + + if "aclid" in services: + cmd.aclid = services["aclid"] + elif aclid: + cmd.aclid = aclid + + # Defaulted to Ingress return NetworkACL(apiclient.createNetworkACL(cmd).__dict__) def delete(self, apiclient): @@ -1912,6 +1957,50 @@ class NetworkACL: return(apiclient.listNetworkACLs(cmd)) +class NetworkACLList: + """Manage Network ACL lists lifecycle""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, services, name=None, description=None, vpcid=None): + """Create network ACL container list""" + + cmd = createNetworkACLList.createNetworkACLListCmd() + if "name" in services: + cmd.name = services["name"] + elif name: + cmd.name = name + + if "description" in services: + cmd.description = services["description"] + elif description: + cmd.description = description + + if "vpcid" in services: + cmd.vpcid = services["vpcid"] + elif vpcid: + cmd.vpcid = vpcid + + return NetworkACLList(apiclient.createNetworkACLList(cmd).__dict__) + + def delete(self, apiclient): + """Delete network acl list""" + + cmd = deleteNetworkACLList.deleteNetworkACLListCmd() + cmd.id = self.id + return apiclient.deleteNetworkACLList(cmd) + + @classmethod + def list(cls, apiclient, **kwargs): + """List Network ACL lists""" + + cmd = listNetworkACLLists.listNetworkACLListsCmd() + [setattr(cmd, k, v) for k, v in kwargs.items()] + return(apiclient.listNetworkACLLists(cmd)) + + class Vpn: """Manage VPN life cycle""" @@ -2785,7 +2874,7 @@ class VPC: @classmethod def create(cls, apiclient, services, vpcofferingid, - zoneid, networkDomain=None, account=None, domainid=None): + zoneid, networkDomain=None, account=None, domainid=None, **kwargs): """Creates the virtual private connection (VPC)""" cmd = createVPC.createVPCCmd() @@ -2793,13 +2882,15 @@ class VPC: cmd.displaytext = "-".join([services["displaytext"], random_gen()]) cmd.vpcofferingid = vpcofferingid cmd.zoneid = zoneid - cmd.cidr = services["cidr"] + if "cidr" in services: + cmd.cidr = services["cidr"] if account: cmd.account = account if domainid: cmd.domainid = domainid if networkDomain: cmd.networkDomain = networkDomain + [setattr(cmd, k, v) for k, v in kwargs.items()] return VPC(apiclient.createVPC(cmd).__dict__) def update(self, apiclient, name=None, displaytext=None): @@ -3203,3 +3294,83 @@ class Region: cmd.id = self.id region = apiclient.removeRegion(cmd) return region + + +class ApplicationLoadBalancer: + """Manage Application Load Balancers in VPC""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, services, name=None, sourceport=None, instanceport=22, + algorithm="roundrobin", scheme="internal", sourcenetworkid=None, networkid=None): + """Create Application Load Balancer""" + cmd = createLoadBalancer.createLoadBalancerCmd() + + 
if "name" in services: + cmd.name = services["name"] + elif name: + cmd.name = name + + if "sourceport" in services: + cmd.sourceport = services["sourceport"] + elif sourceport: + cmd.sourceport = sourceport + + if "instanceport" in services: + cmd.instanceport = services["instanceport"] + elif instanceport: + cmd.instanceport = instanceport + + if "algorithm" in services: + cmd.algorithm = services["algorithm"] + elif algorithm: + cmd.algorithm = algorithm + + if "scheme" in services: + cmd.scheme = services["scheme"] + elif scheme: + cmd.scheme = scheme + + if "sourceipaddressnetworkid" in services: + cmd.sourceipaddressnetworkid = services["sourceipaddressnetworkid"] + elif sourcenetworkid: + cmd.sourceipaddressnetworkid = sourcenetworkid + + if "networkid" in services: + cmd.networkid = services["networkid"] + elif networkid: + cmd.networkid = networkid + + return LoadBalancerRule(apiclient.createLoadBalancer(cmd).__dict__) + + def delete(self, apiclient): + """Delete application load balancer""" + cmd = deleteLoadBalancer.deleteLoadBalancerCmd() + cmd.id = self.id + apiclient.deleteLoadBalancerRule(cmd) + return + + def assign(self, apiclient, vms): + """Assign virtual machines to load balancing rule""" + cmd = assignToLoadBalancerRule.assignToLoadBalancerRuleCmd() + cmd.id = self.id + cmd.virtualmachineids = [str(vm.id) for vm in vms] + apiclient.assignToLoadBalancerRule(cmd) + return + + def remove(self, apiclient, vms): + """Remove virtual machines from load balancing rule""" + cmd = removeFromLoadBalancerRule.removeFromLoadBalancerRuleCmd() + cmd.id = self.id + cmd.virtualmachineids = [str(vm.id) for vm in vms] + apiclient.removeFromLoadBalancerRule(cmd) + return + + @classmethod + def list(cls, apiclient, **kwargs): + """List all appln load balancers""" + cmd = listLoadBalancers.listLoadBalancersCmd() + [setattr(cmd, k, v) for k, v in kwargs.items()] + return(apiclient.listLoadBalancerRules(cmd)) \ No newline at end of file diff --git a/tools/marvin/marvin/jsonHelper.py b/tools/marvin/marvin/jsonHelper.py index 37363bc8c91..79a6369499c 100644 --- a/tools/marvin/marvin/jsonHelper.py +++ b/tools/marvin/marvin/jsonHelper.py @@ -142,7 +142,7 @@ if __name__ == "__main__": nsp = getResultObj(result) print nsp[0].id - result = '{ "listzonesresponse" : { "count":1 ,"zone" : [ {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"4.4.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"5e818a11-6b00-3429-9a07-e27511d3169a","dhcpprovider":"DhcpServer"} ] } }' + result = '{ "listzonesresponse" : { "count":1 ,"zone" : [ {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"8.8.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"5e818a11-6b00-3429-9a07-e27511d3169a","dhcpprovider":"DhcpServer"} ] } }' zones = getResultObj(result) print zones[0].id res = authorizeSecurityGroupIngress.authorizeSecurityGroupIngressResponse() @@ -165,7 +165,7 @@ if __name__ == "__main__": asynJob = getResultObj(result) print asynJob - result = '{ "createzoneresponse" : { "zone" : {"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"4.4.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"3442f287-e932-3111-960b-514d1f9c4610","dhcpprovider":"DhcpServer"} } }' + result = '{ "createzoneresponse" : { "zone" : 
{"id":1,"name":"test0","dns1":"8.8.8.8","dns2":"8.8.4.4","internaldns1":"192.168.110.254","internaldns2":"192.168.110.253","networktype":"Basic","securitygroupsenabled":true,"allocationstate":"Enabled","zonetoken":"3442f287-e932-3111-960b-514d1f9c4610","dhcpprovider":"DhcpServer"} } }' res = createZone.createZoneResponse() zone = getResultObj(result, res) print zone.id diff --git a/tools/marvin/marvin/sandbox/advancedsg/__init__.py b/tools/marvin/marvin/sandbox/advancedsg/__init__.py new file mode 100644 index 00000000000..57823fcc162 --- /dev/null +++ b/tools/marvin/marvin/sandbox/advancedsg/__init__.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + diff --git a/tools/marvin/marvin/sandbox/advancedsg/advancedsg_env.py b/tools/marvin/marvin/sandbox/advancedsg/advancedsg_env.py new file mode 100644 index 00000000000..f9edf4d5803 --- /dev/null +++ b/tools/marvin/marvin/sandbox/advancedsg/advancedsg_env.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+''' +############################################################ +# Experimental state of scripts +# * Need to be reviewed +# * Only a sandbox +############################################################ +''' +import random +import marvin +from ConfigParser import SafeConfigParser +from optparse import OptionParser +from marvin.configGenerator import * + + +def getGlobalSettings(config): + for k, v in dict(config.items('globals')).iteritems(): + cfg = configuration() + cfg.name = k + cfg.value = v + yield cfg + + +def describeResources(config): + zs = cloudstackConfiguration() + + z = zone() + z.dns1 = config.get('environment', 'dns') + z.internaldns1 = config.get('environment', 'dns') + z.name = 'Sandbox-%s'%(config.get('cloudstack', 'hypervisor')) + z.networktype = 'Advanced' + z.securitygroupenabled = 'true' + + sgprovider = provider() + sgprovider.broadcastdomainrange = 'ZONE' + sgprovider.name = 'SecurityGroupProvider' + + pn = physical_network() + pn.name = "Sandbox-pnet" + pn.tags = ["cloud-simulator-pnet"] + pn.traffictypes = [traffictype("Guest"), + traffictype("Management", {"simulator" : "cloud-simulator-mgmt"})] + pn.isolationmethods = ["VLAN"] + pn.providers.append(sgprovider) + + z.physical_networks.append(pn) + + p = pod() + p.name = 'POD0' + p.gateway = config.get('cloudstack', 'private.gateway') + p.startip = config.get('cloudstack', 'private.pod.startip') + p.endip = config.get('cloudstack', 'private.pod.endip') + p.netmask = config.get('cloudstack', 'private.netmask') + + v = iprange() + v.gateway = config.get('cloudstack', 'guest.gateway') + v.startip = config.get('cloudstack', 'guest.vlan.startip') + v.endip = config.get('cloudstack', 'guest.vlan.endip') + v.netmask = config.get('cloudstack', 'guest.netmask') + v.vlan = config.get('cloudstack', 'guest.vlan') + z.ipranges.append(v) + + c = cluster() + c.clustername = 'C0' + c.hypervisor = config.get('cloudstack', 'hypervisor') + c.clustertype = 'CloudManaged' + + h = host() + h.username = 'root' + h.password = config.get('cloudstack', 'host.password') + h.url = 'http://%s'%(config.get('cloudstack', 'host')) + c.hosts.append(h) + + ps = primaryStorage() + ps.name = 'PS0' + ps.url = config.get('cloudstack', 'primary.pool') + c.primaryStorages.append(ps) + + p.clusters.append(c) + z.pods.append(p) + + secondary = secondaryStorage() + secondary.url = config.get('cloudstack', 'secondary.pool') + z.secondaryStorages.append(secondary) + + '''Add zone''' + zs.zones.append(z) + + '''Add mgt server''' + mgt = managementServer() + mgt.mgtSvrIp = config.get('environment', 'mshost') + mgt.user = config.get('environment', 'mshost.user') + mgt.passwd = config.get('environment', 'mshost.passwd') + zs.mgtSvr.append(mgt) + + '''Add a database''' + db = dbServer() + db.dbSvr = config.get('environment', 'mysql.host') + db.user = config.get('environment', 'mysql.cloud.user') + db.passwd = config.get('environment', 'mysql.cloud.passwd') + zs.dbSvr = db + + '''Add some configuration''' + [zs.globalConfig.append(cfg) for cfg in getGlobalSettings(config)] + + ''''add loggers''' + testClientLogger = logger() + testClientLogger.name = 'TestClient' + testClientLogger.file = 'testclient.log' + + testCaseLogger = logger() + testCaseLogger.name = 'TestCase' + testCaseLogger.file = 'testcase.log' + + zs.logger.append(testClientLogger) + zs.logger.append(testCaseLogger) + return zs + + +if __name__ == '__main__': + parser = OptionParser() + parser.add_option('-i', '--input', action='store', default='setup.properties', \ + dest='input', help='file 
containing environment setup information') + parser.add_option('-o', '--output', action='store', default='./sandbox.cfg', \ + dest='output', help='path where environment json will be generated') + + + (opts, args) = parser.parse_args() + + cfg_parser = SafeConfigParser() + cfg_parser.read(opts.input) + + cfg = describeResources(cfg_parser) + generate_setup_config(cfg, opts.output) diff --git a/tools/marvin/marvin/sandbox/advancedsg/setup.properties b/tools/marvin/marvin/sandbox/advancedsg/setup.properties new file mode 100644 index 00000000000..ee07ce23938 --- /dev/null +++ b/tools/marvin/marvin/sandbox/advancedsg/setup.properties @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +[globals] +#global settings in cloudstack +expunge.delay=60 +expunge.interval=60 +storage.cleanup.interval=300 +account.cleanup.interval=600 +expunge.workers=3 +workers=10 +vm.allocation.algorithm=random +vm.op.wait.interval=5 +guest.domain.suffix=sandbox.simulator +instance.name=QA +direct.agent.load.size=1000 +default.page.size=10000 +check.pod.cidrs=true +secstorage.allowed.internal.sites=10.147.28.0/24 +[environment] +dns=10.147.28.6 +mshost=localhost +mshost.user=root +mshost.passwd=password +mysql.host=localhost +mysql.cloud.user=cloud +mysql.cloud.passwd=cloud +[cloudstack] +#management network +private.gateway=10.147.29.1 +private.pod.startip=10.147.29.150 +private.pod.endip=10.147.29.159 +private.netmask=255.255.255.0 +#guest network +guest.gateway=10.147.31.1 +guest.vlan=31 +guest.vlan.startip=10.147.31.150 +guest.vlan.endip=10.147.31.159 +guest.netmask=255.255.255.0 +#hypervisor host information +hypervisor=Simulator +host=simulator0 +host.password=password +#storage pools +primary.pool=nfs://10.147.28.6:/export/home/sandbox/primary +secondary.pool=nfs://10.147.28.6:/export/home/sandbox/sstor diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py index 8dfd1b895d0..eeed3bfa8fd 100644 --- a/tools/marvin/setup.py +++ b/tools/marvin/setup.py @@ -45,7 +45,7 @@ setup(name="Marvin", url="https://builds.apache.org/job/cloudstack-marvin/", packages=["marvin", "marvin.cloudstackAPI", "marvin.integration", "marvin.integration.lib", "marvin.sandbox", - "marvin.sandbox.advanced", "marvin.sandbox.basic"], + "marvin.sandbox.advanced", "marvin.sandbox.advancedsg", "marvin.sandbox.basic"], license="LICENSE.txt", install_requires=[ "mysql-connector-python", diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 7f6df22797e..a86b2a25ca5 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -38,9 +38,9 @@ div.toolbar, div.toolbar, .multi-wizard .progress ul li, .multi-wizard.zone-wizard .select-container .field .select-array-item { -/*\*/ + /*\*/ display: block; -/**/ + /**/ -height: 1px; } @@ -1762,22 +1762,51 @@ div.list-view td.state.off 
span { .detail-group .main-groups table td.value .view-all { cursor: pointer; /*[empty]height:;*/ - border-left: 1px solid #9FA2A5; /*+border-radius:4px 0 0 4px;*/ -moz-border-radius: 4px 0 0 4px; -webkit-border-radius: 4px 0 0 4px; -khtml-border-radius: 4px 0 0 4px; border-radius: 4px 0 0 4px; - background: url(../images/sprites.png) no-repeat 100% -398px; float: right; - margin: 1px 0 0; - padding: 8px 33px 6px 15px; + margin: 7px 0 0; + padding: 0px; +} + +.detail-group .main-groups table td.value .view-all span { + display: block; + float: left; + padding: 5px 2px 8px 4px; + background: url(../images/gradients.png) repeat-x 0px -529px; + border-left: 1px solid #9FA2A6; + /*+border-radius:4px 0 0 4px;*/ + -moz-border-radius: 4px 0 0 4px; + -webkit-border-radius: 4px 0 0 4px; + -khtml-border-radius: 4px 0 0 4px; + border-radius: 4px 0 0 4px; + margin-top: -5px; +} + +.detail-group .main-groups table td.value .view-all .end { + background: url(../images/sprites.png) no-repeat 100% -397px; + float: right; + width: 22px; + height: 25px; + padding: 0px; + margin: -6px 0px 0px; } .detail-group .main-groups table td.value .view-all:hover { background-position: 100% -431px; } +.detail-group .main-groups table td.value .view-all:hover span { + background-position: 0px -566px; +} + +.detail-group .main-groups table td.value .view-all:hover div.end { + background-position: -618px -430px; +} + .detail-view .detail-group .button.add { clear: both; margin: 0px 21px 13px 0 !important; @@ -3461,7 +3490,7 @@ div.view table td.editable div.action.cancel { /*** Actions*/ table td.actions { cursor: default; -/*Make fixed*/ + /*Make fixed*/ width: 200px; min-width: 200px; max-width: 200px; @@ -3748,6 +3777,10 @@ Dialogs*/ font-size: 15px; } +.ui-dialog div.form-container span.message br { + margin-bottom: 13px; +} + .ui-dialog div.form-container div.form-item { width: 100%; display: inline-block; @@ -4329,7 +4362,7 @@ Dialogs*/ margin: 6px 9px 9px; padding: 9px; color: #FFFFFF; -/*Adjusting the font size for proper display*/ + /*Adjusting the font size for proper display*/ font-size: 10px; border-left: 1px solid #6A6A6A; border-right: 1px solid #6A6A6A; @@ -7857,6 +7890,13 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t font-weight: bold; } +.multi-edit .data .data-body .data-item tr td.add-vm p { + text-indent: 0; + padding-left: 9px; + margin-top: 3px; + margin-bottom: 6px; +} + .multi-edit .data .data-body .data-item tr td.multi-actions .icon { /*+placement:shift -3px -2px;*/ position: relative; @@ -7925,6 +7965,7 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t width: 87px !important; min-width: 87px !important; max-width: 87px !important; + font-size: 10px; } /** Header fields*/ @@ -7974,6 +8015,14 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t .multi-edit .header-fields input[type=submit] { } +/*Sortable*/ +.multi-edit table tbody tr td.reorder, +.multi-edit table thead tr th.reorder { + width: 30px !important; + min-width: 30px !important; + max-width: 30px !important; +} + /*Security Rules*/ .security-rules .multi-edit input { width: 69px; @@ -11715,24 +11764,28 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it background-position: 0px -613px; } -.restart .icon { +.restart .icon, +.releaseDedicatedZone .icon { background-position: 0px -63px; } -.restart:hover .icon { +.restart:hover .icon, +.releaseDedicatedZone:hover .icon { background-position: 0px -645px; } 
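Further back in this patch, base.py also gains an ApplicationLoadBalancer wrapper around the internal load-balancing APIs for VPC tiers. A minimal illustrative use; apiclient and tier are hypothetical fixtures:

    from marvin.integration.lib.base import ApplicationLoadBalancer

    lb = ApplicationLoadBalancer.create(apiclient, services={},
                                        name="internal-lb",
                                        sourceport=80, instanceport=8080,
                                        algorithm="roundrobin", scheme="internal",
                                        sourcenetworkid=tier.id,
                                        networkid=tier.id)

    # Extra keyword arguments, if given, are set directly on the list command.
    lbs = ApplicationLoadBalancer.list(apiclient)

As written, create() wraps the response in LoadBalancerRule rather than ApplicationLoadBalancer, so the assign()/remove()/delete() helpers on the new class are not reachable from the returned object, and list() builds a listLoadBalancersCmd but dispatches it through apiclient.listLoadBalancerRules(); both look worth a follow-up.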
.destroy .icon, .remove .icon, .delete .icon, -.decline .icon { +.decline .icon, +.deleteacllist .icon { background-position: 1px -92px; } .destroy:hover .icon, .remove:hover .icon, -.delete:hover .icon { +.delete:hover .icon, +.deleteacllist:hover .icon { background-position: 1px -674px; } @@ -11818,13 +11871,15 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it .downloadVolume .icon, .downloadTemplate .icon, -.downloadISO .icon { +.downloadISO .icon, +.replaceacllist .icon { background-position: -35px -125px; } .downloadVolume:hover .icon, .downloadTemplate:hover .icon, -.downloadISO:hover .icon { +.downloadISO:hover .icon, +.replaceacllist:hover .icon { background-position: -35px -707px; } @@ -11857,26 +11912,30 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it } .add .icon, -.addNew .icon { +.addNew .icon, +.assignVm .icon { background-position: -37px -61px; } .add:hover .icon, -.addNew:hover .icon { +.addNew:hover .icon, +.assignVm:hover .icon { background-position: -37px -643px; } .create .icon, .createTemplate .icon, .enableSwift .icon, -.addVM .icon { +.addVM .icon, +.dedicateZone .icon { background-position: -69px -63px; } .create:hover .icon, .createTemplate:hover .icon, .enableSwift:hover .icon, -.addVM:hover .icon { +.addVM:hover .icon, +.dedicateZone:hover .icon { background-position: -69px -645px; } diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp index ded9ea063d4..a5f0662e0bf 100644 --- a/ui/dictionary.jsp +++ b/ui/dictionary.jsp @@ -25,6 +25,9 @@ under the License. <% long now = System.currentTimeMillis(); %>
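Stepping back from the UI changes, the new marvin.sandbox.advancedsg package (registered in setup.py above) mirrors the existing advanced and basic sandboxes: advancedsg_env.py reads setup.properties and emits a Marvin zone description for an advanced zone with security groups enabled. Its __main__ block boils down to the following condensed sketch (not a verbatim excerpt):

    from ConfigParser import SafeConfigParser
    from marvin.configGenerator import generate_setup_config
    from marvin.sandbox.advancedsg.advancedsg_env import describeResources

    parser = SafeConfigParser()
    parser.read('setup.properties')   # the properties file added above
    generate_setup_config(describeResources(parser), './sandbox.cfg')

The resulting sandbox.cfg can then be deployed in the same way as the generated advanced-SG configuration sketched earlier.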