Marvin IP-cleared tests for various CloudStack components

One big blob of all the tests that were posted for IP clearance several
months ago. The IP clearance VOTE has passed on general@

Ref: http://markmail.org/thread/xareczan2kx4hhom

RAT check passed.
Prasanna Santhanam 2013-05-14 10:24:27 +05:30
parent 1a31a3bb10
commit 894413e362
25 changed files with 43872 additions and 212 deletions

File diff suppressed because it is too large


@@ -0,0 +1,814 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for dedicated Host high availability
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin import remoteSSHClient
import datetime
class Services:
""" Dedicated host HA test cases """
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "HA",
"lastname": "HA",
"username": "HA",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering_with_ha": {
"name": "Tiny Instance With HA Enabled",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"service_offering_without_ha": {
"name": "Tiny Instance Without HA",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"virtual_machine": {
"displayname": "VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"timeout": 100,
}
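# The tests below are tagged with @attr(configuration="ha.tag"), i.e. they assume the
# "ha.tag" global setting names the host tag used to dedicate a host to HA. A minimal
# sketch of how that prerequisite could be verified from an admin API client is shown
# here for illustration only (it is not executed by these tests; "apiclient" stands
# for any admin API client):
#
#   cmd = listConfigurations.listConfigurationsCmd()
#   cmd.name = "ha.tag"
#   configs = apiclient.listConfigurations(cmd)
#   assert configs and configs[0].value, "ha.tag global setting should name the HA host tag"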
class TestHostHighAvailability(cloudstackTestCase):
""" Dedicated host HA test cases """
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestHostHighAvailability,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(
cls.api_client,
cls.services
)
cls.zone = get_zone(
cls.api_client,
cls.services
)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering_with_ha = ServiceOffering.create(
cls.api_client,
cls.services["service_offering_with_ha"],
offerha=True
)
cls.service_offering_without_ha = ServiceOffering.create(
cls.api_client,
cls.services["service_offering_without_ha"],
offerha=False
)
cls._cleanup = [
cls.service_offering_with_ha,
cls.service_offering_without_ha,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
self.testClient.close()
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(configuration = "ha.tag")
@attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator"])
def test_01_vm_deployment_with_compute_offering_with_ha_enabled(self):
""" Test VM deployments (Create HA enabled Compute Service Offering and VM) """
# Steps,
#1. Create a Compute service offering with the “Offer HA” option selected.
#2. Create a Guest VM with the compute service offering created above.
# Validations,
#1. Ensure that the offering is created and that in the UI the “Offer HA” field is enabled (Yes)
#The listServiceOffering API should list “offerha” as true.
#2. Select the newly created VM and ensure that the Compute offering field value lists the compute service offering that was selected.
# Also, check that the HA Enabled field is enabled “Yes”.
#list and validate above created service offering with Ha enabled
list_service_response = list_service_offering(
self.apiclient,
id=self.service_offering_with_ha.id
)
self.assertEqual(
isinstance(list_service_response, list),
True,
"listServiceOfferings returned invalid object in response."
)
self.assertNotEqual(
len(list_service_response),
0,
"listServiceOfferings returned empty list."
)
self.assertEqual(
list_service_response[0].offerha,
True,
"The service offering is not HA enabled"
)
#create virtual machine with the service offering with Ha enabled
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_with_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"listVirtualMachines returned invalid object in response."
)
self.assertNotEqual(
len(vms),
0,
"listVirtualMachines returned empty list."
)
self.debug("Deployed VM on host: %s" % vms[0].hostid)
self.assertEqual(
vms[0].haenable,
True,
"VM not created with HA enable tag"
)
@attr(configuration = "ha.tag")
@attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"])
def test_02_no_vm_creation_on_host_with_haenabled(self):
""" Verify you can not create new VMs on hosts with an ha.tag """
# Steps,
#1. Fresh install CS (Bonita) that supports this feature
#2. Create Basic zone, pod, cluster, add 3 hosts to cluster (host1, host2, host3), secondary & primary Storage
#3. When adding host3, assign the HA host tag (a tagging sketch follows the validations below).
#4. You should already have a compute service offering with HA created above. If not, create one for HA.
#5. Create VMs with the service offering with and without the HA tag
# Validations,
#Check to make sure the newly created VM is not on any HA enabled hosts
#The VM should be created only on host1 or host2 and never host3 (HA enabled)
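# A minimal sketch of how a host could be dedicated to HA by assigning it the host tag
# named in the "ha.tag" global setting (illustration only, not executed by this test;
# "host3_id" and the tag value "ha_host" are assumed placeholders):
#
#   cmd = updateHost.updateHostCmd()
#   cmd.id = host3_id          # id of the host to reserve for HA
#   cmd.hosttags = "ha_host"   # must match the value of the ha.tag global setting
#   self.apiclient.updateHost(cmd)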
#create and verify virtual machine with HA enabled service offering
virtual_machine_with_ha = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_with_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_with_ha.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"listVirtualMachines returned invalid object in response."
)
self.assertNotEqual(
len(vms),
0,
"listVirtualMachines returned empty list."
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
#validate the virtual machine created is host Ha enabled
list_hosts_response = list_hosts(
self.apiclient,
id=vm.hostid
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"listHosts returned invalid object in response."
)
self.assertNotEqual(
len(list_hosts_response),
0,
"listHosts returned empty list in response."
)
self.assertEqual(
list_hosts_response[0].hahost,
False,
"VM created on HA enabled host."
)
#create and verify virtual machine with Ha disabled service offering
virtual_machine_without_ha = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_without_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_without_ha.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"listVirtualMachines returned invalid object in response."
)
self.assertNotEqual(
len(vms),
0,
"listVirtualMachines returned empty list."
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
#verify that the virtual machine created on the host is Ha disabled
list_hosts_response = list_hosts(
self.apiclient,
id=vm.hostid
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"listHosts returned invalid object in response."
)
self.assertNotEqual(
len(list_hosts_response),
0,
"listHosts returned empty list."
)
host = list_hosts_response[0]
self.assertEqual(
host.hahost,
False,
"VM without HA offering was created on an HA tagged host."
)
@attr(configuration = "ha.tag")
@attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"])
def test_03_cant_migrate_vm_to_host_with_ha_positive(self):
""" Verify you can not migrate VMs to hosts with an ha.tag (positive) """
# Steps,
#1. Create a Compute service offering with the “Offer HA” option selected.
#2. Create a Guest VM with the compute service offering created above.
#3. Select the VM and migrate VM to another host. Choose a “Suitable” host (i.e. host2)
# Validations
#The option from the “Migrate instance to another host” dialog box” should list host3 as “Not Suitable” for migration.
#Confirm that the VM is migrated to the “Suitable” host you selected (i.e. host2)
#create and verify the virtual machine with HA enabled service offering
virtual_machine_with_ha = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_with_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_with_ha.id,
listall=True,
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
#Find out a Suitable host for VM migration
list_hosts_response = list_hosts(
self.apiclient,
virtualmachineid=vm.id
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"The listHosts API returned the invalid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"The listHosts returned nothing."
)
suitableHost = None
for host in list_hosts_response:
if host.suitableformigration and host.id != vm.hostid:
suitableHost = host
break
self.assertTrue(suitableHost is not None, "suitablehost should not be None")
#Migration of the VM to a suitable host
self.debug("Migrating VM-ID: %s to Host: %s" % (self.vm.id, suitableHost.id))
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
cmd.hostid = suitableHost.id
cmd.virtualmachineid = self.vm.id
self.apiclient.migrateVirtualMachine(cmd)
#Verify that the VM migrated to a targeted Suitable host
list_vm_response = list_virtual_machines(
self.apiclient,
id=vm.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"The listVirtualMachines returned the invalid list."
)
self.assertNotEqual(
list_vm_response,
None,
"The listVirtualMachines API returned nothing."
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.id,
vm.id,
"The virtual machine id does not match the id returned by listVirtualMachines."
)
self.assertEqual(
vm_response.hostid,
suitableHost.id,
"The VM is not migrated to targeted suitable host."
)
@attr(configuration = "ha.tag")
@attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"])
def test_04_cant_migrate_vm_to_host_with_ha_negative(self):
""" Verify you can not migrate VMs to hosts with an ha.tag (negative) """
# Steps,
#1. Create a Compute service offering with the “Offer HA” option selected.
#2. Create a Guest VM with the compute service offering created above.
#3. Select the VM and migrate VM to another host. Choose a “Not Suitable” host.
# Validations,
#The option from the “Migrate instance to another host” dialog box” should list host3 as “Not Suitable” for migration.
#By design, The Guest VM can STILL can be migrated to host3 if the admin chooses to do so.
#create and verify virtual machine with HA enabled service offering
virtual_machine_with_ha = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_with_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_with_ha.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"The listVirtualMachines returned invalid object in response."
)
self.assertNotEqual(
len(vms),
0,
"The listVirtualMachines returned empty response."
)
vm = vms[0]
self.debug("Deployed VM on host: %s" % vm.hostid)
#Find out Non-Suitable host for VM migration
list_hosts_response = list_hosts(
self.apiclient,
virtualmachineid=vm.id
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"listHosts returned invalid object in response."
)
self.assertNotEqual(
len(list_hosts_response),
0,
"listHosts returned empty response."
)
notSuitableHost = None
for host in list_hosts_response:
if not host.suitableformigration and host.id != vm.hostid:
notSuitableHost = host
break
self.assertTrue(notSuitableHost is not None, "notsuitablehost should not be None")
#Migrate VM to Non-Suitable host
self.debug("Migrating VM-ID: %s to Host: %s" % (vm.id, notSuitableHost.id))
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
cmd.hostid = notSuitableHost.id
cmd.virtualmachineid = vm.id
self.apiclient.migrateVirtualMachine(cmd)
#Verify that the virtual machine got migrated to targeted Non-Suitable host
list_vm_response = list_virtual_machines(
self.apiclient,
id=vm.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"listVirtualMachine returned invalid object in response."
)
self.assertNotEqual(
len(list_vm_response),
0,
"listVirtualMachines returned empty response."
)
self.assertEqual(
list_vm_response[0].id,
vm.id,
"The virtual machine id does not match the id returned by listVirtualMachines."
)
self.assertEqual(
list_vm_response[0].hostid,
notSuitableHost.id,
"The destination host id of the migrated VM does not match the targeted host."
)
@attr(configuration = "ha.tag")
@attr(speed = "slow")
@attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"])
def test_05_no_vm_with_ha_gets_migrated_to_ha_host_in_live_migration(self):
""" Verify that none of the VMs with HA enabled migrate to an ha tagged host during live migration """
# Steps,
#1. Fresh install CS (Bonita) that supports this feature
#2. Create Basic zone, pod, cluster, add 3 hosts to cluster (host1, host2, host3), secondary & primary Storage
#3. When adding host3, assign the HA host tag.
#4. Create VMs with and without the Compute Service Offering with the HA tag.
#5. Note the VMs on host1 and whether any of the VMs have their “HA enabled” flags enabled.
#6. Put host1 into maintenance mode.
# Validations,
#1. Make sure the VMs are created on either host1 or host2 and not on host3
#2. Putting host1 into maintenance mode should trigger a live migration. Make sure the VMs are not migrated to HA enabled host3.
# create and verify virtual machine with HA enabled service offering
virtual_machine_with_ha = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_with_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_with_ha.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm_with_ha_enabled = vms[0]
#Verify the virtual machine got created on non HA host
list_hosts_response = list_hosts(
self.apiclient,
id=vm_with_ha_enabled.hostid
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"Check Host is available"
)
self.assertEqual(
list_hosts_response[0].hahost,
False,
"The HA enabled VM should not be deployed on a host carrying the HA host tag"
)
#put the Host in maintenance mode
self.debug("Enabling maintenance mode for host %s" % vm_with_ha_enabled.hostid)
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
cmd.id = vm_with_ha_enabled.hostid
self.apiclient.prepareHostForMaintenance(cmd)
timeout = self.services["timeout"]
#verify the VM live migration happened to another running host
self.debug("Waiting for VM to come up")
wait_for_vm(
self.apiclient,
virtualmachineid=vm_with_ha_enabled.id,
interval=timeout
)
vms = VirtualMachine.list(
self.apiclient,
id=vm_with_ha_enabled.id,
listall=True,
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm_with_ha_enabled1 = vms[0]
list_hosts_response = list_hosts(
self.apiclient,
id=vm_with_ha_enabled1.hostid
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"Check Host is available"
)
self.assertEqual(
list_hosts_response[0].hahost,
False,
"The HA enabled VM should not have been migrated to a host carrying the HA host tag"
)
self.debug("Disabling the maintenance mode for host %s" % vm_with_ha_enabled.hostid)
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
cmd.id = vm_with_ha_enabled.hostid
self.apiclient.cancelHostMaintenance(cmd)
@attr(configuration = "ha.tag")
@attr(speed = "slow")
@attr(tags = ["advanced", "advancedns", "sg", "basic", "eip", "simulator", "multihost"])
def test_06_no_vm_without_ha_gets_migrated_to_ha_host_in_live_migration(self):
""" Verify that none of the VMs without HA enabled migrate to an ha tagged host during live migration """
# Steps,
#1. Fresh install CS (Bonita) that supports this feature
#2. Create Basic zone, pod, cluster, add 3 hosts to cluster (host1, host2, host3), secondary & primary Storage
#3. When adding host3, assign the HA host tag.
#4. Create VMs with and without the Compute Service Offering with the HA tag.
#5. Note the VMs on host1 and whether any of the VMs have their “HA enabled” flags enabled.
#6. Put host1 into maintenance mode.
# Validations,
#1. Make sure the VMs are created on either host1 or host2 and not on host3
#2. Putting host1 into maintenance mode should trigger a live migration. Make sure the VMs are not migrated to HA enabled host3.
# create and verify virtual machine with HA disabled service offering
virtual_machine_without_ha = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering_without_ha.id
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine_without_ha.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
vm_with_ha_disabled = vms[0]
#Verify the virtual machine got created on non HA host
list_hosts_response = list_hosts(
self.apiclient,
id=vm_with_ha_disabled.hostid
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"Check Host is available"
)
self.assertEqual(
list_hosts_response[0].hahost,
False,
"The VM without HA should not be deployed on a host carrying the HA host tag"
)
#put the Host in maintenance mode
self.debug("Enabling maintenance mode for host %s" % vm_with_ha_disabled.hostid)
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
cmd.id = vm_with_ha_disabled.hostid
self.apiclient.prepareHostForMaintenance(cmd)
timeout = self.services["timeout"]
#verify the VM live migration happened to another running host
self.debug("Waiting for VM to come up")
wait_for_vm(
self.apiclient,
virtualmachineid=vm_with_ha_disabled.id,
interval=timeout
)
vms = VirtualMachine.list(
self.apiclient,
id=vm_with_ha_disabled.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should return valid response for deployed VM"
)
self.assertNotEqual(
len(vms),
0,
"List VMs should return valid response for deployed VM"
)
list_hosts_response = list_hosts(
self.apiclient,
id=vms[0].hostid
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
"Check Host is available"
)
self.assertEqual(
list_hosts_response[0].hahost,
False,
"The VM without HA should not have been migrated to a host carrying the HA host tag"
)
self.debug("Disabling the maintenance mode for host %s" % vm_with_ha_disabled.hostid)
cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
cmd.id = vm_with_ha_disabled.hostid
self.apiclient.cancelHostMaintenance(cmd)
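# A hedged sketch of how a Marvin suite such as the one above is typically run with
# the nose plugin; the config path and file name below are placeholders, not part of
# this commit:
#
#   nosetests --with-marvin --marvin-config=/path/to/advanced.cfg \
#       test_host_high_availability.py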

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -5,9 +5,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY

File diff suppressed because it is too large


@@ -1,4 +1,3 @@
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,891 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VM life cycle in VPC network functionality
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient
import datetime
class Services:
"""Test VM life cycle in VPC network services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 64,
},
"service_offering_1": {
"name": "Tiny Instance- tagged host 1",
"displaytext": "Tiny off-tagged host2",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 64,
"tags": "HOST_TAGS_HERE"
},
"service_offering_2": {
"name": "Tiny Instance- tagged host 2",
"displaytext": "Tiny off-tagged host2",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 64,
"tags": "HOST_TAGS_HERE"
},
"network_offering": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"network_offering_no_lb": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"network_off_shared": {
"name": 'Shared Network offering',
"displaytext": 'Shared Network offering',
"guestiptype": 'Shared',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"specifyIpRanges": True,
"specifyVlan": True
},
"vpc_offering": {
"name": 'VPC off',
"displaytext": 'VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat',
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.0.0.1/24'
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0',
"limit": 5,
# Max networks allowed as per hypervisor
# Xenserver -> 5, VMWare -> 9
},
"lbrule": {
"name": "SSH",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"openfirewall": False,
"startport": 2222,
"endport": 2222,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"fw_rule": {
"startport": 1,
"endport": 6000,
"cidr": '0.0.0.0/0',
# Any network (For creating FW rule)
"protocol": "TCP"
},
"http_rule": {
"startport": 80,
"endport": 80,
"cidrlist": '0.0.0.0/0',
"protocol": "ICMP"
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
"userdata": 'This is sample data',
},
"ostype": 'CentOS 5.3 (64-bit)',
# Cent OS 5.3 (64 bit)
"sleep": 60,
"timeout": 10,
"mode": 'advanced'
}
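# The "HOST_TAGS_HERE" placeholders in service_offering_1 and service_offering_2 above
# are meant to be replaced with host tags already assigned to two different hosts in
# the cluster, so that VMs created from the two offerings land on different hosts (as
# the test comments below describe). A hedged example, assuming host1 carries the tag
# "host1_tag" and host2 carries "host2_tag":
#
#   services["service_offering_1"]["tags"] = "host1_tag"
#   services["service_offering_2"]["tags"] = "host2_tag"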
@unittest.skip("No suitable setup available for testing")
class TestVMLifeCycleHostmaintenance(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVMLifeCycleHostmaintenance,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering_1 = ServiceOffering.create(
cls.api_client,
cls.services["service_offering_1"]
)
cls.service_offering_2 = ServiceOffering.create(
cls.api_client,
cls.services["service_offering_2"]
)
cls.vpc_off = VpcOffering.create(
cls.api_client,
cls.services["vpc_offering"]
)
cls.vpc_off.update(cls.api_client, state='Enabled')
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.services["vpc"]["cidr"] = '10.1.1.1/16'
cls.vpc = VPC.create(
cls.api_client,
cls.services["vpc"],
vpcofferingid=cls.vpc_off.id,
zoneid=cls.zone.id,
account=cls.account.account.name,
domainid=cls.account.account.domainid
)
cls.nw_off = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=False
)
# Enable Network offering
cls.nw_off.update(cls.api_client, state='Enabled')
# Creating network using the network offering created
cls.network_1 = Network.create(
cls.api_client,
cls.services["network"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
networkofferingid=cls.nw_off.id,
zoneid=cls.zone.id,
gateway='10.1.1.1',
vpcid=cls.vpc.id
)
cls.nw_off_no_lb = NetworkOffering.create(
cls.api_client,
cls.services["network_offering_no_lb"],
conservemode=False
)
# Enable Network offering
cls.nw_off_no_lb.update(cls.api_client, state='Enabled')
# Creating network using the network offering created
cls.network_2 = Network.create(
cls.api_client,
cls.services["network"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
networkofferingid=cls.nw_off_no_lb.id,
zoneid=cls.zone.id,
gateway='10.1.2.1',
vpcid=cls.vpc.id
)
# Spawn an instance in that network
cls.vm_1 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_1.id,
networkids=[str(cls.network_1.id)]
)
# Spawn an instance in that network
cls.vm_2 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_1.id,
networkids=[str(cls.network_1.id)]
)
cls.vm_3 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_2.id,
networkids=[str(cls.network_2.id)]
)
routers = Router.list(
cls.api_client,
account=cls.account.account.name,
domainid=cls.account.account.domainid,
listall=True
)
if isinstance(routers, list):
cls.vpcvr = routers[0]
cls._cleanup = [
cls.service_offering_1,
cls.service_offering_2,
cls.nw_off,
cls.nw_off_no_lb,
]
return
@classmethod
def tearDownClass(cls):
try:
cls.account.delete(cls.api_client)
wait_for_cleanup(cls.api_client, ["account.cleanup.interval"])
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
# Waiting for network cleanup to delete vpc offering
wait_for_cleanup(cls.api_client, ["network.gc.wait",
"network.gc.interval"])
cls.vpc_off.delete(cls.api_client)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.debug("Check the status of VPC virtual router")
routers = Router.list(
self.apiclient,
networkid=self.network_1.id,
listall=True
)
if not isinstance(routers, list):
raise Exception("No response from list routers API")
self.router = routers[0]
if self.router.state == "Running":
Router.stop(self.apiclient, id=self.router.id)
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def validate_vm_deployment(self):
"""Validates VM deployment on different hosts"""
vms = VirtualMachine.list(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
networkid=self.network_1.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs shall return a valid response"
)
host_1 = vms[0].hostid
self.debug("Host for network 1: %s" % vms[0].hostid)
vms = VirtualMachine.list(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
networkid=self.network_2.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs shall return a valid response"
)
host_2 = vms[0].hostid
self.debug("Host for network 2: %s" % vms[0].hostid)
self.assertNotEqual(
host_1,
host_2,
"Both the virtual machines should be deployed on different hosts"
)
return
@attr(tags=["advanced", "intervlan"])
def test_01_enable_maintenance_with_vpc_nw(self):
""" Test enable Maintenance Mode on Hosts which have VPC elements
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Add network1(10.1.1.1/24) and network2(10.1.2.1/24) to this VPC.
# 3. Deploy vm1 and vm2 in network1 and vm3 and vm4 in network2. Make
# sure vm1 and vm3 are deployed on one host in the cluster while
# vm2 and vm4 are deployed on the other host in the cluster. This
# can be done using host's tags & service offerings with host tags
# Steps:
# 1.Enable Maintenance on one of host on which VPCVR is present
# Validations:
# 1. Successfully push the host into maintenance mode.
# 2. VMs present on the above host should successfully migrate to the
# other host present in the cluster
self.validate_vm_deployment()
self.debug("Stop the host on which the VPC virtual router is running")
try:
Host.enableMaintenance(self.apiclient, id=self.vpcvr.hostid)
except Exception as e:
self.fail("Failed to enable maintenance mode on host: %s" % e)
self.debug(
"Check if all instances belonging to the account are up again?")
routers = Router.list(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers shall return a valid VPCVR for account"
)
for router in routers:
self.assertEqual(
router.state,
"Running",
"Router state should be running after migration"
)
vms = VirtualMachine.list(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"VM response should return instances running for account"
)
for vm in vms:
self.assertEqual(
vm.state,
"Running",
"VM state should be running after migration"
)
return
@attr(tags=["advanced", "intervlan"])
def test_02_cancel_maintenance(self):
""" Test cancel Maintenance Mode on the above Hosts + Migrate VMs Back
"""
# Steps
# 1. Cancel Maintenance Mode on the host.
# 2. Migrate the VMs back onto the host on which Maintenance mode is
# cancelled.
# Validate the following
# 1. Successfully cancel the Maintenance mode on the host.
# 2. Migrate the VMs back successfully onto the host.
# 3. Check that the network connectivity exists with the migrated VMs.
self.debug("Cancel host maintenence on which the VPCVR is running")
try:
Host.cancelMaintenance(self.apiclient, id=self.vpcvr.hostid)
except Exception as e:
self.fail("Failed to enable maintenance mode on host: %s" % e)
self.debug(
"Migrating the instances back to the host: %s" %
self.vpcvr.hostid)
try:
cmd = migrateSystemVm.migrateSystemVmCmd()
cmd.hostid = self.vpcvr.hostid
cmd.virtualmachineid = self.vpcvr.id
self.apiclient.migrateSystemVm(cmd)
except Exception as e:
self.fail("Failed to migrate VPCVR back: %s" % e)
self.debug("Check the status of router after migration")
routers = Router.list(
self.apiclient,
id=self.vpcvr.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers shall return the valid response"
)
self.assertEqual(
routers[0].state,
"Running",
"Router state should be running"
)
# TODO: Check for the network connectivity
return
@attr(tags=["advanced", "intervlan"])
def test_03_reconnect_host(self):
""" Test reconnect Host which has VPC elements
"""
# Steps:
# 1.Reconnect one of the host on which VPC Virtual Router is present.
# Validate the following
# 1. Host should successfully reconnect.
# 2. Network connectivity to all the VMs on the host should not be
# affected due to reconnection.
self.debug("Reconnecting the host where VPC VR is running")
try:
Host.reconnect(self.apiclient, id=self.vpcvr.hostid)
except Exception as e:
self.fail("Failed to reconnect to host: %s" % e)
self.debug("Check the status of router after migration")
routers = Router.list(
self.apiclient,
id=self.vpcvr.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers shall return the valid response"
)
self.assertEqual(
routers[0].state,
"Running",
"Router state should be running"
)
# TODO: Check for the network connectivity
return
@unittest.skip("No suitable setup available for testing")
class TestVPCNetworkRules(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVPCNetworkRules,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering_1 = ServiceOffering.create(
cls.api_client,
cls.services["service_offering_1"]
)
cls.service_offering_2 = ServiceOffering.create(
cls.api_client,
cls.services["service_offering_2"]
)
cls.vpc_off = VpcOffering.create(
cls.api_client,
cls.services["vpc_offering"]
)
cls.vpc_off.update(cls.api_client, state='Enabled')
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.services["vpc"]["cidr"] = '10.1.1.1/16'
cls.vpc = VPC.create(
cls.api_client,
cls.services["vpc"],
vpcofferingid=cls.vpc_off.id,
zoneid=cls.zone.id,
account=cls.account.account.name,
domainid=cls.account.account.domainid
)
cls.nw_off = NetworkOffering.create(
cls.api_client,
cls.services["network_offering"],
conservemode=False
)
# Enable Network offering
cls.nw_off.update(cls.api_client, state='Enabled')
# Creating network using the network offering created
cls.network_1 = Network.create(
cls.api_client,
cls.services["network"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
networkofferingid=cls.nw_off.id,
zoneid=cls.zone.id,
gateway='10.1.1.1',
vpcid=cls.vpc.id
)
cls.nw_off_no_lb = NetworkOffering.create(
cls.api_client,
cls.services["network_offering_no_lb"],
conservemode=False
)
# Enable Network offering
cls.nw_off_no_lb.update(cls.api_client, state='Enabled')
# Creating network using the network offering created
cls.network_2 = Network.create(
cls.api_client,
cls.services["network"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
networkofferingid=cls.nw_off_no_lb.id,
zoneid=cls.zone.id,
gateway='10.1.2.1',
vpcid=cls.vpc.id
)
# Spawn an instance in that network
cls.vm_1 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_1.id,
networkids=[str(cls.network_1.id)]
)
# Spawn an instance in that network
cls.vm_2 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_2.id,
networkids=[str(cls.network_1.id)]
)
cls.vm_3 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_1.id,
networkids=[str(cls.network_2.id)]
)
cls.vm_4 = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering_2.id,
networkids=[str(cls.network_2.id)]
)
cls._cleanup = [
cls.service_offering_1,
cls.service_offering_2,
cls.nw_off,
cls.nw_off_no_lb,
]
return
@classmethod
def tearDownClass(cls):
try:
cls.account.delete(cls.api_client)
wait_for_cleanup(cls.api_client, ["account.cleanup.interval"])
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
# Waiting for network cleanup to delete vpc offering
wait_for_cleanup(cls.api_client, ["network.gc.wait",
"network.gc.interval"])
cls.vpc_off.delete(cls.api_client)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def validate_vm_deployment(self):
"""Validates VM deployment on different hosts"""
vms = VirtualMachine.list(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
networkid=self.network_1.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs shall return a valid response"
)
host_1 = vms[0].hostid
self.debug("Host for network 1: %s" % vms[0].hostid)
vms = VirtualMachine.list(
self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
networkid=self.network_2.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs shall return a valid response"
)
host_2 = vms[0].hostid
self.debug("Host for network 2: %s" % vms[0].hostid)
self.assertNotEqual(
host_1,
host_2,
"Both the virtual machines should be deployed on different hosts"
)
return
@attr(tags=["advanced", "intervlan"])
def test_list_pf_rules_for_vpc(self):
""" Test List Port Forwarding Rules & vms belonging to a VPC
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Add network1(10.1.1.1/24) and network2(10.1.2.1/24) to this VPC.
# 3. Deploy vm1 and vm2 in network1 and vm3 and vm4 in network2.
# Make sure vm1 and vm3 are deployed on one host in the cluster
# while vm2 and vm4 are deployed on the other host in the cluster.
# This can be done using host's tags and service offerings with
# host tags.
# 4. Create a PF rule for vms in network1.
# 5. Create a PF rule for vms in network2.
# Steps:
# 1. List all the Port Forwarding Rules belonging to a VPC
# 2. Successfully List the Port Forwarding Rules belonging to the VPC
# 3. List the VMs on network1 for selection for the PF Rule
# 4. Successfully list the VMs for Port Forwarding Rule creation
self.debug("Associating public IP for network: %s" %
self.network_1.name)
public_ip_1 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.account.name,
zoneid=self.zone.id,
domainid=self.account.account.domainid,
networkid=self.network_1.id,
vpcid=self.vpc.id
)
self.debug("Associated %s with network %s" % (
public_ip_1.ipaddress.ipaddress,
self.network_1.id
))
nat_rule_1 = NATRule.create(
self.apiclient,
self.vm_1,
self.services["natrule"],
ipaddressid=public_ip_1.ipaddress.id,
openfirewall=False,
networkid=self.network_1.id,
vpcid=self.vpc.id
)
self.debug("Associating public IP for network: %s" %
self.network_2.name)
public_ip_2 = PublicIPAddress.create(
self.apiclient,
accountid=self.account.account.name,
zoneid=self.zone.id,
domainid=self.account.account.domainid,
networkid=self.network_2.id,
vpcid=self.vpc.id
)
self.debug("Associated %s with network %s" % (
public_ip_2.ipaddress.ipaddress,
self.network_2.id
))
nat_rule_2 = NATRule.create(
self.apiclient,
self.vm_3,
self.services["natrule"],
ipaddressid=public_ip_2.ipaddress.id,
openfirewall=False,
networkid=self.network_2.id,
vpcid=self.vpc.id
)
self.debug("Listing all the PF rules belonging to VPC")
nat_rules = NATRule.list(
self.apiclient,
vpcid=self.vpc.id,
listall=True
)
self.assertEqual(
isinstance(nat_rules, list),
True,
"List NAT rules should return the valid list"
)
self.assertEqual(
len(nat_rules),
2,
"List NAT for VPC shall return all NAT rules belonging to VPC"
)
for nat_rule in nat_rules:
self.assertEqual(
nat_rule.vpcid,
self.vpc.id,
"NAT rules should belong to VPC"
)
self.debug(
"Listing all the VMs belonging to VPC for network: %s" %
self.network_1.name)
vms = VirtualMachine.list(
self.apiclient,
networkid=self.network_1.id,
vpcid=self.vpc.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List virtual machines should return the valid list"
)
for vm in vms:
self.assertEqual(
vm.networkid,
self.network_1.id,
"List VMs should return vms belonging to network_1"
)
return

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,876 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC network functionality - Port Forwarding Rules.
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.integration.lib.base import (stopRouter,
startRouter,
Account,
VpcOffering,
VPC,
ServiceOffering,
NATRule,
NetworkACL,
PublicIPAddress,
NetworkOffering,
Network,
VirtualMachine,
LoadBalancerRule,
StaticNATRule)
from marvin.integration.lib.common import (get_domain,
get_zone,
get_template,
cleanup_resources,
wait_for_cleanup,
list_routers)
class Services:
"""Test VPC network services - Port Forwarding Rules Test Data Class.
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"host1":None,
"host2":None,
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 1000,
"memory": 512,
},
"network_offering": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Vpn": 'VpcVirtualRouter',
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
"servicecapabilitylist": {
},
},
"network_offering_no_lb": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"vpc_offering": {
"name": 'VPC off',
"displaytext": 'VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.0.0.1/24'
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"lbrule": {
"name": "SSH",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"openfirewall": False,
"startport": 22,
"endport": 2222,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"lbrule_http": {
"name": "HTTP",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 80,
"publicport": 8888,
"openfirewall": False,
"startport": 80,
"endport": 8888,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"http_rule": {
"privateport": 80,
"publicport": 80,
"startport": 80,
"endport": 80,
"cidrlist": '0.0.0.0/0',
"protocol": "TCP"
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
"timeout": 10,
"mode": 'advanced'
}
class TestVPCNetworkPFRules(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVPCNetworkPFRules,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
print("Warning: Exception during cleanup : %s" % e)
#raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self._cleanup = [self.account]
self.debug("Creating a VPC offering..")
self.vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
self._cleanup.append(self.vpc_off)
self.debug("Enabling the VPC offering created")
self.vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating a VPC network in the account: %s" % self.account.account.name)
self.services["vpc"]["cidr"] = '10.1.1.1/16'
self.vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
return
def tearDown(self):
try:
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self._cleanup)
wait_for_cleanup(self.apiclient, [
"network.gc.interval",
"network.gc.wait"])
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
#raise Exception("Warning: Exception during cleanup : %s" % e)
return
def get_Router_For_VPC(self):
routers = list_routers(self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(len(routers),
0,
"Check list router response"
)
router = routers[0]
return router
def stop_VPC_VRouter(self):
router = self.get_Router_For_VPC()
self.debug("Stopping router ID: %s" % router.id)
cmd = stopRouter.stopRouterCmd()
cmd.id = router.id
self.apiclient.stopRouter(cmd)
routers = list_routers(self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
router = routers[0]
self.assertEqual(router.state,
'Stopped',
"Check list router response for router state"
)
return router
def start_VPC_VRouter(self, router):
# Start the VPC Router
cmd = startRouter.startRouterCmd()
cmd.id = router.id
self.apiclient.startRouter(cmd)
routers = list_routers(self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
zoneid=self.zone.id
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
router = routers[0]
self.assertEqual(router.state,
'Running',
"Check list router response for router state"
)
def check_ssh_into_vm(self, vm, public_ip, testnegative=False):
self.debug("Checking if we can SSH into VM=%s on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
try:
vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
if not testnegative:
self.debug("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.fail("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
except:
if not testnegative:
self.fail("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
else:
self.debug("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
def check_wget_from_vm(self, vm, public_ip, testnegative=False):
import urllib
self.debug("Checking if we can wget from a VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
try:
urllib.urlretrieve("http://%s/test.html" % public_ip.ipaddress.ipaddress, filename="test.html")
if not testnegative:
self.debug("Successesfull to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.fail("Successesfull to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
except:
if not testnegative:
self.fail("Failed to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.debug("Failed to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
def create_StaticNatRule_For_VM(self, vm, public_ip, network):
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
StaticNATRule.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=vm.id,
networkid=network.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
def create_NatRule_For_VM(self, vm, public_ip, network, services=None):
self.debug("Creatinng NAT rule in network for vm with public IP")
if not services:
services = self.services["natrule"]
nat_rule = NATRule.create(self.apiclient,
vm,
services,
ipaddressid=public_ip.ipaddress.id,
openfirewall=False,
networkid=network.id,
vpcid=self.vpc.id
)
self.debug("Adding NetwrokACl rules to make NAT rule accessible")
nwacl_nat = NetworkACL.create(self.apiclient,
networkid=network.id,
services=services,
traffictype='Ingress'
)
self.debug('nwacl_nat=%s' % nwacl_nat.__dict__)
return nat_rule
def acquire_Public_IP(self, network):
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(self.apiclient,
accountid=self.account.account.name,
zoneid=self.zone.id,
domainid=self.account.account.domainid,
networkid=None, #network.id,
vpcid=self.vpc.id
)
self.debug("Associated %s with network %s" % (public_ip.ipaddress.ipaddress,
network.id
))
return public_ip
def create_VPC(self, cidr='10.1.2.1/16'):
self.debug("Creating a VPC offering..")
self.services["vpc_offering"]["name"] = self.services["vpc_offering"]["name"] + str(cidr)
vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
self._cleanup.append(vpc_off)
self.debug("Enabling the VPC offering created")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating a VPC network in the account: %s" % self.account.account.name)
self.services["vpc"]["cidr"] = cidr
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=vpc_off.id,
zoneid=self.zone.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
return vpc
def create_Network(self, net_offerring, gateway='10.1.1.1',vpc=None):
try:
self.debug('Create NetworkOffering')
net_offerring["name"] = "NET_OFF-" + str(gateway)
nw_off = NetworkOffering.create(self.apiclient,
net_offerring,
conservemode=False
)
# Enable Network offering
nw_off.update(self.apiclient, state='Enabled')
self._cleanup.append(nw_off)
self.debug('Created and Enabled NetworkOffering')
self.services["network"]["name"] = "NETWORK-" + str(gateway)
self.debug('Adding Network=%s' % self.services["network"])
obj_network = Network.create(self.apiclient,
self.services["network"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
networkofferingid=nw_off.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id if vpc else self.vpc.id
)
self.debug("Created network with ID: %s" % obj_network.id)
return obj_network
except:
self.fail('Unable to create a Network with offering=%s' % net_offerring)
def create_VM_in_Network(self, network, host_id=None):
try:
self.debug('Creating VM in network=%s' % network.name)
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)],
hostid=host_id
)
self.debug('Created VM=%s in network=%s' % (vm.id, network.name))
return vm
except:
self.fail('Unable to create VM in a Network=%s' % network.name)
def create_LB_Rule(self, public_ip, network, vmarray, services=None):
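# vmarray is the list of VirtualMachine objects that get assigned to the load balancer rule.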
self.debug("Creating LB rule for IP address: %s" %
public_ip.ipaddress.ipaddress)
objservices = None
if services:
objservices = services
else:
objservices = self.services["lbrule"]
lb_rule = LoadBalancerRule.create(
self.apiclient,
objservices,
ipaddressid=public_ip.ipaddress.id,
accountid=self.account.account.name,
networkid=network.id,
vpcid=self.vpc.id,
domainid=self.account.account.domainid
)
self.debug("Adding virtual machines %s and %s to LB rule" % (vmarray))
lb_rule.assign(self.apiclient, vmarray)
return lb_rule
def create_egress_Internet_Rule(self, network):
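# Note: despite the helper's name, the ACL entry below is created with traffictype='Ingress'
# for the ports defined in self.services["http_rule"], so HTTP traffic can reach the VMs.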
self.debug("Adding Egress rules to network %s and %s to allow access to internet" % (network.name,self.services["http_rule"]))
nwacl_internet_1 = NetworkACL.create(
self.apiclient,
networkid=network.id,
services=self.services["http_rule"],
traffictype='Ingress'
)
return nwacl_internet_1
@attr(tags=["advanced", "intervlan"])
def test_01_network_services_VPC_StopCreatePF(self):
""" Test case no 204 : Create PF rules for a single virtual network of a VPC,
using a new Public IP Address available with the VPC when Virtual Router is in Stopped State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Stop the VPC Virtual Router.
# 6. Use the Create PF rule for vm in network1.
# 7. Start VPC Virtual Router.
# 8. Successfully ssh into the Guest VM using the PF rule
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
router = self.stop_VPC_VRouter()
self.create_NatRule_For_VM( vm_1, public_ip_1, network_1)
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
def test_02_network_services_VPC_CreatePF(self):
""" Test case no 190 : Create PF rules for a single virtual network of a VPC using a
new Public IP Address available with the VPC when Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
self.create_NatRule_For_VM( vm_1, public_ip_1, network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_03_network_services_VPC_StopCreateMultiplePF(self):
""" Test case no 205 : Create PF rules for a two/multiple virtual networks of a VPC using
a new Public IP Address available with the VPC when Virtual Router is in Stopped State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 in network1.
# 6. Deploy vm2 in network2.
# 7. Stop the VPC Virtual Router.
# 8. Use the Create PF rule for vm1 in network1.
# 9. Use the Create PF rule for vm2 in network2.
# 10. Start VPC Virtual Router.
# 11. Successfully ssh into the Guest VM1 and VM2 using the PF rule
network_1 = self.create_Network(self.services["network_offering_no_lb"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_2)
router = self.stop_VPC_VRouter()
self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
self.create_NatRule_For_VM(vm_2, public_ip_2, network_2)
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_04_network_services_VPC_CreateMultiplePF(self):
""" Test case no 191 : Create PF rules for a two/multiple virtual networks of a VPC using a
new Public IP Address available with the VPC when Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 in network1.
# 6. Deploy vm2 in network2.
# 7. Use the Create PF rule for vm1 in network1.
# 8. Use the Create PF rule for vm2 in network2.
# 9. Start VPC Virtual Router.
# 10. Successfully ssh into the Guest VM1 and VM2 using the PF rule
network_1 = self.create_Network(self.services["network_offering"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_2)
router = self.stop_VPC_VRouter()
self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
self.create_NatRule_For_VM(vm_2, public_ip_2, network_2)
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_05_network_services_VPC_StopDeletePF(self):
""" Test case no 207 : Delete few(not all) PF rules for a single virtual network of
a VPC belonging to a single Public IP Address when Virtual Router is in Stopped State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Stop the VPC Virtual Router.
# 9. Delete internet PF rule
# 10. Start VPC Virtual Router.
# 11. wget a file present on http server of VM1 should fail
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
http_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1, self.services["http_rule"])
#http_rule = self.create_egress_Internet_Rule(network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
router = self.stop_VPC_VRouter()
http_rule.delete()
self.start_VPC_VRouter(router)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_06_network_services_VPC_DeletePF(self):
""" Test case no 193 : Delete few(not all) PF rules for a single virtual network of
a VPC belonging to a single Public IP Address when Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Delete internet PF rule
# 9. wget a file present on http server of VM1 should fail
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
http_rule=self.create_NatRule_For_VM(vm_1, public_ip_1, network_1, self.services["http_rule"])
#http_rule = self.create_egress_Internet_Rule(network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
http_rule.delete()
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_07_network_services_VPC_StopDeleteAllPF(self):
""" Test case no 208 : Delete all PF rules for a single virtual network of a
VPC belonging to a single Public IP Address when Virtual Router is in Stopped State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Stop the VPC Virtual Router.
# 9. Delete all PF rule
# 10. Start VPC Virtual Router.
# 11. wget a file present on http server of VM1 should fail
# 12. ssh into Guest VM using the PF rule should fail
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
nat_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
http_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1, self.services["http_rule"])
#http_rule = self.create_egress_Internet_Rule(network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
router = self.stop_VPC_VRouter()
http_rule.delete()
nat_rule.delete()
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_08_network_services_VPC_DeleteAllPF(self):
""" Test case no 194 : Delete all PF rules for a single virtual network of a
VPC belonging to a single Public IP Address when Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Delete all PF rule
# 9. wget a file present on http server of VM1 should fail
# 10. ssh into Guest VM using the PF rule should fail
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
nat_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
http_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1, self.services["http_rule"])
#http_rule = self.create_egress_Internet_Rule(network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
http_rule.delete()
nat_rule.delete()
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_09_network_services_VPC_StopDeleteAllMultiplePF(self):
""" Test case no 209 : Delete all PF rules for two/multiple virtual networks of a VPC.
Observe the status of the Public IP Addresses of the rules when Virtual Router is in Stopped State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16.
# 2. Create a Network offering - NO1 with all supported services.
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 and vm2 in network1.
# 6. Deploy vm3 and vm4 in network2.
# 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
# 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
# 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
# 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
# 11. Stop VPC Virtual Router.
# 12. Delete all PF rules for vm1, vm2, vm3 and vm4.
# 13. Start VPC Virtual Router.
# 14. Fail to ssh and http to vm1, vm2, vm3 and vm4.
network_1 = self.create_Network(self.services["network_offering"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_1)
vm_3 = self.create_VM_in_Network(network_2)
vm_4 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_1)
nat_rule1 = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
nat_rule2 = self.create_NatRule_For_VM(vm_2, public_ip_2, network_1)
http_rule1 = self.create_egress_Internet_Rule(network_1)
nat_rule3 = self.create_NatRule_For_VM(vm_3, public_ip_1, network_2)
nat_rule4 = self.create_NatRule_For_VM(vm_4, public_ip_2, network_2)
http_rule2 = self.create_egress_Internet_Rule(network_2)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
self.check_ssh_into_vm(vm_3, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_4, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_3, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_4, public_ip_2, testnegative=False)
router = self.stop_VPC_VRouter()
nat_rule1.delete()
nat_rule2.delete()
nat_rule3.delete()
nat_rule4.delete()
http_rule1.delete()
http_rule2.delete()
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=True)
self.check_ssh_into_vm(vm_3, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_4, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_3, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_4, public_ip_2, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_10_network_services_VPC_DeleteAllMultiplePF(self):
""" Test case no 195: Delete all PF rules for two/multiple virtual networks of a VPC.
Observe the status of the Public IP Addresses of the rules when Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16.
# 2. Create a Network offering - NO1 with all supported services.
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 and vm2 in network1.
# 6. Deploy vm3 and vm4 in network2.
# 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
# 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
# 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
# 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
# 11. Delete all PF rules for vm1, vm2, vm3 and vm4.
# 12. Fail to ssh and http to vm1, vm2, vm3 and vm4.
network_1 = self.create_Network(self.services["network_offering"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_1)
vm_3 = self.create_VM_in_Network(network_2)
vm_4 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_1)
nat_rule1 = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
nat_rule2 = self.create_NatRule_For_VM(vm_2, public_ip_2, network_1)
http_rule1 = self.create_egress_Internet_Rule(network_1)
nat_rule3 = self.create_NatRule_For_VM(vm_3, public_ip_1, network_2)
nat_rule4 = self.create_NatRule_For_VM(vm_4, public_ip_2, network_2)
http_rule2 = self.create_egress_Internet_Rule(network_2)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
self.check_ssh_into_vm(vm_3, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_4, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_3, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_4, public_ip_2, testnegative=False)
nat_rule1.delete()
nat_rule2.delete()
nat_rule3.delete()
nat_rule4.delete()
http_rule1.delete()
http_rule2.delete()
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=True)
self.check_ssh_into_vm(vm_3, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_4, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_3, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_4, public_ip_2, testnegative=True)
return

View File

@ -0,0 +1,710 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC network functionality - Port Forwarding Rules.
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.integration.lib.base import (stopRouter,
startRouter,
Account,
VpcOffering,
VPC,
ServiceOffering,
NATRule,
NetworkACL,
PublicIPAddress,
NetworkOffering,
Network,
VirtualMachine,
LoadBalancerRule,
StaticNATRule)
from marvin.integration.lib.common import (get_domain,
get_zone,
get_template,
cleanup_resources,
wait_for_cleanup,
list_routers)
class Services:
"""Test VPC network services - Port Forwarding Rules Test Data Class.
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"host1":None,
"host2":None,
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 1000,
"memory": 512,
},
"network_offering": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Vpn": 'VpcVirtualRouter',
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
"servicecapabilitylist": {
},
},
"network_offering_no_lb": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"vpc_offering": {
"name": 'VPC off',
"displaytext": 'VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.0.0.1/24'
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"lbrule": {
"name": "SSH",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"openfirewall": False,
"startport": 22,
"endport": 2222,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"lbrule_http": {
"name": "HTTP",
"alg": "leastconn",
# Algorithm used for load balancing
"privateport": 80,
"publicport": 8888,
"openfirewall": False,
"startport": 80,
"endport": 8888,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"http_rule": {
"privateport": 80,
"publicport": 80,
"startport": 80,
"endport": 80,
"cidrlist": '0.0.0.0/0',
"protocol": "TCP"
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
# Hypervisor type should be same as
# hypervisor type of cluster
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
"timeout": 10,
"mode": 'advanced'
}
class TestVPCNetworkPFRules(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVPCNetworkPFRules,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
print("Warning: Exception during cleanup : %s" % e)
#raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
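# Every test gets a fresh admin account, an enabled VPC offering and a VPC (CIDR 10.1.1.1/16),
# so the tests are isolated from each other.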
self.apiclient = self.testClient.getApiClient()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self._cleanup = [self.account]
self.debug("Creating a VPC offering..")
self.vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
self._cleanup.append(self.vpc_off)
self.debug("Enabling the VPC offering created")
self.vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating a VPC network in the account: %s" % self.account.account.name)
self.services["vpc"]["cidr"] = '10.1.1.1/16'
self.vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
return
def tearDown(self):
try:
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self._cleanup)
wait_for_cleanup(self.apiclient, [
"network.gc.interval",
"network.gc.wait"])
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
#raise Exception("Warning: Exception during cleanup : %s" % e)
return
def get_Router_For_VPC(self):
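# The account is expected to have a single VPC virtual router; the first entry of the
# listRouters response is used.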
routers = list_routers(self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(len(routers),
0,
"Check list router response"
)
router = routers[0]
return router
def stop_VPC_VRouter(self):
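# Several tests stop the VPC virtual router, create or delete rules while it is down, and then
# start it again to verify that the rules are applied once the router comes back up.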
router = self.get_Router_For_VPC()
self.debug("Stopping router ID: %s" % router.id)
cmd = stopRouter.stopRouterCmd()
cmd.id = router.id
self.apiclient.stopRouter(cmd)
routers = list_routers(self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
router = routers[0]
self.assertEqual(router.state,
'Stopped',
"Check list router response for router state"
)
return router
def start_VPC_VRouter(self, router):
# Start the VPC Router
cmd = startRouter.startRouterCmd()
cmd.id = router.id
self.apiclient.startRouter(cmd)
routers = list_routers(self.apiclient,
account=self.account.account.name,
domainid=self.account.account.domainid,
zoneid=self.zone.id
)
self.assertEqual(isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
router = routers[0]
self.assertEqual(router.state,
'Running',
"Check list router response for router state"
)
def check_ssh_into_vm(self, vm, public_ip, testnegative=False):
self.debug("Checking if we can SSH into VM=%s on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
try:
vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
if not testnegative:
self.debug("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.fail("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
except:
if not testnegative:
self.fail("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
else:
self.debug("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
def check_wget_from_vm(self, vm, public_ip, testnegative=False):
import urllib
self.debug("Checking if we can wget from a VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
try:
urllib.urlretrieve("http://%s/test.html" % public_ip.ipaddress.ipaddress, filename="test.html")
if not testnegative:
self.debug("Successesfull to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.fail("Successesfull to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
except:
if not testnegative:
self.fail("Failed to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
else:
self.debug("Failed to wget from VM=%s http server on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
def create_StaticNatRule_For_VM(self, vm, public_ip, network):
self.debug("Enabling static NAT for IP: %s" %
public_ip.ipaddress.ipaddress)
try:
StaticNATRule.enable(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
virtualmachineid=vm.id,
networkid=network.id
)
self.debug("Static NAT enabled for IP: %s" %
public_ip.ipaddress.ipaddress)
except Exception as e:
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
def create_NatRule_For_VM(self, vm, public_ip, network, services=None):
self.debug("Creatinng NAT rule in network for vm with public IP")
if not services:
services = self.services["natrule"]
nat_rule = NATRule.create(self.apiclient,
vm,
services,
ipaddressid=public_ip.ipaddress.id,
openfirewall=False,
networkid=network.id,
vpcid=self.vpc.id
)
self.debug("Adding NetwrokACl rules to make NAT rule accessible")
nwacl_nat = NetworkACL.create(self.apiclient,
networkid=network.id,
services=services,
traffictype='Ingress'
)
self.debug('nwacl_nat=%s' % nwacl_nat.__dict__)
return nat_rule
def acquire_Public_IP(self, network):
self.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(self.apiclient,
accountid=self.account.account.name,
zoneid=self.zone.id,
domainid=self.account.account.domainid,
networkid=None, #network.id,
vpcid=self.vpc.id
)
self.debug("Associated %s with network %s" % (public_ip.ipaddress.ipaddress,
network.id
))
return public_ip
def create_VPC(self, cidr='10.1.2.1/16'):
self.debug("Creating a VPC offering..")
self.services["vpc_offering"]["name"] = self.services["vpc_offering"]["name"] + str(cidr)
vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
self._cleanup.append(vpc_off)
self.debug("Enabling the VPC offering created")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating a VPC network in the account: %s" % self.account.account.name)
self.services["vpc"]["cidr"] = cidr
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=vpc_off.id,
zoneid=self.zone.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
return vpc
def create_Network(self, net_offerring, gateway='10.1.1.1',vpc=None):
try:
self.debug('Create NetworkOffering')
net_offerring["name"] = "NET_OFF-" + str(gateway)
nw_off = NetworkOffering.create(self.apiclient,
net_offerring,
conservemode=False
)
# Enable Network offering
nw_off.update(self.apiclient, state='Enabled')
self._cleanup.append(nw_off)
self.debug('Created and Enabled NetworkOffering')
self.services["network"]["name"] = "NETWORK-" + str(gateway)
self.debug('Adding Network=%s' % self.services["network"])
obj_network = Network.create(self.apiclient,
self.services["network"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
networkofferingid=nw_off.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id if vpc else self.vpc.id
)
self.debug("Created network with ID: %s" % obj_network.id)
return obj_network
except:
self.fail('Unable to create a Network with offering=%s' % net_offerring)
def create_VM_in_Network(self, network, host_id=None):
try:
self.debug('Creating VM in network=%s' % network.name)
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)],
hostid=host_id
)
self.debug('Created VM=%s in network=%s' % (vm.id, network.name))
return vm
except:
self.fail('Unable to create VM in a Network=%s' % network.name)
def create_LB_Rule(self, public_ip, network, vmarray, services=None):
self.debug("Creating LB rule for IP address: %s" %
public_ip.ipaddress.ipaddress)
objservices = None
if services:
objservices = services
else:
objservices = self.services["lbrule"]
lb_rule = LoadBalancerRule.create(
self.apiclient,
objservices,
ipaddressid=public_ip.ipaddress.id,
accountid=self.account.account.name,
networkid=network.id,
vpcid=self.vpc.id,
domainid=self.account.account.domainid
)
self.debug("Adding virtual machines %s and %s to LB rule" % (vmarray))
lb_rule.assign(self.apiclient, vmarray)
return lb_rule
def create_egress_Internet_Rule(self, network):
self.debug("Adding Egress rules to network %s and %s to allow access to internet" % (network.name,self.services["http_rule"]))
nwacl_internet_1 = NetworkACL.create(
self.apiclient,
networkid=network.id,
services=self.services["http_rule"],
traffictype='Ingress'
)
return nwacl_internet_1
@attr(tags=["advanced", "intervlan"])
def test_01_VPC_StaticNatRuleCreateStoppedState(self):
""" Test case no extra :
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Stop the VPC Virtual Router.
# 6. Use the Create PF rule for vm in network1.
# 7. Start VPC Virtual Router.
# 8. Successfully ssh into the Guest VM using the PF rule
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
router = self.stop_VPC_VRouter()
self.create_NatRule_For_VM( vm_1, public_ip_1, network_1)
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
def test_02_VPC_CreateStaticNatRule(self):
""" Test case no 229 : Create Static NAT Rule for a single virtual network of
a VPC using a new Public IP Address available with the VPC when the Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create Static Nat rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
self.create_StaticNatRule_For_VM( vm_1, public_ip_1, network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_03_VPC_StopCreateMultipleStaticNatRuleStoppedState(self):
""" Test case no extra : Create Static Nat Rule rules for a two/multiple virtual networks of a VPC using
a new Public IP Address available with the VPC when Virtual Router is in Stopped State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 in network1.
# 6. Deploy vm2 in network2.
# 7. Stop the VPC Virtual Router.
# 8. Use the Create PF rule for vm1 in network1.
# 9. Use the Create PF rule for vm2 in network2.
# 10. Start VPC Virtual Router.
# 11. Successfully ssh into the Guest VM1 and VM2 using the PF rule
network_1 = self.create_Network(self.services["network_offering_no_lb"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_2)
router = self.stop_VPC_VRouter()
self.create_StaticNatRule_For_VM(vm_1, public_ip_1, network_1)
self.create_StaticNatRule_For_VM(vm_2, public_ip_2, network_2)
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_04_VPC_CreateMultipleStaticNatRule(self):
""" Test case no 230 : Create Static NAT Rules for a two/multiple virtual networks of
a VPC using a new Public IP Address available with the VPC when the Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 in network1.
# 6. Deploy vm2 in network2.
# 7. Use the Create PF rule for vm1 in network1.
# 8. Use the Create PF rule for vm2 in network2.
# 9. Start VPC Virtual Router.
# 10. Successfully ssh into the Guest VM1 and VM2 using the PF rule
network_1 = self.create_Network(self.services["network_offering"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_2)
router = self.stop_VPC_VRouter()
self.create_StaticNatRule_For_VM(vm_1, public_ip_1, network_1)
self.create_StaticNatRule_For_VM(vm_2, public_ip_2, network_2)
self.start_VPC_VRouter(router)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_05_network_services_VPC_DeleteAllPF(self):
""" Test case no 232: Delete all Static NAT Rules for a single virtual network of
a VPC belonging to a single Public IP Address when the Virtual Router is in Running State
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Create a Network offering - NO1 with all supported services
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Deploy vm1 in network1.
# 5. Use the Create PF rule for vm in network1.
# 6. Successfully ssh into the Guest VM using the PF rule.
# 7. Successfully wget a file on http server of VM1.
# 8. Delete all PF rule
# 9. wget a file present on http server of VM1 should fail
# 10. ssh into Guest VM using the PF rule should fail
network_1 = self.create_Network(self.services["network_offering"])
vm_1 = self.create_VM_in_Network(network_1)
public_ip_1 = self.acquire_Public_IP(network_1)
nat_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
http_rule = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1, self.services["http_rule"])
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
http_rule.delete()
nat_rule.delete()
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
return
@attr(tags=["advanced", "intervlan"])
@unittest.skip("Implemented but not executed: VPC with multiple network fails to set PF rule.")
def test_06_network_services_VPC_DeleteAllMultiplePF(self):
""" Test case no 233: Delete all Static NAT rules for two/multiple virtual networks of a VPC.
Observe the status of the Public IP Addresses of the rules when the Virtual Router is in Running State.
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16.
# 2. Create a Network offering - NO1 with all supported services.
# 3. Add network1(10.1.1.1/24) using N01 to this VPC.
# 4. Add network2(10.1.2.1/24) using N01 to this VPC.
# 5. Deploy vm1 and vm2 in network1.
# 6. Deploy vm3 and vm4 in network2.
# 7. Use the Create PF rule ssh and http for vm1 and vm2 in network1.
# 8. Use the Create PF rule ssh and http for vm3 and vm4 in network2.
# 9. Successfully ssh into the Guest vm1, vm2, vm3 and vm4 using the PF rule.
# 10. Successfully wget a file from http server present on vm1, vm2, vm3 and vm4.
# 11. Delete all PF rules for vm1, vm2, vm3 and vm4.
# 12. Fail to ssh and http to vm1, vm2, vm3 and vm4.
network_1 = self.create_Network(self.services["network_offering"])
network_2 = self.create_Network(self.services["network_offering_no_lb"], '10.1.2.1')
vm_1 = self.create_VM_in_Network(network_1)
vm_2 = self.create_VM_in_Network(network_1)
vm_3 = self.create_VM_in_Network(network_2)
vm_4 = self.create_VM_in_Network(network_2)
public_ip_1 = self.acquire_Public_IP(network_1)
public_ip_2 = self.acquire_Public_IP(network_1)
nat_rule1 = self.create_NatRule_For_VM(vm_1, public_ip_1, network_1)
nat_rule2 = self.create_NatRule_For_VM(vm_2, public_ip_2, network_1)
http_rule1 = self.create_egress_Internet_Rule(network_1)
nat_rule3 = self.create_NatRule_For_VM(vm_3, public_ip_1, network_2)
nat_rule4 = self.create_NatRule_For_VM(vm_4, public_ip_2, network_2)
http_rule2 = self.create_egress_Internet_Rule(network_2)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=False)
self.check_ssh_into_vm(vm_3, public_ip_1, testnegative=False)
self.check_ssh_into_vm(vm_4, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=False)
self.check_wget_from_vm(vm_3, public_ip_1, testnegative=False)
self.check_wget_from_vm(vm_4, public_ip_2, testnegative=False)
nat_rule1.delete()
nat_rule2.delete()
nat_rule3.delete()
nat_rule4.delete()
http_rule1.delete()
http_rule2.delete()
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_2, public_ip_2, testnegative=True)
self.check_ssh_into_vm(vm_3, public_ip_1, testnegative=True)
self.check_ssh_into_vm(vm_4, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_1, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_2, public_ip_2, testnegative=True)
self.check_wget_from_vm(vm_3, public_ip_1, testnegative=True)
self.check_wget_from_vm(vm_4, public_ip_2, testnegative=True)
return

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,153 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#Test from the Marvin - Testing in Python wiki
#All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
#Import Integration Libraries
#base - contains all resources as entities and defines create, delete, list operations on them
from marvin.integration.lib.base import Account, VirtualMachine, ServiceOffering
#utils - utility classes for common cleanup, external library wrappers etc
from marvin.integration.lib.utils import cleanup_resources
#common - commonly used methods for all tests are listed here
from marvin.integration.lib.common import get_zone, get_domain, get_template
class TestData(object):
"""Test data object that is required to create resources
"""
def __init__(self):
self.testdata = {
#data to create an account
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
#data reqd for virtual machine creation
"virtual_machine" : {
"name" : "testvm",
"displayname" : "Test VM",
},
#small service offering
"service_offering": {
"small": {
"name": "Small Instance",
"displaytext": "Small Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 256,
},
},
"ostype": 'CentOS 5.3 (64-bit)',
}
class TestDeployVM(cloudstackTestCase):
"""Test deploy a VM into a user account
"""
def setUp(self):
self.testdata = TestData().testdata
self.apiclient = self.testClient.getApiClient()
# Get Zone, Domain and Default Built-in template
self.domain = get_domain(self.apiclient, self.testdata)
self.zone = get_zone(self.apiclient, self.testdata)
self.testdata["mode"] = self.zone.networktype
self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])
#create a user account
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
#create a service offering
self.service_offering = ServiceOffering.create(
self.apiclient,
self.testdata["service_offering"]["small"]
)
#build cleanup list
self.cleanup = [
self.service_offering,
self.account
]
def test_deploy_vm(self):
"""Test Deploy Virtual Machine
# Validate the following:
# 1. Virtual Machine is accessible via SSH
# 2. listVirtualMachines returns accurate information
"""
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id
)
list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s"\
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vms, list),
True,
"List VM response was not a valid list"
)
self.assertNotEqual(
len(list_vms),
0,
"List VM response was empty"
)
vm = list_vms[0]
self.assertEqual(
vm.id,
self.virtual_machine.id,
"Virtual Machine ids do not match"
)
self.assertEqual(
vm.name,
self.virtual_machine.name,
"Virtual Machine names do not match"
)
self.assertEqual(
vm.state,
"Running",
msg="VM is not in Running state"
)
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)

View File

@ -1,209 +0,0 @@
{
"zones": [
{
"name": "Sandbox-Simulator",
"guestcidraddress": "10.1.1.0/24",
"dns1": "10.147.28.6",
"physical_networks": [
{
"providers": [
{
"broadcastdomainrange": "ZONE",
"name": "VirtualRouter"
},
{
"broadcastdomainrange": "ZONE",
"name": "VpcVirtualRouter"
},
{
"broadcastdomainrange": "ZONE",
"name": "InternalLb"
}
],
"name": "Sandbox-pnet",
"tags": [
"cloud-simulator-public"
],
"broadcastdomainrange": "Zone",
"vlan": "675-679",
"traffictypes": [
{
"typ": "Guest"
},
{
"typ": "Management",
"simulator": "cloud-simulator-mgmt"
},
{
"typ": "Public",
"simulator": "cloud-simulator-public"
}
],
"isolationmethods": [
"VLAN"
]
},
{
"providers": [
{
"broadcastdomainrange": "ZONE",
"name": "VirtualRouter"
},
{
"broadcastdomainrange": "ZONE",
"name": "VpcVirtualRouter"
},
{
"broadcastdomainrange": "ZONE",
"name": "InternalLb"
}
],
"name": "Sandbox-pnet2",
"tags": [
"cloud-simulator-guest"
],
"broadcastdomainrange": "Zone",
"vlan": "800-1000",
"traffictypes": [
{
"typ": "Guest",
"simulator": "cloud-simulator-guest"
}
],
"isolationmethods": [
"VLAN"
]
}
],
"securitygroupenabled": "false",
"ipranges": [
{
"startip": "10.147.31.150",
"endip": "10.147.31.159",
"netmask": "255.255.255.0",
"vlan": "31",
"gateway": "10.147.31.1"
}
],
"networktype": "Advanced",
"pods": [
{
"endip": "10.147.29.159",
"name": "POD0",
"startip": "10.147.29.150",
"netmask": "255.255.255.0",
"clusters": [
{
"clustername": "C0",
"hypervisor": "Simulator",
"hosts": [
{
"username": "root",
"url": "http://simulator0",
"password": "password"
}
],
"clustertype": "CloudManaged",
"primaryStorages": [
{
"url": "nfs://10.147.28.6:/export/home/sandbox/primary",
"name": "PS0"
}
]
}
],
"gateway": "10.147.29.1"
}
],
"internaldns1": "10.147.28.6",
"secondaryStorages": [
{
"url": "nfs://10.147.28.6:/export/home/sandbox/sstor"
}
]
}
],
"dbSvr": {
"dbSvr": "localhost",
"passwd": "cloud",
"db": "cloud",
"port": 3306,
"user": "cloud"
},
"logger": [
{
"name": "TestClient",
"file": "testclient.log"
},
{
"name": "TestCase",
"file": "testcase.log"
}
],
"globalConfig": [
{
"name": "storage.cleanup.interval",
"value": "300"
},
{
"name": "direct.agent.load.size",
"value": "1000"
},
{
"name": "default.page.size",
"value": "10000"
},
{
"name": "instance.name",
"value": "QA"
},
{
"name": "workers",
"value": "10"
},
{
"name": "vm.op.wait.interval",
"value": "5"
},
{
"name": "account.cleanup.interval",
"value": "600"
},
{
"name": "guest.domain.suffix",
"value": "sandbox.simulator"
},
{
"name": "expunge.delay",
"value": "60"
},
{
"name": "vm.allocation.algorithm",
"value": "random"
},
{
"name": "expunge.interval",
"value": "60"
},
{
"name": "expunge.workers",
"value": "3"
},
{
"name": "secstorage.allowed.internal.sites",
"value": "10.147.28.0/24"
},
{
"name": "check.pod.cidrs",
"value": "true"
}
],
"mgtSvr": [
{
"mgtSvrIp": "localhost",
"passwd": "password",
"user": "root",
"port": 8096
}
]
}