diff --git a/api/src/com/cloud/network/Network.java b/api/src/com/cloud/network/Network.java index 8b402bcefd1..e4990de6cda 100644 --- a/api/src/com/cloud/network/Network.java +++ b/api/src/com/cloud/network/Network.java @@ -142,6 +142,8 @@ public interface Network extends ControlledEntity, StateObject, I public static final Provider GloboDns = new Provider("GloboDns", true); // add Big Switch Bcf Provider public static final Provider BigSwitchBcf = new Provider("BigSwitchBcf", false); + //Add ConfigDrive provider + public static final Provider ConfigDrive = new Provider("ConfigDrive", false); private final String name; private final boolean isExternal; diff --git a/api/src/com/cloud/network/NetworkModel.java b/api/src/com/cloud/network/NetworkModel.java index 220fa99467f..5e9839449c8 100644 --- a/api/src/com/cloud/network/NetworkModel.java +++ b/api/src/com/cloud/network/NetworkModel.java @@ -17,6 +17,8 @@ package com.cloud.network; +import com.google.common.collect.ImmutableMap; + import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -48,6 +50,28 @@ import org.apache.cloudstack.framework.config.ConfigKey; * participants in the orchestration can use this interface to query the data. */ public interface NetworkModel { + String METATDATA_DIR = "metadata"; + String USERDATA_DIR = "userdata"; + String USERDATA_FILE = "user_data"; + String PASSWORD_DIR = "password"; + String PASSWORD_FILE = "vm_password"; + String PASSWORD_CHECKSUM_FILE = "vm-password-md5checksum"; + String SERVICE_OFFERING_FILE = "service-offering"; + String AVAILABILITY_ZONE_FILE = "availability-zone"; + String LOCAL_HOSTNAME_FILE = "local-hostname"; + String INSTANCE_ID_FILE = "instance-id"; + String VM_ID_FILE = "vm-id"; + String PUBLIC_KEYS_FILE = "public-keys"; + String CLOUD_IDENTIFIER_FILE = "cloud-identifier"; + int CONFIGDATA_DIR = 0; + int CONFIGDATA_FILE = 1; + int CONFIGDATA_CONTENT = 2; + ImmutableMap openStackFileMapping = ImmutableMap.of( + AVAILABILITY_ZONE_FILE, "availability_zone", + LOCAL_HOSTNAME_FILE, "hostname", + VM_ID_FILE, "uuid", + INSTANCE_ID_FILE, "name" + ); static final ConfigKey MACIdentifier = new ConfigKey("Advanced",Integer.class, "mac.identifier", "0", "This value will be used while generating the mac addresses for isolated and shared networks. The hexadecimal equivalent value will be present at the 2nd octet of the mac address. 
Default value is null which means this feature is disabled.Its scope is global.", true, ConfigKey.Scope.Global); diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index bfe6b0d6c7f..8161fb2564b 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -24,6 +24,8 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import org.apache.log4j.Logger; + import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ACL; @@ -47,7 +49,6 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java index ea2d19dfc1c..57e2e9c3c01 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java @@ -21,6 +21,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.log4j.Logger; + import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -35,7 +37,6 @@ import org.apache.cloudstack.api.response.GuestOSResponse; import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceUnavailableException; diff --git a/core/src/com/cloud/agent/api/AttachIsoAnswer.java b/core/src/com/cloud/agent/api/AttachIsoAnswer.java new file mode 100644 index 00000000000..5cc3c71bace --- /dev/null +++ b/core/src/com/cloud/agent/api/AttachIsoAnswer.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
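Note on the NetworkModel additions above: every metadata/userdata entry is carried as a String[] of {directory, file name, content}, indexed by CONFIGDATA_DIR, CONFIGDATA_FILE and CONFIGDATA_CONTENT, and openStackFileMapping translates the CloudStack file names to their OpenStack counterparts (presumably for an OpenStack-style layout on the config drive). A minimal sketch of how such a mapping could be applied; the class and method names here are illustrative only and not part of this patch:

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative helper only; the real config drive code in this patch does the actual work.
    public class OpenStackNameSketch {
        // Mirrors NetworkModel.openStackFileMapping (CloudStack file name -> OpenStack file name).
        private static final Map<String, String> OPENSTACK_FILE_MAPPING = new HashMap<>();
        static {
            OPENSTACK_FILE_MAPPING.put("availability-zone", "availability_zone");
            OPENSTACK_FILE_MAPPING.put("local-hostname", "hostname");
            OPENSTACK_FILE_MAPPING.put("vm-id", "uuid");
            OPENSTACK_FILE_MAPPING.put("instance-id", "name");
        }

        // vmData entries are String[]{dir, file, content}; index 1 is CONFIGDATA_FILE.
        static String openStackFileName(String[] vmDataEntry) {
            String file = vmDataEntry[1];
            return OPENSTACK_FILE_MAPPING.getOrDefault(file, file);
        }

        public static void main(String[] args) {
            System.out.println(openStackFileName(new String[] {"metadata", "local-hostname", "my-vm"})); // prints "hostname"
        }
    }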
+package com.cloud.agent.api; + + +public class AttachIsoAnswer extends Answer { + private Integer deviceKey; + + + public AttachIsoAnswer(AttachIsoCommand cmd, String result) { + super(cmd, false, result); + this.deviceKey = null; + } + + public AttachIsoAnswer(AttachIsoCommand cmd, Integer deviceId) { + super(cmd); + this.deviceKey = deviceId; + } + + public AttachIsoAnswer(AttachIsoCommand cmd) { + super(cmd); + this.deviceKey = null; + } + + public AttachIsoAnswer(AttachIsoCommand command, boolean success, String details) { + super(command,success,details); + this.deviceKey = null; + } + + public AttachIsoAnswer(Command command, Exception e) { + super(command, e); + } + + public Integer getDeviceKey() { + return deviceKey; + } + + public void setDeviceKey(Integer deviceKey) { + this.deviceKey = deviceKey; + } +} diff --git a/core/src/com/cloud/agent/api/AttachIsoCommand.java b/core/src/com/cloud/agent/api/AttachIsoCommand.java index 07676878b6b..88a031972fc 100644 --- a/core/src/com/cloud/agent/api/AttachIsoCommand.java +++ b/core/src/com/cloud/agent/api/AttachIsoCommand.java @@ -25,14 +25,22 @@ public class AttachIsoCommand extends Command { private String storeUrl; private String isoPath; private boolean attach; + private Integer deviceKey; + private boolean force; protected AttachIsoCommand() { } - public AttachIsoCommand(String vmName, String isoPath, boolean attach) { + public AttachIsoCommand(String vmName, String isoPath, boolean attach, Integer deviceKey, boolean force) { this.vmName = vmName; this.isoPath = isoPath; this.attach = attach; + this.deviceKey = deviceKey; + this.force = force; + } + + public AttachIsoCommand(String vmName, String isoPath, boolean attach) { + this(vmName, isoPath, attach, null, false); } @Override @@ -52,6 +60,10 @@ public class AttachIsoCommand extends Command { return attach; } + public void setAttach(boolean attach) { + this.attach = attach; + } + public String getStoreUrl() { return storeUrl; } @@ -59,4 +71,16 @@ public class AttachIsoCommand extends Command { public void setStoreUrl(String url) { storeUrl = url; } + + public void setDeviceKey(Integer deviceKey) { + this.deviceKey = deviceKey; + } + + public Integer getDeviceKey() { + return deviceKey; + } + + public boolean isForce() { + return force; + } } diff --git a/core/src/com/cloud/agent/api/HandleConfigDriveIsoCommand.java b/core/src/com/cloud/agent/api/HandleConfigDriveIsoCommand.java new file mode 100644 index 00000000000..d6d87d48c05 --- /dev/null +++ b/core/src/com/cloud/agent/api/HandleConfigDriveIsoCommand.java @@ -0,0 +1,78 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
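Note on the AttachIsoAnswer/AttachIsoCommand changes above: deviceKey lets the caller target a specific CD-ROM slot (null keeps the old single-CDROM behaviour), force is consulted on the VMware detach path, and the new answer type can report which device key was used. A hedged usage sketch; the VM name, ISO path, store URL and device key below are made up:

    import com.cloud.agent.api.AttachIsoAnswer;
    import com.cloud.agent.api.AttachIsoCommand;

    public class AttachIsoSketch {
        // Illustrative only: attach an ISO to CD-ROM slot 4 and read back the slot from the answer.
        static AttachIsoCommand buildAttach(String vmName, String isoPath) {
            AttachIsoCommand cmd = new AttachIsoCommand(vmName, isoPath, true /* attach */, 4 /* deviceKey */, true /* force */);
            cmd.setStoreUrl("nfs://10.0.0.1/secondary"); // hypothetical secondary store
            return cmd;
        }

        static Integer deviceUsed(AttachIsoAnswer answer) {
            return answer.getResult() ? answer.getDeviceKey() : null;
        }
    }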
+// + +package com.cloud.agent.api; + +import java.util.List; + +import com.cloud.agent.api.to.DataStoreTO; + +public class HandleConfigDriveIsoCommand extends Command { + + String isoFile; + List vmData; + String configDriveLabel; + boolean create = false; + private boolean update = false; + private DataStoreTO destStore; + + public HandleConfigDriveIsoCommand(List vmData, String label, DataStoreTO destStore, String isoFile, boolean create, boolean update) { + this.vmData = vmData; + this.configDriveLabel = label; + this.create = create; + this.update = update; + this.destStore = destStore; + + + this.isoFile = isoFile; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public List getVmData() { + return vmData; + } + + public void setVmData(List vmData) { + this.vmData = vmData; + } + + public boolean isCreate() { + return create; + } + + public String getConfigDriveLabel() { + return configDriveLabel; + } + + public DataStoreTO getDestStore() { + return destStore; + } + + public String getIsoFile() { + return isoFile; + } + + public boolean isUpdate() { + return update; + } +} diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachIsoCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachIsoCommandTest.java index 124762bfcb2..f1dfefe19fe 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/AttachIsoCommandTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/AttachIsoCommandTest.java @@ -19,16 +19,24 @@ package org.apache.cloudstack.api.agent.test; +import org.junit.Assert; +import org.junit.Test; + +import com.google.gson.Gson; + +import com.cloud.agent.api.AttachIsoCommand; +import com.cloud.agent.api.Command; +import com.cloud.serializer.GsonHelper; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import org.junit.Test; - -import com.cloud.agent.api.AttachIsoCommand; - public class AttachIsoCommandTest { - AttachIsoCommand aic = new AttachIsoCommand("vmname", "isopath", false); + + private static final Gson s_gson = GsonHelper.getGson(); + + AttachIsoCommand aic = new AttachIsoCommand("vmname", "isopath", false, 1, true); @Test public void testGetVmName() { @@ -80,4 +88,17 @@ public class AttachIsoCommandTest { b = aic.getWait(); assertEquals(b, 0); } + + @Test + public void testSerialization() { + AttachIsoCommand after = serializeAndDeserialize(aic); + Assert.assertEquals(aic, after); + } + + private T serializeAndDeserialize(T command) { + final String json = s_gson.toJson(new Command[] {command}); + Command[] forwardedCommands = s_gson.fromJson(json, Command[].class); + return (T) forwardedCommands[0]; + } + } diff --git a/engine/api/src/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/com/cloud/vm/VirtualMachineManager.java index 31e668640d9..556ec86f060 100644 --- a/engine/api/src/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/com/cloud/vm/VirtualMachineManager.java @@ -52,7 +52,7 @@ public interface VirtualMachineManager extends Manager { static final ConfigKey ExecuteInSequence = new ConfigKey("Advanced", Boolean.class, "execute.in.sequence.hypervisor.commands", "false", "If set to true, start, stop, reboot, copy and migrate commands will be serialized on the agent side. If set to false the commands are executed in parallel. 
Default value is false.", false); - static final ConfigKey VmConfigDriveLabel = new ConfigKey("Hidden", String.class, "vm.configdrive.label", "config", + static final ConfigKey VmConfigDriveLabel = new ConfigKey("Hidden", String.class, "vm.configdrive.label", "config-2", "The default label name for the config drive", false); public interface Topics { diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 398cf56e278..64285841ff7 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -41,7 +41,6 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; - import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; @@ -69,7 +68,6 @@ import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.InputSource; import org.xml.sax.SAXException; - import com.google.common.base.Strings; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; @@ -2251,16 +2249,15 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv DiskDef.DiskBus diskBusTypeData = (diskBusType == DiskDef.DiskBus.SCSI) ? diskBusType : DiskDef.DiskBus.VIRTIO; final DiskDef disk = new DiskDef(); + int devId = volume.getDiskSeq().intValue(); if (volume.getType() == Volume.Type.ISO) { if (volPath == null) { /* Add iso as placeholder */ - disk.defISODisk(null); + disk.defISODisk(null, devId); } else { - disk.defISODisk(volPath); + disk.defISODisk(volPath, devId); } } else { - final int devId = volume.getDiskSeq().intValue(); - if (diskBusType == DiskDef.DiskBus.SCSI ) { disk.setQemuDriver(true); disk.setDiscard(DiscardType.UNMAP); @@ -2390,9 +2387,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return _storagePoolMgr; } - public synchronized String attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach) throws LibvirtException, URISyntaxException, + public synchronized String attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach, final Integer diskSeq) throws LibvirtException, URISyntaxException, InternalErrorException { - String isoXml = null; + final DiskDef iso = new DiskDef(); if (isoPath != null && isAttach) { final int index = isoPath.lastIndexOf("/"); final String path = isoPath.substring(0, index); @@ -2401,20 +2398,17 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv final KVMPhysicalDisk isoVol = secondaryPool.getPhysicalDisk(name); isoPath = isoVol.getPath(); - final DiskDef iso = new DiskDef(); - iso.defISODisk(isoPath); - isoXml = iso.toString(); + iso.defISODisk(isoPath, diskSeq); } else { - final DiskDef iso = new DiskDef(); - iso.defISODisk(null); - isoXml = iso.toString(); + iso.defISODisk(null, diskSeq); } - final List disks = getDisks(conn, vmName); - final String result = attachOrDetachDevice(conn, true, vmName, isoXml); + final String result = attachOrDetachDevice(conn, true, vmName, iso.toString()); if (result == null && !isAttach) { + final List disks = getDisks(conn, vmName); for (final DiskDef disk : disks) { - if (disk.getDeviceType() == DiskDef.DeviceType.CDROM) { + 
if (disk.getDeviceType() == DiskDef.DeviceType.CDROM + && (diskSeq == null || disk.getDiskLabel() == iso.getDiskLabel())) { cleanupDisk(disk); } } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java index d979d553f48..5f2af5cbe72 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java @@ -22,7 +22,6 @@ import java.io.StringReader; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; @@ -116,7 +115,7 @@ public class LibvirtDomainXMLParser { } def.defFileBasedDisk(diskFile, diskLabel, DiskDef.DiskBus.valueOf(bus.toUpperCase()), fmt); } else if (device.equalsIgnoreCase("cdrom")) { - def.defISODisk(diskFile); + def.defISODisk(diskFile , i+1); } } else if (type.equalsIgnoreCase("block")) { def.defBlockBasedDisk(diskDev, diskLabel, diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index 0196c85bb58..81077fa7254 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -609,22 +609,24 @@ public class LibvirtVMDef { } - /* skip iso label */ - private String getDevLabel(int devId, DiskBus bus) { + /* skip iso labels */ + private String getDevLabel(int devId, DiskBus bus, boolean forIso) { if (devId < 0) { return ""; } - if (devId == 2) { - devId++; - } - if (bus == DiskBus.SCSI) { return "sd" + getDevLabelSuffix(devId); } else if (bus == DiskBus.VIRTIO) { return "vd" + getDevLabelSuffix(devId); } + if (forIso) { + devId --; + } else if(devId >= 2) { + devId += 2; + } return "hd" + getDevLabelSuffix(devId); + } private String getDevLabelSuffix(int deviceIndex) { @@ -649,7 +651,7 @@ public class LibvirtVMDef { _deviceType = DeviceType.DISK; _diskCacheMode = DiskCacheMode.NONE; _sourcePath = filePath; - _diskLabel = getDevLabel(devId, bus); + _diskLabel = getDevLabel(devId, bus, false); _diskFmtType = diskFmtType; _bus = bus; @@ -659,19 +661,33 @@ public class LibvirtVMDef { _diskType = DiskType.FILE; _deviceType = DeviceType.CDROM; _sourcePath = volPath; - _diskLabel = "hdc"; + _diskLabel = getDevLabel(3, DiskBus.IDE, true); _diskFmtType = DiskFmtType.RAW; _diskCacheMode = DiskCacheMode.NONE; _bus = DiskBus.IDE; } + public void defISODisk(String volPath, Integer devId) { + if (devId == null) { + defISODisk(volPath); + } else { + _diskType = DiskType.FILE; + _deviceType = DeviceType.CDROM; + _sourcePath = volPath; + _diskLabel = getDevLabel(devId, DiskBus.IDE, true); + _diskFmtType = DiskFmtType.RAW; + _diskCacheMode = DiskCacheMode.NONE; + _bus = DiskBus.IDE; + } + } + public void defBlockBasedDisk(String diskName, int devId, DiskBus bus) { _diskType = DiskType.BLOCK; _deviceType = DeviceType.DISK; _diskFmtType = DiskFmtType.RAW; _diskCacheMode = DiskCacheMode.NONE; _sourcePath = diskName; - _diskLabel = getDevLabel(devId, bus); + _diskLabel = getDevLabel(devId, bus, false); _bus = bus; } @@ -696,7 +712,7 @@ public class LibvirtVMDef { _sourcePort = sourcePort; _authUserName = authUserName; 
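Note on the LibvirtVMDef.getDevLabel() change above: IDE labels are now computed differently for ISO devices and data disks. For an ISO the device id is shifted down by one, so the default defISODisk() call with id 3 still yields the traditional hdc; data disks with id >= 2 are shifted up by two so they no longer collide with the ISO slots. A standalone sketch of that mapping, assuming the (not shown) getDevLabelSuffix() maps 0->a, 1->b, ... for small indices:

    public class DevLabelSketch {
        // Assumed to behave like LibvirtVMDef.getDevLabelSuffix() for indices 0..25.
        static String suffix(int i) {
            return String.valueOf((char) ('a' + i));
        }

        // Mirrors the new IDE branch of getDevLabel(devId, bus, forIso).
        static String ideLabel(int devId, boolean forIso) {
            if (forIso) {
                devId--;          // ISO: id 3 -> hdc, matching the old hard-coded label
            } else if (devId >= 2) {
                devId += 2;       // data disk: skip the slots reserved for ISOs
            }
            return "hd" + suffix(devId);
        }

        public static void main(String[] args) {
            System.out.println(ideLabel(3, true));  // hdc (default ISO)
            System.out.println(ideLabel(4, true));  // hdd
            System.out.println(ideLabel(1, false)); // hdb
            System.out.println(ideLabel(2, false)); // hde
        }
    }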
_authSecretUUID = authSecretUUID; - _diskLabel = getDevLabel(devId, bus); + _diskLabel = getDevLabel(devId, bus, false); _bus = bus; _diskProtocol = protocol; } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAttachIsoCommandWrapper.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAttachIsoCommandWrapper.java index 3c6da922e46..a2f62e6cdce 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAttachIsoCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAttachIsoCommandWrapper.java @@ -24,7 +24,7 @@ import java.net.URISyntaxException; import org.libvirt.Connect; import org.libvirt.LibvirtException; -import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachIsoAnswer; import com.cloud.agent.api.AttachIsoCommand; import com.cloud.exception.InternalErrorException; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; @@ -32,23 +32,19 @@ import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; @ResourceWrapper(handles = AttachIsoCommand.class) -public final class LibvirtAttachIsoCommandWrapper extends CommandWrapper { +public final class LibvirtAttachIsoCommandWrapper extends CommandWrapper { @Override - public Answer execute(final AttachIsoCommand command, final LibvirtComputingResource libvirtComputingResource) { + public AttachIsoAnswer execute(final AttachIsoCommand command, final LibvirtComputingResource libvirtComputingResource) { try { final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper(); final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(command.getVmName()); - libvirtComputingResource.attachOrDetachISO(conn, command.getVmName(), command.getIsoPath(), command.isAttach()); - } catch (final LibvirtException e) { - return new Answer(command, false, e.toString()); - } catch (final URISyntaxException e) { - return new Answer(command, false, e.toString()); - } catch (final InternalErrorException e) { - return new Answer(command, false, e.toString()); + libvirtComputingResource.attachOrDetachISO(conn, command.getVmName(), command.getIsoPath(), command.isAttach(), command.getDeviceKey()); + } catch (final LibvirtException|URISyntaxException|InternalErrorException e) { + return new AttachIsoAnswer(command, e); } - return new Answer(command); + return new AttachIsoAnswer(command, command.getDeviceKey()); } } \ No newline at end of file diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index d052069e8c6..6248b7c6b3a 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -27,7 +27,6 @@ import java.nio.channels.SocketChannel; import java.rmi.RemoteException; import com.cloud.configuration.Resource.ResourceType; -import org.joda.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -43,13 +42,13 @@ import java.util.Random; import java.util.Set; import java.util.TimeZone; import java.util.UUID; - import javax.naming.ConfigurationException; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.apache.log4j.NDC; 
-import org.apache.commons.lang.StringUtils; - +import org.joda.time.Duration; import com.google.gson.Gson; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.BoolPolicy; @@ -85,7 +84,6 @@ import com.vmware.vim25.VirtualDevice; import com.vmware.vim25.VirtualDeviceBackingInfo; import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualDeviceConfigSpecOperation; -import com.vmware.vim25.VirtualUSBController; import com.vmware.vim25.VirtualDisk; import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo; import com.vmware.vim25.VirtualEthernetCard; @@ -102,6 +100,7 @@ import com.vmware.vim25.VirtualMachineRelocateSpec; import com.vmware.vim25.VirtualMachineRelocateSpecDiskLocator; import com.vmware.vim25.VirtualMachineRuntimeInfo; import com.vmware.vim25.VirtualMachineVideoCard; +import com.vmware.vim25.VirtualUSBController; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; import org.apache.cloudstack.api.ApiConstants; @@ -111,10 +110,10 @@ import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.commons.lang.math.NumberUtils; - +import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachIsoAnswer; import com.cloud.agent.api.AttachIsoCommand; import com.cloud.agent.api.BackupSnapshotAnswer; import com.cloud.agent.api.BackupSnapshotCommand; @@ -259,7 +258,6 @@ import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.NetworkDetails; import com.cloud.hypervisor.vmware.mo.TaskMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; -import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; import com.cloud.hypervisor.vmware.mo.VirtualSwitchType; @@ -282,8 +280,8 @@ import com.cloud.storage.resource.StoragePoolResource; import com.cloud.storage.resource.StorageSubsystemCommandHandler; import com.cloud.storage.resource.VmwareStorageLayoutHelper; import com.cloud.storage.resource.VmwareStorageProcessor; -import com.cloud.storage.resource.VmwareStorageSubsystemCommandHandler; import com.cloud.storage.resource.VmwareStorageProcessor.VmwareStorageProcessorConfigurableFields; +import com.cloud.storage.resource.VmwareStorageSubsystemCommandHandler; import com.cloud.storage.template.TemplateProp; import com.cloud.utils.DateUtil; import com.cloud.utils.ExecutionResult; @@ -1493,7 +1491,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - + Collections.sort(validatedDisks, (d1, d2) -> d1.getDiskSeq().compareTo(d2.getDiskSeq())); return validatedDisks.toArray(new DiskTO[0]); } @@ -1880,35 +1878,42 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } + i++; } else { // Note: we will always plug a CDROM device if (volIso != null) { - TemplateObjectTO iso = (TemplateObjectTO)volIso.getData(); + for (DiskTO vol : disks) { + if (vol.getType() == Volume.Type.ISO) { - if (iso.getPath() != null && !iso.getPath().isEmpty()) { - 
DataStoreTO imageStore = iso.getDataStore(); - if (!(imageStore instanceof NfsTO)) { - s_logger.debug("unsupported protocol"); - throw new Exception("unsupported protocol"); - } - NfsTO nfsImageStore = (NfsTO)imageStore; - String isoPath = nfsImageStore.getUrl() + File.separator + iso.getPath(); - Pair isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, isoPath); - assert (isoDatastoreInfo != null); - assert (isoDatastoreInfo.second() != null); + TemplateObjectTO iso = (TemplateObjectTO) vol.getData(); - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, - i + 1); - deviceConfigSpecArray[i].setDevice(isoInfo.first()); - if (isoInfo.second()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); + if (iso.getPath() != null && !iso.getPath().isEmpty()) { + DataStoreTO imageStore = iso.getDataStore(); + if (!(imageStore instanceof NfsTO)) { + s_logger.debug("unsupported protocol"); + throw new Exception("unsupported protocol"); + } + NfsTO nfsImageStore = (NfsTO) imageStore; + String isoPath = nfsImageStore.getUrl() + File.separator + iso.getPath(); + Pair isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, isoPath); + assert (isoDatastoreInfo != null); + assert (isoDatastoreInfo.second() != null); + + deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); + Pair isoInfo = + VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1); + deviceConfigSpecArray[i].setDevice(isoInfo.first()); + if (isoInfo.second()) { + if (s_logger.isDebugEnabled()) + s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); + } else { + if (s_logger.isDebugEnabled()) + s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); + } + } + i++; } } } else { @@ -1926,10 +1931,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } + i++; } } - i++; + // // Setup ROOT/DATA disk devices @@ -2910,9 +2916,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private static VolumeObjectTO getVolumeInSpec(VirtualMachineTO vmSpec, VolumeObjectTO srcVol) { for (DiskTO disk : vmSpec.getDisks()) { - VolumeObjectTO vol = (VolumeObjectTO)disk.getData(); - if (vol.getId() == srcVol.getId()) - return vol; + if (disk.getData() instanceof VolumeObjectTO) { + VolumeObjectTO vol = (VolumeObjectTO) disk.getData(); + if (vol.getId() == srcVol.getId()) + return vol; + } } return null; @@ -3237,7 +3245,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String isoFileName = isoUrl.substring(isoFileNameStartPos); int templateRootPos = isoUrl.indexOf("template/tmpl"); - if (templateRootPos < 0) { + templateRootPos = (templateRootPos < 0 ? 
isoUrl.indexOf("ConfigDrive") : templateRootPos); + if (templateRootPos < 0 ) { throw new Exception("Invalid ISO path info"); } @@ -4195,7 +4204,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return str.replace('/', '-'); } - protected Answer execute(AttachIsoCommand cmd) { + protected AttachIsoAnswer execute(AttachIsoCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource AttachIsoCommand: " + _gson.toJson(cmd)); } @@ -4221,7 +4230,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } else { try { if (!vmMo.unmountToolsInstaller()) { - return new Answer(cmd, false, + return new AttachIsoAnswer(cmd, false, "Failed to unmount vmware-tools installer ISO as the corresponding CDROM device is locked by VM. Please unmount the CDROM device inside the VM and ret-try."); } } catch (Throwable e) { @@ -4229,7 +4238,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - return new Answer(cmd); + return new AttachIsoAnswer(cmd); } } @@ -4244,7 +4253,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa int isoNameStartPos = isoPath.lastIndexOf('/'); String isoFileName = isoPath.substring(isoNameStartPos + 1); - String isoStorePathFromRoot = isoPath.substring(storeUrl.length(), isoNameStartPos); + String isoStorePathFromRoot = isoPath.substring(storeUrl.length() + 1, isoNameStartPos + 1); + // TODO, check if iso is already attached, or if there is a previous // attachment @@ -4253,12 +4263,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String isoDatastorePath = String.format("[%s] %s%s", storeName, isoStorePathFromRoot, isoFileName); if (cmd.isAttach()) { - vmMo.attachIso(isoDatastorePath, morSecondaryDs, true, false); + vmMo.attachIso(isoDatastorePath, morSecondaryDs, true, false, cmd.getDeviceKey()); + return new AttachIsoAnswer(cmd); } else { - vmMo.detachIso(isoDatastorePath); + int key = vmMo.detachIso(isoDatastorePath, cmd.isForce()); + return new AttachIsoAnswer(cmd, key); } - return new Answer(cmd); } catch (Throwable e) { if (e instanceof RemoteException) { s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); @@ -4268,11 +4279,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (cmd.isAttach()) { String msg = "AttachIsoCommand(attach) failed due to " + VmwareHelper.getExceptionMessage(e); s_logger.error(msg, e); - return new Answer(cmd, false, msg); + return new AttachIsoAnswer(cmd, false, msg); } else { String msg = "AttachIsoCommand(detach) failed due to " + VmwareHelper.getExceptionMessage(e); s_logger.warn(msg, e); - return new Answer(cmd, false, msg); + return new AttachIsoAnswer(cmd, false, msg); } } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index b2925b1aa75..9cbc7a74fbc 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -33,11 +33,11 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import com.google.common.base.Strings; import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.commons.lang.StringUtils; import 
org.apache.log4j.Logger; +import com.google.common.base.Strings; import com.google.gson.Gson; import com.vmware.vim25.HostHostBusAdapter; import com.vmware.vim25.HostInternetScsiHba; @@ -71,6 +71,7 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -93,7 +94,6 @@ import com.cloud.hypervisor.vmware.mo.HostMO; import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO; import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.NetworkDetails; -import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.resource.VmwareResource; @@ -1525,7 +1525,7 @@ public class VmwareStorageProcessor implements StorageProcessor { int isoNameStartPos = isoPath.lastIndexOf('/'); String isoFileName = isoPath.substring(isoNameStartPos + 1); - String isoStorePathFromRoot = isoPath.substring(storeUrl.length(), isoNameStartPos); + String isoStorePathFromRoot = isoPath.substring(storeUrl.length() + 1, isoNameStartPos); // TODO, check if iso is already attached, or if there is a previous // attachment diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java index 2fa25be8e66..5f03ac714d2 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java @@ -47,7 +47,7 @@ public final class CitrixPrepareForMigrationCommandWrapper extends CommandWrappe String configDriveLabel = vm.getConfigDriveLabel(); if (configDriveLabel == null) { - configDriveLabel = "config"; + configDriveLabel = "config-2"; } if (s_logger.isDebugEnabled()) { diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java index 26fdaac5e14..abe7a71098b 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/element/NuageVspElement.java @@ -19,7 +19,39 @@ package com.cloud.network.element; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.annotation.Nullable; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import net.nuage.vsp.acs.client.api.model.VspAclRule; +import net.nuage.vsp.acs.client.api.model.VspDhcpDomainOption; +import net.nuage.vsp.acs.client.api.model.VspNetwork; +import net.nuage.vsp.acs.client.api.model.VspStaticNat; + +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.log4j.Logger; + +import com.google.common.base.Function; +import com.google.common.base.Preconditions; 
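Note on the isoStorePathFromRoot changes in VmwareResource and VmwareStorageProcessor above: the old substring kept the leading '/' of the store-relative folder and dropped the separator before the file name; the VmwareResource variant now drops the leading slash and keeps the trailing one, while the storage-processor variant only drops the leading slash. A small worked example with a hypothetical store URL and ISO path:

    public class IsoPathSketch {
        public static void main(String[] args) {
            String storeUrl = "nfs://10.0.0.1/export/secondary";                   // hypothetical
            String isoPath  = storeUrl + "/ConfigDrive/i-2-10-VM/configdrive.iso"; // hypothetical
            int isoNameStartPos = isoPath.lastIndexOf('/');
            String isoFileName  = isoPath.substring(isoNameStartPos + 1);

            // Old behaviour: leading '/' kept, no separator before the file name.
            String oldRoot = isoPath.substring(storeUrl.length(), isoNameStartPos);
            // New behaviour (VmwareResource): no leading '/', trailing separator kept.
            String newRoot = isoPath.substring(storeUrl.length() + 1, isoNameStartPos + 1);

            System.out.println(String.format("[%s] %s%s", "secondary", oldRoot, isoFileName));
            // [secondary] /ConfigDrive/i-2-10-VMconfigdrive.iso   (no separator before the file name)
            System.out.println(String.format("[%s] %s%s", "secondary", newRoot, isoFileName));
            // [secondary] ConfigDrive/i-2-10-VM/configdrive.iso
        }
    }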
+import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; + +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.cloudstack.resourcedetail.VpcDetailVO; +import org.apache.cloudstack.resourcedetail.dao.VpcDetailsDao; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.StartupCommand; @@ -56,7 +88,9 @@ import com.cloud.network.dao.FirewallRulesCidrsDao; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; +import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkServiceMapDao; +import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.manager.NuageVspManager; @@ -95,33 +129,6 @@ import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; -import com.google.common.base.Function; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; -import com.google.common.collect.Sets; -import net.nuage.vsp.acs.client.api.model.VspAclRule; -import net.nuage.vsp.acs.client.api.model.VspDhcpDomainOption; -import net.nuage.vsp.acs.client.api.model.VspNetwork; -import net.nuage.vsp.acs.client.api.model.VspStaticNat; -import org.apache.cloudstack.api.InternalIdentity; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.network.ExternalNetworkDeviceManager; -import org.apache.cloudstack.resourcedetail.VpcDetailVO; -import org.apache.cloudstack.resourcedetail.dao.VpcDetailsDao; -import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; - -import javax.annotation.Nullable; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; public class NuageVspElement extends AdapterBase implements ConnectivityProvider, IpDeployer, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, DhcpServiceProvider, ResourceStateAdapter, VpcProvider, NetworkACLServiceProvider { @@ -159,6 +166,8 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider @Inject NetworkServiceMapDao _ntwkSrvcDao; @Inject + NetworkDao _networkDao; + @Inject DomainDao _domainDao; @Inject IPAddressDao _ipAddressDao; @@ -267,6 +276,13 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider return false; } + if (!_nuageVspEntityBuilder.usesVirtualRouter(offering.getId())) { + // Update broadcast uri if VR is no longer used + NetworkVO networkToUpdate = _networkDao.findById(network.getId()); + String broadcastUriStr = networkToUpdate.getUuid() + "/null"; + networkToUpdate.setBroadcastUri(Networks.BroadcastDomainType.Vsp.toUri(broadcastUriStr)); + _networkDao.update(network.getId(), networkToUpdate); + } VspNetwork vspNetwork = 
_nuageVspEntityBuilder.buildVspNetwork(network); List ingressFirewallRules = getFirewallRulesToApply(network, FirewallRule.TrafficType.Ingress); @@ -547,10 +563,11 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider List vlans = _vlanDao.listByZone(newVlan.getDataCenterId()); if (CollectionUtils.isNotEmpty(vlans)) { boolean newVlanUnderlay = NuageVspUtil.isUnderlayEnabledForVlan(_vlanDetailsDao, newVlan); + final String newCidr = NetUtils.getCidrFromGatewayAndNetmask(newVlan.getVlanGateway(), newVlan.getVlanNetmask()); + for (VlanVO vlan : vlans) { if (vlan.getId() == newVlan.getId()) continue; - final String newCidr = NetUtils.getCidrFromGatewayAndNetmask(newVlan.getVlanGateway(), newVlan.getVlanNetmask()); final String existingCidr = NetUtils.getCidrFromGatewayAndNetmask(vlan.getVlanGateway(), vlan.getVlanNetmask()); NetUtils.SupersetOrSubset supersetOrSubset = NetUtils.isNetowrkASubsetOrSupersetOfNetworkB(newCidr, existingCidr); @@ -635,7 +652,7 @@ public class NuageVspElement extends AdapterBase implements ConnectivityProvider @Override public boolean implementVpc(Vpc vpc, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { List vpcOfferingServices = _vpcOfferingSrvcDao.listByVpcOffId(vpc.getVpcOfferingId()); - Multimap supportedVpcServices = NuageVspManagerImpl.NUAGE_VSP_VPC_SERVICE_MAP; + Multimap supportedVpcServices = NuageVspManagerImpl.SUPPORTED_NUAGE_VSP_VPC_SERVICE_MAP; for (VpcOfferingServiceMapVO vpcOfferingService : vpcOfferingServices) { Network.Service service = Network.Service.getService(vpcOfferingService.getService()); if (!supportedVpcServices.containsKey(service)) { diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java index a36bc0a07d0..23a0efce739 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/guru/NuageVspGuestNetworkGuru.java @@ -61,6 +61,7 @@ import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterDetailVO; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.DataCenterDetailsDao; +import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.deploy.DeployDestination; import com.cloud.deploy.DeploymentPlan; import com.cloud.domain.dao.DomainDao; @@ -88,6 +89,7 @@ import com.cloud.user.Account; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.util.NuageVspEntityBuilder; +import com.cloud.util.NuageVspUtil; import com.cloud.utils.StringUtils; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; @@ -130,6 +132,8 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru implements Networ NetworkOrchestrationService _networkOrchestrationService; @Inject DataCenterDetailsDao _dcDetailsDao; + @Inject + VlanDetailsDao _vlanDetailsDao; public NuageVspGuestNetworkGuru() { super(); @@ -299,6 +303,24 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru implements Networ } VspNetwork vspNetwork = _nuageVspEntityBuilder.buildVspNetwork(implemented, true); + + if (vspNetwork.isShared()) { + Boolean previousUnderlay= null; + for (VlanVO vlan : _vlanDao.listVlansByNetworkId(networkId)) { + boolean underlay = NuageVspUtil.isUnderlayEnabledForVlan(_vlanDetailsDao, vlan); + if 
(previousUnderlay == null || underlay == previousUnderlay) { + previousUnderlay = underlay; + } else { + throw new CloudRuntimeException("Mixed values for the underlay flag for IP ranges in the same subnet is not supported"); + } + } + if (previousUnderlay != null) { + vspNetwork = new VspNetwork.Builder().fromObject(vspNetwork) + .vlanUnderlay(previousUnderlay) + .build(); + } + } + String tenantId = context.getDomain().getName() + "-" + context.getAccount().getAccountId(); String broadcastUriStr = implemented.getUuid() + "/" + vspNetwork.getVirtualRouterIp(); implemented.setBroadcastUri(Networks.BroadcastDomainType.Vsp.toUri(broadcastUriStr)); @@ -495,6 +517,16 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru implements Networ //update the extra DHCP options } + // Update broadcast Uri to enable VR ip update + if (!network.getBroadcastUri().getPath().substring(1).equals(vspNetwork.getVirtualRouterIp())) { + NetworkVO networkToUpdate = _networkDao.findById(network.getId()); + String broadcastUriStr = networkToUpdate.getUuid() + "/" + vspNetwork.getVirtualRouterIp(); + networkToUpdate.setBroadcastUri(Networks.BroadcastDomainType.Vsp.toUri(broadcastUriStr)); + _networkDao.update(network.getId(), networkToUpdate); + if (network instanceof NetworkVO) { + ((NetworkVO) network).setBroadcastUri(networkToUpdate.getBroadcastUri()); + } + } nic.setBroadcastUri(network.getBroadcastUri()); nic.setIsolationUri(network.getBroadcastUri()); @@ -586,8 +618,15 @@ public class NuageVspGuestNetworkGuru extends GuestNetworkGuru implements Networ } } + + private boolean isServiceProvidedByVR(Network network, Network.Service service ) { + return (_networkModel.areServicesSupportedInNetwork(network.getId(), service) && + ( _networkModel.isProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.VirtualRouter) || + _networkModel.isProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.VPCVirtualRouter))); + } + private void checkMultipleSubnetsCombinedWithUseData(Network network) { - if (_ntwkOfferingSrvcDao.listServicesForNetworkOffering(network.getNetworkOfferingId()).contains(Network.Service.UserData.getName())) { + if (isServiceProvidedByVR(network, Network.Service.UserData)) { List vlanVOs = _vlanDao.listVlansByNetworkId(network.getId()); if (vlanVOs.stream() .map(VlanVO::getVlanGateway) diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java index 96016e6bb53..748de100c0c 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/manager/NuageVspManagerImpl.java @@ -161,7 +161,8 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, private static final Logger s_logger = Logger.getLogger(NuageVspManagerImpl.class); - public static final Multimap NUAGE_VSP_VPC_SERVICE_MAP; + public static final Multimap DEFAULT_NUAGE_VSP_VPC_SERVICE_MAP; + public static final Multimap SUPPORTED_NUAGE_VSP_VPC_SERVICE_MAP; private static final ConfigKey[] NUAGE_VSP_CONFIG_KEYS = new ConfigKey[] { NuageVspConfigDns, NuageVspDnsExternal, NuageVspConfigGateway, NuageVspSharedNetworkDomainTemplateName, NuageVspVpcDomainTemplateName, NuageVspIsolatedNetworkDomainTemplateName }; @@ -216,8 +217,11 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, static { Set nuageVspProviders 
= ImmutableSet.of(Network.Provider.NuageVsp); Set vrProviders = ImmutableSet.of(Network.Provider.VPCVirtualRouter); - Set lbProviders = ImmutableSet.of(Network.Provider.InternalLbVm); - NUAGE_VSP_VPC_SERVICE_MAP = ImmutableMultimap.builder() + Set defaultLbProviders = ImmutableSet.of(Network.Provider.InternalLbVm); + Set supportedLbProviders = ImmutableSet.of(Network.Provider.InternalLbVm); + Set supportedUserDataProviders = ImmutableSet.of(Network.Provider.VPCVirtualRouter, Network.Provider.ConfigDrive); + + DEFAULT_NUAGE_VSP_VPC_SERVICE_MAP = ImmutableMultimap.builder() .putAll(Network.Service.Connectivity, nuageVspProviders) .putAll(Network.Service.Gateway, nuageVspProviders) .putAll(Network.Service.Dhcp, nuageVspProviders) @@ -225,9 +229,15 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, .putAll(Network.Service.SourceNat, nuageVspProviders) .putAll(Network.Service.NetworkACL, nuageVspProviders) .putAll(Network.Service.UserData, vrProviders) - .putAll(Network.Service.Lb, lbProviders) + .putAll(Network.Service.Lb, defaultLbProviders) .putAll(Network.Service.Dns, vrProviders) .build(); + + Multimap builder = HashMultimap.create(DEFAULT_NUAGE_VSP_VPC_SERVICE_MAP); + builder.putAll(Network.Service.UserData, supportedUserDataProviders); + builder.putAll(Network.Service.Lb, supportedLbProviders); + + SUPPORTED_NUAGE_VSP_VPC_SERVICE_MAP = ImmutableMultimap.copyOf(builder); } private Listener _nuageVspResourceListener; @@ -1193,9 +1203,9 @@ public class NuageVspManagerImpl extends ManagerBase implements NuageVspManager, s_logger.debug("Creating default Nuage VPC offering " + nuageVPCOfferingName); } - createVpcOffering(nuageVPCOfferingName, nuageVPCOfferingDisplayText, NUAGE_VSP_VPC_SERVICE_MAP, true, VpcOffering.State.Enabled, null); + createVpcOffering(nuageVPCOfferingName, nuageVPCOfferingDisplayText, DEFAULT_NUAGE_VSP_VPC_SERVICE_MAP, true, VpcOffering.State.Enabled, null); } else { - updateVpcOffering(offering, NUAGE_VSP_VPC_SERVICE_MAP); + updateVpcOffering(offering, DEFAULT_NUAGE_VSP_VPC_SERVICE_MAP); } } }); diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java b/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java index a96b67097e8..dae21a93a75 100644 --- a/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/network/resource/NuageVspResource.java @@ -22,9 +22,11 @@ package com.cloud.network.resource; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; - import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; +import com.google.common.base.Strings; + import net.nuage.vsp.acs.client.api.NuageVspAclClient; import net.nuage.vsp.acs.client.api.NuageVspApiClient; import net.nuage.vsp.acs.client.api.NuageVspElementClient; @@ -36,10 +38,6 @@ import net.nuage.vsp.acs.client.common.RequestType; import net.nuage.vsp.acs.client.common.model.NuageVspEntity; import net.nuage.vsp.acs.client.exception.NuageVspException; -import org.apache.log4j.Logger; - -import com.google.common.base.Strings; - import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; diff --git a/plugins/network-elements/nuage-vsp/src/com/cloud/util/NuageVspEntityBuilder.java b/plugins/network-elements/nuage-vsp/src/com/cloud/util/NuageVspEntityBuilder.java index b34337fe2aa..cd986e5cbcb 100644 --- 
a/plugins/network-elements/nuage-vsp/src/com/cloud/util/NuageVspEntityBuilder.java +++ b/plugins/network-elements/nuage-vsp/src/com/cloud/util/NuageVspEntityBuilder.java @@ -227,7 +227,6 @@ public class NuageVspEntityBuilder { if (networkOffering.getGuestType() == Network.GuestType.Shared) { List vlans = _vlanDao.listVlansByNetworkIdIncludingRemoved(network.getId()); - List vspAddressRanges = vlans.stream() .map(vlan -> new VspAddressRange.Builder().gateway(vlan.getVlanGateway()).netmask(vlan.getVlanNetmask()).build()) @@ -290,9 +289,11 @@ public class NuageVspEntityBuilder { .findFirst() .get(); + boolean underlayEnabled = NuageVspUtil.isUnderlayEnabledForVlan(_vlanDetailsDao, matchingVlan); return new VspNetwork.Builder().fromObject(vspNetwork) .gateway(matchingVlan.getVlanGateway()) .cidr(NetUtils.getCidrFromGatewayAndNetmask(matchingVlan.getVlanGateway(), matchingVlan.getVlanNetmask())) + .vlanUnderlay(underlayEnabled) .build(); } diff --git a/plugins/network-elements/nuage-vsp/test/com/cloud/network/element/NuageVspElementTest.java b/plugins/network-elements/nuage-vsp/test/com/cloud/network/element/NuageVspElementTest.java index 43d465fb5f3..46046fd9643 100644 --- a/plugins/network-elements/nuage-vsp/test/com/cloud/network/element/NuageVspElementTest.java +++ b/plugins/network-elements/nuage-vsp/test/com/cloud/network/element/NuageVspElementTest.java @@ -60,7 +60,9 @@ import com.cloud.network.NuageVspDeviceVO; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; +import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkServiceMapDao; +import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.NuageVspDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkVO; @@ -109,6 +111,7 @@ public class NuageVspElementTest extends NuageTest { @Mock private DomainRouterDao _domainRouterDao; @Mock private ResourceManager _resourceManager; @Mock private ResourceTagDao _resourceTagDao; + @Mock private NetworkDao _networkDao; @Before public void setUp() throws Exception { @@ -173,6 +176,11 @@ public class NuageVspElementTest extends NuageTest { when(network.getDataCenterId()).thenReturn(NETWORK_ID); when(_networkModel.isProviderForNetwork(Provider.NuageVsp, NETWORK_ID)).thenReturn(true); + final NetworkVO networkVO = mock(NetworkVO.class); + when(network.getUuid()).thenReturn("aaaaaa"); + + when(_networkDao.findById(NETWORK_ID)).thenReturn(networkVO); + final NetworkOffering offering = mock(NetworkOffering.class); when(offering.getId()).thenReturn(NETWORK_ID); when(offering.getTrafficType()).thenReturn(TrafficType.Guest); diff --git a/plugins/network-elements/nuage-vsp/test/com/cloud/network/guru/NuageVspGuestNetworkGuruTest.java b/plugins/network-elements/nuage-vsp/test/com/cloud/network/guru/NuageVspGuestNetworkGuruTest.java index 957408d06dc..b1d6771a36a 100644 --- a/plugins/network-elements/nuage-vsp/test/com/cloud/network/guru/NuageVspGuestNetworkGuruTest.java +++ b/plugins/network-elements/nuage-vsp/test/com/cloud/network/guru/NuageVspGuestNetworkGuruTest.java @@ -335,6 +335,8 @@ public class NuageVspGuestNetworkGuruTest extends NuageTest { when(reservationContext.getAccount()).thenReturn(networksAccount); when(reservationContext.getDomain()).thenReturn(networksDomain); + when(_networkDao.findById(NETWORK_ID)).thenReturn(network); + _nuageVspGuestNetworkGuru.reserve(nicProfile, network, vmProfile, mock(DeployDestination.class), 
reservationContext); } diff --git a/plugins/network-elements/nuage-vsp/test/com/cloud/util/NuageVspEntityBuilderTest.java b/plugins/network-elements/nuage-vsp/test/com/cloud/util/NuageVspEntityBuilderTest.java index 52d8f73f871..b34b1a8cd3f 100644 --- a/plugins/network-elements/nuage-vsp/test/com/cloud/util/NuageVspEntityBuilderTest.java +++ b/plugins/network-elements/nuage-vsp/test/com/cloud/util/NuageVspEntityBuilderTest.java @@ -96,8 +96,8 @@ public class NuageVspEntityBuilderTest extends NuageTest { @Mock private NetworkDetailsDao _networkDetailsDao; @Mock private NetworkOfferingDao _networkOfferingDao; @Mock private NetworkOfferingServiceMapDao _networkOfferingServiceMapDao; - @Mock private NicSecondaryIpDao _nicSecondaryIpDao; @Mock private NicDao _nicDao; + @Mock private NicSecondaryIpDao _nicSecondaryIpDao; @Mock private VlanDao _vlanDao; @Mock private VlanDetailsDao _vlanDetailsDao; @Mock private VpcDao _vpcDao; diff --git a/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml b/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml index 480561292da..3df3a05182d 100644 --- a/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml +++ b/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml @@ -61,4 +61,8 @@ + + + + diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index efa9ffbde6b..e583b717a16 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -30,20 +30,19 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.Collections; - import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; -import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; + +import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; - import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; @@ -2349,14 +2348,14 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi final List vmData = new ArrayList(); if (userData != null) { - vmData.add(new String[]{"userdata", "user-data", new String(Base64.decodeBase64(userData),StringUtils.getPreferredCharset())}); + vmData.add(new String[]{USERDATA_DIR, USERDATA_FILE, new String(Base64.decodeBase64(userData),StringUtils.getPreferredCharset())}); } - vmData.add(new String[]{"metadata", "service-offering", StringUtils.unicodeEscape(serviceOffering)}); - vmData.add(new String[]{"metadata", "availability-zone", StringUtils.unicodeEscape(zoneName)}); - vmData.add(new String[]{"metadata", "local-hostname", StringUtils.unicodeEscape(vmName)}); - vmData.add(new String[]{"metadata", "instance-id", vmName}); - vmData.add(new String[]{"metadata", "vm-id", String.valueOf(vmId)}); - vmData.add(new String[]{"metadata", "public-keys", publicKey}); + vmData.add(new String[]{METATDATA_DIR, SERVICE_OFFERING_FILE, StringUtils.unicodeEscape(serviceOffering)}); + vmData.add(new 
String[]{METATDATA_DIR, AVAILABILITY_ZONE_FILE, StringUtils.unicodeEscape(zoneName)}); + vmData.add(new String[]{METATDATA_DIR, LOCAL_HOSTNAME_FILE, StringUtils.unicodeEscape(vmName)}); + vmData.add(new String[]{METATDATA_DIR, INSTANCE_ID_FILE, vmName}); + vmData.add(new String[]{METATDATA_DIR, VM_ID_FILE, String.valueOf(vmId)}); + vmData.add(new String[]{METATDATA_DIR, PUBLIC_KEYS_FILE, publicKey}); String cloudIdentifier = _configDao.getValue("cloud.identifier"); if (cloudIdentifier == null) { @@ -2364,7 +2363,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } else { cloudIdentifier = "CloudStack-{" + cloudIdentifier + "}"; } - vmData.add(new String[]{"metadata", "cloud-identifier", cloudIdentifier}); + vmData.add(new String[]{METATDATA_DIR, CLOUD_IDENTIFIER_FILE, cloudIdentifier}); if (password != null && !password.isEmpty() && !password.equals("saved_password")) { @@ -2385,10 +2384,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi BigInteger bigInt = new BigInteger(1, digest); String hashtext = bigInt.toString(16); - vmData.add(new String[]{"password", "vm-password-md5checksum", hashtext}); + vmData.add(new String[]{PASSWORD_DIR, PASSWORD_CHECKSUM_FILE, hashtext}); } - vmData.add(new String[]{"password", "vm-password", password}); + vmData.add(new String[]{PASSWORD_DIR, PASSWORD_FILE, password}); } return vmData; diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 707055fc593..d7ae6276f85 100644 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -2896,6 +2896,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { //Add Internal Load Balancer element as a default network service provider addDefaultInternalLbProviderToPhysicalNetwork(pNetwork.getId()); + // Add the config drive provider + addConfigDriveToPhysicalNetwork(pNetwork.getId()); + return pNetwork; } }); @@ -4198,13 +4201,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { addProviderToPhysicalNetwork(physicalNetworkId, "BaremetalUserdataProvider", null, null); } else if (dvo.getNetworkType() == NetworkType.Advanced) { addProviderToPhysicalNetwork(physicalNetworkId, "BaremetalPxeProvider", null, null); - enableBaremetalProvider("BaremetalPxeProvider"); + enableProvider("BaremetalPxeProvider"); } return null; } - private void enableBaremetalProvider(String providerName) { + private void enableProvider(String providerName) { QueryBuilder q = QueryBuilder.create(PhysicalNetworkServiceProviderVO.class); q.and(q.entity().getProviderName(), SearchCriteria.Op.EQ, providerName); PhysicalNetworkServiceProviderVO provider = q.find(); @@ -4212,6 +4215,22 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _pNSPDao.update(provider.getId(), provider); } + private PhysicalNetworkServiceProvider addConfigDriveToPhysicalNetwork(long physicalNetworkId) { + PhysicalNetworkVO pvo = _physicalNetworkDao.findById(physicalNetworkId); + DataCenterVO dvo = _dcDao.findById(pvo.getDataCenterId()); + if (dvo.getNetworkType() == NetworkType.Advanced) { + + Provider provider = Network.Provider.getProvider("ConfigDrive"); + if (provider == null) { + return null; + } + + addProviderToPhysicalNetwork(physicalNetworkId, Provider.ConfigDrive.getName(), null, null); + enableProvider(Provider.ConfigDrive.getName()); + } + return null; + + } protected 
boolean isNetworkSystem(Network network) { NetworkOffering no = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()); if (no.isSystemOnly()) { diff --git a/server/src/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/com/cloud/network/element/ConfigDriveNetworkElement.java new file mode 100644 index 00000000000..8c0f5009874 --- /dev/null +++ b/server/src/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -0,0 +1,424 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.element; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.inject.Inject; + +import org.apache.log4j.Logger; + +import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachIsoAnswer; +import com.cloud.agent.api.AttachIsoCommand; +import com.cloud.agent.api.HandleConfigDriveIsoCommand; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.UnsupportedServiceException; +import com.cloud.host.Host; +import com.cloud.host.dao.HostDao; +import com.cloud.network.Network; +import com.cloud.network.Network.Capability; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.network.NetworkMigrationResponder; +import com.cloud.network.NetworkModel; +import com.cloud.network.Networks.TrafficType; +import com.cloud.network.PhysicalNetworkServiceProvider; +import com.cloud.network.dao.NetworkDao; +import com.cloud.offering.NetworkOffering; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.Nic; +import com.cloud.vm.NicProfile; +import 
com.cloud.vm.ReservationContext; +import com.cloud.vm.UserVmDetailVO; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.UserVmDetailsDao; + +public class ConfigDriveNetworkElement extends AdapterBase implements NetworkElement, UserDataServiceProvider, + StateListener, NetworkMigrationResponder { + private static final Logger s_logger = Logger.getLogger(ConfigDriveNetworkElement.class); + + private static final Map> capabilities = setCapabilities(); + + @Inject + NetworkDao _networkConfigDao; + @Inject + NetworkModel _networkMgr; + @Inject + UserVmManager _userVmMgr; + @Inject + UserVmDao _userVmDao; + @Inject + UserVmDetailsDao _userVmDetailsDao; + @Inject + DomainRouterDao _routerDao; + @Inject + ConfigurationManager _configMgr; + @Inject + DataCenterDao _dcDao; + @Inject + AgentManager _agentManager; + @Inject + ServiceOfferingDao _serviceOfferingDao; + @Inject + NetworkModel _networkModel; + @Inject + GuestOSCategoryDao _guestOSCategoryDao; + @Inject + GuestOSDao _guestOSDao; + @Inject + HostDao _hostDao; + @Inject + DataStoreManager _dataStoreMgr; + @Inject + EndPointSelector _ep; + @Inject + VolumeOrchestrationService _volumeMgr; + + public final static String CONFIGDRIVEFILENAME = "configdrive.iso"; + public final static String CONFIGDRIVEDIR= "ConfigDrive"; + public final static Integer CONFIGDRIVEDISKSEQ= new Integer(4); + + private boolean canHandle(TrafficType trafficType) { + return trafficType.equals(TrafficType.Guest); + } + + @Override + public boolean start() { + VirtualMachine.State.getStateMachine().registerListener(this); + return super.start(); + } + + @Override + public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ResourceUnavailableException, ConcurrentOperationException, + InsufficientCapacityException { + return canHandle(offering.getTrafficType()); + } + + @Override + public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vmProfile, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, + InsufficientCapacityException, ResourceUnavailableException { + return true; + } + + @Override + public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) { + if (!nic.isDefaultNic()) { + return true; + } + // Remove form secondary storage + DataStore secondaryStore = _dataStoreMgr.getImageStore(network.getDataCenterId()); + + String isoFile = "/" + CONFIGDRIVEDIR + "/" + vm.getInstanceName()+ "/" + CONFIGDRIVEFILENAME; + HandleConfigDriveIsoCommand deleteCommand = new HandleConfigDriveIsoCommand(vm.getVmData(), + vm.getConfigDriveLabel(), secondaryStore.getTO(), isoFile, false, false); + // Delete the ISO on the secondary store + EndPoint endpoint = _ep.select(secondaryStore); + if (endpoint == null) { + s_logger.error(String.format("Secondary store: %s not available", secondaryStore.getName())); + return false; + } + Answer answer = endpoint.sendMessage(deleteCommand); + return answer.getResult(); + } + + @Override + public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { + return true; // assume that the agent will remove userdata etc + } + + @Override + public 
boolean destroy(Network config, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { + return true; // assume that the agent will remove userdata etc + } + + @Override + public Provider getProvider() { + return Provider.ConfigDrive; + } + + @Override + public Map> getCapabilities() { + return capabilities; + } + + private static Map> setCapabilities() { + Map> capabilities = new HashMap<>(); + capabilities.put(Service.UserData, null); + return capabilities; + } + + @Override + public boolean isReady(PhysicalNetworkServiceProvider provider) { + return true; + } + + @Override + public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { + return true; + } + + @Override + public boolean canEnableIndividualServices() { + return false; + } + + @Override + public boolean addPasswordAndUserdata(Network network, NicProfile nic, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) + throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { + UserVmDetailVO vmDetailSshKey = _userVmDetailsDao.findDetail(profile.getId(), "SSH.PublicKey"); + return (canHandle(network.getTrafficType()) && updateConfigDrive(profile, + (vmDetailSshKey!=null?vmDetailSshKey.getValue():null))) + && updateConfigDriveIso(network, profile, dest.getHost(), false); + } + + @Override + public boolean savePassword(Network network, NicProfile nic, VirtualMachineProfile profile) throws ResourceUnavailableException { + if (!(canHandle(network.getTrafficType()) && updateConfigDrive(profile, (String) profile.getParameter(VirtualMachineProfile.Param.VmSshPubKey)))) return false; + return updateConfigDriveIso(network, profile, true); + } + + @Override + public boolean saveSSHKey(Network network, NicProfile nic, VirtualMachineProfile vm, String sshPublicKey) throws ResourceUnavailableException { + if (!(canHandle(network.getTrafficType()) && updateConfigDrive(vm, sshPublicKey))) return false; + return updateConfigDriveIso(network, vm, true); + } + + @Override + public boolean saveUserData(Network network, NicProfile nic, VirtualMachineProfile profile) throws ResourceUnavailableException { + if (!(canHandle(network.getTrafficType()) && updateConfigDrive(profile, (String) profile.getParameter(VirtualMachineProfile.Param.VmSshPubKey)))) return false; + return updateConfigDriveIso(network, profile, true); + } + + @Override + public boolean verifyServicesCombination(Set services) { + return true; + } + + @Override + public boolean preStateTransitionEvent(VirtualMachine.State oldState, VirtualMachine.Event event, VirtualMachine.State newState, VirtualMachine vo, boolean status, Object opaque) { + return true; + } + + @Override + public boolean postStateTransitionEvent(StateMachine2.Transition transition, VirtualMachine vo, boolean status, Object opaque) { + if (transition.getToState().equals(VirtualMachine.State.Expunging) && transition.getEvent().equals(VirtualMachine.Event.ExpungeOperation)) { + Nic nic = _networkModel.getDefaultNic(vo.getId()); + try { + if (nic != null) { + final Network network = _networkMgr.getNetwork(nic.getNetworkId()); + final UserDataServiceProvider userDataUpdateProvider = _networkModel.getUserDataUpdateProvider(network); + final Provider provider = userDataUpdateProvider.getProvider(); + if (provider.equals(Provider.ConfigDrive)) { + // Delete config drive ISO on destroy + DataStore 
secondaryStore = _dataStoreMgr.getImageStore(vo.getDataCenterId()); + String isoFile = "/" + CONFIGDRIVEDIR + "/" + vo.getInstanceName() + "/" + CONFIGDRIVEFILENAME; + HandleConfigDriveIsoCommand deleteCommand = new HandleConfigDriveIsoCommand(null, + null, secondaryStore.getTO(), isoFile, false, false); + EndPoint endpoint = _ep.select(secondaryStore); + if (endpoint == null) { + s_logger.error(String.format("Secondary store: %s not available", secondaryStore.getName())); + return false; + } + Answer answer = endpoint.sendMessage(deleteCommand); + if (!answer.getResult()) { + s_logger.error(String.format("Update ISO failed, details: %s", answer.getDetails())); + return false; + } + } + } + } catch (UnsupportedServiceException usse) {} + } + return true; + } + + @Override + public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) { + if (nic.isDefaultNic() && _networkModel.getUserDataUpdateProvider(network).getProvider().equals(Provider.ConfigDrive)) { + s_logger.trace(String.format("[prepareMigration] for vm: %s", vm.getInstanceName())); + DataStore secondaryStore = _dataStoreMgr.getImageStore(network.getDataCenterId()); + configureConfigDriveDisk(vm, secondaryStore); + return false; + } + else return true; + } + + @Override + public void rollbackMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { + + } + + @Override + public void commitMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { + + } + + private boolean updateConfigDriveIso(Network network, VirtualMachineProfile profile, boolean update) throws ResourceUnavailableException { + return updateConfigDriveIso(network, profile, null, update); + } + + private boolean updateConfigDriveIso(Network network, VirtualMachineProfile profile, Host host, boolean update) throws ResourceUnavailableException { + Integer deviceKey = null; + Long hostId; + if (host == null) { + hostId = (profile.getVirtualMachine().getHostId() == null ? 
profile.getVirtualMachine().getLastHostId(): profile.getVirtualMachine().getHostId()); + } else { + hostId = host.getId(); + } + + DataStore secondaryStore = _dataStoreMgr.getImageStore(network.getDataCenterId()); + // Detach the existing ISO file if the machine is running + if (update && profile.getVirtualMachine().getState().equals(VirtualMachine.State.Running)) { + s_logger.debug("Detach config drive ISO for vm " + profile.getInstanceName() + " in host " + _hostDao.findById(hostId)); + deviceKey = detachIso(secondaryStore, profile.getInstanceName(), hostId); + } + + // Create/Update the iso on the secondary store + s_logger.debug(String.format("%s config drive ISO for vm %s in host %s", + (update?"update":"create"), profile.getInstanceName(), _hostDao.findById(hostId).getName())); + EndPoint endpoint = _ep.select(secondaryStore); + if (endpoint == null ) + throw new ResourceUnavailableException(String.format("%s failed, secondary store not available", + (update?"Update":"Create")),secondaryStore.getClass(),secondaryStore.getId()); + String isoPath = CONFIGDRIVEDIR + "/" + profile.getInstanceName() + "/" + CONFIGDRIVEFILENAME; + HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(profile.getVmData(), + profile.getConfigDriveLabel(), secondaryStore.getTO(), isoPath, true, update); + Answer createIsoAnswer = endpoint.sendMessage(configDriveIsoCommand); + if (!createIsoAnswer.getResult()) { + throw new ResourceUnavailableException(String.format("%s ISO failed, details: %s", + (update?"Update":"Create"), createIsoAnswer.getDetails()),ConfigDriveNetworkElement.class,0L); + } + configureConfigDriveDisk(profile, secondaryStore); + + // Re-attach the ISO if the machine is running + if (update && profile.getVirtualMachine().getState().equals(VirtualMachine.State.Running)) { + s_logger.debug("Re-attach config drive ISO for vm " + profile.getInstanceName() + " in host " + _hostDao.findById(hostId)); + attachIso(secondaryStore, profile.getInstanceName(), hostId, deviceKey); + } + return true; + + } + + private void configureConfigDriveDisk(VirtualMachineProfile profile, DataStore secondaryStore) { + boolean isoAvailable = false; + String isoPath = CONFIGDRIVEDIR + "/" + profile.getInstanceName() + "/" + CONFIGDRIVEFILENAME; + for (DiskTO dataTo : profile.getDisks()) { + if (dataTo.getPath().equals(isoPath)) { + isoAvailable = true; + break; + } + } + if (!isoAvailable) { + TemplateObjectTO dataTO = new TemplateObjectTO(); + dataTO.setDataStore(secondaryStore.getTO()); + dataTO.setUuid(profile.getUuid()); + dataTO.setPath(isoPath); + dataTO.setFormat(Storage.ImageFormat.ISO); + + profile.addDisk(new DiskTO(dataTO, CONFIGDRIVEDISKSEQ.longValue(), isoPath, Volume.Type.ISO)); + } + } + + private boolean updateConfigDrive(VirtualMachineProfile profile, String publicKey) { + UserVmVO vm = _userVmDao.findById(profile.getId()); + if (vm.getType() != VirtualMachine.Type.User) { + return false; + } + // add/update userdata and/or password info into vm profile + Nic defaultNic = _networkModel.getDefaultNic(vm.getId()); + if (defaultNic != null) { + final String serviceOffering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId()).getDisplayText(); + final String zoneName = _dcDao.findById(vm.getDataCenterId()).getName(); + boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); + + List vmData = _networkModel.generateVmData(vm.getUserData(), serviceOffering, 
zoneName, vm.getInstanceName(), vm.getId(), + publicKey, (String) profile.getParameter(VirtualMachineProfile.Param.VmPassword), isWindows); + profile.setVmData(vmData); + profile.setConfigDriveLabel(VirtualMachineManager.VmConfigDriveLabel.value()); + } + return true; + } + + private Integer detachIso (DataStore secondaryStore, String instanceName, Long hostId) throws ResourceUnavailableException { + String isoPath = CONFIGDRIVEDIR + "/" + instanceName + "/" + CONFIGDRIVEFILENAME; + AttachIsoCommand isoCommand = new AttachIsoCommand(instanceName, secondaryStore.getUri() + "/" + isoPath, false, CONFIGDRIVEDISKSEQ, true); + isoCommand.setStoreUrl(secondaryStore.getUri()); + Answer attachIsoAnswer = null; + + try { + attachIsoAnswer = _agentManager.send(hostId, isoCommand); + } catch (OperationTimedoutException e) { + throw new ResourceUnavailableException("Detach ISO failed: " + e.getMessage(), ConfigDriveNetworkElement.class, 0L); + } + + if (!attachIsoAnswer.getResult()) { + throw new ResourceUnavailableException("Detach ISO failed: " + attachIsoAnswer.getDetails(), ConfigDriveNetworkElement.class, 0L); + } + + if (attachIsoAnswer instanceof AttachIsoAnswer) { + return ((AttachIsoAnswer)attachIsoAnswer).getDeviceKey(); + } else { + return CONFIGDRIVEDISKSEQ; + } + } + + private void attachIso (DataStore secondaryStore, String instanceName, Long hostId, Integer deviceKey) throws ResourceUnavailableException { + String isoPath = CONFIGDRIVEDIR + "/" + instanceName + "/" + CONFIGDRIVEFILENAME; + AttachIsoCommand isoCommand = new AttachIsoCommand(instanceName, secondaryStore.getUri() + "/" + isoPath, true); + isoCommand.setStoreUrl(secondaryStore.getUri()); + isoCommand.setDeviceKey(deviceKey); + Answer attachIsoAnswer = null; + try { + attachIsoAnswer = _agentManager.send(hostId, isoCommand); + } catch (OperationTimedoutException e) { + throw new ResourceUnavailableException("Attach ISO failed: " + e.getMessage() ,ConfigDriveNetworkElement.class,0L); + } + if (!attachIsoAnswer.getResult()) { + throw new ResourceUnavailableException("Attach ISO failed: " + attachIsoAnswer.getDetails(),ConfigDriveNetworkElement.class,0L); + } + } + +} diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 5622482b0b7..ab6441ac5b7 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -33,7 +33,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; - import javax.annotation.PostConstruct; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -48,7 +47,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; - import com.cloud.configuration.Config; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenter; @@ -206,7 +204,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis private List vpcElements = null; private final List nonSupportedServices = Arrays.asList(Service.SecurityGroup, Service.Firewall); private final List supportedProviders = Arrays.asList(Provider.VPCVirtualRouter, Provider.NiciraNvp, Provider.InternalLbVm, Provider.Netscaler, - 
Provider.JuniperContrailVpcRouter, Provider.Ovs, Provider.NuageVsp, Provider.BigSwitchBcf); + Provider.JuniperContrailVpcRouter, Provider.Ovs, Provider.NuageVsp, Provider.BigSwitchBcf, Provider.ConfigDrive); int _cleanupInterval; int _maxNetworks; diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index f2e159349e4..0470874f213 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.vm; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -3996,6 +3998,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir protected String validateUserData(String userData, HTTPMethod httpmethod) { byte[] decodedUserData = null; if (userData != null) { + + if (userData.contains("%")) { + try { + userData = URLDecoder.decode(userData, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new InvalidParameterValueException("Url decoding of userdata failed."); + } + } + if (!Base64.isBase64(userData)) { throw new InvalidParameterValueException("User data is not base64 encoded"); } diff --git a/server/test/com/cloud/network/element/ConfigDriveNetworkElementTest.java b/server/test/com/cloud/network/element/ConfigDriveNetworkElementTest.java new file mode 100644 index 00000000000..11da24f95df --- /dev/null +++ b/server/test/com/cloud/network/element/ConfigDriveNetworkElementTest.java @@ -0,0 +1,272 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
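For illustration only (not part of this patch): the new URL-decoding step in validateUserData() exists because Base64 userdata submitted on a GET URL can arrive percent-encoded and would otherwise fail the Base64 check. A minimal, self-contained sketch, with a hypothetical class name and sample value:

    import java.io.UnsupportedEncodingException;
    import java.net.URLDecoder;

    import org.apache.commons.codec.binary.Base64;

    public class UserDataEncodingSketch {
        public static void main(String[] args) throws UnsupportedEncodingException {
            // "Zm+8/w8=" is valid Base64; on a GET URL it typically arrives as "Zm%2B8%2Fw8%3D"
            String urlEncoded = "Zm%2B8%2Fw8%3D";
            System.out.println(Base64.isBase64(urlEncoded));          // false: '%' is not a Base64 character
            String decoded = URLDecoder.decode(urlEncoded, "UTF-8");  // back to "Zm+8/w8="
            System.out.println(Base64.isBase64(decoded));             // true once decoded
        }
    }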
+package com.cloud.network.element; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import org.apache.xerces.impl.dv.util.Base64; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import com.google.common.collect.Maps; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.HandleConfigDriveIsoCommand; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.network.Network; +import com.cloud.network.NetworkModelImpl; +import com.cloud.network.Networks; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkServiceMapDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.offerings.NetworkOfferingVO; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.GuestOSCategoryVO; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.Nic; +import com.cloud.vm.NicProfile; +import com.cloud.vm.UserVmDetailVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachineProfileImpl; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.UserVmDetailsDao; +import com.cloud.vm.dao.VMInstanceDao; + +public class ConfigDriveNetworkElementTest { + + public static final String CLOUD_ID = "xx"; + public static final String PUBLIC_KEY = "publicKey"; + public static final String PASSWORD = "password"; + public static final long NETWORK_ID = 1L; + private final long DATACENTERID = NETWORK_ID; + private final String ZONENAME = "zone1"; + private final String VMINSTANCENAME = "vm_name"; + private final String VMOFFERING = "custom_instance"; + private final long VMID = 30L; + private final String VMUSERDATA = "userdata"; + private final long SOID = 31L; + private final long HOSTID = NETWORK_ID; + private final String HOSTNAME = "host1"; + + @Mock private 
ConfigurationDao _configDao; + @Mock private DataCenterDao _dcDao; + @Mock private DataStoreManager _dataStoreMgr; + @Mock private GuestOSCategoryDao _guestOSCategoryDao ; + @Mock private GuestOSDao _guestOSDao; + @Mock private HostDao _hostDao; + @Mock private ServiceOfferingDao _serviceOfferingDao; + @Mock private UserVmDao _vmDao; + @Mock private VMInstanceDao _vmInstanceDao; + @Mock private UserVmDetailsDao _userVmDetailsDao; + @Mock private NetworkDao _networkDao; + @Mock private NetworkServiceMapDao _ntwkSrvcDao; + + @Mock private DataCenterVO dataCenterVO; + @Mock private DataStore dataStore; + @Mock private DeployDestination deployDestination; + @Mock private EndPoint endpoint; + @Mock private EndPointSelector _ep; + @Mock private GuestOSCategoryVO guestOSCategoryVo; + @Mock private GuestOSVO guestOSVO; + @Mock private HostVO hostVO; + @Mock private NetworkVO network; + @Mock private Nic nic; + @Mock private NicProfile nicp; + @Mock private ServiceOfferingVO serviceOfferingVO; + @Mock private UserVmVO virtualMachine; + + @InjectMocks private final ConfigDriveNetworkElement _configDrivesNetworkElement = new ConfigDriveNetworkElement(); + @InjectMocks @Spy private NetworkModelImpl _networkModel = new NetworkModelImpl(); + + @org.junit.Before + public void setUp() throws NoSuchFieldException, IllegalAccessException { + MockitoAnnotations.initMocks(this); + + _configDrivesNetworkElement._networkModel = _networkModel; + + when(_dataStoreMgr.getImageStore(DATACENTERID)).thenReturn(dataStore); + when(_ep.select(dataStore)).thenReturn(endpoint); + when(_vmDao.findById(VMID)).thenReturn(virtualMachine); + when(_dcDao.findById(DATACENTERID)).thenReturn(dataCenterVO); + when(_hostDao.findById(HOSTID)).thenReturn(hostVO); + doReturn(nic).when(_networkModel).getDefaultNic(VMID); + when(_serviceOfferingDao.findByIdIncludingRemoved(VMID, SOID)).thenReturn(serviceOfferingVO); + when(_guestOSDao.findById(anyLong())).thenReturn(guestOSVO); + when(_guestOSCategoryDao.findById(anyLong())).thenReturn(guestOSCategoryVo); + when(_configDao.getValue("cloud.identifier")).thenReturn(CLOUD_ID); + when(network.getDataCenterId()).thenReturn(DATACENTERID); + when(guestOSCategoryVo.getName()).thenReturn("Linux"); + when(dataCenterVO.getName()).thenReturn(ZONENAME); + when(serviceOfferingVO.getDisplayText()).thenReturn(VMOFFERING); + when(guestOSVO.getCategoryId()).thenReturn(0L); + when(virtualMachine.getGuestOSId()).thenReturn(0L); + when(virtualMachine.getType()).thenReturn(VirtualMachine.Type.User); + when(virtualMachine.getId()).thenReturn(VMID); + when(virtualMachine.getServiceOfferingId()).thenReturn(SOID); + when(virtualMachine.getDataCenterId()).thenReturn(DATACENTERID); + when(virtualMachine.getInstanceName()).thenReturn(VMINSTANCENAME); + when(virtualMachine.getUserData()).thenReturn(Base64.encode(VMUSERDATA.getBytes())); + when(deployDestination.getHost()).thenReturn(hostVO); + when(hostVO.getId()).thenReturn(HOSTID); + when(nic.isDefaultNic()).thenReturn(true); + when(nic.getNetworkId()).thenReturn(NETWORK_ID); + when(network.getId()).thenReturn(NETWORK_ID); + when(_networkModel.getNetwork(NETWORK_ID)).thenReturn(network); + //when(_networkModel.getUserDataUpdateProvider(network)).thenReturn(_configDrivesNetworkElement); + + when(_ntwkSrvcDao.getProviderForServiceInNetwork(NETWORK_ID, Network.Service.UserData)).thenReturn(_configDrivesNetworkElement.getProvider().getName()); + + _networkModel.setNetworkElements(Arrays.asList(_configDrivesNetworkElement)); + _networkModel.start(); + + } + + @Test + 
public void testCanHandle() throws InsufficientCapacityException, ResourceUnavailableException { + final NetworkOfferingVO ntwkoffer = mock(NetworkOfferingVO.class); + when(ntwkoffer.getTrafficType()).thenReturn(Networks.TrafficType.Guest); + assertTrue(_configDrivesNetworkElement.implement(null, ntwkoffer, null,null)); + + when(ntwkoffer.getTrafficType()).thenReturn(Networks.TrafficType.Public); + assertFalse(_configDrivesNetworkElement.implement(null, ntwkoffer, null, null)); + } + + @Test + @SuppressWarnings("unchecked") + public void testExpunge() throws NoTransitionException, NoSuchFieldException, IllegalAccessException { + final StateMachine2 stateMachine = VirtualMachine.State.getStateMachine(); + + final Field listenersField = StateMachine2.class.getDeclaredField("_listeners"); + listenersField.setAccessible(true); + List> listeners = + (List>)listenersField.get(stateMachine); + + listeners.clear(); + + _configDrivesNetworkElement.start(); + + when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Stopped); + when(_vmInstanceDao.updateState(VirtualMachine.State.Stopped, VirtualMachine.Event.ExpungeOperation, VirtualMachine.State.Expunging, virtualMachine, null)).thenReturn(true); + + final Answer answer = mock(Answer.class); + when(endpoint.sendMessage(any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + + stateMachine.transitTo(virtualMachine, VirtualMachine.Event.ExpungeOperation, null, _vmInstanceDao); + + ArgumentCaptor commandCaptor = ArgumentCaptor.forClass(HandleConfigDriveIsoCommand.class); + verify(endpoint, times(1)).sendMessage(commandCaptor.capture()); + HandleConfigDriveIsoCommand deleteCommand = commandCaptor.getValue(); + + assertThat(deleteCommand.isCreate(), is(false)); + assertThat(deleteCommand.isUpdate(), is(false)); + + + } + + @Test + public void testRelease() { + final Answer answer = mock(Answer.class); + when(endpoint.sendMessage(any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + VirtualMachineProfile profile = new VirtualMachineProfileImpl(virtualMachine, null, serviceOfferingVO, null, null); + assertTrue(_configDrivesNetworkElement.release(network, nicp, profile, null)); + } + + @Test + public void testGetCapabilities () { + assertThat(_configDrivesNetworkElement.getCapabilities(), hasEntry(Network.Service.UserData, null)); + } + + @Test + public void testAddPasswordAndUserdata() throws InsufficientCapacityException, ResourceUnavailableException { + final Answer answer = mock(Answer.class); + final UserVmDetailVO userVmDetailVO = mock(UserVmDetailVO.class); + when(endpoint.sendMessage(any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + when(network.getTrafficType()).thenReturn(Networks.TrafficType.Guest); + when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Stopped); + when(userVmDetailVO.getValue()).thenReturn(PUBLIC_KEY); + when(_userVmDetailsDao.findDetail(anyLong(), anyString())).thenReturn(userVmDetailVO); + Map parms = Maps.newHashMap(); + parms.put(VirtualMachineProfile.Param.VmPassword, PASSWORD); + parms.put(VirtualMachineProfile.Param.VmSshPubKey, PUBLIC_KEY); + VirtualMachineProfile profile = new VirtualMachineProfileImpl(virtualMachine, null, serviceOfferingVO, null, parms); + assertTrue(_configDrivesNetworkElement.addPasswordAndUserdata( + network, nicp, profile, deployDestination, null)); + + ArgumentCaptor commandCaptor = 
ArgumentCaptor.forClass(HandleConfigDriveIsoCommand.class); + verify(endpoint, times(1)).sendMessage(commandCaptor.capture()); + HandleConfigDriveIsoCommand result = commandCaptor.getValue(); + List actualVmData = result.getVmData(); + + assertThat(actualVmData, containsInAnyOrder( + new String[]{"userdata", "user_data", VMUSERDATA}, + new String[]{"metadata", "service-offering", VMOFFERING}, + new String[]{"metadata", "availability-zone", ZONENAME}, + new String[]{"metadata", "local-hostname", VMINSTANCENAME}, + new String[]{"metadata", "vm-id", String.valueOf(VMID)}, + new String[]{"metadata", "instance-id", String.valueOf(VMINSTANCENAME)}, + new String[]{"metadata", "public-keys", PUBLIC_KEY}, + new String[]{"metadata", "cloud-identifier", String.format("CloudStack-{%s}", CLOUD_ID)}, + new String[]{PASSWORD, "vm_password", PASSWORD} + )); + + } +} diff --git a/server/test/com/cloud/vm/UserVmManagerTest.java b/server/test/com/cloud/vm/UserVmManagerTest.java index c61d5cdce1d..e58cd4077e9 100644 --- a/server/test/com/cloud/vm/UserVmManagerTest.java +++ b/server/test/com/cloud/vm/UserVmManagerTest.java @@ -17,8 +17,9 @@ package com.cloud.vm; import static org.hamcrest.Matchers.instanceOf; -import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; @@ -35,6 +36,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.UnsupportedEncodingException; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.HashMap; @@ -45,13 +47,7 @@ import java.util.UUID; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.VlanDao; import com.cloud.network.dao.IPAddressVO; -import com.cloud.network.element.UserDataServiceProvider; -import com.cloud.storage.Storage; -import com.cloud.user.User; -import com.cloud.event.dao.UsageEventDao; -import com.cloud.uservm.UserVm; import org.junit.Assert; -import org.apache.cloudstack.api.BaseCmd; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; @@ -61,6 +57,7 @@ import org.mockito.Spy; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd; @@ -72,14 +69,16 @@ import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationSer import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.ConfigurationManager; -import com.cloud.dc.DataCenterVO; import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.deploy.DeployDestination; +import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; 
import com.cloud.exception.InvalidParameterValueException; @@ -96,11 +95,13 @@ import com.cloud.network.NetworkModel; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.element.UserDataServiceProvider; import com.cloud.offering.ServiceOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; @@ -112,9 +113,11 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountService; import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; @@ -123,7 +126,6 @@ import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; public class UserVmManagerTest { @@ -1084,4 +1086,14 @@ public class UserVmManagerTest { assertTrue("validate return the value with padding", encodedUserdataWithPadding.equals(_userVmMgr.validateUserData(encodedUserdataWithPadding, BaseCmd.HTTPMethod.GET))); } + @Test + public void testValidateUrlEncodedBase64() throws UnsupportedEncodingException { + // fo should be encoded in base64 either as Zm8 or Zm8= + String encodedUserdata = "Zm+8/w8="; + String urlEncodedUserdata = java.net.URLEncoder.encode(encodedUserdata, "UTF-8"); + + // Verify that we accept both but return the padded version + assertEquals("validate return the value with padding", encodedUserdata, _userVmMgr.validateUserData(encodedUserdata, BaseCmd.HTTPMethod.GET)); + assertEquals("validate return the value with padding", encodedUserdata, _userVmMgr.validateUserData(urlEncodedUserdata, BaseCmd.HTTPMethod.GET)); + } } diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 493c9b84b25..93d8c271df3 100644 --- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -16,6 +16,21 @@ // under the License. 
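Before the secondary-storage changes below, a small sketch (illustrative only, not part of this patch; class name and sample values are hypothetical) of how one vmData triple, indexed by CONFIGDATA_DIR, CONFIGDATA_FILE and CONFIGDATA_CONTENT, lands on the generated ISO: a plain text file under the CloudStack tree, mirrored into the OpenStack meta_data.json where a mapping exists.

    public class ConfigDriveLayoutSketch {
        public static void main(String[] args) {
            // One vmData triple: {CONFIGDATA_DIR, CONFIGDATA_FILE, CONFIGDATA_CONTENT}
            String[] entry = {"metadata", "local-hostname", "vm-instance-1"};
            // CloudStack tree: written as <dir>/<file>.txt
            String cloudStackPath = "/cloudstack/" + entry[0] + "/" + entry[1] + ".txt";
            System.out.println(cloudStackPath);  // /cloudstack/metadata/local-hostname.txt
            // OpenStack tree: constructOpenStackMetaData() mirrors the same value into
            // /openstack/latest/meta_data.json, here as {"hostname": "vm-instance-1"}
        }
    }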
package org.apache.cloudstack.storage.resource; +import static com.cloud.network.NetworkModel.CONFIGDATA_CONTENT; +import static com.cloud.network.NetworkModel.CONFIGDATA_DIR; +import static com.cloud.network.NetworkModel.CONFIGDATA_FILE; +import static com.cloud.network.NetworkModel.METATDATA_DIR; +import static com.cloud.network.NetworkModel.PASSWORD_DIR; +import static com.cloud.network.NetworkModel.PASSWORD_FILE; +import static com.cloud.network.NetworkModel.PUBLIC_KEYS_FILE; +import static com.cloud.network.NetworkModel.USERDATA_DIR; +import static com.cloud.network.NetworkModel.USERDATA_FILE; +import static com.cloud.utils.StringUtils.join; +import static com.cloud.utils.storage.S3.S3Utils.putFile; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static org.apache.commons.lang.StringUtils.substringAfterLast; + import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; @@ -29,6 +44,8 @@ import java.io.UnsupportedEncodingException; import java.net.InetAddress; import java.net.URI; import java.net.UnknownHostException; +import java.nio.charset.Charset; +import java.nio.file.Path; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; @@ -36,7 +53,68 @@ import java.util.List; import java.util.Map; import java.util.UUID; import javax.naming.ConfigurationException; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http.HttpContentCompressor; +import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; + +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.FilenameUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.NameValuePair; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.log4j.Logger; +import org.joda.time.DateTime; +import org.joda.time.format.ISODateTimeFormat; + import com.amazonaws.services.s3.model.S3ObjectSummary; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.io.Files; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; + +import org.apache.cloudstack.framework.security.keystore.KeystoreManager; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.command.DownloadCommand; +import org.apache.cloudstack.storage.command.DownloadProgressCommand; +import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; +import org.apache.cloudstack.storage.command.UploadStatusAnswer; +import 
org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus; +import org.apache.cloudstack.storage.command.UploadStatusCommand; +import org.apache.cloudstack.storage.template.DownloadManager; +import org.apache.cloudstack.storage.template.DownloadManagerImpl; +import org.apache.cloudstack.storage.template.DownloadManagerImpl.ZfsPathParser; +import org.apache.cloudstack.storage.template.UploadEntity; +import org.apache.cloudstack.storage.template.UploadManager; +import org.apache.cloudstack.storage.template.UploadManagerImpl; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; +import org.apache.cloudstack.utils.security.DigestHelper; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthAnswer; import com.cloud.agent.api.CheckHealthCommand; @@ -45,6 +123,7 @@ import com.cloud.agent.api.ComputeChecksumCommand; import com.cloud.agent.api.DeleteSnapshotsDirCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingStorageCommand; import com.cloud.agent.api.ReadyAnswer; @@ -82,6 +161,7 @@ import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.configuration.Resource; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.NetworkModel; import com.cloud.resource.ServerResourceBase; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage; @@ -109,63 +189,7 @@ import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import com.cloud.utils.storage.S3.S3Utils; import com.cloud.vm.SecondaryStorageVm; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.channel.Channel; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.handler.codec.http.HttpContentCompressor; -import io.netty.handler.codec.http.HttpRequestDecoder; -import io.netty.handler.codec.http.HttpResponseEncoder; -import io.netty.handler.logging.LogLevel; -import io.netty.handler.logging.LoggingHandler; -import org.apache.cloudstack.framework.security.keystore.KeystoreManager; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.command.CopyCommand; -import org.apache.cloudstack.storage.command.DeleteCommand; -import org.apache.cloudstack.storage.command.DownloadCommand; -import org.apache.cloudstack.storage.command.DownloadProgressCommand; -import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; -import org.apache.cloudstack.storage.command.UploadStatusAnswer; -import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus; -import org.apache.cloudstack.storage.command.UploadStatusCommand; -import org.apache.cloudstack.storage.template.DownloadManager; -import org.apache.cloudstack.storage.template.DownloadManagerImpl; -import org.apache.cloudstack.storage.template.DownloadManagerImpl.ZfsPathParser; -import org.apache.cloudstack.storage.template.UploadEntity; 
-import org.apache.cloudstack.storage.template.UploadManager; -import org.apache.cloudstack.storage.template.UploadManagerImpl; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; -import org.apache.cloudstack.utils.security.DigestHelper; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.FilenameUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.NameValuePair; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.utils.URLEncodedUtils; -import org.apache.http.impl.client.DefaultHttpClient; -import org.apache.log4j.Logger; -import org.joda.time.DateTime; -import org.joda.time.format.ISODateTimeFormat; - -import static com.cloud.utils.StringUtils.join; -import static com.cloud.utils.storage.S3.S3Utils.putFile; -import static java.lang.String.format; -import static java.util.Arrays.asList; -import static org.apache.commons.lang.StringUtils.substringAfterLast; import java.io.OutputStreamWriter; @@ -176,6 +200,16 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S private static final String TEMPLATE_ROOT_DIR = "template/tmpl"; private static final String VOLUME_ROOT_DIR = "volumes"; private static final String POST_UPLOAD_KEY_LOCATION = "/etc/cloudstack/agent/ms-psk"; + private static final String cloudStackConfigDriveName = "/cloudstack/"; + private static final String openStackConfigDriveName = "/openstack/latest/"; + + private static final Map updatableConfigData = Maps.newHashMap(); + static { + + updatableConfigData.put(PUBLIC_KEYS_FILE, METATDATA_DIR); + updatableConfigData.put(USERDATA_FILE, USERDATA_DIR); + updatableConfigData.put(PASSWORD_FILE, PASSWORD_DIR); + } int _timeout; @@ -291,6 +325,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S return execute((DeleteCommand)cmd); } else if (cmd instanceof UploadStatusCommand) { return execute((UploadStatusCommand)cmd); + } else if (cmd instanceof HandleConfigDriveIsoCommand) { + return execute((HandleConfigDriveIsoCommand)cmd); } else if (cmd instanceof GetDatadisksCommand) { return execute((GetDatadisksCommand)cmd); } else if (cmd instanceof CreateDatadiskTemplateCommand) { @@ -300,6 +336,306 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } } + private Answer execute(HandleConfigDriveIsoCommand cmd) { + + if (cmd.isCreate()) { + s_logger.debug(String.format("VMdata %s, attach = %s", cmd.getVmData(), cmd.isCreate())); + if(cmd.getVmData() == null) return new Answer(cmd, false, "No Vmdata available"); + String nfsMountPoint = getRootDir(cmd.getDestStore().getUrl(), _nfsVersion); + File isoFile = new File(nfsMountPoint, cmd.getIsoFile()); + if(isoFile.exists()) { + if (!cmd.isUpdate()) { + return new Answer(cmd, true, "ISO already available"); + } else { + // Find out if we have to recover the password/ssh-key from the already available ISO. 
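+ // On update, the new ISO is rebuilt from the data recovered off the existing image; only the
+ // entries listed in updatableConfigData (public keys, userdata, VM password) are refreshed with
+ // the values supplied in this command, and every other file already on the drive is preserved.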
+ try { + List recoveredVmData = recoverVmData(isoFile); + for (String[] vmDataEntry : cmd.getVmData()) { + if (updatableConfigData.containsKey(vmDataEntry[CONFIGDATA_FILE]) + && updatableConfigData.get(vmDataEntry[CONFIGDATA_FILE]).equals(vmDataEntry[CONFIGDATA_DIR])) { + updateVmData(recoveredVmData, vmDataEntry); + } + } + cmd.setVmData(recoveredVmData); + } catch (IOException e) { + return new Answer(cmd, e); + } + } + } + return createConfigDriveIsoForVM(cmd); + } else { + DataStoreTO dstore = cmd.getDestStore(); + if (dstore instanceof NfsTO) { + NfsTO nfs = (NfsTO) dstore; + String relativeTemplatePath = new File(cmd.getIsoFile()).getParent(); + String nfsMountPoint = getRootDir(nfs.getUrl(), _nfsVersion); + File tmpltPath = new File(nfsMountPoint, relativeTemplatePath); + try { + FileUtils.deleteDirectory(tmpltPath); + } catch (IOException e) { + return new Answer(cmd, e); + } + return new Answer(cmd); + } else { + return new Answer(cmd, false, "Not implemented yet"); + } + } + } + + private void updateVmData(List recoveredVmData, String[] vmDataEntry) { + for (String[] recoveredEntry : recoveredVmData) { + if (recoveredEntry[CONFIGDATA_DIR].equals(vmDataEntry[CONFIGDATA_DIR]) + && recoveredEntry[CONFIGDATA_FILE].equals(vmDataEntry[CONFIGDATA_FILE])) { + recoveredEntry[CONFIGDATA_CONTENT] = vmDataEntry[CONFIGDATA_CONTENT]; + return; + } + } + recoveredVmData.add(vmDataEntry); + } + + private List recoverVmData(File isoFile) throws IOException { + String tempDirName = null; + List recoveredVmData = Lists.newArrayList(); + boolean mounted = false; + try { + Path tempDir = java.nio.file.Files.createTempDirectory("ConfigDrive"); + tempDirName = tempDir.toString(); + + // Unpack the current config drive file + Script command = new Script(!_inSystemVM, "mount", _timeout, s_logger); + command.add("-o", "loop"); + command.add(isoFile.getAbsolutePath()); + command.add(tempDirName); + String result = command.execute(); + + if (result != null) { + String errMsg = "Unable to mount " + isoFile.getAbsolutePath() + " at " + tempDirName + " due to " + result; + s_logger.error(errMsg); + throw new IOException(errMsg); + } + mounted = true; + + + // Scan directory structure + for (File configDirectory: (new File(tempDirName, "cloudstack")).listFiles()){ + for (File configFile: configDirectory.listFiles()) { + recoveredVmData.add(new String[]{configDirectory.getName(), + Files.getNameWithoutExtension(configFile.getName()), + Files.readFirstLine(configFile, Charset.defaultCharset())}); + } + } + + } finally { + if (mounted) { + Script command = new Script(!_inSystemVM, "umount", _timeout, s_logger); + command.add(tempDirName); + String result = command.execute(); + if (result != null) { + s_logger.warn("Unable to umount " + tempDirName + " due to " + result); + } + } + try { + FileUtils.deleteDirectory(new File(tempDirName)); + } catch (IOException ioe) { + s_logger.warn("Failed to delete ConfigDrive temporary directory: " + tempDirName, ioe); + } + } + return recoveredVmData; + } + + public Answer createConfigDriveIsoForVM(HandleConfigDriveIsoCommand cmd) { + //create folder for the VM + if (cmd.getVmData() != null) { + + Path tempDir = null; + String tempDirName = null; + try { + tempDir = java.nio.file.Files.createTempDirectory("ConfigDrive"); + tempDirName = tempDir.toString(); + + //create OpenStack files + //create folder with empty files + File openStackFolder = new File(tempDirName + openStackConfigDriveName); + if (openStackFolder.exists() || openStackFolder.mkdirs()) { + File vendorDataFile 
= new File(openStackFolder,"vendor_data.json"); + try (FileWriter fw = new FileWriter(vendorDataFile); BufferedWriter bw = new BufferedWriter(fw)) { + bw.write("{}"); + } catch (IOException ex) { + s_logger.error("Failed to create file ", ex); + return new Answer(cmd, ex); + } + File networkDataFile = new File(openStackFolder, "network_data.json"); + try (FileWriter fw = new FileWriter(networkDataFile); BufferedWriter bw = new BufferedWriter(fw)) { + bw.write("{}"); + } catch (IOException ex) { + s_logger.error("Failed to create file ", ex); + return new Answer(cmd, ex); + } + } else { + s_logger.error("Failed to create folder " + openStackFolder); + return new Answer(cmd, false, "Failed to create folder " + openStackFolder); + } + + JsonObject metaData = new JsonObject(); + for (String[] item : cmd.getVmData()) { + String dataType = item[CONFIGDATA_DIR]; + String fileName = item[CONFIGDATA_FILE]; + String content = item[CONFIGDATA_CONTENT]; + s_logger.debug(String.format("[createConfigDriveIsoForVM] dataType=%s, filename=%s, content=%s", + dataType, fileName, (fileName.equals(PASSWORD_FILE)?"********":content))); + + // create file with content in folder + if (dataType != null && !dataType.isEmpty()) { + //create folder + File typeFolder = new File(tempDirName + cloudStackConfigDriveName + dataType); + if (typeFolder.exists() || typeFolder.mkdirs()) { + if (StringUtils.isNotEmpty(content)) { + File file = new File(typeFolder, fileName + ".txt"); + try (FileWriter fw = new FileWriter(file); BufferedWriter bw = new BufferedWriter(fw)) { + bw.write(content); + } catch (IOException ex) { + s_logger.error("Failed to create file ", ex); + return new Answer(cmd, ex); + } + } + } else { + s_logger.error("Failed to create folder " + typeFolder); + return new Answer(cmd, false, "Failed to create folder " + typeFolder); + } + + //now write the file to the OpenStack directory + metaData = constructOpenStackMetaData(metaData, dataType, fileName, content); + } + } + + File metaDataFile = new File(openStackFolder, "meta_data.json"); + try (FileWriter fw = new FileWriter(metaDataFile); BufferedWriter bw = new BufferedWriter(fw)) { + bw.write(metaData.toString()); + } catch (IOException ex) { + s_logger.error("Failed to create file ", ex); + return new Answer(cmd, ex); + } + + String linkResult = linkUserData(tempDirName); + if (linkResult != null) { + String errMsg = "Unable to create user_data link due to " + linkResult; + s_logger.warn(errMsg); + return new Answer(cmd, false, errMsg); + } + + File tmpIsoStore = new File(tempDirName, new File(cmd.getIsoFile()).getName()); + Script command = new Script(!_inSystemVM, "/usr/bin/genisoimage", _timeout, s_logger); + command.add("-o", tmpIsoStore.getAbsolutePath()); + command.add("-ldots"); + command.add("-allow-lowercase"); + command.add("-allow-multidot"); + command.add("-cache-inodes"); // Enable caching inode and device numbers to find hard links to files. 
+ command.add("-l"); + command.add("-quiet"); + command.add("-J"); + command.add("-r"); + command.add("-V", cmd.getConfigDriveLabel()); + command.add(tempDirName); + s_logger.debug("execute command: " + command.toString()); + String result = command.execute(); + if (result != null) { + String errMsg = "Unable to create iso file: " + cmd.getIsoFile() + " due to " + result; + s_logger.warn(errMsg); + return new Answer(cmd, false, errMsg); + } + copyLocalToNfs(tmpIsoStore, new File(cmd.getIsoFile()), cmd.getDestStore()); + + } catch (IOException e) { + return new Answer(cmd, e); + } catch (ConfigurationException e) { + s_logger.warn("SecondStorageException ", e); + return new Answer(cmd, e); + } finally { + try { + FileUtils.deleteDirectory(tempDir.toFile()); + } catch (IOException ioe) { + s_logger.warn("Failed to delete ConfigDrive temporary directory: " + tempDirName, ioe); + } + } + } + return new Answer(cmd); + } + + JsonObject constructOpenStackMetaData(JsonObject metaData, String dataType, String fileName, String content) { + if (dataType.equals(NetworkModel.METATDATA_DIR) && StringUtils.isNotEmpty(content)) { + //keys are a special case in OpenStack format + if (NetworkModel.PUBLIC_KEYS_FILE.equals(fileName)) { + String[] keyArray = content.replace("\\n", "").split(" "); + String keyName = "key"; + if (keyArray.length > 3 && StringUtils.isNotEmpty(keyArray[2])){ + keyName = keyArray[2]; + } + + JsonObject keyLegacy = new JsonObject(); + keyLegacy.addProperty("type", "ssh"); + keyLegacy.addProperty("data", content.replace("\\n", "")); + keyLegacy.addProperty("name", keyName); + metaData.add("keys", arrayOf(keyLegacy)); + + JsonObject key = new JsonObject(); + key.addProperty(keyName, content); + metaData.add("public_keys", key); + } else if (NetworkModel.openStackFileMapping.get(fileName) != null) { + metaData.addProperty(NetworkModel.openStackFileMapping.get(fileName), content); + } + } + return metaData; + } + + private static JsonArray arrayOf(JsonElement... elements) { + JsonArray array = new JsonArray(); + for (JsonElement element : elements) { + array.add(element); + } + return array; + } + + private String linkUserData(String tempDirName) { + //Hard link the user_data.txt file with the user_data file in the OpenStack directory. 
+ String userDataFilePath = tempDirName + cloudStackConfigDriveName + "userdata/user_data.txt"; + if ((new File(userDataFilePath).exists())) { + Script hardLink = new Script(!_inSystemVM, "ln", _timeout, s_logger); + hardLink.add(userDataFilePath); + hardLink.add(tempDirName + openStackConfigDriveName + "user_data"); + s_logger.debug("execute command: " + hardLink.toString()); + return hardLink.execute(); + } + return null; + } + + protected void copyLocalToNfs(File localFile, File isoFile, DataStoreTO destData) throws ConfigurationException, IOException { + String scriptsDir = "scripts/storage/secondary"; + String createVolScr = Script.findScript(scriptsDir, "createvolume.sh"); + if (createVolScr == null) { + throw new ConfigurationException("Unable to find createvolume.sh"); + } + s_logger.info("createvolume.sh found in " + createVolScr); + + int installTimeoutPerGig = 180 * 60 * 1000; + int imgSizeGigs = (int) Math.ceil(localFile.length() * 1.0d / (1024 * 1024 * 1024)); + imgSizeGigs++; // add one just in case + long timeout = imgSizeGigs * installTimeoutPerGig; + + Script scr = new Script(createVolScr, timeout, s_logger); + scr.add("-s", Integer.toString(imgSizeGigs)); + scr.add("-n", isoFile.getName()); + scr.add("-t", getRootDir(destData.getUrl(), _nfsVersion) + "/" + isoFile.getParent()); + scr.add("-f", localFile.getAbsolutePath()); + scr.add("-d", "configDrive"); + String result; + result = scr.execute(); + + if (result != null) { + // script execution failure + throw new CloudRuntimeException("Failed to run script " + createVolScr); + } + } + public Answer execute(GetDatadisksCommand cmd) { DataTO srcData = cmd.getData(); TemplateObjectTO template = (TemplateObjectTO)srcData; @@ -752,7 +1088,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S FormatInfo info = processor.process(destPath, null, templateUuid); TemplateLocation loc = new TemplateLocation(_storage, destPath); - loc.create(destData.getId(), true, templateUuid); + loc.create(1, true, templateUuid); loc.addFormat(info); loc.save(); TemplateProp prop = loc.getTemplateInfo(); @@ -836,7 +1172,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S FormatInfo info = processor.process(destPath, null, templateName); TemplateLocation loc = new TemplateLocation(_storage, destPath); - loc.create(destData.getId(), true, destData.getName()); + loc.create(1, true, destData.getName()); loc.addFormat(info); loc.save(); @@ -2485,10 +2821,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (_inSystemVM) { _localgw = (String)params.get("localgw"); if (_localgw != null) { // can only happen inside service vm - String mgmtHosts = (String)params.get("host"); - for (final String mgmtHost : mgmtHosts.split(",")) { - addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, mgmtHost); - } + String mgmtHost = (String)params.get("host"); + addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, mgmtHost); + String internalDns1 = (String)params.get("internaldns1"); if (internalDns1 == null) { s_logger.warn("No DNS entry found during configuration of NfsSecondaryStorage"); diff --git a/setup/bindir/cloud-get-vm-data-configdrive.in b/setup/bindir/cloud-get-vm-data-configdrive.in index a862fe0c098..946ec3b1cdf 100644 --- a/setup/bindir/cloud-get-vm-data-configdrive.in +++ b/setup/bindir/cloud-get-vm-data-configdrive.in @@ -31,7 +31,7 @@ public_key=$filepath/metadata/public_keys.txt vm_password=$filepath/password/vm_password.txt # If lable name is 
other than config, please change the below line as required -DefaultDisk=/dev/disk/by-label/config +DefaultDisk=/dev/disk/by-label/config-2 function usage { diff --git a/setup/bindir/cloud-set-guest-password-configdrive.in b/setup/bindir/cloud-set-guest-password-configdrive.in index 1aef8bc7907..ad36919c692 100644 --- a/setup/bindir/cloud-set-guest-password-configdrive.in +++ b/setup/bindir/cloud-set-guest-password-configdrive.in @@ -29,7 +29,7 @@ user=root mountdir=$(mktemp -d) # If lable name is other than config, please change the below line as required -DefaultDisk=/dev/disk/by-label/config +DefaultDisk=/dev/disk/by-label/config-2 Password_File=$mountdir/cloudstack/password/vm_password.txt diff --git a/setup/bindir/cloud-set-guest-sshkey-configdrive.in b/setup/bindir/cloud-set-guest-sshkey-configdrive.in index e8466e8f56a..31dc6df92db 100644 --- a/setup/bindir/cloud-set-guest-sshkey-configdrive.in +++ b/setup/bindir/cloud-set-guest-sshkey-configdrive.in @@ -29,7 +29,7 @@ user=root mountdir=$(mktemp -d) # If lable name is other than config, please change the below line as required -DefaultDisk=/dev/disk/by-label/config +DefaultDisk=/dev/disk/by-label/config-2 SSHKey_File=$mountdir/cloudstack/metadata/public_keys.txt keys_received=0 diff --git a/setup/bindir/cloud-set-windows-guest-password-configdrive.bat.in b/setup/bindir/cloud-set-windows-guest-password-configdrive.bat.in index 805150a42a2..4572326a40f 100644 --- a/setup/bindir/cloud-set-windows-guest-password-configdrive.bat.in +++ b/setup/bindir/cloud-set-windows-guest-password-configdrive.bat.in @@ -25,7 +25,7 @@ echo Const HKEY_CURRENT_USER = ^&H80000001 echo registryKeyPath = "SOFTWARE\CLOUDSTACKmd5Checksum\" >> %PasswordReset_Script% echo registryKeyName = "PasswordMd5Checksum" >> %PasswordReset_Script% echo CDPath = "" >> %PasswordReset_Script% -echo configLabelName = "config" >> %PasswordReset_Script% +echo configLabelName = "config-2" >> %PasswordReset_Script% echo. >> %PasswordReset_Script% echo. >> %PasswordReset_Script% echo. >> %PasswordReset_Script% diff --git a/test/integration/component/test_configdrive.py b/test/integration/component/test_configdrive.py new file mode 100644 index 00000000000..ad9ad8fbcab --- /dev/null +++ b/test/integration/component/test_configdrive.py @@ -0,0 +1,1947 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
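+# The checks below mount the config drive inside the guest by its volume label ("config-2") and read the CloudStack layout on it: cloudstack/userdata/user_data.txt, cloudstack/password/vm_password.txt and cloudstack/metadata/*.txt (availability-zone, instance-id, service-offering, vm-id).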
+ +""" Component tests for user data, meta data, ssh keys + and password reset functionality with + ConfigDrive +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (resetSSHKeyForVirtualMachine, + updateTemplate, + restartVPC) +from marvin.lib.base import (Account, + createVlanIpRange, + FireWallRule, + Host, + listVlanIpRanges, + Network, + NetworkACL, + NetworkACLList, + NetworkOffering, + NetworkServiceProvider, + PublicIPAddress, + Router, + ServiceOffering, + createSSHKeyPair, + deleteSSHKeyPair, + StaticNATRule, + VirtualMachine, + VPC, + VpcOffering) +from marvin.lib.common import (get_domain, + get_template, + get_zone, + list_templates) +from marvin.lib.utils import random_gen +# Import System Modules +from nose.plugins.attrib import attr +from retry import retry +import tempfile +import socket +import base64 +import sys +import os + + +class MySSHKeyPair: + """Manage SSH Key pairs""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, name=None, account=None, + domainid=None, projectid=None): + """Creates SSH keypair""" + cmd = createSSHKeyPair.createSSHKeyPairCmd() + cmd.name = name + if account is not None: + cmd.account = account + if domainid is not None: + cmd.domainid = domainid + if projectid is not None: + cmd.projectid = projectid + return MySSHKeyPair(apiclient.createSSHKeyPair(cmd).__dict__) + + def delete(self, apiclient): + """Delete SSH key pair""" + cmd = deleteSSHKeyPair.deleteSSHKeyPairCmd() + cmd.name = self.name + cmd.account = self.account + cmd.domainid = self.domainid + apiclient.deleteSSHKeyPair(cmd) + + +class Services: + """Test Add Remove Network Services + """ + + def __init__(self): + self.services = { + "vpc_offering_configdrive": { + "name": 'VPC offering ConfigDrive', + "displaytext": 'VPC offering ConfigDrive', + "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,UserData,Dns', + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "StaticNat": "VpcVirtualRouter", + "SourceNat": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter", + "UserData": "ConfigDrive", + "Dns": "VpcVirtualRouter" + } + }, + "vpc_network_offering_configdrive": { + "name": 'vpc_net_off_marvin_configdrive', + "displaytext": 'vpc_net_off_marvin_configdrive', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,UserData,Dns', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "ispersistent": 'True', + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "StaticNat": "VpcVirtualRouter", + "SourceNat": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter", + "UserData": "ConfigDrive", + "Dns": "VpcVirtualRouter" + } + }, + "isolated_configdrive_network_offering": { + "name": 'isolated_configdrive_net_off_marvin', + "displaytext": 'isolated_configdrive_net_off_marvin', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,SourceNat,StaticNat,UserData,Firewall,Dns', + "traffictype": 'GUEST', + "availability": 'Optional', + "tags": 'native', + "serviceProviderList": { + "Dhcp": 'VirtualRouter', + "StaticNat": 'VirtualRouter', + "SourceNat": 'VirtualRouter', + "Firewall": 'VirtualRouter', + "UserData": 'ConfigDrive', + "Dns": 'VirtualRouter' + } + }, + "acl": { + "network_all_1": { + "name": "SharedNetwork-All-1", + "displaytext": "SharedNetwork-All-1", + "vlan": "3998", + "gateway": "10.200.100.1", + "netmask": "255.255.255.0", + "startip": "10.200.100.21", + "endip": 
"10.200.100.100", + "acltype": "Domain" + }, + "network_all_2": { + "name": "SharedNetwork2-All-2", + "displaytext": "SharedNetwork2-All-2", + "vlan": "3999", + "gateway": "10.200.200.1", + "netmask": "255.255.255.0", + "startip": "10.200.200.21", + "endip": "10.200.200.100", + "acltype": "Domain" + } + } + } + + +class TestConfigDrive(cloudstackTestCase): + """Test user data and password reset functionality + using configDrive + """ + + class CreateResult: + def __init__(self, success, offering=None, network=None, vpc=None): + self.success = success + self.network = network + self.offering = offering + self.vpc = vpc + + class PasswordTest: + def __init__(self, password): + self.test_presence = False + self.presence = None + self.password = None + if type(password) is bool: + self.test_presence = True + self.presence = password + self.password = None + elif type(password) is unicode or type(password) is str: + self.test_presence = True + self.password = password + self.presence = True + + @classmethod + def setUpClass(cls): + # We want to fail quicker, if it's a failure + socket.setdefaulttimeout(60) + + test_client = super(TestConfigDrive, cls).getClsTestClient() + cls.api_client = test_client.getApiClient() + cls.db_client = test_client.getDbConnection() + cls.test_data = test_client.getParsedTestDataConfig() + + # Get Zone, Domain and templates + cls.zone = get_zone(cls.api_client) + cls.domain = get_domain(cls.api_client) + cls.template = get_template(cls.api_client, + cls.zone.id, + cls.test_data["ostype"] + ) + cls.test_data["virtual_machine"]["zoneid"] = cls.zone.id + cls.test_data["virtual_machine"]["template"] = cls.template.id + cls.test_data.update(Services().services) + + # Create service offering + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.test_data["service_offering"]) + cls._cleanup = [cls.service_offering] + return + + def setUp(self): + # Create an account + self.account = Account.create(self.api_client, + self.test_data["account"], + admin=True, + domainid=self.domain.id + ) + self.tmp_files = [] + self.cleanup = [self.account] + return + + @classmethod + def tearDownClass(cls): + # Cleanup resources used + cls.debug("Cleaning up the resources") + for obj in reversed(cls._cleanup): + try: + if isinstance(obj, VirtualMachine): + obj.delete(cls.api_client, expunge=True) + else: + obj.delete(cls.api_client) + except Exception as e: + cls.error("Failed to cleanup %s, got %s" % (obj, e)) + # cleanup_resources(cls.api_client, cls._cleanup) + cls._cleanup = [] + cls.debug("Cleanup complete!") + return + + def tearDown(self): + # Cleanup resources used + self.debug("Cleaning up the resources") + for obj in reversed(self.cleanup): + try: + if isinstance(obj, VirtualMachine): + obj.delete(self.api_client, expunge=True) + else: + obj.delete(self.api_client) + except Exception as e: + self.error("Failed to cleanup %s, got %s" % (obj, e)) + # cleanup_resources(self.api_client, self.cleanup) + self.cleanup = [] + for tmp_file in self.tmp_files: + os.remove(tmp_file) + self.debug("Cleanup complete!") + return + + # updateTemplate - Updates value of the guest VM template's password + # enabled setting + def updateTemplate(self, value): + self.debug("Updating value of guest VM template's password enabled " + "setting") + cmd = updateTemplate.updateTemplateCmd() + cmd.id = self.template.id + cmd.passwordenabled = value + self.api_client.updateTemplate(cmd) + list_template_response = list_templates(self.api_client, + templatefilter="all", + id=self.template.id 
+ ) + self.template = list_template_response[0] + self.debug("Updated guest VM template") + + # get_userdata_url - Returns user data URL for the given VM object + def get_userdata_url(self, vm): + self.debug("Getting user data url") + nic = vm.nic[0] + gateway = str(nic.gateway) + self.debug("Gateway: " + gateway) + user_data_url = 'curl "http://' + gateway + ':80/latest/user-data"' + return user_data_url + + # create_StaticNatRule_For_VM - Creates Static NAT rule on the given + # public IP for the given VM in the given network + def create_StaticNatRule_For_VM(self, vm, public_ip, network, + vmguestip=None): + self.debug("Enabling Static NAT rule on public IP - %s for VM with ID " + "- %s in network with ID - %s" % + (public_ip.ipaddress.ipaddress, vm.id, network.id)) + static_nat_rule = StaticNATRule.enable( + self.api_client, + ipaddressid=public_ip.ipaddress.id, + virtualmachineid=vm.id, + networkid=network.id, + vmguestip=vmguestip + ) + self.debug("Static NAT rule enabled on public IP - %s for VM with ID " + "- %s in network with ID - %s" % + (public_ip.ipaddress.ipaddress, vm.id, network.id)) + return static_nat_rule + + # validate_PublicIPAddress - Validates if the given public IP address is in + # the expected state form the list of fetched public IP addresses + def validate_PublicIPAddress(self, public_ip, network, static_nat=False, + vm=None): + """Validates the Public IP Address""" + self.debug("Validating the assignment and state of public IP address " + "- %s" % public_ip.ipaddress.ipaddress) + public_ips = PublicIPAddress.list(self.api_client, + id=public_ip.ipaddress.id, + networkid=network.id, + isstaticnat=static_nat, + listall=True + ) + self.assertEqual(isinstance(public_ips, list), True, + "List public IP for network should return a " + "valid list" + ) + self.assertEqual(public_ips[0].ipaddress, + public_ip.ipaddress.ipaddress, + "List public IP for network should list the assigned " + "public IP address" + ) + self.assertEqual(public_ips[0].state, "Allocated", + "Assigned public IP is not in the allocated state" + ) + if static_nat and vm: + self.assertEqual(public_ips[0].virtualmachineid, vm.id, + "Static NAT rule is not enabled for the VM on " + "the assigned public IP" + ) + self.debug("Successfully validated the assignment and state of public " + "IP address - %s" % public_ip.ipaddress.ipaddress) + + # create_FirewallRule - Creates (Ingress) Firewall rule on the given + # Static NAT rule enabled public IP for Isolated networks + def create_FirewallRule(self, public_ip, rule=None): + if not rule: + rule = self.test_data["ingress_rule"] + self.debug("Adding an (Ingress) Firewall rule to make Guest VMs " + "accessible through Static NAT rule - %s" % rule) + return FireWallRule.create(self.api_client, + ipaddressid=public_ip.ipaddress.id, + protocol=rule["protocol"], + cidrlist=rule["cidrlist"], + startport=rule["startport"], + endport=rule["endport"] + ) + + # create_and_verify_fw - Creates and verifies (Ingress) firewall rule + # with a Static NAT rule enabled public IP + def create_and_verify_fip_and_fw(self, vm, public_ip, network): + self.debug("Creating and verifying firewall rule") + self.create_StaticNatRule_For_VM(vm, public_ip, network) + + # Verification + self.validate_PublicIPAddress( + public_ip, network, static_nat=True, vm=vm) + + self.create_FirewallRule(public_ip, self.test_data["ingress_rule"]) + self.debug("Successfully created and verified firewall rule") + + def getConfigDriveContent(self, ssh): + """ + This method is to verify whether 
configdrive iso + is attached to vm or not + Returns mount path if config drive is attached else False + """ + mountdir = "/root/iso" + cmd = "blkid -t LABEL='config-2' /dev/sr? /dev/hd? /dev/sd? /dev/xvd? -o device" + tmp_cmd = [ + 'bash -c "if [ ! -d /root/iso ] ; then mkdir /root/iso ; fi"', + "umount /root/iso"] + for tcmd in tmp_cmd: + ssh.execute(tcmd) + configDrive = ssh.execute(cmd) + res = ssh.execute("mount {} {}".format(str(configDrive[0]), mountdir)) + if str(res).lower().find("mounting read-only") > -1: + self.debug("configDrive iso is mounted at location %s" % mountdir) + return mountdir + else: + return None + + def verifyUserData(self, ssh, iso_path, userdata): + """ + verify Userdata + """ + userdata_path = iso_path+"/cloudstack/userdata/user_data.txt" + cmd = "cat %s" % userdata_path + res = ssh.execute(cmd) + vmuserdata = str(res[0]) + self.debug("Expected userdata is %s" % userdata) + self.debug("ConfigDrive userdata is %s" % vmuserdata) + self.assertEqual(vmuserdata, userdata, + 'Userdata found: %s is not equal to expected: %s' + % (vmuserdata, userdata)) + + def verifyPassword(self, vm, ssh, iso_path, password): + self.debug("Expected VM password is %s " % password.password) + password_file = iso_path+"/cloudstack/password/vm_password.txt" + cmd = "cat %s" % password_file + res = ssh.execute(cmd) + vmpassword = str(res[0]) + self.debug("ConfigDrive password is %s " % vmpassword) + nosuchfile = "No such file or directory" + if nosuchfile in vmpassword: + self.debug("Password file is not found") + return False, False + elif (password.password is not None) \ + and (password.password in vmpassword): + self.debug("Expected Password is found in configDriveIso") + return True, True + else: + self.debug("Expected password is not found in configDriveIso") + return True, False + + def verifySshKey(self, ssh, iso_path, sshkey): + self.debug("Expected VM sshkey is %s " % sshkey) + publicKey_file = iso_path+"/cloudstack/metadata/public-keys.txt" + cmd = "cat %s" % publicKey_file + res = ssh.execute(cmd) + vmsshkey = str(res[0]) + self.debug("ConfigDrive ssh key is %s " % vmsshkey) + + def verifyMetaData(self, vm, ssh, iso_path): + + metadata_dir = iso_path+"/cloudstack/metadata/" + metadata = {} + vm_files = ["availability-zone.txt", + "instance-id.txt", + "service-offering.txt", + "vm-id.txt"] + for file in vm_files: + cmd = "cat %s" % metadata_dir+file + res = ssh.execute(cmd) + metadata[file] = res + + for mfile in vm_files: + if mfile not in metadata: + self.fail("{} file is not found in vm metadata".format(mfile)) + self.assertEqual( + str(metadata["availability-zone.txt"][0]), + self.zone.name, + "Zone name inside metadata does not match with the zone" + ) + self.assertEqual( + str(metadata["instance-id.txt"][0]), + vm.instancename, + "vm name inside metadata does not match with the " + "instance name" + ) + self.assertEqual( + str(metadata["service-offering.txt"][0]), + vm.serviceofferingname, + "Service offering inside metadata does not match " + "with the instance offering" + ) + return + + def generate_ssh_keys(self): + """ + This method generates ssh key pair and writes the private key + into a temp file and returns the file name + """ + self.keypair = MySSHKeyPair.create( + self.api_client, + name=random_gen() + ".pem", + account=self.account.user[0].account, + domainid=self.account.domainid) + + self.cleanup.append(self.keypair) + self.debug("Created keypair with name: %s" % self.keypair.name) + self.debug("Writing the private key to local file") + keyPairFilePath = 
tempfile.gettempdir() + os.sep + self.keypair.name + self.tmp_files.append(keyPairFilePath) + self.debug("File path: %s" % keyPairFilePath) + with open(keyPairFilePath, "w+") as f: + f.write(self.keypair.privatekey) + os.system("chmod 400 " + keyPairFilePath) + return keyPairFilePath + + def umountConfigDrive(self, ssh, iso_path): + """umount config drive iso attached inside guest vm""" + ssh.execute("umount -d %s" % iso_path) + # Give the VM time to unlock the iso device + # time.sleep(2) + # Verify umount + result = ssh.execute("ls %s" % iso_path) + self.assertTrue(len(result) == 0, + "After umount directory should be empty " + "but contains: %s" % result) + + # validate_NetworkServiceProvider - Validates the given Network Service + # Provider in the Nuage VSP Physical Network, matches the given provider + # name and state against the list of providers fetched + def validate_NetworkServiceProvider(self, provider_name, state=None): + """Validates the Network Service Provider in the Nuage VSP Physical + Network""" + self.debug("Validating the creation and state of Network Service " + "Provider - %s" % provider_name) + providers = NetworkServiceProvider.list( + self.api_client, + name=provider_name + ) + self.assertEqual(isinstance(providers, list), True, + "List Network Service Provider should return a " + "valid list" + ) + self.assertEqual(provider_name, providers[0].name, + "Name of the Network Service Provider should match " + "with the returned list data" + ) + if state: + self.assertEqual(providers[0].state, state, + "Network Service Provider state should be '%s'" % + state + ) + self.debug("Successfully validated the creation and state of Network " + "Service Provider - %s" % provider_name) + + def update_provider_state(self, new_state): + self.debug("Updating Service Provider ConfigDrive to %s" % new_state) + configdriveprovider = NetworkServiceProvider.list( + self.api_client, + name="ConfigDrive")[0] + orig_state = configdriveprovider.state + NetworkServiceProvider.update(self.api_client, + configdriveprovider.id, + state=new_state) + self.validate_NetworkServiceProvider("ConfigDrive", state=new_state) + return orig_state + + # create_NetworkOffering - Creates Network offering + def create_NetworkOffering(self, net_offering, suffix=None, + conserve_mode=False): + self.debug("Creating Network offering") + if suffix: + net_offering["name"] = "NET_OFF-" + str(suffix) + nw_off = NetworkOffering.create(self.api_client, + net_offering, + conservemode=conserve_mode + ) + # Enable Network offering + nw_off.update(self.api_client, state="Enabled") + self.debug("Created and Enabled Network offering") + return nw_off + + # validate_NetworkOffering - Validates the given Network offering, matches + # the given network offering name and state against the list of network + # offerings fetched + def validate_NetworkOffering(self, net_offering, state=None): + """Validates the Network offering""" + self.debug("Validating the creation and state of Network offering - %s" + % net_offering.name) + net_offs = NetworkOffering.list(self.api_client, + id=net_offering.id + ) + self.assertEqual(isinstance(net_offs, list), True, + "List Network offering should return a valid list" + ) + self.assertEqual(net_offering.name, net_offs[0].name, + "Name of the Network offering should match with the " + "returned list data" + ) + if state: + self.assertEqual(net_offs[0].state, state, + "Network offering state should be '%s'" % state + ) + self.debug("Successfully validated the creation and state of Network " + 
"offering - %s" % net_offering.name) + + # create_Network - Creates network with the given Network offering + def create_Network(self, nw_off, gateway="10.1.1.1", + netmask="255.255.255.0", vpc=None, acl_list=None, + testdata=None, account=None): + if not account: + account = self.account + self.debug("Creating a network in the account - %s" % account.name) + if not testdata: + testdata = self.test_data["network"] + testdata["name"] = "TestNet-" + gateway + "-" + str(nw_off.name) + testdata["displaytext"] = "Test Network" + testdata["gateway"] = gateway + testdata["netmask"] = netmask + network = Network.create(self.api_client, + testdata, + accountid=account.name, + domainid=account.domainid, + networkofferingid=nw_off.id, + zoneid=self.zone.id, + vpcid=vpc.id if vpc else self.vpc.id + if hasattr(self, "vpc") else None, + aclid=acl_list.id if acl_list else None + ) + self.debug("Created network with ID - %s" % network.id) + return network + + def verify_network_creation(self, offering=None, + offering_name=None, + gateway=None, + vpc=None, acl_list=None, testdata=None): + if offering is None: + self.debug("Creating network offering...") + offering = self.create_NetworkOffering( + self.test_data[offering_name]) + self.validate_NetworkOffering(offering, state="Enabled") + try: + network = self.create_Network(offering, + gateway=gateway, + vpc=vpc, + acl_list=acl_list, + testdata=testdata) + return self.CreateResult(True, offering=offering, network=network) + except Exception: + self.debug("Exception: %s" % sys.exc_info()[0]) + return self.CreateResult(False, offering=offering) + + # create_VpcOffering - Creates VPC offering + def create_VpcOffering(self, vpc_offering, suffix=None): + self.debug("Creating VPC offering") + if suffix: + vpc_offering["name"] = "VPC_OFF-" + str(suffix) + vpc_off = VpcOffering.create(self.api_client, + vpc_offering + ) + # Enable VPC offering + vpc_off.update(self.api_client, state="Enabled") + self.debug("Created and Enabled VPC offering") + return vpc_off + + # create_Vpc - Creates VPC with the given VPC offering + def create_Vpc(self, vpc_offering, cidr='10.1.0.0/16', testdata=None, + account=None, networkDomain=None): + if not account: + account = self.account + self.debug("Creating a VPC in the account - %s" % account.name) + if not testdata: + testdata = self.test_data["vpc"] + testdata["name"] = "TestVPC-" + cidr + "-" + str(vpc_offering.name) + testdata["displaytext"] = "Test VPC" + testdata["cidr"] = cidr + vpc = VPC.create(self.api_client, + testdata, + vpcofferingid=vpc_offering.id, + zoneid=self.zone.id, + account=account.name, + domainid=account.domainid, + networkDomain=networkDomain + ) + self.debug("Created VPC with ID - %s" % vpc.id) + return vpc + + # validate_VpcOffering - Validates the given VPC offering, matches the + # given VPC offering name and state against the list of VPC offerings + # fetched + def validate_VpcOffering(self, vpc_offering, state=None): + """Validates the VPC offering""" + self.debug("Validating the creation and state of VPC offering - %s" % + vpc_offering.name) + vpc_offs = VpcOffering.list(self.api_client, + id=vpc_offering.id + ) + self.assertEqual(isinstance(vpc_offs, list), True, + "List VPC offering should return a valid list" + ) + self.assertEqual(vpc_offering.name, vpc_offs[0].name, + "Name of the VPC offering should match with the " + "returned list data" + ) + if state: + self.assertEqual(vpc_offs[0].state, state, + "VPC offering state should be '%s'" % state + ) + self.debug("Successfully validated the 
creation and state of VPC " + "offering - %s" % vpc_offering.name) + + # validate_Vpc - Validates the given VPC, matches the given VPC name and + # state against the list of VPCs fetched + def validate_Vpc(self, vpc, state=None): + """Validates the VPC""" + self.debug("Validating the creation and state of VPC - %s" % vpc.name) + vpcs = VPC.list(self.api_client, + id=vpc.id + ) + self.assertEqual(isinstance(vpcs, list), True, + "List VPC should return a valid list" + ) + self.assertEqual(vpc.name, vpcs[0].name, + "Name of the VPC should match with the returned " + "list data" + ) + if state: + self.assertEqual(vpcs[0].state, state, + "VPC state should be '%s'" % state + ) + self.debug("Successfully validated the creation and state of VPC - %s" + % vpc.name) + + def verify_vpc_creation(self, offering=None, offering_name=None): + + if offering is None: + self.debug("Creating VPC offering...") + offering = self.create_VpcOffering( + self.test_data[offering_name]) + self.validate_VpcOffering(offering, state="Enabled") + try: + vpc = self.create_Vpc(offering, cidr='10.1.0.0/16') + self.validate_Vpc(vpc, state="Enabled") + return self.CreateResult(True, offering=offering, vpc=vpc) + except Exception: + return self.CreateResult(False, offering=offering) + + def update_password_enable_in_template(self, new_state): + self.debug("Updating guest VM template to password %s" % new_state) + orig_state = self.template.passwordenabled + if self.template.passwordenabled is not new_state: + self.updateTemplate(new_state) + self.assertEqual(self.template.passwordenabled, new_state, + "Guest VM template is not password enabled") + return orig_state + + # ssh_into_VM - Gets into the shell of the given VM using its public IP + def ssh_into_VM(self, vm, public_ip, reconnect=True, negative_test=False): + self.debug("SSH into VM with ID - %s on public IP address - %s" % + (vm.id, public_ip.ipaddress.ipaddress)) + tries = 1 if negative_test else 3 + + @retry(tries=tries) + def retry_ssh(): + ssh_client = vm.get_ssh_client( + ipaddress=public_ip.ipaddress.ipaddress, + reconnect=reconnect, + retries=3 if negative_test else 30 + ) + self.debug("Successful to SSH into VM with ID - %s on " + "public IP address - %s" % + (vm.id, public_ip.ipaddress.ipaddress)) + return ssh_client + + return retry_ssh() + + def verify_config_drive_content(self, vm, + public_ip, + password_test, + userdata=None, + metadata=False, + sshkey=None): + self.debug("SSHing into the VM %s" % vm.name) + ssh = self.ssh_into_VM(vm, public_ip) + config_drive_path = self.getConfigDriveContent(ssh) + self.assertIsNotNone(config_drive_path, + 'ConfigdriveIso is not attached to vm') + if metadata: + self.debug("Verifying metadata for vm: %s" % vm.name) + self.verifyMetaData(vm, ssh, config_drive_path) + if userdata is not None: + self.debug("Verifying userdata for vm: %s" % vm.name) + self.verifyUserData(ssh, config_drive_path, userdata) + if password_test.test_presence: + self.debug("Verifying password for vm: %s" % vm.name) + test_result = self.verifyPassword(vm, ssh, config_drive_path, + password_test) + self.assertEqual(test_result[0], password_test.presence, + "Expected is that password is present: %s " + " but found is: %s" + % (test_result[0], password_test.presence)) + if password_test.password is not None: + self.debug("Password for vm is %s" % password_test.password) + self.assertEqual(test_result[1], True, + "Password value test failed.") + if sshkey is not None: + self.debug("Verifying sshkey for vm: %s" % vm.name) + self.verifySshKey(ssh, 
config_drive_path, sshkey) + + self.umountConfigDrive(ssh, config_drive_path) + + # create_VM - Creates VM in the given network(s) + def create_VM(self, network_list, host_id=None, start_vm=True, + testdata=None, account=None, keypair=None): + network_ids = [] + if isinstance(network_list, list): + for network in network_list: + network_ids.append(str(network.id)) + else: + network_ids.append(str(network_list.id)) + if not account: + account = self.account + self.debug("Creating VM in network(s) with ID(s) - %s in the " + "account - %s" % (network_ids, account.name)) + if not testdata: + testdata = self.test_data["virtual_machine"] + vm = VirtualMachine.create(self.api_client, + testdata, + accountid=account.name, + domainid=account.domainid, + serviceofferingid=self.service_offering.id, + templateid=self.template.id, + zoneid=self.zone.id, + networkids=network_ids, + startvm=start_vm, + hostid=host_id, + keypair=keypair + ) + self.debug("Created VM with ID - %s in network(s) with ID(s) - %s" + % (vm.id, network_ids)) + return vm + + # check_VM_state - Checks if the given VM is in the expected state from the + # list of fetched VMs + def check_VM_state(self, vm, state=None): + """Validates the VM state""" + self.debug("Validating the deployment and state of VM - %s" % vm.name) + vms = VirtualMachine.list(self.api_client, + id=vm.id, + listall=True + ) + self.assertEqual(isinstance(vms, list), True, + "List virtual machine should return a valid list" + ) + if state: + self.assertEqual(vms[0].state, state, + "Virtual machine is not in the expected state" + ) + self.debug("Successfully validated the deployment and state of VM - %s" + % vm.name) + + # validate_Network - Validates the given network, matches the given network + # name and state against the list of networks fetched + def validate_Network(self, network, state=None): + """Validates the network""" + self.debug("Validating the creation and state of Network - %s" % + network.name) + networks = Network.list(self.api_client, + id=network.id + ) + self.assertEqual(isinstance(networks, list), True, + "List network should return a valid list" + ) + self.assertEqual(network.name, networks[0].name, + "Name of the network should match with the " + "returned list data" + ) + if state: + self.assertEqual(networks[0].state, state, + "Network state should be '%s'" % state + ) + self.debug("Successfully validated the creation and state of Network " + "- %s" % network.name) + + def create_guest_vm(self, networks, acl_item=None, + vpc=None, keypair=None): + vm = self.create_VM( + networks, + testdata=self.test_data["virtual_machine_userdata"], + keypair=keypair) + + # Check VM + self.check_VM_state(vm, state="Running") + + # Check networks + network_list = [] + if isinstance(networks, list): + for network in networks: + network_list.append(network) + else: + network_list.append(networks) + + for network in network_list: + self.validate_Network(network, state="Implemented") + + return vm + + # nic_operation_VM - Performs NIC operations such as add, remove, and + # update default NIC in the given VM and network + def nic_operation_VM(self, vm, network, operation="add"): + self.debug("Performing %s NIC operation in VM with ID - %s and " + "network with ID - %s" % (operation, vm.id, network.id)) + if operation == "add": + vm.add_nic(self.api_client, network.id) + self.debug("Added NIC in VM with ID - %s and network with ID - %s" + % (vm.id, network.id)) + vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0] + for nic in vm_info.nic: + if
nic.networkid == network.id: + nic_id = nic.id + if operation == "update": + vm.update_default_nic(self.api_client, nic_id) + self.debug("Updated default NIC to NIC with ID- %s in VM with ID " + "- %s and network with ID - %s" % + (nic_id, vm.id, network.id)) + if operation == "remove": + vm.remove_nic(self.api_client, nic_id) + self.debug("Removed NIC with ID - %s in VM with ID - %s and " + "network with ID - %s" % (nic_id, vm.id, network.id)) + + def update_userdata(self, vm, expected_user_data): + updated_user_data = base64.b64encode(expected_user_data) + vm.update(self.api_client, userdata=updated_user_data) + return expected_user_data + + def reset_password(self, vm): + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + + def wait_until_done(self, thread_list, name): + for aThread in thread_list: + self.debug("[Concurrency]Join %s for vm %s" % (name, + aThread.get_vm())) + aThread.join() + + def resetsshkey(self, vm, keypair, account=None, domainid=None): + """Resets SSH key""" + cmd = resetSSHKeyForVirtualMachine.resetSSHKeyForVirtualMachineCmd() + cmd.id = vm.id + cmd.keypair = keypair + cmd.account = account + cmd.domainid = domainid + return(self.api_client.resetSSHKeyForVirtualMachine(cmd)) + + def update_sshkeypair(self, vm): + vm.stop(self.api_client) + self.resetsshkey(vm, + self.keypair.name, + account=self.account.user[0].account, + domainid=self.account.domainid) + self.debug("Sshkey reset to - %s" % self.keypair.name) + vm.start(self.api_client) + + def add_subnet_verify(self, network, services): + """Adds a new IP range to the given shared network and verifies it""" + + self.debug("Going to add new ip range in shared network %s" % + network.name) + cmd = createVlanIpRange.createVlanIpRangeCmd() + cmd.networkid = network.id + cmd.gateway = services["gateway"] + cmd.netmask = services["netmask"] + cmd.startip = services["startip"] + cmd.endip = services["endip"] + cmd.forVirtualNetwork = services["forvirtualnetwork"] + addedsubnet = self.api_client.createVlanIpRange(cmd) + + self.debug("verify above iprange is successfully added in shared " + "network %s or not" % network.name) + + cmd1 = listVlanIpRanges.listVlanIpRangesCmd() + cmd1.networkid = network.id + cmd1.id = addedsubnet.vlan.id + + allsubnets = self.api_client.listVlanIpRanges(cmd1) + self.assertEqual( + allsubnets[0].id, + addedsubnet.vlan.id, + "Check New subnet is successfully added to the shared Network" + ) + return addedsubnet + + # get_Router - Returns router for the given network + def get_Router(self, network): + self.debug("Finding the virtual router for network with ID - %s" % + network.id) + routers = Router.list(self.api_client, + networkid=network.id, + listall=True + ) + self.assertEqual(isinstance(routers, list), True, + "List routers should return a valid virtual router " + "for network" + ) + return routers[0] + + # check_Router_state - Checks if the given router is in the expected state + # from the list of fetched routers + def check_Router_state(self, router, state=None): + """Validates the Router state""" + self.debug("Validating the deployment and state of Router - %s" % + router.name) + routers = Router.list(self.api_client, + id=router.id, + listall=True + ) + self.assertEqual(isinstance(routers, list), True, + "List router should return a valid list" + ) + if state: + self.assertEqual(routers[0].state, state, + "Virtual router is not in the expected state" + ) + self.debug("Successfully
validated the deployment and state of Router " + "- %s" % router.name) + + # acquire_PublicIPAddress - Acquires public IP address for the given + # network/VPC + def acquire_PublicIPAddress(self, network, vpc=None, account=None): + if not account: + account = self.account + self.debug("Associating public IP for network with ID - %s in the " + "account - %s" % (network.id, account.name)) + public_ip = PublicIPAddress.create(self.api_client, + accountid=account.name, + domainid=account.domainid, + zoneid=self.zone.id, + networkid=network.id + if vpc is None else None, + vpcid=vpc.id if vpc else self.vpc.id + if hasattr(self, "vpc") else None + ) + self.debug("Associated public IP address - %s with network with ID - " + "%s" % (public_ip.ipaddress.ipaddress, network.id)) + return public_ip + + # migrate_VM - Migrates VM to another host, if available + def migrate_VM(self, vm): + self.debug("Checking if a host is available for migration...") + hosts = Host.listForMigration(self.api_client, virtualmachineid=vm.id) + if hosts: + self.assertEqual(isinstance(hosts, list), True, + "List hosts should return a valid list" + ) + host = hosts[0] + self.debug("Migrating VM with ID: " + "%s to Host: %s" % (vm.id, host.id)) + try: + vm.migrate(self.api_client, hostid=host.id) + except Exception as e: + self.fail("Failed to migrate instance, %s" % e) + self.debug("Migrated VM with ID: " + "%s to Host: %s" % (vm.id, host.id)) + else: + self.debug("No host available for migration. " + "Test requires at-least 2 hosts") + + @attr(tags=["advanced", "isonw"], required_hardware="true") + def test_configdrive_isolated_network(self): + """Test Configdrive as provider for isolated Networks + to provide userdata and password reset functionality + """ + + # 1. When ConfigDrive is disabled as provider in zone + # Verify Isolated Network creation with a network offering + # which has userdata provided by ConfigDrive fails + # 2. When ConfigDrive is enabled as provider in zone + # Create an Isolated Network with Isolated Network + # offering specifying ConfigDrive as serviceProvider + # for userdata. + # check if it is successfully created and + # is in the "Allocated" state. + # 3. Deploy a VM in the created Isolated network with user data, + # check if the Isolated network state is changed to + # "Implemented", and the VM is successfully deployed and + # is in the "Running" state. + # 4. SSH into the deployed VM and verify its user data in the iso + # (expected user data == actual user data). + # 5. Verify that the guest VM's password in the iso. + # 6. Reset VM password, and start the VM. + # 7. Verify that the new guest VM template is password enabled by + # checking the VM's password (password != "password"). + # 8. SSH into the VM for verifying its new password + # after its password reset. + # 9. Verify various scenarios and check the data in configdriveIso + # 10. Delete all the created objects (cleanup). + + self.debug("+++Testing configdrive in an Isolated network fails..." 
+ "as provider configdrive is still disabled...") + self.update_provider_state("Disabled") + create_network = self.verify_network_creation( + offering_name="isolated_configdrive_network_offering", + gateway='10.1.1.1') + self.assertFalse(create_network.success, + 'Network found success = %s, expected success =%s' + % (str(create_network.success), 'False')) + + self.debug("+++Test user data & password reset functionality " + "using configdrive in an Isolated network") + + self.update_provider_state("Enabled") + create_network1 = self.verify_network_creation( + offering=create_network.offering, + gateway='10.1.1.1') + self.assertTrue(create_network1.success, + 'Network found success = %s, expected success = %s' + % (str(create_network1.success), 'True')) + self.validate_Network(create_network1.network, state="Allocated") + create_network2 = self.verify_network_creation( + offering=create_network.offering, + gateway='10.1.2.1') + self.assertTrue(create_network2.success, + 'Network found success = %s,expected success = %s' + % (str(create_network2.success), 'True')) + self.validate_Network(create_network2.network, state="Allocated") + self.update_password_enable_in_template(True) + + self.debug("+++Deploy VM in the created Isolated network " + "with user data provider as configdrive") + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_guest_vm(create_network1.network, + keypair=self.keypair.name) + + vr = self.get_Router(create_network1.network) + self.check_Router_state(vr, state="Running") + + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + + public_ip_1 = self.acquire_PublicIPAddress(create_network1.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_1, + create_network1.network) + + self.verify_config_drive_content( + vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=self.test_data[ + "virtual_machine_userdata"]["userdata"], + sshkey=self.keypair.name) + + expected_user_data1 = self.update_userdata(vm1, "helloworld vm1") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.generate_ssh_keys() + self.update_sshkeypair(vm1) + # After sshkey reset we need to have the vm password again + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("Adding a non-default nic to the VM " + "making it a multi-nic VM...") + self.nic_operation_VM(vm1, create_network2.network, + operation="add") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" 
% + (vm1.name, vm1.password)) + + expected_user_data1 = self.update_userdata(vm1, + "hellomultinicvm1") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("updating non-default nic as the default nic " + "of the multi-nic VM and enable staticnat...") + self.nic_operation_VM(vm1, + create_network2.network, operation="update") + + public_ip_2 = \ + self.acquire_PublicIPAddress(create_network2.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_2, + create_network2.network) + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(vm1.password), + userdata=expected_user_data1) + expected_user_data1 = self.update_userdata(vm1, + "hellomultinicvm1") + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.debug("Updating the default nic of the multi-nic VM, " + "deleting the non-default nic...") + self.nic_operation_VM(vm1, + create_network1.network, operation="update") + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + + multinicvm1 = self.create_guest_vm([create_network2.network, + create_network1.network]) + multinicvm1.password = multinicvm1.resetPassword(self.api_client) + self.debug("MultiNICVM Password reset to - %s" + % multinicvm1.password) + self.debug("MultiNICVM - %s password - %s !" + % (multinicvm1.name, multinicvm1.password)) + + public_ip_3 = self.acquire_PublicIPAddress(create_network2.network) + self.create_and_verify_fip_and_fw(multinicvm1, public_ip_3, + create_network2.network) + self.verify_config_drive_content( + multinicvm1, public_ip_3, + self.PasswordTest(multinicvm1.password), + metadata=True, + userdata=self.test_data[ + "virtual_machine_userdata"]["userdata"]) + expected_user_data2 = self.update_userdata(multinicvm1, + "hello multinicvm1") + self.verify_config_drive_content(multinicvm1, public_ip_3, + self.PasswordTest(True), + userdata=expected_user_data2) + + multinicvm1.delete(self.api_client, expunge=True) + public_ip_3.delete(self.api_client) + public_ip_2.delete(self.api_client) + self.nic_operation_VM(vm1, + create_network2.network, operation="remove") + create_network2.network.delete(self.api_client) + + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" 
% + (vm1.name, vm1.password)) + + self.debug("+++ Restarting the created Isolated network without " + "cleanup...") + create_network1.network.restart(self.api_client, cleanup=False) + self.validate_Network(create_network1.network, + state="Implemented") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created Isolated network with " + "cleanup...") + create_network1.network.restart(self.api_client, cleanup=True) + self.validate_Network(create_network1.network, + state="Implemented") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++Verifying userdata after rebootVM - %s" % vm1.name) + vm1.reboot(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, "hello afterboot") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password)) + + self.debug("+++ Migrating one of the VMs in the created Isolated " + "network to another host, if available...") + self.migrate_VM(vm1) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata after migrating VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, + "hello after migrate") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password)) + + self.debug("+++Verify userdata after stopstartVM - %s" % vm1.name) + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, + "hello afterstopstart") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password)) + + self.debug("+++ Verify userdata after VM recover- %s" % vm1.name) + vm1.delete(self.api_client, expunge=False) + self.debug("Recover VM - %s" % vm1.name) + vm1.recover(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + 
self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + self.update_provider_state("Disabled") + expected_user_data1 = self.update_userdata(vm1, + "hello after recover") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ When template is not password enabled, " + "verify configdrive of VM - %s" % vm1.name) + vm1.delete(self.api_client, expunge=True) + self.update_provider_state("Enabled") + self.updateTemplate(False) + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_guest_vm(create_network1.network, + keypair=self.keypair.name) + + expected_user_data1 = self.update_userdata(vm1, + "This is sample data") + public_ip_1 = \ + self.acquire_PublicIPAddress(create_network1.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_1, + create_network1.network) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + vm1.delete(self.api_client, expunge=True) + create_network1.network.delete(self.api_client) + + # create_NetworkAclList - Creates network ACL list in the given VPC + def create_NetworkAclList(self, name, description, vpc): + self.debug("Adding NetworkACL list in VPC with ID - %s" % vpc.id) + return NetworkACLList.create(self.api_client, + services={}, + name=name, + description=description, + vpcid=vpc.id + ) + + # create_NetworkAclRule - Creates Ingress/Egress Network ACL rule in the + # given VPC network/acl list + def create_NetworkAclRule(self, rule, traffic_type="Ingress", network=None, + acl_list=None): + self.debug("Adding NetworkACL rule - %s" % rule) + if acl_list: + return NetworkACL.create(self.api_client, + networkid=network.id if network else None, + services=rule, + traffictype=traffic_type, + aclid=acl_list.id + ) + else: + return NetworkACL.create(self.api_client, + networkid=network.id if network else None, + services=rule, + traffictype=traffic_type + ) + + # restart_Vpc - Restarts the given VPC with/without cleanup + def restart_Vpc(self, vpc, cleanup=False): + self.debug("Restarting VPC with ID - %s" % vpc.id) + cmd = restartVPC.restartVPCCmd() + cmd.id = vpc.id + cmd.cleanup = cleanup + cmd.makeredundant = False + self.api_client.restartVPC(cmd) + self.debug("Restarted VPC with ID - %s" % vpc.id) + + @attr(tags=["advanced", "vpc"], required_hardware="true") + def test_configdrive_vpc_network(self): + """Test Configdrive for VPC Networks + choose user data with configDrive as service provider + and test password reset functionality using ConfigDrive + """ + + # 1. Verify VPC Network creation with ConfigDrive fails + # as ConfigDrive is disabled as provider + # 2. Create a VPC Network with VPC tier Network + # offering specifying ConfigDrive as serviceProvider for userdata. + # check if it is successfully created and is in "Allocated" state. + # 3. Deploy a VM in the created VPC tier network with user data, + # check if the Isolated network state is changed to "Implemented", + # and the VM is successfully deployed and is in "Running" state. + # 4. SSH into the deployed VM and verify its user data in the iso + # (expected user data == actual user data). + # 5. Verify that the guest VM's password in the iso. + # 6. Reset VM password, and start the VM. + # 7. 
Verify that the new guest VM template is password enabled by + # checking the VM's password (password != "password"). + # 8. SSH into the VM for verifying its new password + # after its password reset. + # 9. Verify various scenarios and check the data in configdrive iso + # 10. Delete all the created objects (cleanup). + self.update_provider_state("Enabled") + create_vpc = self.verify_vpc_creation( + offering_name="vpc_offering_configdrive") + self.assertTrue(create_vpc.success, + "Vpc found success = %s, expected success = %s" + % (str(create_vpc.success), 'True')) + acl_list = self.create_NetworkAclList( + name="acl", description="acl", vpc=create_vpc.vpc) + acl_item = self.create_NetworkAclRule( + self.test_data["ingress_rule"], acl_list=acl_list) + self.update_provider_state("Disabled") + self.debug("+++Testing configdrive in a VPC Tier network fails..." + "as provider configdrive is still disabled...") + create_networkfails = \ + self.verify_network_creation( + offering_name="vpc_network_offering_configdrive", + gateway='10.1.1.1', + vpc=create_vpc.vpc, + acl_list=acl_list) + self.assertFalse(create_networkfails.success, + "Create Network found success = %s, " + "expected success = %s" + % (str(create_networkfails.success), 'False')) + self.debug("Testing user data&password reset functionality using" + "configdrive in a VPC network...") + self.update_provider_state("Enabled") + + create_tiernetwork = \ + self.verify_network_creation( + offering=create_networkfails.offering, + gateway='10.1.1.1', + vpc=create_vpc.vpc, + acl_list=acl_list) + self.assertTrue(create_tiernetwork.success, + "Create Network found success = %s, " + "expected success = %s" + % (str(create_tiernetwork.success), 'True')) + self.validate_Network(create_tiernetwork.network, + state="Implemented") + + vpc_vr = self.get_Router(create_tiernetwork.network) + self.check_Router_state(vpc_vr, state="Running") + + create_tiernetwork2 = \ + self.verify_network_creation( + offering=create_networkfails.offering, + gateway='10.1.2.1', + vpc=create_vpc.vpc, + acl_list=acl_list) + self.assertTrue(create_tiernetwork2.success, + 'Network found success= %s, expected success= %s' + % (str(create_tiernetwork2.success), 'True')) + self.validate_Network(create_tiernetwork2.network, + state="Implemented") + + vpc_vr2 = self.get_Router(create_tiernetwork2.network) + self.check_Router_state(vpc_vr2, state="Running") + + self.update_password_enable_in_template(True) + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm = self.create_guest_vm(create_tiernetwork.network, + acl_item, + vpc=create_vpc.vpc, + keypair=self.keypair.name) + + vpc_public_ip_1 = \ + self.acquire_PublicIPAddress(create_tiernetwork.network, + create_vpc.vpc) + self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1, + create_tiernetwork.network) + + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(True), + metadata=True, + sshkey=self.keypair.name) + + expected_user_data = self.update_userdata(vm, "helloworld vm1") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(True), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + sshkey=self.keypair.name) + + self.generate_ssh_keys() + self.update_sshkeypair(vm) + # After sshkey reset we need to have the vm password 
again + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created vpc without " + "cleanup...") + self.restart_Vpc(create_vpc.vpc, cleanup=False) + self.validate_Vpc(create_vpc.vpc, state="Enabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Adding a non-default nic to the VM " + "making it a multi-nic VM...") + self.nic_operation_VM(vm, create_tiernetwork2.network, + operation="add") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + vpc_public_ip_2 = \ + self.acquire_PublicIPAddress(create_tiernetwork2.network, + create_vpc.vpc) + self.create_StaticNatRule_For_VM(vm, vpc_public_ip_2, + create_tiernetwork2.network) + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1") + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("updating non-default nic as the default nic " + "of the multi-nic VM and enable staticnat...") + self.nic_operation_VM(vm, + create_tiernetwork2.network, + operation="update") + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data1) + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(vm.password), + userdata=expected_user_data1) + expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1") + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.debug("Updating the default nic of the multi-nic VM, " + "deleting the non-default nic...") + self.nic_operation_VM(vm, + create_tiernetwork.network, + operation="update") + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(True), + metadata=True, + userdata=expected_user_data1) + vpc_public_ip_2.delete(self.api_client) + self.nic_operation_VM(vm, + create_tiernetwork2.network, + operation="remove") + create_tiernetwork2.network.delete(self.api_client) + + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" 
% + (vm.name, vm.password)) + + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created vpc with " + "cleanup...") + self.restart_Vpc(create_vpc.vpc, cleanup=True) + self.validate_Vpc(create_vpc.vpc, state="Enabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created VPC Tier network without " + "cleanup...") + create_tiernetwork.network.restart(self.api_client, cleanup=False) + self.validate_Network(create_tiernetwork.network, + state="Implemented") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created VPC Tier network with " + "cleanup...") + create_tiernetwork.network.restart(self.api_client, cleanup=True) + self.validate_Network(create_tiernetwork.network, + state="Implemented") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created vpc without " + "cleanup...") + self.restart_Vpc(create_vpc.vpc, cleanup=False) + self.validate_Vpc(create_vpc.vpc, state="Enabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True) + + self.debug("+++ Restarting the created vpc with " + "cleanup...") + self.restart_Vpc(create_vpc.vpc, cleanup=True) + self.validate_Vpc(create_vpc.vpc, state="Enabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True) + + self.debug("+++ Verify userdata after rebootVM - %s" % vm.name) + vm.reboot(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm.name) + expected_user_data = self.update_userdata(vm, + "hellovm after reboot") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password)) + + self.debug("+++ Migrating one of the VMs in the created " + "VPC Tier network to another host, if available...") + self.migrate_VM(vm) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata after migrating VM - %s" % vm.name) + expected_user_data = self.update_userdata(vm, + "hellovm after migrate") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + 
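For reference, the repeated verify_config_drive_content() calls in these tests all reduce to mounting the ISO labelled 'config-2' inside the guest and reading a handful of fixed file paths. The following is a minimal, illustrative sketch of that check and is not part of the patch: run_ssh is an assumed helper that executes a shell command in the guest over SSH and returns its output, and the paths mirror the ones used by the config-drive helpers elsewhere in this patch.

# Illustrative sketch only (not part of the patch). `run_ssh` is an assumed
# helper that runs a shell command in the guest and returns its stdout.
def check_config_drive(run_ssh, expected_userdata, expected_password=None):
    # The config drive ISO is labelled 'config-2'; locate it and mount read-only.
    device = run_ssh("blkid -t LABEL='config-2' -o device").strip()
    run_ssh("mkdir -p /root/iso && mount -o ro %s /root/iso" % device)
    try:
        # CloudStack layout: user data, password and metadata as plain files.
        userdata = run_ssh("cat /root/iso/cloudstack/userdata/user_data.txt")
        assert userdata.strip() == expected_userdata
        if expected_password is not None:
            password = run_ssh("cat /root/iso/cloudstack/password/vm_password.txt")
            assert expected_password in password
        # OpenStack layout: the same user data is exposed under openstack/latest.
        openstack_userdata = run_ssh("cat /root/iso/openstack/latest/user_data")
        assert openstack_userdata.strip() == expected_userdata
    finally:
        run_ssh("umount /root/iso")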
self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password)) + + self.debug("+++ Verify userdata after stopstartVM - %s" % vm.name) + vm.stop(self.api_client) + vm.start(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm.name) + expected_user_data = self.update_userdata(vm, + "hello after stopstart") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password)) + + self.debug("+++ Verify userdata after recoverVM - %s" % vm.name) + vm.delete(self.api_client, expunge=False) + self.debug("Recover VM - %s" % vm.name) + vm.recover(self.api_client) + vm.start(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + self.update_provider_state("Disabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ When template is not password enabled " + "verify configdrive of VM - %s" % vm.name) + vm.delete(self.api_client, expunge=True) + self.update_provider_state("Enabled") + self.updateTemplate(False) + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm = self.create_guest_vm(create_tiernetwork.network, + acl_item, + vpc=create_vpc.vpc, + keypair=self.keypair.name) + + expected_user_data = self.update_userdata(vm, + "This is sample data") + vpc_public_ip_1 = \ + self.acquire_PublicIPAddress(create_tiernetwork.network, + create_vpc.vpc) + self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1, + create_tiernetwork.network) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + vm.delete(self.api_client, expunge=True) + create_tiernetwork.network.delete(self.api_client) + + @attr(tags=["advanced", "shared"], required_hardware="true") + def test_configdrive_shared_network(self): + """Test Configdrive as provider for shared Networks + to provide userdata and password reset functionality + """ + + # 1. When ConfigDrive is disabled as provider in zone + # Verify Shared Network creation with a network offering + # which has userdata provided by ConfigDrive fails + # 2. When ConfigDrive is enabled as provider in zone + # Create a shared Network with Isolated Network + # offering specifying ConfigDrive as serviceProvider + # for userdata. + # check if it is successfully created and + # is in the "Setup" state. + # 3. Deploy a VM in the created Shared network with user data, + # check if the Shared network state is changed to + # "Implemented", and the VM is successfully deployed and + # is in the "Running" state. + # 4. Verify that the guest VM's password in the iso. + # 5. Reset VM password, and start the VM. + # 6. Verify that the new guest VM template is password enabled by + # checking the VM's password (password != "password"). + # 7. 
Verify various scenarios and check the data in configdriveIso + # 8. Delete all the created objects (cleanup). + + self.debug("+++Testing configdrive in a shared network fails..." + "as provider configdrive is still disabled...") + self.update_provider_state("Disabled") + shared_test_data = self.test_data["acl"]["network_all_1"] + shared_network = self.verify_network_creation( + offering_name="shared_network_config_drive_offering", + testdata=shared_test_data) + self.assertFalse(shared_network.success, + 'Network found success = %s, expected success = %s' + % (str(shared_network.success), 'False')) + + self.update_provider_state("Enabled") + shared_network = self.verify_network_creation( + offering=shared_network.offering, testdata=shared_test_data) + self.assertTrue(shared_network.success, + 'Network found success = %s, expected success = %s' + % (str(shared_network.success), 'True')) + + self.validate_Network(shared_network.network, state="Setup") + + shared_test_data2 = self.test_data["acl"]["network_all_2"] + shared_network2 = self.verify_network_creation( + offering=shared_network.offering, + testdata=shared_test_data2) + self.assertTrue(shared_network2.success, + 'Network found success = %s, expected success = %s' + % (str(shared_network2.success), 'True')) + + self.validate_Network(shared_network2.network, state="Setup") + + self.debug("+++Test user data & password reset functionality " + "using configdrive in a shared network") + + self.update_password_enable_in_template(True) + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + + self.debug("+++Deploying a VM on a shared network with multiple " + "ip ranges, all should have the same value for the " + "underlay flag.") + # Add subnet of different gateway + self.debug("+++ Adding subnet of different gateway") + + self.add_subnet_verify( + shared_network.network, + self.test_data["publiciprange2"]) + self.test_data["virtual_machine"]["ipaddress"] = \ + self.test_data["acl"]["network_all_1"]["endip"] + + # with self.assertRaises(Exception): + # self.create_VM( + # [shared_network.network], + # testdata=self.test_data["virtual_machine_userdata"]) + + self.debug("+++ In a shared network with multiple ip ranges, " + "userdata with config drive must be allowed.") + + vm1 = self.create_VM( + [shared_network.network], + testdata=self.test_data["virtual_machine_userdata"], + keypair=self.keypair.name) + # Check VM + self.check_VM_state(vm1, state="Running") + + shared_vr = self.get_Router(shared_network.network) + self.check_Router_state(shared_vr, state="Running") + + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + self.update_userdata(vm1, "helloworld vm1") + + self.debug("Adding a non-default nic to the VM " + "making it a multi-nic VM...") + self.nic_operation_VM(vm1, shared_network2.network, + operation="add") + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !"
% + (vm1.name, vm1.password)) + + self.debug("updating non-default nic as the default nic " + "of the multi-nic VM...") + self.nic_operation_VM(vm1, + shared_network2.network, operation="update") + vm1.stop(self.api_client) + vm1.start(self.api_client) + + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + self.update_userdata(vm1, "hellomultinicvm1") + + self.debug("Updating the default nic of the multi-nic VM, " + "deleting the non-default nic...") + self.nic_operation_VM(vm1, + shared_network.network, operation="update") + vm1.stop(self.api_client) + vm1.start(self.api_client) + + self.nic_operation_VM(vm1, + shared_network2.network, operation="remove") + shared_network2.network.delete(self.api_client) + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + + self.debug("+++ When template is not password enabled, " + "verify configdrive of VM - %s" % vm1.name) + vm1.delete(self.api_client, expunge=True) + self.update_provider_state("Enabled") + self.updateTemplate(False) + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_VM( + [shared_network.network], + testdata=self.test_data["virtual_machine_userdata"], + keypair=self.keypair.name) + vm1.delete(self.api_client, expunge=True) + shared_network.network.delete(self.api_client) diff --git a/test/integration/component/test_dhcp_dns_offload.py b/test/integration/component/test_dhcp_dns_offload.py index dc44f09237f..eea5c26ff16 100755 --- a/test/integration/component/test_dhcp_dns_offload.py +++ b/test/integration/component/test_dhcp_dns_offload.py @@ -198,7 +198,7 @@ class TestDeployVMs(cloudstackTestCase): Returns mount path if config drive is attached else False """ mountdir = "/root/iso" - cmd = "blkid -t LABEL='config' /dev/hd? /dev/sd? /dev/xvd? -o device" + cmd = "blkid -t LABEL='config-2' /dev/sr? /dev/hd? /dev/sd? /dev/xvd? 
-o device" try: self.debug("SSH into VM: %s" % vm_ip) ssh = self.vm.get_ssh_client(ipaddress=vm_ip, reconnect=True) @@ -261,7 +261,7 @@ class TestDeployVMs(cloudstackTestCase): def verifySshKey(self, vm_ip, iso_path): - publicKey_file = iso_path+"/cloudstack/metadata/public_keys.txt" + publicKey_file = iso_path+"/cloudstack/metadata/public-keys.txt" try: self.debug("SSH into VM: %s" % vm_ip) ssh = self.vm.get_ssh_client(ipaddress=vm_ip) @@ -285,7 +285,7 @@ class TestDeployVMs(cloudstackTestCase): response = {} self.debug("SSH into VM: %s" % vm_ip) ssh = self.vm.get_ssh_client(ipaddress=vm_ip, reconnect=True) - vm_files = ["availability_zone.txt", "instance_id.txt", "service_offering.txt", "vm_id.txt"] + vm_files = ["availability-zone.txt", "instance-id.txt", "service-offering.txt", "vm-id.txt"] for file in vm_files: cmd = "cat %s" % metadata_dir+file res = ssh.execute(cmd) @@ -296,22 +296,22 @@ class TestDeployVMs(cloudstackTestCase): def verifyMetaData(self, metadata): - metadata_files = ["availability_zone.txt", "instance_id.txt", "service_offering.txt", "vm_id.txt"] + metadata_files = ["availability-zone.txt", "instance-id.txt", "service-offering.txt", "vm-id.txt"] for mfile in metadata_files: if mfile not in metadata: self.fail("{} file is not found in vm metadata".format(mfile)) self.assertEqual( - str(metadata["availability_zone.txt"][0]), + str(metadata["availability-zone.txt"][0]), self.zone.name, "Zone name inside metadata does not match with the zone" ) self.assertEqual( - str(metadata["instance_id.txt"][0]), + str(metadata["instance-id.txt"][0]), self.vm.instancename, "vm name inside metadata does not match with the instance name" ) self.assertEqual( - str(metadata["service_offering.txt"][0]), + str(metadata["service-offering.txt"][0]), self.vm.serviceofferingname, "Service offering inside metadata does not match with the instance offering" ) @@ -320,7 +320,7 @@ class TestDeployVMs(cloudstackTestCase): self.vm.instancename) self.assertEqual(validateList(qresultset)[0], PASS, "sql query returned invalid response") self.assertEqual( - metadata["vm_id.txt"][0], + metadata["vm-id.txt"][0], unicode(qresultset[0][0]), "vm id in metadata does not match with the vm id from cloud db" ) diff --git a/test/integration/plugins/nuagevsp/nuageTestCase.py b/test/integration/plugins/nuagevsp/nuageTestCase.py index 45db7913ceb..5df0754a360 100644 --- a/test/integration/plugins/nuagevsp/nuageTestCase.py +++ b/test/integration/plugins/nuagevsp/nuageTestCase.py @@ -44,6 +44,8 @@ from marvin.lib.common import (get_domain, get_template, get_zone) from marvin.cloudstackAPI import (restartVPC, + enableNuageUnderlayVlanIpRange, + disableNuageUnderlayVlanIpRange, listNuageUnderlayVlanIpRanges) # Import System Modules from retry import retry @@ -320,6 +322,31 @@ class nuageTestCase(cloudstackTestCase): self.debug("Cleanup complete!") return + # enable_NuageUnderlayPublicIpRange - Enables/configures underlay + # networking for the given public IP range in Nuage VSP + def enable_NuageUnderlayPublicIpRange(self, vlanid): + cmd = enableNuageUnderlayVlanIpRange. \ + enableNuageUnderlayVlanIpRangeCmd() + cmd.id = vlanid + self.api_client.enableNuageUnderlayVlanIpRange(cmd) + + # disable_NuageUnderlayPublicIpRange - Disables/de-configures underlay + # networking for the given public IP range in Nuage VSP + def disable_NuageUnderlayPublicIpRange(self, public_ip_range): + cmd = disableNuageUnderlayVlanIpRange. 
\ + disableNuageUnderlayVlanIpRangeCmd() + cmd.id = public_ip_range.vlan.id + self.api_client.disableNuageUnderlayVlanIpRange(cmd) + + # list_NuageUnderlayPublicIpRanges - Lists underlay networking + # enabled/configured public IP ranges in Nuage VSP + def list_NuageUnderlayPublicIpRanges(self, public_ip_range=None): + cmd = listNuageUnderlayVlanIpRanges.listNuageUnderlayVlanIpRangesCmd() + if public_ip_range: + cmd.id = public_ip_range.vlan.id + cmd.underlay = True + return self.api_client.listNuageUnderlayVlanIpRanges(cmd) + # create_VpcOffering - Creates VPC offering @needscleanup def create_VpcOffering(cls, vpc_offering, suffix=None): @@ -395,6 +422,7 @@ class nuageTestCase(cloudstackTestCase): testdata = cls.test_data["network"] testdata["name"] = "TestNet-" + gateway + "-" + str(nw_off.name) testdata["displaytext"] = "Test Network" + testdata["gateway"] = gateway testdata["netmask"] = netmask network = Network.create(cls.api_client, testdata, @@ -402,7 +430,6 @@ class nuageTestCase(cloudstackTestCase): domainid=account.domainid, networkofferingid=nw_off.id, zoneid=cls.zone.id, - gateway=gateway, vlan=vlan, externalid=externalid, vpcid=vpc.id if vpc else cls.vpc.id @@ -435,7 +462,7 @@ class nuageTestCase(cloudstackTestCase): # create_VM - Creates VM in the given network(s) @needscleanup def create_VM(cls, network_list, host_id=None, start_vm=True, - testdata=None, account=None): + testdata=None, account=None, keypair=None): network_ids = [] if isinstance(network_list, list): for network in network_list: @@ -457,7 +484,8 @@ class nuageTestCase(cloudstackTestCase): zoneid=cls.zone.id, networkids=network_ids, startvm=start_vm, - hostid=host_id + hostid=host_id, + keypair=keypair ) cls.debug("Created VM with ID - %s in network(s) with ID(s) - %s" % (vm.id, network_ids)) @@ -490,22 +518,22 @@ class nuageTestCase(cloudstackTestCase): def migrate_VM(self, vm): self.debug("Checking if a host is available for migration...") hosts = Host.listForMigration(self.api_client, virtualmachineid=vm.id) - self.assertEqual(isinstance(hosts, list), True, - "List hosts should return a valid list" - ) - # Remove the host of current VM from the hosts list - vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0] - hosts[:] = [host for host in hosts if host.id != vm_info.hostid] - if len(hosts) <= 0: - self.skipTest("No host available for migration. " - "Test requires at-least 2 hosts") - host = hosts[0] - self.debug("Migrating VM with ID: %s to Host: %s" % (vm.id, host.id)) - try: - vm.migrate(self.api_client, hostid=host.id) - except Exception as e: - self.fail("Failed to migrate instance, %s" % e) - self.debug("Migrated VM with ID: %s to Host: %s" % (vm.id, host.id)) + if hosts: + self.assertEqual(isinstance(hosts, list), True, + "List hosts should return a valid list" + ) + host = hosts[0] + self.debug("Migrating VM with ID: " + "%s to Host: %s" % (vm.id, host.id)) + try: + vm.migrate(self.api_client, hostid=host.id) + except Exception as e: + self.fail("Failed to migrate instance, %s" % e) + self.debug("Migrated VM with ID: " + "%s to Host: %s" % (vm.id, host.id)) + else: + self.debug("No host available for migration.
" + "Test requires at-least 2 hosts") # delete_VM - Deletes the given VM def delete_VM(self, vm, expunge=True): diff --git a/test/integration/plugins/nuagevsp/test_nuage_configdrive.py b/test/integration/plugins/nuagevsp/test_nuage_configdrive.py new file mode 100644 index 00000000000..78321224438 --- /dev/null +++ b/test/integration/plugins/nuagevsp/test_nuage_configdrive.py @@ -0,0 +1,2278 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" Component tests for user data, meta data, ssh keys + and password reset functionality with + ConfigDrive and Nuage VSP SDN plugin +""" +# Import Local Modules +from nuageTestCase import nuageTestCase +from marvin.cloudstackAPI import updateTemplate, resetSSHKeyForVirtualMachine +from marvin.lib.base import (Account, + createVlanIpRange, + listVlanIpRanges, + NetworkServiceProvider, + PublicIpRange, + PublicIPAddress, + createSSHKeyPair, + deleteSSHKeyPair, + VirtualMachine) + +from marvin.lib.common import list_templates +from marvin.lib.utils import random_gen +# Import System Modules +from nose.plugins.attrib import attr +from datetime import datetime +import threading +import tempfile +import base64 +import sys +import time +import os +import copy +import json + + +class MySSHKeyPair: + """Manage SSH Key pairs""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, name=None, account=None, + domainid=None, projectid=None): + """Creates SSH keypair""" + cmd = createSSHKeyPair.createSSHKeyPairCmd() + cmd.name = name + if account is not None: + cmd.account = account + if domainid is not None: + cmd.domainid = domainid + if projectid is not None: + cmd.projectid = projectid + return MySSHKeyPair(apiclient.createSSHKeyPair(cmd).__dict__) + + def delete(self, apiclient): + """Delete SSH key pair""" + cmd = deleteSSHKeyPair.deleteSSHKeyPairCmd() + cmd.name = self.name + cmd.account = self.account + cmd.domainid = self.domainid + apiclient.deleteSSHKeyPair(cmd) + + +class Services: + """Test Add Remove Network Services + """ + + def __init__(self): + self.services = { + "isolated_configdrive_network_offering_withoutdns" : { + "name": 'nuage_configdrive_withoutDns_marvin', + "displaytext": 'nuage_configdrive_withoutDns_marvin', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,SourceNat,Connectivity,StaticNat,UserData,Firewall', + "traffictype": 'GUEST', + "availability": 'Optional', + "serviceProviderList": { + "Dhcp": 'NuageVsp', + "StaticNat": 'NuageVsp', + "SourceNat": 'NuageVsp', + "Firewall": 'NuageVsp', + "Connectivity": 'NuageVsp', + "UserData": 'ConfigDrive' + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "perzone"} + } + }, + "isolated_configdrive_network_offering": { + "name": 'nuage_configdrive_marvin', + 
"displaytext": 'nuage_configdrive_marvin', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,SourceNat,Connectivity,StaticNat,UserData,Firewall,Dns', + "traffictype": 'GUEST', + "availability": 'Optional', + "serviceProviderList": { + "Dhcp": 'NuageVsp', + "StaticNat": 'NuageVsp', + "SourceNat": 'NuageVsp', + "Firewall": 'NuageVsp', + "Connectivity": 'NuageVsp', + "UserData": 'ConfigDrive', + "Dns": 'VirtualRouter' + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "perzone"} + } + }, + "vpc_network_offering_configdrive_withoutdns" : { + "name": 'nuage_vpc_marvin_configdrive_withoutdns', + "displaytext": 'nuage_vpc_marvin_configdrive_withoutdns', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "ispersistent": 'True', + "serviceProviderList": { + "Dhcp": "NuageVsp", + "StaticNat": "NuageVsp", + "SourceNat": "NuageVsp", + "NetworkACL": "NuageVsp", + "Connectivity": "NuageVsp", + "UserData": "ConfigDrive" + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "perzone"} + } + }, + "vpc_network_offering_configdrive_withdns" : { + "name": 'nuage_vpc_marvin_configdrive_withdns', + "displaytext": 'nuage_vpc_marvin_configdrive_withdns', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData,Dns', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "ispersistent": 'True', + "serviceProviderList": { + "Dhcp": "NuageVsp", + "StaticNat": "NuageVsp", + "SourceNat": "NuageVsp", + "NetworkACL": "NuageVsp", + "Connectivity": "NuageVsp", + "UserData": "ConfigDrive", + "Dns": "VpcVirtualRouter" + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "perzone"} + } + }, + "vpc_offering_configdrive_withoutdns" : { + "name": 'Nuage VSP VPC offering ConfigDrive', + "displaytext": 'Nuage VSP VPC offering ConfigDrive', + "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData', + "serviceProviderList": { + "Dhcp": "NuageVsp", + "StaticNat": "NuageVsp", + "SourceNat": "NuageVsp", + "NetworkACL": "NuageVsp", + "Connectivity": "NuageVsp", + "UserData": "ConfigDrive" + } + }, + "vpc_offering_configdrive_withdns" :{ + "name": 'Nuage VSP VPC offering ConfigDrive withVR', + "displaytext": 'Nuage VSP VPC offering ConfigDrive withVR', + "supportedservices": 'Dhcp,StaticNat,SourceNat,NetworkACL,Connectivity,UserData,Dns', + "serviceProviderList": { + "Dhcp": "NuageVsp", + "StaticNat": "NuageVsp", + "SourceNat": "NuageVsp", + "NetworkACL": "NuageVsp", + "Connectivity": "NuageVsp", + "UserData": "ConfigDrive", + "Dns": "VpcVirtualRouter" + } + }, + "shared_nuage_network_config_drive_offering" : { + "name": 'nuage_marvin', + "displaytext": 'nuage_marvin', + "guestiptype": 'shared', + "supportedservices": 'Dhcp,Connectivity,UserData', + "traffictype": 'GUEST', + "specifyVlan": "False", + "specifyIpRanges": "True", + "availability": 'Optional', + "serviceProviderList": { + "Dhcp": "NuageVsp", + "Connectivity": "NuageVsp", + "UserData": 'ConfigDrive' + }, + "serviceCapabilityList": { + "Connectivity": { + "PublicAccess": "true" + } + } + }, + "network_all2" : { + "name": "SharedNetwork2-All-nuage", + "displaytext": "SharedNetwork2-All-nuage", + "gateway": "10.200.200.1", + "netmask": "255.255.255.0", + "startip": "10.200.200.21", + "endip": "10.200.200.100", + "acltype": "Domain" + } + } + + +class 
TestNuageConfigDrive(nuageTestCase): + """Test user data and password reset functionality + using configDrive with Nuage VSP SDN plugin + """ + + class CreateResult: + def __init__(self, success, offering=None, network=None, vpc=None): + self.success = success + self.network = network + self.offering = offering + self.vpc = vpc + + class PasswordTest: + def __init__(self, password): + self.test_presence = False + self.presence = None + self.password = None + if type(password) is bool: + self.test_presence = True + self.presence = password + self.password = None + elif type(password) is unicode or type(password) is str: + self.test_presence = True + self.password = password + self.presence = True + + class StartVM(threading.Thread): + + def __init__(self, nuagetestcase, network, index): + threading.Thread.__init__(self) + self.network = network + self.nuagetestcase = nuagetestcase + self.vm = None + self.index = index + + def run(self): + self.vm = self.nuagetestcase.create_VM( + self.network, + account=self.nuagetestcase.account, + cleanup=False) + self.nuagetestcase.check_VM_state(self.vm, state="Running") + self.nuagetestcase.debug("[Concurrency]VM %d running, name = %s" + % (self.index + 1, self.vm.name)) + + def get_vm(self): + return self.vm + + def stop(self): + self.vm.delete(self.nuagetestcase.api_client) + + def update(self): + expected_user_data = "hello world vm %s" % self.vm.name + user_data = base64.b64encode(expected_user_data) + self.vm.update(self.nuagetestcase.api_client, userdata=user_data) + + class StopVM(threading.Thread): + + def __init__(self, nuagetestcase, vm, **kwargs): + threading.Thread.__init__(self) + self.vm = vm + self.nuagetestcase = nuagetestcase + + def run(self): + self.vm.delete(self.nuagetestcase.api_client) + if self.vm in self.nuagetestcase.cleanup: + self.nuagetestcase.cleanup.remove(self.vm) + + def get_vm(self): + return self.vm + + @staticmethod + def get_name(): + return "delete" + + class UpdateVM(threading.Thread): + + def __init__(self, nuagetestcase, vm, **kwargs): + threading.Thread.__init__(self) + self.vm = vm + self.nuagetestcase = nuagetestcase + self.idx = kwargs["idx"] + + def run(self): + self.expected_user_data = "hello world vm %s" % self.vm.name + user_data = base64.b64encode(self.expected_user_data) + self.start = datetime.now() + self.vm.update(self.nuagetestcase.api_client, userdata=user_data) + self.end = datetime.now() + self.nuagetestcase.debug("[Concurrency]Update userdata idx=%d " + "for vm: %s. Duration in seconds: %s " % + (self.idx, self.vm.name, + (self.end - self.start).total_seconds())) + return self.expected_user_data + + def get_vm(self): + return self.vm + + def get_timestamps(self): + return [self.start, self.end] + + def get_userdata(self): + return self.expected_user_data + + @staticmethod + def get_name(): + return "userdata" + + class ResetPassword(threading.Thread): + + def __init__(self, nuagetestcase, vm, **kwargs): + threading.Thread.__init__(self) + self.vm = vm + self.nuagetestcase = nuagetestcase + + def run(self): + self.start = datetime.now() + self.vm.password = self.vm.resetPassword( + self.nuagetestcase.api_client) + self.nuagetestcase.debug("[Concurrency]Password reset to - %s" + % self.vm.password) + self.nuagetestcase.debug("[Concurrency]VM - %s password - %s !" + % (self.vm.name, self.vm.password)) + self.end = datetime.now() + self.nuagetestcase.debug("[Concurrency]Reset password for vm: %s. 
" + "Duration in seconds: %s " + % + (self.vm.name, + (self.end - self.start).total_seconds())) + + def get_vm(self): + return self.vm + + def get_timestamps(self): + return [self.start, self.end] + + def get_password(self): + return self.vm.password + + @staticmethod + def get_name(): + return "reset password" + + @classmethod + def setUpClass(cls): + super(TestNuageConfigDrive, cls).setUpClass() + cls.test_data["nuagevsp"].update(Services().services) + return + + def setUp(self): + # Create an account + self.account = Account.create(self.api_client, + self.test_data["account"], + admin=True, + domainid=self.domain.id + ) + self.tmp_files = [] + self.cleanup = [self.account] + return + + def tearDown(self): + super(TestNuageConfigDrive, self).tearDown() + for tmp_file in self.tmp_files: + os.remove(tmp_file) + + self.updateTemplate(False) + return + + # updateTemplate - Updates value of the guest VM template's password + # enabled setting + def updateTemplate(self, value): + self.debug("Updating value of guest VM template's password enabled " + "setting") + cmd = updateTemplate.updateTemplateCmd() + cmd.id = self.template.id + cmd.passwordenabled = value + self.api_client.updateTemplate(cmd) + list_template_response = list_templates(self.api_client, + templatefilter="all", + id=self.template.id + ) + self.template = list_template_response[0] + self.debug("Updated guest VM template") + + # get_userdata_url - Returns user data URL for the given VM object + def get_userdata_url(self, vm): + self.debug("Getting user data url") + nic = vm.nic[0] + gateway = str(nic.gateway) + self.debug("Gateway: " + gateway) + user_data_url = 'curl "http://' + gateway + ':80/latest/user-data"' + return user_data_url + + # create_and_verify_fw - Creates and verifies (Ingress) firewall rule + # with a Static NAT rule enabled public IP + def create_and_verify_fip_and_fw(self, vm, public_ip, network): + self.debug("Creating and verifying firewall rule") + self.create_StaticNatRule_For_VM(vm, public_ip, network) + + # VSD verification + self.verify_vsd_floating_ip(network, vm, public_ip.ipaddress) + + fw_rule = self.create_FirewallRule( + public_ip, self.test_data["ingress_rule"]) + + # VSD verification + self.verify_vsd_firewall_rule(fw_rule) + self.debug("Successfully created and verified firewall rule") + + def getConfigDriveContent(self, ssh): + """ + This method is to verify whether configdrive iso + is attached to vm or not + Returns mount path if config drive is attached else False + """ + mountdir = "/root/iso" + cmd = "blkid -t LABEL='config-2' /dev/sr? /dev/hd? /dev/sd? /dev/xvd? -o device" + tmp_cmd = [ + 'bash -c "if [ ! 
-d /root/iso ] ; then mkdir /root/iso ; fi"', + "umount /root/iso"] + for tcmd in tmp_cmd: + ssh.execute(tcmd) + configDrive = ssh.execute(cmd) + res = ssh.execute("mount {} {}".format(str(configDrive[0]), mountdir)) + if str(res).lower().find("mounting read-only") > -1: + self.debug("configDrive iso is mounted at location %s" % mountdir) + return mountdir + else: + return None + + def verifyUserData(self, ssh, iso_path, userdata): + """ + verify Userdata + """ + userdata_path = iso_path+"/cloudstack/userdata/user_data.txt" + cmd = "cat %s" % userdata_path + res = ssh.execute(cmd) + vmuserdata = str(res[0]) + self.debug("Expected userdata is %s" % userdata) + self.debug("ConfigDrive userdata acsformat is %s" % vmuserdata) + self.assertEqual(vmuserdata, userdata, + 'Userdata found: %s is not equal to expected: %s' + % (vmuserdata, userdata)) + + def verifyOpenStackUserData(self, ssh, iso_path, userdata): + """ + verify Userdata in Openstack format + """ + userdata_path = iso_path+"/openstack/latest/user_data" + cmd = "cat %s" % userdata_path + res = ssh.execute(cmd) + vmuserdata = str(res[0]) + self.debug("Expected userdata is %s" % userdata) + self.debug("ConfigDrive userdata openstackformat is %s" % vmuserdata) + self.assertEqual(vmuserdata, userdata, + 'Userdata found: %s is not equal to expected: %s' + % (vmuserdata, userdata)) + + def verifyPassword(self, vm, ssh, iso_path, password): + self.debug("Expected VM password is %s " % password.password) + password_file = iso_path+"/cloudstack/password/vm_password.txt" + cmd = "cat %s" % password_file + res = ssh.execute(cmd) + vmpassword = str(res[0]) + self.debug("ConfigDrive password is %s " % vmpassword) + nosuchfile = "No such file or directory" + if nosuchfile in vmpassword: + self.debug("Password file is not found") + return False, False + elif (password.password is not None) \ + and (password.password in vmpassword): + self.debug("Expected Password is found in configDriveIso") + return True, True + else: + self.debug("Expected password is not found in configDriveIso") + return True, False + + def verifySshKey(self, ssh, iso_path, sshkey): + self.debug("Expected VM sshkey is %s " % sshkey) + publicKey_file = iso_path+"/cloudstack/metadata/public-keys.txt" + cmd = "cat %s" % publicKey_file + res = ssh.execute(cmd) + vmsshkey = str(res[0]) + self.debug("ConfigDrive ssh key is %s " % vmsshkey) + + def verifyMetaData(self, vm, ssh, iso_path): + + metadata_dir = iso_path+"/cloudstack/metadata/" + metadata = {} + vm_files = ["availability-zone.txt", + "instance-id.txt", + "service-offering.txt", + "vm-id.txt"] + for file in vm_files: + cmd = "cat %s" % metadata_dir+file + res = ssh.execute(cmd) + metadata[file] = res + + metadata_files = ["availability-zone.txt", + "instance-id.txt", + "service-offering.txt", + "vm-id.txt"] + for mfile in metadata_files: + if mfile not in metadata: + self.fail("{} file is not found in vm metadata".format(mfile)) + self.assertEqual( + str(metadata["availability-zone.txt"][0]), + self.zone.name, + "Zone name inside metadata does not match with the zone" + ) + self.assertEqual( + str(metadata["instance-id.txt"][0]), + vm.instancename, + "vm name inside metadata does not match with the " + "instance name" + ) + self.assertEqual( + str(metadata["service-offering.txt"][0]), + vm.serviceofferingname, + "Service offering inside metadata does not match " + "with the instance offering" + ) + return + + def verifyOpenStackData(self, vm, ssh, iso_path): + + openstackdata_dir = iso_path+"/openstack/latest/" + 
openstackdata = {} + openstackdata_files = ["user_data", + "meta_data.json", + "vendor_data.json", + "network_data.json"] + for file in openstackdata_files: + cmd = "cat %s" % openstackdata_dir+file + res = ssh.execute(cmd) + openstackdata[file] = res + if file not in openstackdata: + self.fail("{} file not found in vm openstack".format(file)) + return + + def generate_ssh_keys(self): + """ + This method generates ssh key pair and writes the private key + into a temp file and returns the file name + """ + self.keypair = MySSHKeyPair.create( + self.api_client, + name=random_gen() + ".pem", + account=self.account.user[0].account, + domainid=self.account.domainid) + + self.cleanup.append(self.keypair) + self.debug("Created keypair with name: %s" % self.keypair.name) + self.debug("Writing the private key to local file") + keyPairFilePath = tempfile.gettempdir() + os.sep + self.keypair.name + self.tmp_files.append(keyPairFilePath) + self.debug("File path: %s" % keyPairFilePath) + with open(keyPairFilePath, "w+") as f: + f.write(self.keypair.privatekey) + os.system("chmod 400 " + keyPairFilePath) + return keyPairFilePath + + def umountConfigDrive(self, ssh, iso_path): + """umount config drive iso attached inside guest vm""" + ssh.execute("umount -d %s" % iso_path) + # Give the VM time to unlock the iso device + time.sleep(2) + # Verify umount + result = ssh.execute("ls %s" % iso_path) + self.assertTrue(len(result) == 0, + "After umount directory should be empty " + "but contains: %s" % result) + + def update_provider_state(self, new_state): + self.debug("Updating Service Provider ConfigDrive to %s" % new_state) + configdriveprovider = NetworkServiceProvider.list( + self.api_client, + name="ConfigDrive", + physicalnetworkid=self.vsp_physical_network.id)[0] + orig_state = configdriveprovider.state + NetworkServiceProvider.update(self.api_client, + configdriveprovider.id, + state=new_state) + self.validate_NetworkServiceProvider("ConfigDrive", state=new_state) + return orig_state + + def verify_network_creation(self, offering=None, + offering_name=None, + gateway=None, + vpc=None, acl_list=None, testdata=None): + if offering is None: + self.debug("Creating Nuage VSP network offering...") + offering = self.create_NetworkOffering( + self.test_data["nuagevsp"][offering_name]) + self.validate_NetworkOffering(offering, state="Enabled") + try: + network = self.create_Network(offering, + gateway=gateway, + vpc=vpc, + acl_list=acl_list, + testdata=testdata) + return self.CreateResult(True, offering=offering, network=network) + except Exception: + self.debug("Exception: %s" % sys.exc_info()[0]) + return self.CreateResult(False, offering=offering) + + def verify_vpc_creation(self, offering=None, offering_name=None): + + if offering is None: + self.debug("Creating Nuage VSP VPC offering...") + offering = self.create_VpcOffering( + self.test_data["nuagevsp"][offering_name]) + self.validate_VpcOffering(offering, state="Enabled") + try: + vpc = self.create_Vpc(offering, cidr='10.1.0.0/16') + self.validate_Vpc(vpc, state="Enabled") + return self.CreateResult(True, offering=offering, vpc=vpc) + except Exception: + return self.CreateResult(False, offering=offering) + + def update_password_enable_in_template(self, new_state): + self.debug("Updating guest VM template to password %s" % new_state) + orig_state = self.template.passwordenabled + if self.template.passwordenabled is not new_state: + self.updateTemplate(new_state) + self.assertEqual(self.template.passwordenabled, new_state, + "Guest VM template is not 
password enabled") + return orig_state + + def verify_config_drive_content(self, vm, + public_ip, + password_test, + userdata=None, + metadata=False, + sshkey=None, + ssh_client=None): + self.debug("SSHing into the VM %s" % vm.name) + if ssh_client is None: + ssh = self.ssh_into_VM(vm, public_ip) + else: + ssh = ssh_client + d = {x.name: x for x in ssh.logger.handlers} + ssh.logger.handlers = list(d.values()) + config_drive_path = self.getConfigDriveContent(ssh) + self.assertIsNotNone(config_drive_path, + 'ConfigdriveIso is not attached to vm') + if metadata: + self.debug("Verifying metadata for vm: %s" % vm.name) + self.verifyMetaData(vm, ssh, config_drive_path) + self.debug("Verifying openstackdata for vm: %s" % vm.name) + self.verifyOpenStackData(vm, ssh, config_drive_path) + + if userdata is not None: + self.debug("Verifying userdata for vm: %s" % vm.name) + self.verifyUserData(ssh, config_drive_path, userdata) + self.verifyOpenStackUserData(ssh, config_drive_path, userdata) + if password_test.test_presence: + self.debug("Verifying password for vm: %s" % vm.name) + test_result = self.verifyPassword(vm, ssh, config_drive_path, + password_test) + self.assertEqual(test_result[0], password_test.presence, + "Expected is that password is present: %s " + " but found is: %s" + % (test_result[0], password_test.presence)) + if password_test.password is not None: + self.debug("Password for vm is %s" % password_test.password) + self.assertEqual(test_result[1], True, + "Password value test failed.") + if sshkey is not None: + self.debug("Verifying sshkey for vm: %s" % vm.name) + self.verifySshKey(ssh, config_drive_path, sshkey) + + self.umountConfigDrive(ssh, config_drive_path) + return ssh + + def create_guest_vm(self, networks, acl_item=None, + vpc=None, keypair=None): + vm = self.create_VM( + networks, + testdata=self.test_data["virtual_machine_userdata"], + keypair=keypair) + # Check VM + self.check_VM_state(vm, state="Running") + self.verify_vsd_vm(vm) + # Check networks + network_list = [] + if isinstance(networks, list): + for network in networks: + network_list.append(network) + else: + network_list.append(networks) + + for network in network_list: + self.validate_Network(network, state="Implemented") + self.verify_vsd_network(self.domain.id, network, vpc=vpc) + + if acl_item is not None: + self.verify_vsd_firewall_rule(acl_item) + return vm + + # nic_operation_VM - Performs NIC operations such as add, remove, and + # update default NIC in the given VM and network + def nic_operation_VM(self, vm, network, operation="add"): + self.debug("Performing %s NIC operation in VM with ID - %s and " + "network with ID - %s" % (operation, vm.id, network.id)) + if operation is "add": + vm.add_nic(self.api_client, network.id) + self.debug("Added NIC in VM with ID - %s and network with ID - %s" + % (vm.id, network.id)) + vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0] + for nic in vm_info.nic: + if nic.networkid == network.id: + nic_id = nic.id + if operation is "update": + vm.update_default_nic(self.api_client, nic_id) + self.debug("Updated default NIC to NIC with ID- %s in VM with ID " + "- %s and network with ID - %s" % + (nic_id, vm.id, network.id)) + if operation is "remove": + vm.remove_nic(self.api_client, nic_id) + self.debug("Removed NIC with ID - %s in VM with ID - %s and " + "network with ID - %s" % (nic_id, vm.id, network.id)) + + def update_userdata(self, vm, expected_user_data): + updated_user_data = base64.b64encode(expected_user_data) + vm.update(self.api_client, 
userdata=updated_user_data) + return expected_user_data + + def reset_password(self, vm): + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + + def wait_until_done(self, thread_list, name): + for aThread in thread_list: + self.debug("[Concurrency]Join %s for vm %s" % (name, + aThread.get_vm())) + aThread.join() + + def resetsshkey(self, vm, keypair, account=None, domainid=None): + """Resets SSH key""" + cmd = resetSSHKeyForVirtualMachine.resetSSHKeyForVirtualMachineCmd() + cmd.id = vm.id + cmd.keypair = keypair + cmd.account = account + cmd.domainid = domainid + return(self.api_client.resetSSHKeyForVirtualMachine(cmd)) + + def update_sshkeypair(self, vm): + vm.stop(self.api_client) + self.resetsshkey(vm, + self.keypair.name, + account=self.account.user[0].account, + domainid=self.account.domainid) + self.debug("Sshkey reset to - %s" % self.keypair.name) + vm.start(self.api_client) + + def add_subnet_verify(self, network, services): + """verify required nic is present in the VM""" + + self.debug("Going to add new ip range in shared network %s" % + network.name) + cmd = createVlanIpRange.createVlanIpRangeCmd() + cmd.networkid = network.id + cmd.gateway = services["gateway"] + cmd.netmask = services["netmask"] + cmd.startip = services["startip"] + cmd.endip = services["endip"] + cmd.forVirtualNetwork = services["forvirtualnetwork"] + addedsubnet = self.api_client.createVlanIpRange(cmd) + + self.debug("verify above iprange is successfully added in shared " + "network %s or not" % network.name) + + cmd1 = listVlanIpRanges.listVlanIpRangesCmd() + cmd1.networkid = network.id + cmd1.id = addedsubnet.vlan.id + + allsubnets = self.api_client.listVlanIpRanges(cmd1) + self.assertEqual( + allsubnets[0].id, + addedsubnet.vlan.id, + "Check New subnet is successfully added to the shared Network" + ) + return addedsubnet + + @attr(tags=["advanced", "nuagevsp", "isonw"], required_hardware="true") + def test_nuage_configdrive_isolated_network(self): + """Test Configdrive as provider for isolated Networks + to provide userdata and password reset functionality + with Nuage VSP SDN plugin + """ + + # 1. When ConfigDrive is disabled as provider in zone + # Verify Isolated Network creation with a network offering + # which has userdata provided by ConfigDrive fails + # 2. When ConfigDrive is enabled as provider in zone + # Create an Isolated Network with Nuage VSP Isolated Network + # offering specifying ConfigDrive as serviceProvider + # for userdata, + # make sure no Dns is in the offering so no VR is spawned. + # check if it is successfully created and + # is in the "Allocated" state. + # 3. Deploy a VM in the created Isolated network with user data, + # check if the Isolated network state is changed to + # "Implemented", and the VM is successfully deployed and + # is in the "Running" state. + # Check that no VR is deployed. + # 4. SSH into the deployed VM and verify its user data in the iso + # (expected user data == actual user data). + # 5. Verify that the guest VM's password in the iso. + # 6. Reset VM password, and start the VM. + # 7. Verify that the new guest VM template is password enabled by + # checking the VM's password (password != "password"). + # 8. SSH into the VM for verifying its new password + # after its password reset. + # 9. Verify various scenarios and check the data in configdriveIso + # 10. Delete all the created objects (cleanup). 
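Steps 1 and 2 above hinge on toggling the ConfigDrive network service provider, which update_provider_state() does through the Marvin NetworkServiceProvider wrapper. A minimal sketch of that toggle, assuming api_client and physical_network_id are already resolved (the helper resolves them from self.vsp_physical_network):

# Sketch of the ConfigDrive provider toggle used by steps 1 and 2 above;
# api_client and physical_network_id are assumed to be resolved already.
from marvin.lib.base import NetworkServiceProvider

def set_configdrive_provider(api_client, physical_network_id, state):
    provider = NetworkServiceProvider.list(
        api_client,
        name="ConfigDrive",
        physicalnetworkid=physical_network_id)[0]
    # state is "Enabled" or "Disabled"
    NetworkServiceProvider.update(api_client, provider.id, state=state)
    return provider.state  # previous state, handy for restoring afterwards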
+ + for zone in self.zones: + self.debug("Zone - %s" % zone.name) + # Get Zone details + self.getZoneDetails(zone=zone) + # Configure VSD sessions + self.configureVSDSessions() + + self.debug("+++Testing configdrive in an Isolated network fails..." + "as provider configdrive is still disabled...") + self.update_provider_state("Disabled") + create_network = self.verify_network_creation( + offering_name="isolated_configdrive_network_offering_" + "withoutdns", + gateway='10.1.1.1') + self.assertFalse(create_network.success, + 'Network found success = %s, expected success =%s' + % (str(create_network.success), 'False')) + + self.debug("+++Test user data & password reset functionality " + "using configdrive in an Isolated network without VR") + self.update_provider_state("Enabled") + create_network1 = self.verify_network_creation( + offering=create_network.offering, + gateway='10.1.1.1') + self.assertTrue(create_network1.success, + 'Network found success = %s, expected success = %s' + % (str(create_network1.success), 'True')) + self.validate_Network(create_network1.network, state="Allocated") + create_network2 = self.verify_network_creation( + offering=create_network.offering, + gateway='10.1.2.1') + self.assertTrue(create_network2.success, + 'Network found success = %s,expected success = %s' + % (str(create_network2.success), 'True')) + self.validate_Network(create_network2.network, state="Allocated") + self.update_password_enable_in_template(True) + + self.debug("+++Deploy VM in the created Isolated network " + "with as user data provider configdrive without VR") + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_guest_vm(create_network1.network, + keypair=self.keypair.name) + + with self.assertRaises(Exception): + self.get_Router(create_network1) + self.debug("+++Verified no VR is spawned for this network ") + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + + public_ip_1 = self.acquire_PublicIPAddress(create_network1.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_1, + create_network1.network) + + self.verify_config_drive_content( + vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=self.test_data[ + "virtual_machine_userdata"]["userdata"], + sshkey=self.keypair.name) + + expected_user_data1 = self.update_userdata(vm1, "helloworld vm1") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.generate_ssh_keys() + self.update_sshkeypair(vm1) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(True), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + # After sshkey reset we need to have the vm password again + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" 
% + (vm1.name, vm1.password)) + + self.debug("Adding a non-default nic to the VM " + "making it a multi-nic VM...") + self.nic_operation_VM(vm1, create_network2.network, + operation="add") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + + expected_user_data1 = self.update_userdata(vm1, + "hellomultinicvm1") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("updating non-default nic as the default nic " + "of the multi-nic VM and enable staticnat...") + self.nic_operation_VM(vm1, + create_network2.network, operation="update") + + public_ip_2 = \ + self.acquire_PublicIPAddress(create_network2.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_2, + create_network2.network) + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(vm1.password), + userdata=expected_user_data1) + expected_user_data1 = self.update_userdata(vm1, + "hellomultinicvm1") + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.debug("Updating the default nic of the multi-nic VM, " + "deleting the non-default nic...") + self.nic_operation_VM(vm1, + create_network1.network, operation="update") + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + + multinicvm1 = self.create_guest_vm([create_network2.network, + create_network1.network]) + multinicvm1.password = multinicvm1.resetPassword(self.api_client) + self.debug("MultiNICVM Password reset to - %s" + % multinicvm1.password) + self.debug("MultiNICVM - %s password - %s !" + % (multinicvm1.name, multinicvm1.password)) + + public_ip_3 = self.acquire_PublicIPAddress(create_network2.network) + self.create_and_verify_fip_and_fw(multinicvm1, public_ip_3, + create_network2.network) + self.verify_config_drive_content( + multinicvm1, public_ip_3, + self.PasswordTest(multinicvm1.password), + metadata=True, + userdata=self.test_data[ + "virtual_machine_userdata"]["userdata"]) + expected_user_data2 = self.update_userdata(multinicvm1, + "hello multinicvm1") + self.verify_config_drive_content(multinicvm1, public_ip_3, + self.PasswordTest(True), + userdata=expected_user_data2) + + multinicvm1.delete(self.api_client, expunge=True) + public_ip_3.delete(self.api_client) + public_ip_2.delete(self.api_client) + self.nic_operation_VM(vm1, + create_network2.network, operation="remove") + create_network2.network.delete(self.api_client) + + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" 
% + (vm1.name, vm1.password)) + + self.debug("+++ Restarting the created Isolated network without " + "VR without cleanup...") + create_network1.network.restart(self.api_client, cleanup=False) + self.validate_Network(create_network1.network, + state="Implemented") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created Isolated network without " + "VR with cleanup...") + create_network1.network.restart(self.api_client, cleanup=True) + self.validate_Network(create_network1.network, + state="Implemented") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Upgrade offering of created Isolated network with " + "a dns offering which spins a VR") + self.upgrade_Network(self.test_data["nuagevsp"][ + "isolated_configdrive_network_offering"], + create_network1.network) + vr = self.get_Router(create_network1.network) + self.check_Router_state(vr, state="Running") + # VSD verification + self.verify_vsd_network(self.domain.id, create_network1.network) + self.verify_vsd_router(vr) + + self.debug("+++Test user data & password reset functionality " + "using configdrive in an Isolated network with VR") + create_vrnetwork1 = self.verify_network_creation( + offering_name="isolated_configdrive_network_offering", + gateway='10.1.3.1') + self.assertTrue(create_vrnetwork1.success, + 'Network found success = %s, expected success = %s' + % (str(create_vrnetwork1.success), 'True')) + self.validate_Network(create_vrnetwork1.network, state="Allocated") + self.debug("+++Deploying a VM in the created Isolated network " + "with as user data provider configdrive with VR") + vm2 = self.create_guest_vm(create_vrnetwork1.network) + + vr2 = self.get_Router(create_vrnetwork1.network) + self.check_Router_state(vr2, state="Running") + + # VSD verification + self.verify_vsd_network(self.domain.id, create_vrnetwork1.network) + self.verify_vsd_router(vr2) + self.debug("+++Verified VR is spawned for this network ") + + # We need to have the vm password + vm2.password = vm2.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm2.password) + self.debug("VM2 - %s password - %s !" 
% + (vm2.name, vm2.password)) + public_ip_3 = self.acquire_PublicIPAddress( + create_vrnetwork1.network) + self.create_and_verify_fip_and_fw(vm2, public_ip_3, + create_vrnetwork1.network) + + self.verify_config_drive_content( + vm2, public_ip_3, + self.PasswordTest(vm2.password), + metadata=True, + userdata=self.test_data[ + "virtual_machine_userdata"]["userdata"]) + + expected_user_data2 = self.update_userdata(vm2, "helloworld vm2") + self.verify_config_drive_content(vm2, public_ip_3, + self.PasswordTest(vm2.password), + userdata=expected_user_data2) + + self.debug("+++ Restarting the created Isolated network with " + "VR without cleanup...") + create_vrnetwork1.network.restart(self.api_client, cleanup=False) + self.validate_Network(create_vrnetwork1.network, + state="Implemented") + self.verify_config_drive_content(vm2, public_ip_3, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + self.debug("+++ Restarting the created Isolated network with " + "VR with cleanup...") + create_vrnetwork1.network.restart(self.api_client, cleanup=True) + self.validate_Network(create_vrnetwork1.network, + state="Implemented") + self.verify_config_drive_content(vm2, public_ip_3, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + self.debug("+++ Upgrade offering of created Isolated network with " + "an offering which removes the VR...") + self.upgrade_Network( + self.test_data["nuagevsp"][ + "isolated_configdrive_network_offering_withoutdns"], + create_vrnetwork1.network) + with self.assertRaises(Exception): + self.get_Router(create_vrnetwork1.network) + + self.verify_config_drive_content(vm2, public_ip_3, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + vm2.delete(self.api_client, expunge=True) + create_vrnetwork1.network.delete(self.api_client) + + self.debug("+++Verifying userdata after rebootVM - %s" % vm1.name) + vm1.reboot(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, "hello afterboot") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password)) + + self.debug("+++ Migrating one of the VMs in the created Isolated " + "network to another host, if available...") + self.migrate_VM(vm1) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata after migrating VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, + "hello after migrate") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password), + userdata=expected_user_data1) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password)) + + self.debug("+++Verify 
userdata after stopstartVM - %s" % vm1.name) + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, + "hello afterstopstart") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(vm1.password)) + + self.debug("+++ Verify userdata after VM recover- %s" % vm1.name) + vm1.delete(self.api_client, expunge=False) + self.debug("Recover VM - %s" % vm1.name) + vm1.recover(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + self.update_provider_state("Disabled") + expected_user_data1 = self.update_userdata(vm1, + "hello after recover") + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ When template is not password enabled, " + "verify configdrive of VM - %s" % vm1.name) + vm1.delete(self.api_client, expunge=True) + self.update_provider_state("Enabled") + self.updateTemplate(False) + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_guest_vm(create_network1.network, + keypair=self.keypair.name) + + expected_user_data1 = self.update_userdata(vm1, + "This is sample data") + public_ip_1 = \ + self.acquire_PublicIPAddress(create_network1.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_1, + create_network1.network) + self.verify_config_drive_content(vm1, public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + vm1.delete(self.api_client, expunge=True) + create_network1.network.delete(self.api_client) + + @attr(tags=["advanced", "nuagevsp", "vpc"], required_hardware="true") + def test_nuage_configdrive_vpc_network(self): + """Test Configdrive for VPC Networks + choose user data with configDrive as service provider + and test password reset functionality using ConfigDrive + with Nuage VSP SDN plugin + """ + + # 1. Verify VPC Network creation with ConfigDrive fails + # as ConfigDrive is disabled as provider + # 2. Create a VPC Network with Nuage VSP VPC tier Network + # offering specifying ConfigDrive as serviceProvider for userdata, + # make sure no Dns is in the offering so no VR is spawned. + # check if it is successfully created and is in "Allocated" state. + # 3. Deploy a VM in the created VPC tier network with user data, + # check if the Isolated network state is changed to "Implemented", + # and the VM is successfully deployed and is in "Running" state. + # Check that no VR is deployed. + # 4. SSH into the deployed VM and verify its user data in the iso + # (expected user data == actual user data). + # 5. Verify that the guest VM's password in the iso. + # 6. Reset VM password, and start the VM. + # 7. Verify that the new guest VM template is password enabled by + # checking the VM's password (password != "password"). + # 8. 
SSH into the VM for verifying its new password + # after its password reset. + # 9. Verify various scenarios and check the data in configdrive iso + # 10. Delete all the created objects (cleanup). + + for zone in self.zones: + self.debug("Zone - %s" % zone.name) + # Get Zone details + self.getZoneDetails(zone=zone) + # Configure VSD sessions + self.configureVSDSessions() + + self.update_provider_state("Disabled") + create_vpc = self.verify_vpc_creation( + offering_name="vpc_offering_configdrive_withoutdns") + self.assertTrue(create_vpc.success, + "Vpc found success = %s, expected success = %s" + % (str(create_vpc.success), 'True')) + acl_list = self.create_NetworkAclList( + name="acl", description="acl", vpc=create_vpc.vpc) + acl_item = self.create_NetworkAclRule( + self.test_data["ingress_rule"], acl_list=acl_list) + + self.debug("+++Testing configdrive in a VPC Tier network fails..." + "as provider configdrive is still disabled...") + create_networkfails = \ + self.verify_network_creation( + offering_name="vpc_network_offering_configdrive_" + "withoutdns", + gateway='10.1.1.1', + vpc=create_vpc.vpc, + acl_list=acl_list) + self.assertFalse(create_networkfails.success, + "Create Network found success = %s, " + "expected success = %s" + % (str(create_networkfails.success), 'False')) + self.debug("Testing user data&password reset functionality using" + "configdrive in a VPC network without VR...") + self.update_provider_state("Enabled") + + create_tiernetwork = \ + self.verify_network_creation( + offering=create_networkfails.offering, + gateway='10.1.1.1', + vpc=create_vpc.vpc, + acl_list=acl_list) + self.assertTrue(create_tiernetwork.success, + "Create Network found success = %s, " + "expected success = %s" + % (str(create_tiernetwork.success), 'True')) + self.validate_Network(create_tiernetwork.network, + state="Implemented") + + create_tiernetwork2 = \ + self.verify_network_creation( + offering=create_networkfails.offering, + gateway='10.1.2.1', + vpc=create_vpc.vpc, + acl_list=acl_list) + self.assertTrue(create_tiernetwork2.success, + 'Network found success= %s, expected success= %s' + % (str(create_tiernetwork2.success), 'True')) + self.validate_Network(create_tiernetwork2.network, + state="Implemented") + + self.update_password_enable_in_template(True) + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm = self.create_guest_vm(create_tiernetwork.network, + acl_item, + vpc=create_vpc.vpc, + keypair=self.keypair.name) + + vpc_public_ip_1 = \ + self.acquire_PublicIPAddress(create_tiernetwork.network, + create_vpc.vpc) + self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1, + create_tiernetwork.network) + + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(True), + metadata=True, + sshkey=self.keypair.name) + + expected_user_data = self.update_userdata(vm, "helloworld vm1") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(True), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + sshkey=self.keypair.name) + + self.generate_ssh_keys() + self.update_sshkeypair(vm) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(True), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + # After sshkey reset we need to have the vm password again 
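The PasswordTest/verify_config_drive_content assertions exercised throughout these tests read the generated values back from the config drive attached to the guest. As a point of reference only, the following is a minimal sketch of such a check; it is not part of this patch, and the CD-ROM device name, the mount point and the cloudstack/ file layout on the ISO are assumptions made purely for illustration.

from marvin.sshClient import SshClient


def read_config_drive(host, user="root", passwd="password",
                      mount_point="/mnt/configdrive"):
    """Mount the config drive ISO inside the guest and return its contents."""
    ssh = SshClient(host, 22, user, passwd)
    # Mount the attached ISO read-only (device name is an assumption)
    ssh.execute("mkdir -p %s" % mount_point)
    ssh.execute("mount -o ro /dev/cdrom %s" % mount_point)
    # Read back user data and the reset password (paths are assumptions)
    userdata = ssh.execute("cat %s/cloudstack/userdata/user_data" % mount_point)
    password = ssh.execute("cat %s/cloudstack/password/vm_password" % mount_point)
    ssh.execute("umount %s" % mount_point)
    return userdata, password

A check along these lines would compare the returned user data with the value passed to update_userdata() and the returned password with vm.password, which is what the assertions in these tests rely on.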
+ vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + + self.debug("+++ Restarting the created vpc without " + "cleanup...") + self.restart_Vpc(create_vpc.vpc, cleanup=False) + self.validate_Vpc(create_vpc.vpc, state="Enabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Adding a non-default nic to the VM " + "making it a multi-nic VM...") + self.nic_operation_VM(vm, create_tiernetwork2.network, + operation="add") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + + expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("updating non-default nic as the default nic " + "of the multi-nic VM and enable staticnat...") + self.nic_operation_VM(vm, + create_tiernetwork2.network, + operation="update") + + vpc_public_ip_2 = \ + self.acquire_PublicIPAddress(create_tiernetwork2.network, + create_vpc.vpc) + self.create_StaticNatRule_For_VM(vm, vpc_public_ip_2, + create_tiernetwork2.network) + vm.stop(self.api_client) + vm.start(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" % + (vm.name, vm.password)) + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(vm.password), + userdata=expected_user_data1) + expected_user_data1 = self.update_userdata(vm, "hellomultinicvm1") + self.verify_config_drive_content(vm, vpc_public_ip_2, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.debug("Updating the default nic of the multi-nic VM, " + "deleting the non-default nic...") + self.nic_operation_VM(vm, + create_tiernetwork.network, + operation="update") + vm.stop(self.api_client) + vm.start(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + vpc_public_ip_2.delete(self.api_client) + self.nic_operation_VM(vm, + create_tiernetwork2.network, + operation="remove") + create_tiernetwork2.network.delete(self.api_client) + + vm.password = vm.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm.password) + self.debug("VM - %s password - %s !" 
% + (vm.name, vm.password)) + + self.debug("+++ Restarting the created vpc with " + "cleanup...") + self.restart_Vpc(create_vpc.vpc, cleanup=True) + self.validate_Vpc(create_vpc.vpc, state="Enabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created VPC Tier network without " + "cleanup...") + create_tiernetwork.network.restart(self.api_client, cleanup=False) + self.validate_Network(create_tiernetwork.network, + state="Implemented") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ Restarting the created VPC Tier network with " + "cleanup...") + create_tiernetwork.network.restart(self.api_client, cleanup=True) + self.validate_Network(create_tiernetwork.network, + state="Implemented") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Testing user data & password reset functionality " + " using configdrive in a VPC network with VR...") + create_vrvpc = self.verify_vpc_creation( + offering_name="vpc_offering_configdrive_withdns") + self.assertTrue(create_vrvpc.success, + 'Vpc found success = %s, expected success = %s' + % (str(create_vrvpc.success), 'True')) + acl_list2 = self.create_NetworkAclList( + name="acl", description="acl", vpc=create_vrvpc.vpc) + acl_item2 = self.create_NetworkAclRule( + self.test_data["ingress_rule"], acl_list=acl_list2) + create_vrnetwork = \ + self.verify_network_creation( + offering_name="vpc_network_offering_configdrive_withdns", + gateway='10.1.3.1', + vpc=create_vrvpc.vpc, + acl_list=acl_list2) + self.assertTrue(create_vrnetwork.success, + "Create Network found success = %s, " + "expected success = %s" + % (str(create_vrnetwork.success), 'True')) + self.validate_Network(create_vrnetwork.network, + state="Implemented") + vm2 = self.create_guest_vm(create_vrnetwork.network, + acl_item2, + vpc=create_vrvpc.vpc) + vr2 = self.get_Router(create_vrnetwork.network) + self.check_Router_state(vr2, state="Running") + + # VSD verification + self.verify_vsd_network(self.domain.id, create_vrnetwork.network, + create_vrvpc.vpc) + self.verify_vsd_router(vr2) + self.debug("+++Verified VR is spawned for this network ") + # We need to have the vm password + vm2.password = vm2.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm2.password) + self.debug("VM2 - %s password - %s !" 
% + (vm2.name, vm2.password)) + vpc_public_ip_2 = \ + self.acquire_PublicIPAddress(create_vrnetwork.network, + create_vrvpc.vpc) + self.create_StaticNatRule_For_VM(vm2, vpc_public_ip_2, + create_vrnetwork.network) + + self.verify_config_drive_content( + vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + metadata=True, + userdata=self.test_data["virtual_machine_userdata"][ + "userdata"]) + + expected_user_data2 = self.update_userdata(vm2, "helloworld vm2") + self.verify_config_drive_content(vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + userdata=expected_user_data2) + + self.debug("+++ Restarting the created vpc without " + "cleanup...") + self.restart_Vpc(create_vrvpc.vpc, cleanup=False) + self.validate_Vpc(create_vrvpc.vpc, state="Enabled") + self.verify_config_drive_content(vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + self.debug("+++ Restarting the created vpc with " + "cleanup...") + self.restart_Vpc(create_vrvpc.vpc, cleanup=True) + self.validate_Vpc(create_vrvpc.vpc, state="Enabled") + self.verify_config_drive_content(vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + self.debug("+++ Restarting the created VPC Tier network without " + "cleanup...") + create_vrnetwork.network.restart(self.api_client, cleanup=False) + self.validate_Network(create_vrnetwork.network, + state="Implemented") + self.verify_config_drive_content(vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + self.debug("+++ Restarting the created VPC Tier network with " + "cleanup...") + create_vrnetwork.network.restart(self.api_client, cleanup=True) + self.validate_Network(create_vrnetwork.network, + state="Implemented") + self.verify_config_drive_content(vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + self.debug("+++ Upgrade offering of created VPC network with " + "an offering which removes the VR...") + self.upgrade_Network(self.test_data["nuagevsp"][ + "vpc_network_offering_configdrive_" + "withoutdns"], + create_vrnetwork.network) + + self.verify_config_drive_content(vm2, vpc_public_ip_2, + self.PasswordTest(vm2.password), + userdata=expected_user_data2, + metadata=True) + + vm2.delete(self.api_client, expunge=True) + create_vrnetwork.network.delete(self.api_client) + create_vrvpc.vpc.delete(self.api_client) + + self.debug("+++ Verify userdata after rebootVM - %s" % vm.name) + vm.reboot(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm.name) + expected_user_data = self.update_userdata(vm, + "hellovm after reboot") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password)) + + self.debug("+++ Migrating one of the VMs in the created " + "VPC Tier network to another host, if available...") + self.migrate_VM(vm) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + 
userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata after migrating VM - %s" % vm.name) + expected_user_data = self.update_userdata(vm, + "hellovm after migrate") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password), + userdata=expected_user_data, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password)) + + self.debug("+++ Verify userdata after stopstartVM - %s" % vm.name) + vm.stop(self.api_client) + vm.start(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm.name) + expected_user_data = self.update_userdata(vm, + "hello after stopstart") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm.name) + self.reset_password(vm) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(vm.password)) + + self.debug("+++ Verify userdata after recoverVM - %s" % vm.name) + vm.delete(self.api_client, expunge=False) + self.debug("Recover VM - %s" % vm.name) + vm.recover(self.api_client) + vm.start(self.api_client) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + self.update_provider_state("Disabled") + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ When template is not password enabled " + "verify configdrive of VM - %s" % vm.name) + vm.delete(self.api_client, expunge=True) + self.update_provider_state("Enabled") + self.updateTemplate(False) + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm = self.create_guest_vm(create_tiernetwork.network, + acl_item, + vpc=create_vpc.vpc, + keypair=self.keypair.name) + + expected_user_data = self.update_userdata(vm, + "This is sample data") + vpc_public_ip_1 = \ + self.acquire_PublicIPAddress(create_tiernetwork.network, + create_vpc.vpc) + self.create_StaticNatRule_For_VM(vm, vpc_public_ip_1, + create_tiernetwork.network) + self.verify_config_drive_content(vm, vpc_public_ip_1, + self.PasswordTest(False), + userdata=expected_user_data, + metadata=True, + sshkey=self.keypair.name) + vm.delete(self.api_client, expunge=True) + create_tiernetwork.network.delete(self.api_client) + + def handle_threads(self, source_threads, thread_class, **kwargs): + my_threads = [] + for aThread in source_threads: + my_vm = aThread.get_vm() + self.debug("[Concurrency]%s in vm: %s" + % (thread_class.get_name(), my_vm.name)) + new_thread = thread_class(self, my_vm, **kwargs) + my_threads.append(new_thread) + new_thread.start() + # + # Wait until all threads are finished + self.wait_until_done(my_threads, thread_class.get_name()) + return my_threads + + @attr( + tags=["advanced", "nuagevsp", "concurrency"], required_hardware="true") + def 
test_nuage_configDrive_concurrency(self): + """ Verify concurrency of ConfigDrive userdata update & password reset + """ + + # Validate the following + # 1. When ConfigDrive is enabled as provider in zone + # Create an Isolated Network with Nuage VSP Isolated Network + # offering specifying ConfigDrive as serviceProvider for userdata, + # make sure no Dns is in the offering so no VR is spawned. + # check if it is successfully created and is in the "Allocated" + # state. + # 2. Concurrently create a number of VM's in the above isolated network + # 3. Wait until all VM's are running + # 4. Concurrently update the userdata of all the VM's + # 5. Wait util all updates are finished + # 6. Repeat above (5-6) x times + # 7. Check userdata in all VM's + # 8. Concurrently reset password on all VM's + # 9. Wait until all resets are finished + # 10. Verify all passwords + # 11. Concurrently delete all VM's. + # 12. Restore ConfigDrive provider state + # 13. Delete all the created objects (cleanup). + + # + # 1. When ConfigDrive enabled create network + default_state = self.update_provider_state("Enabled") + create_network = self.verify_network_creation( + offering_name="isolated_configdrive_network_offering_withoutdns", + gateway='10.1.1.1') + # + # 2. Concurrently create all VMs + self.password_enabled = self.update_password_enable_in_template(False) + my_create_threads = [] + nbr_vms = 5 + for i in range(nbr_vms): + # Add VM + self.debug("+++ [Concurrency]Going to verify %d VM's, starting " + "the %d VM" % (nbr_vms, i + 1)) + vm_thread = self.StartVM(self, create_network.network, i) + my_create_threads.append(vm_thread) + vm_thread.start() + # + # 3. Wait until all VM's are running + self.wait_until_done(my_create_threads, "creation") + self.assertEqual( + nbr_vms, len(my_create_threads), "Not all VM's are up") + + try: + for i in range(2): + + self.debug("\n+++ [Concurrency]Start update on all VM's") + # + # 5. Concurrently update all VM's + my_update_threads = self.handle_threads(my_create_threads, + self.UpdateVM, idx=i) + + first = my_update_threads[0].get_timestamps() + last = my_update_threads[-1].get_timestamps() + self.debug("[Concurrency] Update report: first start %s, " + "last start %s. Duration in seconds: %s" % + (first[0].strftime("%H:%M:%S-%f"), + last[0].strftime("%H:%M:%S-%f"), + (last[0] - first[0]).total_seconds())) + self.debug("[Concurrency] Update report: first end %s, " + "last end %s. Duration in seconds: %s" % + (first[1].strftime("%H:%M:%S-%f"), + last[1].strftime("%H:%M:%S-%f"), + (last[0] - first[0]).total_seconds())) + # + # 7. Check userdata in all VM's + self.debug("\n+++ [Concurrency]Check userdata") + public_ip_1 = self.acquire_PublicIPAddress(create_network.network) + for aThread in my_update_threads: + # + # create floating ip + self.create_and_verify_fip_and_fw(aThread.get_vm(), + public_ip_1, + create_network.network) + # + # verify userdata + self.debug("[Concurrency]verify userdata for vm %s" + % aThread.get_vm().name) + self.verify_config_drive_content( + aThread.get_vm(), public_ip_1, + self.PasswordTest(None), + userdata=aThread.get_userdata()) + self.delete_StaticNatRule_For_VM(public_ip_1) + # + # 8. Concurrently reset password on all VM's + self.update_password_enable_in_template(True) + my_reset_threads = self.handle_threads(my_create_threads, + self.ResetPassword) + # + # 10. 
Verify the passwords + self.debug("\n+++ [Concurrency]Verify passwords on all VM's") + for aThread in my_reset_threads: + + # create floating ip + self.create_and_verify_fip_and_fw(aThread.get_vm(), + public_ip_1, + create_network.network) + + # verify password + self.debug("[Concurrency]verify password for vm %s" + % aThread.get_vm().name) + self.verify_config_drive_content( + aThread.get_vm(), public_ip_1, + self.PasswordTest(aThread.get_password())) + self.delete_StaticNatRule_For_VM(public_ip_1) + public_ip_1.delete(self.api_client) + + self.debug("\n+++ [Concurrency]Stop all VM's") + + finally: + self.update_password_enable_in_template(self.password_enabled) + # + # 11. Concurrently delete all VM's. + self.handle_threads(my_create_threads, self.StopVM) + # + # 12. Restore ConfigDrive provider state + self.update_provider_state(default_state) + # + # 13. Delete all the created objects (cleanup). + self.delete_Network(create_network.network) + + @attr(tags=["advanced", "nuagevsp", "shared"], required_hardware="true") + def test_nuage_configdrive_shared_network(self): + """Test Configdrive as provider for shared Networks + to provide userdata and password reset functionality + with Nuage VSP SDN plugin + """ + + # 1. When ConfigDrive is disabled as provider in zone + # Verify Shared Network creation with a network offering + # which has userdata provided by ConfigDrive fails + # 2. When ConfigDrive is enabled as provider in zone + # Create a shared Network with Nuage VSP Isolated Network + # offering specifying ConfigDrive as serviceProvider + # for userdata, + # make sure no Dns is in the offering so no VR is spawned. + # check if it is successfully created and + # is in the "Allocated" state. + # 3. Deploy a VM in the created Shared network with user data, + # check if the Shared network state is changed to + # "Implemented", and the VM is successfully deployed and + # is in the "Running" state. + # Check that no VR is deployed. + # 4. SSH into the deployed VM and verify its user data in the iso + # (expected user data == actual user data). + # 5. Verify that the guest VM's password in the iso. + # 6. Reset VM password, and start the VM. + # 7. Verify that the new guest VM template is password enabled by + # checking the VM's password (password != "password"). + # 8. SSH into the VM for verifying its new password + # after its password reset. + # 9. Verify various scenarios and check the data in configdriveIso + # 10. Delete all the created objects (cleanup). + + for zone in self.zones: + self.debug("Zone - %s" % zone.name) + # Get Zone details + self.getZoneDetails(zone=zone) + # Configure VSD sessions + self.configureVSDSessions() + if not self.isNuageInfraUnderlay: + self.skipTest("Configured Nuage VSP SDN platform infrastructure " + "does not support underlay networking: " + "skipping test") + + self.debug("+++Testing configdrive in an shared network fails..." 
+ "as provider configdrive is still disabled...") + self.update_provider_state("Disabled") + shared_test_data = self.test_data["nuagevsp"]["network_all"] + shared_network = self.verify_network_creation( + offering_name="shared_nuage_network_config_drive_offering", + testdata=shared_test_data) + self.assertFalse(shared_network.success, + 'Network found success = %s, expected success =%s' + % (str(shared_network.success), 'False')) + + self.update_provider_state("Enabled") + shared_network = self.verify_network_creation( + offering=shared_network.offering, testdata=shared_test_data) + self.assertTrue(shared_network.success, + 'Network found success = %s, expected success = %s' + % (str(shared_network.success), 'True')) + + self.validate_Network(shared_network.network, state="Allocated") + + shared_test_data2 = self.test_data["nuagevsp"]["network_all2"] + shared_network2 = self.verify_network_creation( + offering=shared_network.offering, + testdata=shared_test_data2) + self.assertTrue(shared_network2.success, + 'Network found success = %s, expected success = %s' + % (str(shared_network2.success), 'True')) + + self.validate_Network(shared_network2.network, state="Allocated") + + self.debug("+++Test user data & password reset functionality " + "using configdrive in an Isolated network without VR") + + self.update_password_enable_in_template(True) + public_ip_ranges = PublicIpRange.list(self.api_client) + for ip_range in public_ip_ranges: + if shared_network.network.id == ip_range.networkid \ + or shared_network2.network.id == ip_range.networkid: + self.enable_NuageUnderlayPublicIpRange(ip_range.id) + + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + + self.debug("+++Deploy of a VM on a shared network with multiple " + "ip ranges, all should have the same value for the " + "underlay flag.") + # Add subnet of different gateway + self.debug("+++ Adding subnet of different gateway") + + subnet = self.add_subnet_verify( + shared_network.network, + self.test_data["nuagevsp"]["publiciprange2"]) + tmp_test_data = copy.deepcopy( + self.test_data["virtual_machine"]) + + tmp_test_data["ipaddress"] = \ + self.test_data["nuagevsp"]["network_all"]["endip"] + + with self.assertRaises(Exception): + self.create_VM( + [shared_network.network], + testdata=tmp_test_data) + + self.debug("+++ In a shared network with multiple ip ranges, " + "userdata with config drive must be allowed.") + + self.enable_NuageUnderlayPublicIpRange(subnet.vlan.id) + + vm1 = self.create_VM( + [shared_network.network], + testdata=self.test_data["virtual_machine_userdata"], + keypair=self.keypair.name) + # Check VM + self.check_VM_state(vm1, state="Running") + # Verify shared Network and VM in VSD + self.verify_vsd_shared_network( + self.domain.id, + shared_network.network, + gateway=self.test_data["nuagevsp"]["network_all"]["gateway"]) + subnet_id = self.get_subnet_id( + shared_network.network.id, + self.test_data["nuagevsp"]["network_all"]["gateway"]) + self.verify_vsd_enterprise_vm( + self.domain.id, + shared_network.network, vm1, + sharedsubnetid=subnet_id) + + with self.assertRaises(Exception): + self.get_Router(shared_network) + self.debug("+++ Verified no VR is spawned for this network ") + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" 
% + (vm1.name, vm1.password)) + public_ip = PublicIPAddress({"ipaddress": vm1}) + self.verify_config_drive_content( + vm1, public_ip, + self.PasswordTest(vm1.password), + metadata=True, + userdata=self.test_data["virtual_machine_userdata"][ + "userdata"]) + expected_user_data = self.update_userdata(vm1, "helloworld vm1") + self.verify_config_drive_content( + vm1, public_ip, self.PasswordTest(vm1.password), + userdata=expected_user_data) + + self.debug("+++ Adding a non-default nic to the VM " + "making it a multi-nic VM...") + self.nic_operation_VM(vm1, shared_network2.network, + operation="add") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + + expected_user_data1 = self.update_userdata(vm1, + "hellomultinicvm1") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("+++ Updating non-default nic as the default nic " + "of the multi-nic VM...") + self.nic_operation_VM(vm1, + shared_network2.network, operation="update") + vm1.stop(self.api_client) + vm1.start(self.api_client) + + public_ip_2 = PublicIPAddress( + {"ipaddress": VirtualMachine.list(self.api_client, + id=vm1.id)[0].nic[1]}) + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(vm1.password), + userdata=expected_user_data1) + expected_user_data1 = self.update_userdata(vm1, + "hellomultinicvm1") + self.verify_config_drive_content(vm1, public_ip_2, + self.PasswordTest(True), + userdata=expected_user_data1) + + self.debug("+++ Updating the default nic of the multi-nic VM, " + "deleting the non-default nic...") + self.nic_operation_VM(vm1, + shared_network.network, operation="update") + vm1.stop(self.api_client) + vm1.start(self.api_client) + public_ip = PublicIPAddress({"ipaddress": vm1}) + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(False), + metadata=True, + userdata=expected_user_data1) + + self.nic_operation_VM(vm1, + shared_network2.network, operation="remove") + + multinicvm1 = self.create_VM([shared_network2.network, + shared_network.network]) + multinicvm1.password = multinicvm1.resetPassword(self.api_client) + self.debug("+++ MultiNICVM Password reset to - %s" + % multinicvm1.password) + self.debug("MultiNICVM - %s password - %s !" 
+ % (multinicvm1.name, multinicvm1.password)) + public_ip_3 = \ + PublicIPAddress( + {"ipaddress": VirtualMachine.list( + self.api_client, id=multinicvm1.id)[0].nic[0]}) + self.verify_config_drive_content( + multinicvm1, public_ip_3, + self.PasswordTest(multinicvm1.password), + metadata=True) + expected_user_data2 = self.update_userdata(multinicvm1, + "hello multinicvm1") + self.verify_config_drive_content(multinicvm1, public_ip_3, + self.PasswordTest(True), + userdata=expected_user_data2) + multinicvm1.delete(self.api_client, expunge=True) + + shared_network2.network.delete(self.api_client) + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" % + (vm1.name, vm1.password)) + public_ip = PublicIPAddress({"ipaddress": vm1}) + + self.debug("+++ Verifying userdata after rebootVM - %s" % vm1.name) + vm1.reboot(self.api_client) + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data1, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, "hello afterboot") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + sshkey=self.keypair.name) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password)) + + self.debug("+++ Migrating one of the VMs in the created Isolated " + "network to another host, if available...") + self.migrate_VM(vm1) + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata after migrating VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, + "hello after migrate") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password), + userdata=expected_user_data1) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password)) + + self.debug("+++ Verify userdata after stopstartVM - %s" % vm1.name) + vm1.stop(self.api_client) + vm1.start(self.api_client) + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("Updating userdata for VM - %s" % vm1.name) + expected_user_data1 = self.update_userdata(vm1, + "hello afterstopstart") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(False), + userdata=expected_user_data1) + self.debug("Resetting password for VM - %s" % vm1.name) + self.reset_password(vm1) + self.debug("SSHing into the VM for verifying its new password " + "after its password reset...") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(vm1.password)) + + self.debug("+++ Verify userdata after VM recover- %s" % vm1.name) + vm1.delete(self.api_client, expunge=False) + self.debug("Recover VM - %s" % vm1.name) + vm1.recover(self.api_client) + vm1.start(self.api_client) + 
self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + self.update_provider_state("Disabled") + expected_user_data1 = self.update_userdata(vm1, + "hello after recover") + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + + self.debug("+++ When template is not password enabled, " + "verify configdrive of VM - %s" % vm1.name) + vm1.delete(self.api_client, expunge=True) + self.update_provider_state("Enabled") + self.updateTemplate(False) + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_VM( + [shared_network.network], + testdata=self.test_data["virtual_machine_userdata"], + keypair=self.keypair.name) + expected_user_data1 = self.update_userdata(vm1, + "This is sample data") + public_ip = PublicIPAddress({"ipaddress": vm1}) + self.verify_config_drive_content(vm1, public_ip, + self.PasswordTest(False), + userdata=expected_user_data1, + metadata=True, + sshkey=self.keypair.name) + vm1.delete(self.api_client, expunge=True) + shared_network.network.delete(self.api_client) + + @attr(tags=["advanced", "nuagevsp", "endurance"], required_hardware="true") + def test_nuage_configdrive_endurance(self): + """ Verify endurance of ConfigDrive userdata update + """ + # Validate the following + # 1. When ConfigDrive is enabled as provider in zone + # Create an Isolated Network with Nuage VSP Isolated Network + # offering specifying ConfigDrive as serviceProvider for userdata, + # make sure no Dns is in the offering so no VR is spawned. + # 2. create a VM in the above isolated network + # 3. Wait until VM is running + # 4. Concurrently update the userdata for the VM + # 5. Wait util all updates are finished + # 6. Check userdata in VM + # 7. Delete all the created objects (cleanup). + for zone in self.zones: + self.debug("Zone - %s" % zone.name) + # Get Zone details + self.getZoneDetails(zone=zone) + # Configure VSD sessions + self.configureVSDSessions() + self.update_provider_state("Enabled") + create_network = self.verify_network_creation( + offering_name="isolated_configdrive_network_offering_" + "withoutdns", + gateway='10.1.1.1') + self.assertTrue(create_network.success, + 'Network found success = %s, expected success = %s' + % (str(create_network.success), 'True')) + + self.validate_Network(create_network.network, state="Allocated") + self.update_password_enable_in_template(True) + self.generate_ssh_keys() + self.debug("keypair name %s " % self.keypair.name) + vm1 = self.create_guest_vm(create_network.network, + keypair=self.keypair.name) + + with self.assertRaises(Exception): + self.get_Router(create_network) + self.debug("+++Verified no VR is spawned for this network ") + # We need to have the vm password + vm1.password = vm1.resetPassword(self.api_client) + self.debug("Password reset to - %s" % vm1.password) + self.debug("VM - %s password - %s !" 
% + (vm1.name, vm1.password)) + + public_ip_1 = self.acquire_PublicIPAddress(create_network.network) + self.create_and_verify_fip_and_fw(vm1, public_ip_1, + create_network.network) + + expected_user_data = self.test_data[ + "virtual_machine_userdata"]["userdata"] + ssh_client = self.verify_config_drive_content( + vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name) + + for i in range(0, 300): + self.verify_config_drive_content( + vm1, public_ip_1, + self.PasswordTest(vm1.password), + metadata=True, + userdata=expected_user_data, + sshkey=self.keypair.name, + ssh_client=ssh_client) + expected_user_data = \ + self.update_userdata(vm1, + 'This is sample data %s' % i) diff --git a/test/integration/plugins/nuagevsp/test_nuage_static_nat.py b/test/integration/plugins/nuagevsp/test_nuage_static_nat.py index 8601a179bf4..3aa36d1da1e 100644 --- a/test/integration/plugins/nuagevsp/test_nuage_static_nat.py +++ b/test/integration/plugins/nuagevsp/test_nuage_static_nat.py @@ -23,9 +23,7 @@ from marvin.lib.base import (Account, PublicIpRange, Network, VirtualMachine) -from marvin.cloudstackAPI import (enableNuageUnderlayVlanIpRange, - disableNuageUnderlayVlanIpRange, - listNuageUnderlayVlanIpRanges) +from marvin.lib.common import list_virtual_machines from marvin.lib.common import list_virtual_machines # Import System Modules @@ -54,31 +52,6 @@ class TestNuageStaticNat(nuageTestCase): self.cleanup = [self.account] return - # enable_NuageUnderlayPublicIpRange - Enables/configures underlay - # networking for the given public IP range in Nuage VSP - def enable_NuageUnderlayPublicIpRange(self, public_ip_range): - cmd = enableNuageUnderlayVlanIpRange.\ - enableNuageUnderlayVlanIpRangeCmd() - cmd.id = public_ip_range.vlan.id - self.api_client.enableNuageUnderlayVlanIpRange(cmd) - - # disable_NuageUnderlayPublicIpRange - Disables/de-configures underlay - # networking for the given public IP range in Nuage VSP - def disable_NuageUnderlayPublicIpRange(self, public_ip_range): - cmd = disableNuageUnderlayVlanIpRange.\ - disableNuageUnderlayVlanIpRangeCmd() - cmd.id = public_ip_range.vlan.id - self.api_client.enableNuageUnderlayVlanIpRange(cmd) - - # list_NuageUnderlayPublicIpRanges - Lists underlay networking - # enabled/configured public IP ranges in Nuage VSP - def list_NuageUnderlayPublicIpRanges(self, public_ip_range=None): - cmd = listNuageUnderlayVlanIpRanges.listNuageUnderlayVlanIpRangesCmd() - if public_ip_range: - cmd.id = public_ip_range.vlan.id - cmd.underlay = True - return self.api_client.listNuageUnderlayVlanIpRanges(cmd) - # create_PublicIpRange - Creates public IP range def create_PublicIpRange(self): self.debug("Creating public IP range") @@ -115,20 +88,21 @@ class TestNuageStaticNat(nuageTestCase): # validate_NuageUnderlayPublicIpRange - Validates Nuage underlay enabled # public IP range creation and state def validate_NuageUnderlayPublicIpRange(self, public_ip_range): - nuage_underlay_public_ip_ranges = \ + self.nuage_underlay_public_ip_ranges = \ self.list_NuageUnderlayPublicIpRanges(public_ip_range) - self.assertEqual(isinstance(nuage_underlay_public_ip_ranges, list), + self.assertEqual(isinstance(self.nuage_underlay_public_ip_ranges, + list), True, "List Nuage Underlay Public IP Range should return " "a valid list" ) self.assertEqual(public_ip_range.vlan.startip, - nuage_underlay_public_ip_ranges[0].startip, + self.nuage_underlay_public_ip_ranges[0].startip, "Start IP of the public IP range should match with " "the 
returned list data" ) self.assertEqual(public_ip_range.vlan.endip, - nuage_underlay_public_ip_ranges[0].endip, + self.nuage_underlay_public_ip_ranges[0].endip, "End IP of the public IP range should match with the " "returned list data" ) @@ -358,7 +332,7 @@ class TestNuageStaticNat(nuageTestCase): self.debug("Enabling Nuage underlay capability (underlay networking) " "for the created public IP range...") - self.enable_NuageUnderlayPublicIpRange(public_ip_range) + self.enable_NuageUnderlayPublicIpRange(public_ip_range.vlan.id) self.validate_NuageUnderlayPublicIpRange(public_ip_range) self.debug("Nuage underlay capability (underlay networking) for the " "created public IP range is successfully enabled") @@ -532,6 +506,7 @@ class TestNuageStaticNat(nuageTestCase): self.debug("Acquired public IP in the created Isolated network " "successfully released in CloudStack") self.delete_VM(vm_1) + # Bug CLOUDSTACK-9398 """ self.debug("Creating a persistent Isolated network with Static NAT " diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 9fb8e8a527e..097b890f64d 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -49,6 +49,27 @@ test_data = { "forvirtualnetwork": "true", "vlan": "300" }, + "publiciprange1": { + "gateway": "10.200.100.1", + "netmask": "255.255.255.0", + "startip": "10.200.100.101", + "endip": "10.200.100.105", + "forvirtualnetwork": "false" + }, + "publiciprange2": { + "gateway": "10.219.1.1", + "netmask": "255.255.255.0", + "startip": "10.219.1.2", + "endip": "10.219.1.5", + "forvirtualnetwork": "false" + }, + "publiciprange3": { + "gateway": "10.200.100.1", + "netmask": "255.255.255.0", + "startip": "10.200.100.2", + "endip": "10.200.100.20", + "forvirtualnetwork": "false" + }, "private_gateway": { "ipaddress": "172.16.1.2", "gateway": "172.16.1.1", @@ -473,6 +494,20 @@ test_data = { "SecurityGroup": "SecurityGroupProvider" } }, + "shared_network_config_drive_offering": { + "name": 'shared_network_config_drive_offering', + "displaytext": 'shared_network_config_drive_offering', + "guestiptype": 'shared', + "supportedservices": 'Dhcp,UserData', + "traffictype": 'GUEST', + "specifyVlan": "True", + "specifyIpRanges": "True", + "availability": 'Optional', + "serviceProviderList": { + "Dhcp": "VirtualRouter", + "UserData": 'ConfigDrive' + } + }, "shared_network_sg": { "name": "Shared-Network-SG-Test", "displaytext": "Shared-Network_SG-Test", @@ -1744,8 +1779,8 @@ test_data = { }, "test_34_DeployVM_in_SecondSGNetwork": { "zone": "advsg", - "config": "D:\ACS-Repo\setup\dev\\advancedsg.cfg", #Absolute path to cfg file - #For sample configuration please refer to /setup/dev/advancedsg.cfg + "config": "D:\ACS-Repo\setup\dev\\advancedsg.cfg", # Absolute path to cfg file + # For sample configuration please refer to /setup/dev/advancedsg.cfg "template": "CentOS 5.3(64-bit) no GUI (Simulator)", "dbSvr": { "dbSvr": "10.146.0.133", @@ -2277,10 +2312,10 @@ test_data = { "network_all": { "name": "SharedNetwork-All-nuage", "displaytext": "SharedNetwork-All-nuage", - "gateway": "10.223.1.1", + "gateway": "10.200.100.1", "netmask": "255.255.255.0", - "startip": "10.223.1.21", - "endip": "10.223.1.100", + "startip": "10.200.100.21", + "endip": "10.200.100.100", "acltype": "Domain" }, "network_domain_with_no_subdomain_access": { @@ -2313,10 +2348,10 @@ test_data = { "acltype": "Account" }, "publiciprange1": { - "gateway": "10.223.1.1", + "gateway": "10.200.100.1", "netmask": "255.255.255.0", - "startip": 
"10.223.1.101", - "endip": "10.223.1.105", + "startip": "10.200.100.101", + "endip": "10.200.100.105", "forvirtualnetwork": "false" }, "publiciprange2": { @@ -2327,10 +2362,10 @@ test_data = { "forvirtualnetwork": "false" }, "publiciprange3": { - "gateway": "10.223.1.1", + "gateway": "10.200.100.1", "netmask": "255.255.255.0", - "startip": "10.223.1.2", - "endip": "10.223.1.20", + "startip": "10.200.100.2", + "endip": "10.200.100.20", "forvirtualnetwork": "false" } } diff --git a/ui/scripts/configuration.js b/ui/scripts/configuration.js index 4de24626cc6..fdc9e492da7 100644 --- a/ui/scripts/configuration.js +++ b/ui/scripts/configuration.js @@ -2422,7 +2422,7 @@ //p.s. Netscaler is supported in both vpc and non-vpc if ($useVpc.is(':visible') && $useVpcCb.is(':checked')) { //*** vpc *** $optionsOfProviders.each(function(index) { - if ($(this).val() == 'InternalLbVm' || $(this).val() == 'VpcVirtualRouter' || $(this).val() == 'Netscaler' || $(this).val() == 'NuageVsp' || $(this).val() == 'NuageVspVpc' || $(this).val() == 'BigSwitchBcf') { + if ($(this).val() == 'InternalLbVm' || $(this).val() == 'VpcVirtualRouter' || $(this).val() == 'Netscaler' || $(this).val() == 'NuageVsp' || $(this).val() == 'NuageVspVpc' || $(this).val() == 'BigSwitchBcf' || $(this).val() == 'ConfigDrive') { $(this).attr('disabled', false); } else { $(this).attr('disabled', true); diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 38f6074a3d9..ad3b7cd0690 100755 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -7640,6 +7640,120 @@ } } } + }, + + ConfigDrive: { + id: "ConfigDrive", + label: "ConfigDrive", + isMaximized: true, + type: 'detailView', + fields: { + name: { + label: 'label.name' + }, + state: { + label: 'label.status', + indicator: { + 'Enabled': 'on' + } + } + }, + tabs: { + network: { + title: 'label.network', + fields: [{ + name: { + label: 'label.name' + } + }, { + state: { + label: 'label.state' + }, + supportedServices: { + label: 'label.supported.services' + }, + id: { + label: 'label.id' + }, + physicalnetworkid: { + label: 'label.physical.network.ID' + } + }], + dataProvider: function(args) { + refreshNspData("ConfigDrive"); + args.response.success({ + actionFilter: ovsProviderActionFilter, + data: $.extend(nspMap["ConfigDrive"], { + supportedServices: nspMap["ConfigDrive"] == undefined? 
"": nspMap["ConfigDrive"].servicelist.join(', ') + }) + }); + } + } + }, + actions: { + enable: { + label: 'label.enable.provider', + action: function(args) { + $.ajax({ + url: createURL("updateNetworkServiceProvider&id=" + nspMap["ConfigDrive"].id + "&state=Enabled"), + dataType: "json", + success: function(json) { + var jid = json.updatenetworkserviceproviderresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function(json) { + $(window).trigger('cloudStack.fullRefresh'); + } + } + }); + } + }); + }, + messages: { + confirm: function(args) { + return 'message.confirm.enable.provider'; + }, + notification: function() { + return 'label.enable.provider'; + } + }, + notification: { + poll: pollAsyncJobResult + } + }, + disable: { + label: 'label.disable.provider', + action: function(args) { + $.ajax({ + url: createURL("updateNetworkServiceProvider&id=" + nspMap["ConfigDrive"].id + "&state=Disabled"), + dataType: "json", + success: function(json) { + var jid = json.updatenetworkserviceproviderresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function(json) { + $(window).trigger('cloudStack.fullRefresh'); + } + } + }); + } + }); + }, + messages: { + confirm: function(args) { + return 'message.confirm.disable.provider'; + }, + notification: function() { + return 'label.disable.provider'; + } + }, + notification: { + poll: pollAsyncJobResult + } + } + } } } } @@ -22190,6 +22304,9 @@ case "GloboDns": nspMap["GloboDns"] = items[i]; break; + case "ConfigDrive": + nspMap["ConfigDrive"] = items[i]; + break; } } } @@ -22287,6 +22404,11 @@ name: 'GloboDNS', state: nspMap.GloboDns ? nspMap.GloboDns.state : 'Disabled' }); + nspHardcodingArray.push({ + id: "ConfigDrive", + name: "ConfigDrive", + state: nspMap.ConfigDrive ? nspMap.ConfigDrive.state : 'Disabled' + }); //CLOUDSTACK-6840: OVS refers to SDN provider. However, we are not supporting SDN in this release. /* diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index a4f26db4293..963813bae06 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -116,6 +116,10 @@ import com.cloud.utils.script.Script; public class VirtualMachineMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class); private static final ExecutorService MonitorServiceExecutor = Executors.newCachedThreadPool(new NamedThreadFactory("VM-Question-Monitor")); + + public static final String ANSWER_YES = "0"; + public static final String ANSWER_NO = "1"; + private ManagedObjectReference _vmEnvironmentBrowser = null; public VirtualMachineMO(VmwareContext context, ManagedObjectReference morVm) { @@ -1402,6 +1406,12 @@ public class VirtualMachineMO extends BaseMO { // isoDatastorePath: [datastore name] isoFilePath public void attachIso(String isoDatastorePath, ManagedObjectReference morDs, boolean connect, boolean connectAtBoot) throws Exception { + attachIso(isoDatastorePath, morDs, connect, connectAtBoot, null); + } + + // isoDatastorePath: [datastore name] isoFilePath + public void attachIso(String isoDatastorePath, ManagedObjectReference morDs, + boolean connect, boolean connectAtBoot, Integer key) throws Exception { if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - attachIso(). 
target MOR: " + _mor.getValue() + ", isoDatastorePath: " + isoDatastorePath + ", datastore: " + @@ -1411,7 +1421,12 @@ public class VirtualMachineMO extends BaseMO { assert (morDs != null); boolean newCdRom = false; - VirtualCdrom cdRom = (VirtualCdrom)getIsoDevice(); + VirtualCdrom cdRom; + if (key == null) { + cdRom = (VirtualCdrom) getIsoDevice(); + } else { + cdRom = (VirtualCdrom) getIsoDevice(key); + } if (cdRom == null) { newCdRom = true; cdRom = new VirtualCdrom(); @@ -1461,11 +1476,15 @@ public class VirtualMachineMO extends BaseMO { s_logger.trace("vCenter API trace - detachIso() done(successfully)"); } - public void detachIso(String isoDatastorePath) throws Exception { + public int detachIso(String isoDatastorePath) throws Exception { + return detachIso(isoDatastorePath, false); + } + + public int detachIso(String isoDatastorePath, final boolean force) throws Exception { if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.getValue() + ", isoDatastorePath: " + isoDatastorePath); - VirtualDevice device = getIsoDevice(); + VirtualDevice device = getIsoDevice(isoDatastorePath); if (device == null) { if (s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(failed)"); @@ -1514,7 +1533,7 @@ public class VirtualMachineMO extends BaseMO { if ("msg.cdromdisconnect.locked".equalsIgnoreCase(msg.getId())) { s_logger.info("Found that VM has a pending question that we need to answer programmatically, question id: " + msg.getId() + ", for safe operation we will automatically decline it"); - vmMo.answerVM(question.getId(), "1"); + vmMo.answerVM(question.getId(), force ? ANSWER_YES : ANSWER_NO); break; } } @@ -1531,7 +1550,7 @@ public class VirtualMachineMO extends BaseMO { if ("msg.cdromdisconnect.locked".equalsIgnoreCase(msgId)) { s_logger.info("Found that VM has a pending question that we need to answer programmatically, question id: " + question.getId() + ". Message id : " + msgId + ". Message text : " + msgText + ", for safe operation we will automatically decline it."); - vmMo.answerVM(question.getId(), "1"); + vmMo.answerVM(question.getId(), force ? ANSWER_YES : ANSWER_NO); } } @@ -1570,6 +1589,7 @@ public class VirtualMachineMO extends BaseMO { flags[0] = true; future.cancel(true); } + return device.getKey(); } public Pair getVmdkFileInfo(String vmdkDatastorePath) throws Exception { @@ -2826,6 +2846,32 @@ public class VirtualMachineMO extends BaseMO { return null; } + public VirtualDevice getIsoDevice(int key) throws Exception { + List devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); + if (devices != null && devices.size() > 0) { + for (VirtualDevice device : devices) { + if (device instanceof VirtualCdrom && device.getKey() == key) { + return device; + } + } + } + return null; + } + + public VirtualDevice getIsoDevice(String filename) throws Exception { + List devices = (List)_context.getVimClient(). 
@@ -2826,6 +2846,32 @@ public class VirtualMachineMO extends BaseMO {
         return null;
     }

+    public VirtualDevice getIsoDevice(int key) throws Exception {
+        List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device");
+        if (devices != null && devices.size() > 0) {
+            for (VirtualDevice device : devices) {
+                if (device instanceof VirtualCdrom && device.getKey() == key) {
+                    return device;
+                }
+            }
+        }
+        return null;
+    }
+
+    public VirtualDevice getIsoDevice(String filename) throws Exception {
+        List<VirtualDevice> devices = (List<VirtualDevice>)_context.getVimClient().
+                getDynamicProperty(_mor, "config.hardware.device");
+        if(devices != null && devices.size() > 0) {
+            for(VirtualDevice device : devices) {
+                if(device instanceof VirtualCdrom && device.getBacking() instanceof VirtualCdromIsoBackingInfo &&
+                        ((VirtualCdromIsoBackingInfo)device.getBacking()).getFileName().equals(filename)) {
+                    return device;
+                }
+            }
+        }
+        return null;
+    }
+
     public int getNextDeviceNumber(int controllerKey) throws Exception {
         List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device");
@@ -2982,7 +3028,7 @@ public class VirtualMachineMO extends BaseMO {
                         if ("msg.cdromdisconnect.locked".equalsIgnoreCase(msg.getId())) {
                             s_logger.info("Found that VM has a pending question that we need to answer programmatically, question id: " + msg.getId() + ", for safe operation we will automatically decline it");
-                            vmMo.answerVM(question.getId(), "1");
+                            vmMo.answerVM(question.getId(), ANSWER_NO);
                             break;
                         }
                     }
@@ -2999,7 +3045,7 @@ public class VirtualMachineMO extends BaseMO {
                     if ("msg.cdromdisconnect.locked".equalsIgnoreCase(msgId)) {
                         s_logger.info("Found that VM has a pending question that we need to answer programmatically, question id: " + question.getId() + ". Message id : " + msgId + ". Message text : " + msgText + ", for safe operation we will automatically decline it.");
-                        vmMo.answerVM(question.getId(), "1");
+                        vmMo.answerVM(question.getId(), ANSWER_NO);
                     }
                 }
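
The new getIsoDevice(int key) and getIsoDevice(String filename) overloads, together with the attachIso() variant that accepts a device key, let callers pin ISO operations to one specific CD-ROM device instead of whichever CD-ROM getIsoDevice() happens to return first. A hedged sketch of how they might be combined; the helper name and parameters are illustrative only, not part of the patch:

import com.cloud.hypervisor.vmware.mo.VirtualMachineMO;
import com.vmware.vim25.ManagedObjectReference;
import com.vmware.vim25.VirtualDevice;

public class AttachIsoByKeySketch {
    // Attach an ISO, find the CD-ROM backing it by file name, and re-use that
    // exact device (by key) when swapping in a second ISO later.
    static void swapIso(VirtualMachineMO vmMo, ManagedObjectReference morDs,
                        String firstIsoPath, String secondIsoPath) throws Exception {
        // key == null behaves exactly like the original four-argument attachIso().
        vmMo.attachIso(firstIsoPath, morDs, true, false, null);

        // Locate the CD-ROM by its backing file name ("[datastore] path/file.iso").
        VirtualDevice cdrom = vmMo.getIsoDevice(firstIsoPath);
        if (cdrom == null) {
            return;
        }

        // Target the same device explicitly by its key for the second attach.
        vmMo.attachIso(secondIsoPath, morDs, true, false, cdrom.getKey());
    }
}
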