configdrive: fix some failures in tests/component/test_configdrive.py (#5144)
* server: fix failure to apply userdata when enabling static NAT
* server: fix VMs that cannot be expunged because applyUserdata fails
* configdrive: fix the ISO not being recognized after plugging a new NIC
* configdrive: detach and re-attach the config drive ISO, since it is regenerated when a new NIC is plugged or the VM is migrated
* configdrive test: (1) the password file does not exist in the recreated ISO; (2) the VM hostname should change after migration
* configdrive: use a CentOS 5.5 template with SSH-key and config drive support
* configdrive: the disk label for the config drive ISO is 'config-2'
* configdrive: copy the config drive ISO into place; move other templates/volumes/ISOs
* configdrive: use public-keys.txt
* configdrive test: fix (1) update_template; (2) SSH into the VM by keypair
This commit is contained in:
parent 0406e27af3
commit cf0f1feb5e
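
Two of the bullets above pin down guest-visible behavior: the config drive ISO now carries the volume label 'config-2', and the SSH public key is published at cloudstack/metadata/public-keys.txt inside it. A minimal sketch of consuming that from a Linux guest follows; apart from the label and key path, which come from this commit, the mount logic and names are assumptions.

    # Sketch: read the SSH public key from the config drive in a Linux guest.
    # Assumes root privileges (for mount); 'config-2' and the key path come
    # from this commit, everything else is illustrative.
    import os
    import subprocess
    import tempfile

    def read_public_keys():
        disk = "/dev/disk/by-label/config-2"
        if not os.path.exists(disk):
            return None
        mountdir = tempfile.mkdtemp()
        subprocess.run(["mount", "-o", "ro", disk, mountdir], check=True)
        try:
            keyfile = os.path.join(mountdir, "cloudstack/metadata/public-keys.txt")
            with open(keyfile) as f:
                return f.read()
        finally:
            subprocess.run(["umount", mountdir], check=True)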
@@ -303,6 +303,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
     protected String _agentHooksVmOnStopScript = "libvirt-vm-state-change.groovy";
     protected String _agentHooksVmOnStopMethod = "onStop";
 
+    private static final String CONFIG_DRIVE_ISO_DISK_LABEL = "hdd";
+    private static final int CONFIG_DRIVE_ISO_DEVICE_ID = 4;
 
     protected File _qemuSocketsPath;
     private final String _qemuGuestAgentSocketName = "org.qemu.guest_agent.0";
@@ -2756,6 +2758,32 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
         return _storagePoolMgr;
     }
 
+    public void detachAndAttachConfigDriveISO(final Connect conn, final String vmName) {
+        // detach and re-attach the config drive ISO
+        List<DiskDef> disks = getDisks(conn, vmName);
+        DiskDef configdrive = null;
+        for (DiskDef disk : disks) {
+            if (disk.getDeviceType() == DiskDef.DeviceType.CDROM && CONFIG_DRIVE_ISO_DISK_LABEL.equals(disk.getDiskLabel())) {
+                configdrive = disk;
+            }
+        }
+        if (configdrive != null) {
+            try {
+                String result = attachOrDetachISO(conn, vmName, configdrive.getDiskPath(), false, CONFIG_DRIVE_ISO_DEVICE_ID);
+                if (result != null) {
+                    s_logger.warn("Detach ConfigDrive ISO with result: " + result);
+                }
+                result = attachOrDetachISO(conn, vmName, configdrive.getDiskPath(), true, CONFIG_DRIVE_ISO_DEVICE_ID);
+                if (result != null) {
+                    s_logger.warn("Attach ConfigDrive ISO with result: " + result);
+                }
+            } catch (final LibvirtException | InternalErrorException | URISyntaxException e) {
+                final String msg = "Detach and attach ConfigDrive ISO failed due to " + e.toString();
+                s_logger.warn(msg, e);
+            }
+        }
+    }
+
     public synchronized String attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach, final Integer diskSeq) throws LibvirtException, URISyntaxException,
             InternalErrorException {
         final DiskDef iso = new DiskDef();
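
This new method is the core of the fix: after a NIC hot-plug or a live migration the ISO file is regenerated on disk, but the emulated CDROM keeps serving the old contents until it is ejected and re-inserted. As a rough standalone illustration of the same eject/re-insert idea, here is a sketch using the libvirt Python bindings; the device XML, connection URI, and all names are assumptions, not the commit's code.

    # Sketch: force a VM to pick up a regenerated config-drive ISO by
    # detaching and re-attaching the CDROM device. Illustrative only.
    import libvirt

    CONFIG_DRIVE_XML = """
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='{iso_path}'/>
      <target dev='hdd' bus='ide'/>
      <readonly/>
    </disk>
    """

    def reinsert_config_drive(vm_name, iso_path):
        conn = libvirt.open("qemu:///system")  # assumed local qemu URI
        try:
            dom = conn.lookupByName(vm_name)
            xml = CONFIG_DRIVE_XML.format(iso_path=iso_path)
            dom.detachDevice(xml)  # eject the stale ISO
            dom.attachDevice(xml)  # re-insert so the guest sees the new contents
        finally:
            conn.close()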
@@ -76,6 +76,7 @@ import com.cloud.resource.CommandWrapper;
 import com.cloud.resource.ResourceWrapper;
 import com.cloud.utils.Ternary;
 import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
 import com.google.common.base.Strings;
 
 @ResourceWrapper(handles = MigrateCommand.class)
@@ -180,6 +181,10 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
 
             dconn = libvirtUtilitiesHelper.retrieveQemuConnection(destinationUri);
 
+            if (to.getType() == VirtualMachine.Type.User) {
+                libvirtComputingResource.detachAndAttachConfigDriveISO(conn, vmName);
+            }
+
             //run migration in thread so we can monitor it
             s_logger.info("Live migration of instance " + vmName + " initiated to destination host: " + dconn.getURI());
             final ExecutorService executor = Executors.newFixedThreadPool(1);
@@ -72,6 +72,10 @@ public final class LibvirtPlugNicCommandWrapper extends CommandWrapper<PlugNicCo
                 libvirtComputingResource.applyDefaultNetworkRulesOnNic(conn, vmName, vmId, nic, false, false);
             }
 
+            if (vmType == VirtualMachine.Type.User) {
+                libvirtComputingResource.detachAndAttachConfigDriveISO(conn, vmName);
+            }
+
             return new PlugNicAnswer(command, true, "success");
         } catch (final LibvirtException e) {
             final String msg = " Plug Nic failed due to " + e.toString();
@@ -139,6 +139,16 @@ create_from_file() {
 
 }
 
+copy_from_file() {
+  local tmpltfs=$1
+  local tmpltimg=$2
+  local tmpltname=$3
+
+  [ -n "$verbose" ] && echo "Copying to $tmpltfs/$tmpltname...could take a while" >&2
+  cp -rf $tmpltimg /$tmpltfs/$tmpltname
+  rm -rf $tmpltimg
+}
+
 tflag=
 nflag=
 fflag=
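
copy_from_file() deliberately copies instead of renaming: per the commit message, only the config drive ISO needs this treatment, while ordinary templates/volumes/ISOs keep the move behavior of create_from_file(). A hedged Python rendering of the same semantics, with illustrative paths:

    # Sketch of copy_from_file(): copy the staged image into the template
    # filesystem, then drop the staging file. Paths are illustrative.
    import os
    import shutil

    def copy_from_file(tmpltfs, tmpltimg, tmpltname, verbose=False):
        dest = os.path.join("/", tmpltfs, tmpltname)
        if verbose:
            print("Copying to %s...could take a while" % dest)
        shutil.copyfile(tmpltimg, dest)  # copy, unlike the move-based create_from_file
        os.remove(tmpltimg)              # remove the staging copy afterwards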
@@ -228,7 +238,11 @@ fi
 
 imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}')
 
-create_from_file $tmpltfs $tmpltimg2 $tmpltname
+if [ "$descr" = "configDrive" ] && [ "$Sflag" = "" ]; then
+  copy_from_file $tmpltfs $tmpltimg2 $tmpltname
+else
+  create_from_file $tmpltfs $tmpltimg2 $tmpltname
+fi
 
 touch /$tmpltfs/volume.properties
 rollback_if_needed $tmpltfs $? "Failed to create volume.properties file"
@@ -630,20 +630,23 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules
     }
 
     protected void applyUserData(long vmId, Network network, Nic guestNic) throws ResourceUnavailableException {
         UserVmVO vm = _vmDao.findById(vmId);
-        VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId());
-        NicProfile nicProfile = new NicProfile(guestNic, network, null, null, null,
-                _networkModel.isSecurityGroupSupportedInNetwork(network),
-                _networkModel.getNetworkTag(template.getHypervisorType(), network));
-        VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm);
         UserDataServiceProvider element = _networkModel.getUserDataUpdateProvider(network);
         if (element == null) {
             s_logger.error("Can't find network element for " + Service.UserData.getName() + " provider needed for UserData update");
         } else {
-            boolean result = element.saveUserData(network, nicProfile, vmProfile);
-            if (!result) {
+            try {
+                VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId());
+                NicProfile nicProfile = new NicProfile(guestNic, network, null, null, null,
+                        _networkModel.isSecurityGroupSupportedInNetwork(network),
+                        _networkModel.getNetworkTag(template.getHypervisorType(), network));
+                VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm);
+                if (!element.saveUserData(network, nicProfile, vmProfile)) {
                     s_logger.error("Failed to update userdata for vm " + vm + " and nic " + guestNic);
                 }
+            } catch (Exception e) {
+                s_logger.error("Failed to update userdata for vm " + vm + " and nic " + guestNic + " due to " + e.getMessage(), e);
+            }
         }
     }
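
The reshuffle above addresses the first two bullets: the provider null check now runs before any template or NIC-profile lookups, and saveUserData() is wrapped in try/catch so a userdata failure can no longer abort static NAT setup or VM expunge. A minimal sketch of the resulting control flow (all names are illustrative, not CloudStack APIs):

    # Sketch of the hardened flow: log a missing provider, and contain any
    # exception from the userdata save so callers are not aborted.
    import logging

    log = logging.getLogger(__name__)

    def apply_user_data(vm, network, nic, provider):
        if provider is None:
            log.error("Can't find a UserData provider for network %s", network)
            return
        try:
            if not provider.save_user_data(network, nic, vm):
                log.error("Failed to update userdata for vm %s and nic %s", vm, nic)
        except Exception as e:
            log.error("Failed to update userdata for vm %s and nic %s: %s", vm, nic, e)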
@@ -31,7 +31,7 @@ mountdir=$(mktemp -d)
 # If the label name is other than config-2, please change the below line as required
 DefaultDisk=/dev/disk/by-label/config-2
 
-SSHKey_File=$mountdir/cloudstack/metadata/public_keys.txt
+SSHKey_File=$mountdir/cloudstack/metadata/public-keys.txt
 keys_received=0
 
 function prepare_mount
@@ -44,7 +44,7 @@ function prepare_mount
     if [ -e $DefaultDisk ]; then
         Disk=$DefaultDisk
     else
-        BLOCK_DEVICE=$(blkid -t LABEL='config' /dev/hd? /dev/sd? /dev/xvd? /dev/vd? -o device)
+        BLOCK_DEVICE=$(blkid -t LABEL='config-2' /dev/hd? /dev/sd? /dev/xvd? /dev/vd? -o device)
         if [ -n "$BLOCK_DEVICE" ]; then
             Disk=$BLOCK_DEVICE
         else
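
Both guest-script hunks replace the stale 'config' label with 'config-2', the label the config drive ISO is actually built with. The same two-step lookup, sketched in Python for a Linux guest with blkid on the PATH:

    # Sketch: locate the config drive first via /dev/disk/by-label, then by
    # probing common block devices with blkid. Illustrative only.
    import os
    import subprocess

    def find_config_drive():
        by_label = "/dev/disk/by-label/config-2"
        if os.path.exists(by_label):
            return by_label
        out = subprocess.run(
            ["blkid", "-t", "LABEL=config-2", "-o", "device"],
            capture_output=True, text=True)
        return out.stdout.split()[0] if out.stdout.strip() else None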
@@ -53,7 +53,8 @@ from marvin.lib.base import (Account,
                              Hypervisor, Template)
 from marvin.lib.common import (get_domain,
                                get_template,
-                               get_zone, get_test_template,
+                               get_zone,
+                               get_test_template,
                                is_config_suitable)
 from marvin.lib.utils import random_gen
 # Import System Modules
@@ -104,12 +105,12 @@ class Services:
         self.services = {
             "test_templates": {
                 "kvm": {
-                    "name": "Centos-5.5-configdrive",
-                    "displaytext": "ConfigDrive enabled CentOS",
+                    "name": "Centos-5.5-sshkey-and-configdrive",
+                    "displaytext": "SSHkey and ConfigDrive enabled CentOS",
                     "format": "qcow2",
                     "hypervisor": "kvm",
                     "ostype": "CentOS 5.5 (64-bit)",
-                    "url": "http://people.apache.org/~fmaximus/centos55-extended.qcow2.bz2",
+                    "url": "http://people.apache.org/~weizhou/centos55-sshkey-configdrive.qcow2.bz2",
                     "requireshvm": "False",
                     "ispublic": "True",
                     "isextractable": "True"
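
For context, the test suite registers this dict through Marvin's Template API; a rough sketch follows, where api_client and zone come from the test harness and are assumptions here.

    # Sketch: registering the sshkey+configdrive template via Marvin.
    from marvin.lib.base import Template

    def register_test_template(api_client, services, zone):
        # 'services' is the dict shown above; api_client/zone come from the harness.
        return Template.register(
            api_client,
            services["test_templates"]["kvm"],
            zoneid=zone.id,
            hypervisor="kvm",
        )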
@@ -653,8 +654,7 @@ class ConfigDriveUtils:
         orig_state = self.template.passwordenabled
         self.debug("Updating guest VM template to password enabled "
                    "from %s to %s" % (orig_state, new_state))
-        if orig_state != new_state:
-            self.update_template(passwordenabled=new_state)
+        self.update_template(passwordenabled=new_state)
         self.assertEqual(self.template.passwordenabled, new_state,
                          "Guest VM template is not password enabled")
         return orig_state
@@ -850,7 +850,7 @@ class ConfigDriveUtils:
 
         self.debug("SSHing into the VM %s" % vm.name)
 
-        ssh = self.ssh_into_VM(vm, public_ip, reconnect=reconnect)
+        ssh = self.ssh_into_VM(vm, public_ip, reconnect=reconnect, keypair=vm.key_pair)
 
         d = {x.name: x for x in ssh.logger.handlers}
         ssh.logger.handlers = list(d.values())
@@ -974,6 +974,7 @@ class ConfigDriveUtils:
         vm.add_nic(self.api_client, network.id)
         self.debug("Added NIC in VM with ID - %s and network with ID - %s"
                    % (vm.id, network.id))
+        vm.password_test = ConfigDriveUtils.PasswordTest(expect_pw=False)
 
     def unplug_nic(self, vm, network):
         nic = self._find_nic(vm, network)
@@ -1530,12 +1531,14 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         self.debug("SSH into VM with ID - %s on public IP address - %s" %
                    (vm.id, public_ip.ipaddress.ipaddress))
         tries = 1 if negative_test else 3
+        private_key_file_location = keypair.private_key_file if keypair else None
 
         @retry(tries=tries)
         def retry_ssh():
             ssh_client = vm.get_ssh_client(
                 ipaddress=public_ip.ipaddress.ipaddress,
                 reconnect=reconnect,
+                keyPairFileLocation=private_key_file_location,
                 retries=3 if negative_test else 30
             )
             self.debug("Successful to SSH into VM with ID - %s on "
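
The two added lines thread the keypair down to the SSH client, so key-based logins get the same retry behavior password logins already had. A standalone illustration (paramiko stands in for Marvin's client; every name here is an assumption):

    # Sketch: retry an SSH connection using a private key file.
    import time
    import paramiko

    def ssh_with_keypair(host, user, key_file, tries=3, delay=10):
        for attempt in range(1, tries + 1):
            try:
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                client.connect(host, username=user, key_filename=key_file)
                return client  # connected; caller closes it
            except Exception:
                if attempt == tries:
                    raise
                time.sleep(delay)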
@@ -1702,6 +1705,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
                    "%s to Host: %s" % (vm.id, host.id))
         try:
             vm.migrate(self.api_client, hostid=host.id)
+            vm.password_test = ConfigDriveUtils.PasswordTest(expect_pw=False)
         except Exception as e:
             self.fail("Failed to migrate instance, %s" % e)
         self.debug("Migrated VM with ID: "
@@ -1917,7 +1921,8 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         # =====================================================================
         self.debug("+++ Scenario: "
                    "update userdata and reset password after migrate")
-        self.migrate_VM(vm1)
+        host = self.migrate_VM(vm1)
+        vm1.hostname = host.name
         self.then_config_drive_is_as_expected(vm1, public_ip_1, metadata=True)
         self.debug("Updating userdata after migrating VM - %s" % vm1.name)
         self.update_and_validate_userdata(vm1, "hello after migrate",
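
migrate_VM() now returns the destination host so the test can refresh the hostname it expects in the regenerated config drive metadata, matching the 'vm hostname should be changed after migration' bullet. In outline (illustrative names, mirroring the test code above):

    # Sketch of the pattern both migration scenarios now follow.
    def migrate_and_check(test, vm, public_ip):
        host = test.migrate_VM(vm)   # now returns the destination host
        vm.hostname = host.name      # metadata should report the new host
        test.then_config_drive_is_as_expected(vm, public_ip, metadata=True)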
@@ -1955,7 +1960,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         # =====================================================================
         self.debug("+++ Scenario: "
                    "Update Userdata on a VM that is not password enabled")
-        self.update_template(passwordenabled=False)
+        self.given_template_password_enabled_is(False)
         vm1 = self.when_I_deploy_a_vm_with_keypair_in(network1)
 
         public_ip_1 = \
@@ -2112,7 +2117,8 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         # =====================================================================
         self.debug("+++ Scenario: "
                    "update userdata and reset password after migrate")
-        self.migrate_VM(vm)
+        host = self.migrate_VM(vm)
+        vm.hostname = host.name
         self.then_config_drive_is_as_expected(vm, public_ip_1, metadata=True)
         self.update_and_validate_userdata(vm, "hello migrate", public_ip_1)
@@ -2150,7 +2156,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         self.debug("+++ Scenario: "
                    "Update Userdata on a VM that is not password enabled")
 
-        self.update_template(passwordenabled=False)
+        self.given_template_password_enabled_is(False)
 
         vm = self.when_I_deploy_a_vm(network1,
                                      keypair=self.keypair.name)
@@ -2285,7 +2291,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         self.delete(vm1, expunge=True)
 
         self.given_config_drive_provider_is("Enabled")
-        self.update_template(passwordenabled=False)
+        self.given_template_password_enabled_is(False)
 
         vm1 = self.create_VM(
             [shared_network.network],
@@ -2362,6 +2368,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         self.debug("+++Deploy VM in the created Isolated network "
                    "with user data provider as configdrive")
 
+        self.given_template_password_enabled_is(True)
         vm1 = self.when_I_deploy_a_vm(network1)
 
         public_ip_1 = self.when_I_create_a_static_nat_ip_to(vm1, network1)
@@ -2476,6 +2483,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
         # =====================================================================
         self.debug("+++ Scenario: "
                    "Deploy VM in the Tier 1 with user data")
+        self.given_template_password_enabled_is(True)
         vm = self.when_I_deploy_a_vm(network1)
         public_ip_1 = self.when_I_create_a_static_nat_ip_to(vm, network1)