CLOUDSTACK-10013: Fix VMware related issues and fix misc tests

This fixes test failures around VMware with the new systemvmtemplate.
In addition:

- Does not skip rVR related test cases for VMware
- Removes rc.local
- Processes unprocessed cmd_line.json
- Fixes NPEs around VMware tests/code
- On VMware, uses udevadm to reconfigure nic/mac address rather than rebooting
- Uses a proper acpi shutdown script for faster systemvm shutdowns
- Give at least 256MB of swap for VRs to avoid OOM on VMware
- Fixes smoke tests for environment related failures

Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>
This commit is contained in:
Rohit Yadav 2017-12-13 01:42:05 +05:30
parent ce3303212b
commit 15b11a3b27
26 changed files with 92 additions and 103 deletions

View File

@ -4776,8 +4776,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId());
if (vm == null) { if (vm == null) {
s_logger.info("Unable to find vm " + work.getVmId()); s_logger.info("Unable to find vm " + work.getVmId());
throw new CloudRuntimeException("Unable to find VM id=" + work.getVmId());
} }
assert vm != null;
orchestrateStop(vm.getUuid(), work.isCleanup()); orchestrateStop(vm.getUuid(), work.isCleanup());
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, null);

View File

@ -216,8 +216,8 @@ class serviceOpsUbuntu(serviceOps):
class serviceOpsRedhat7(serviceOps): class serviceOpsRedhat7(serviceOps):
def isServiceRunning(self, servicename): def isServiceRunning(self, servicename):
try: try:
o = bash("systemctl status " + servicename) o = bash("systemctl is-active " + servicename)
if "running" in o.getStdout() or "start" in o.getStdout() or "Running" in o.getStdout(): if "inactive" not in o.getStdout():
return True return True
else: else:
return False return False

View File

@ -64,6 +64,7 @@ import com.cloud.cluster.ManagementServerHostVO;
import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.cluster.dao.ManagementServerHostDao;
import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.Vlan.VlanType;
import com.cloud.dc.VlanVO; import com.cloud.dc.VlanVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.VlanDao; import com.cloud.dc.dao.VlanDao;
import com.cloud.exception.StorageUnavailableException; import com.cloud.exception.StorageUnavailableException;
import com.cloud.gpu.dao.HostGpuGroupsDao; import com.cloud.gpu.dao.HostGpuGroupsDao;
@ -92,18 +93,20 @@ import com.cloud.network.as.dao.AutoScaleVmGroupVmMapDao;
import com.cloud.network.as.dao.AutoScaleVmProfileDao; import com.cloud.network.as.dao.AutoScaleVmProfileDao;
import com.cloud.network.as.dao.ConditionDao; import com.cloud.network.as.dao.ConditionDao;
import com.cloud.network.as.dao.CounterDao; import com.cloud.network.as.dao.CounterDao;
import com.cloud.org.Cluster;
import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceManager;
import com.cloud.resource.ResourceState; import com.cloud.resource.ResourceState;
import com.cloud.service.ServiceOfferingVO; import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.ImageStoreDetailsUtil; import com.cloud.storage.ImageStoreDetailsUtil;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageManager; import com.cloud.storage.StorageManager;
import com.cloud.storage.StorageStats; import com.cloud.storage.StorageStats;
import com.cloud.storage.VolumeStats; import com.cloud.storage.VolumeStats;
import com.cloud.storage.VolumeVO; import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.UserStatisticsVO; import com.cloud.user.UserStatisticsVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.user.VmDiskStatisticsVO; import com.cloud.user.VmDiskStatisticsVO;
import com.cloud.user.dao.UserStatisticsDao; import com.cloud.user.dao.UserStatisticsDao;
import com.cloud.user.dao.VmDiskStatisticsDao; import com.cloud.user.dao.VmDiskStatisticsDao;
@ -173,6 +176,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
@Inject @Inject
private HostDao _hostDao; private HostDao _hostDao;
@Inject @Inject
private ClusterDao _clusterDao;
@Inject
private UserVmDao _userVmDao; private UserVmDao _userVmDao;
@Inject @Inject
private VolumeDao _volsDao; private VolumeDao _volsDao;
@ -916,7 +921,18 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
} }
} }
try { try {
HashMap<String, VolumeStatsEntry> volumeStatsByUuid = _userVmMgr.getVolumeStatistics(pool.getClusterId(), pool.getUuid(), pool.getPoolType(), volumeLocators, StatsTimeout.value()); Map<String, VolumeStatsEntry> volumeStatsByUuid;
if (pool.getScope() == ScopeType.ZONE) {
volumeStatsByUuid = new HashMap<>();
for (final Cluster cluster: _clusterDao.listByZoneId(pool.getDataCenterId())) {
final Map<String, VolumeStatsEntry> volumeStatsForCluster = _userVmMgr.getVolumeStatistics(cluster.getId(), pool.getUuid(), pool.getPoolType(), volumeLocators, StatsTimeout.value());
if (volumeStatsForCluster != null) {
volumeStatsByUuid.putAll(volumeStatsForCluster);
}
}
} else {
volumeStatsByUuid = _userVmMgr.getVolumeStatistics(pool.getClusterId(), pool.getUuid(), pool.getPoolType(), volumeLocators, StatsTimeout.value());
}
if (volumeStatsByUuid != null){ if (volumeStatsByUuid != null){
for (final Map.Entry<String, VolumeStatsEntry> entry : volumeStatsByUuid.entrySet()) { for (final Map.Entry<String, VolumeStatsEntry> entry : volumeStatsByUuid.entrySet()) {
if (entry == null || entry.getKey() == null || entry.getValue() == null) { if (entry == null || entry.getKey() == null || entry.getValue() == null) {

View File

@ -24,7 +24,7 @@ under the License.
<!-- Preserve messages in a local file --> <!-- Preserve messages in a local file -->
<!-- ================================= --> <!-- ================================= -->
<appender name="FILE1" class="org.apache.log4j.RollingFileAppender"> <appender name="cloudLog" class="org.apache.log4j.RollingFileAppender">
<param name="File" value="/var/log/cloud.log"/> <param name="File" value="/var/log/cloud.log"/>
<param name="MaxFileSize" value="10000KB"/> <param name="MaxFileSize" value="10000KB"/>
<param name="MaxBackupIndex" value="4"/> <param name="MaxBackupIndex" value="4"/>
@ -34,7 +34,7 @@ under the License.
</layout> </layout>
</appender> </appender>
<appender name="FILE2" class="org.apache.log4j.RollingFileAppender"> <appender name="cloudOut" class="org.apache.log4j.RollingFileAppender">
<param name="File" value="/var/log/cloud/cloud.out"/> <param name="File" value="/var/log/cloud/cloud.out"/>
<param name="Append" value="true"/> <param name="Append" value="true"/>
<param name="MaxFileSize" value="10000KB"/> <param name="MaxFileSize" value="10000KB"/>
@ -45,7 +45,7 @@ under the License.
</layout> </layout>
</appender> </appender>
<appender name="FILE3" class="org.apache.log4j.rolling.RollingFileAppender"> <appender name="cloudSystemvmLog" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="File" value="/usr/local/cloud/systemvm/cloud.log"/> <param name="File" value="/usr/local/cloud/systemvm/cloud.log"/>
<param name="Append" value="true"/> <param name="Append" value="true"/>
<param name="MaxFileSize" value="10000KB"/> <param name="MaxFileSize" value="10000KB"/>
@ -123,9 +123,9 @@ under the License.
<root> <root>
<level value="INFO"/> <level value="INFO"/>
<appender-ref ref="CONSOLE"/> <appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE1"/> <appender-ref ref="cloudLog"/>
<appender-ref ref="FILE2"/> <appender-ref ref="cloudOut"/>
<appender-ref ref="FILE3"/> <appender-ref ref="cloudSystemvmLog"/>
</root> </root>
</log4j:configuration> </log4j:configuration>

View File

@ -1,3 +0,0 @@
__?.o/ Apache CloudStack SystemVM
( )# https://cloudstack.apache.org
(___(_) \s \r \n \l

View File

@ -1,19 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

View File

@ -80,7 +80,7 @@ class CsDhcp(CsDataBag):
# DNS search order # DNS search order
if gn.get_dns() and device: if gn.get_dns() and device:
sline = "dhcp-option=tag:interface-%s-%s,6" % (device, idx) sline = "dhcp-option=tag:interface-%s-%s,6" % (device, idx)
dns_list = [x for x in gn.get_dns() if x is not None] dns_list = [x for x in gn.get_dns() if not (not x)]
line = "dhcp-option=tag:interface-%s-%s,6,%s" % (device, idx, ','.join(dns_list)) line = "dhcp-option=tag:interface-%s-%s,6,%s" % (device, idx, ','.join(dns_list))
self.conf.search(sline, line) self.conf.search(sline, line)
# Gateway # Gateway

View File

@ -331,14 +331,14 @@ setup_common() {
ip route add default via $GW dev $gwdev ip route add default via $GW dev $gwdev
fi fi
# a hacking way to activate vSwitch under VMware # Workaround to activate vSwitch under VMware
ping -n -c 3 $GW & timeout 3 ping -n -c 3 $GW || true
if [ -n "$MGMTNET" -a -n "$LOCAL_GW" ] if [ -n "$MGMTNET" -a -n "$LOCAL_GW" ]
then then
ping -n -c 3 $LOCAL_GW & timeout 3 ping -n -c 3 $LOCAL_GW || true
#This code is added to address ARP issue by pinging MGMT_GW #This code is added to address ARP issue by pinging MGMT_GW
MGMT_GW=$(echo $MGMTNET | awk -F "." '{print $1"."$2"."$3".1"}') MGMT_GW=$(echo $MGMTNET | awk -F "." '{print $1"."$2"."$3".1"}')
ping -n -c 3 $MGMT_GW & timeout 3 ping -n -c 3 $MGMT_GW || true
fi fi
if [ "$HYPERVISOR" == "vmware" ]; then if [ "$HYPERVISOR" == "vmware" ]; then

View File

@ -59,10 +59,8 @@ setup_router() {
if [ "$oldmd5" != "$newmd5" ] if [ "$oldmd5" != "$newmd5" ]
then then
log_it "udev NIC assignment requires reboot to take effect" log_it "Reloading udev for new udev NIC assignment"
sync udevadm control --reload-rules && udevadm trigger
sleep 2
reboot
fi fi
fi fi

View File

@ -65,11 +65,8 @@ EOF
if [ "$HYPERVISOR" == "vmware" ] || [ "$HYPERVISOR" == "hyperv" ]; if [ "$HYPERVISOR" == "vmware" ] || [ "$HYPERVISOR" == "hyperv" ];
then then
ip route add $MGMTNET via $LOCAL_GW dev eth0 ip route add $MGMTNET via $LOCAL_GW dev eth0
# workaround to activate vSwitch under VMware
# a hacking way to activate vSwitch under VMware timeout 3 ping -n -c 3 $LOCAL_GW || true
ping -n -c 3 $LOCAL_GW &
sleep 3
pkill ping
fi fi
fi fi

View File

@ -112,6 +112,12 @@ def is_guestnet_configured(guestnet_dict, keys):
return exists return exists
# If the command line json file is unprocessed process it
# This is important, or the control interfaces will get deleted!
if jsonFilename != "cmd_line.json" and os.path.isfile(jsonPath % "cmd_line.json"):
qf = QueueFile()
qf.setFile("cmd_line.json")
qf.load(None)
if not (os.path.isfile(jsonConfigFile) and os.access(jsonConfigFile, os.R_OK)): if not (os.path.isfile(jsonConfigFile) and os.access(jsonConfigFile, os.R_OK)):
print "[ERROR] update_config.py :: Unable to read and access %s to process it" % jsonConfigFile print "[ERROR] update_config.py :: Unable to read and access %s to process it" % jsonConfigFile

View File

@ -25,7 +25,7 @@ vrrp_script heartbeat {
} }
vrrp_instance inside_network { vrrp_instance inside_network {
state EQUAL state BACKUP
interface eth2 interface eth2
virtual_router_id 51 virtual_router_id 51
nopreempt nopreempt

View File

@ -219,7 +219,8 @@ class TestDeployVirtioSCSIVM(cloudstackTestCase):
"controller index") "controller index")
elif child.tag.lower() == "driver": elif child.tag.lower() == "driver":
discard = child.get("discard") discard = child.get("discard")
self.assertEqual(discard, "unmap", "discard settings not unmap") if discard: # may not be defined by older qemu/libvirt
self.assertEqual(discard, "unmap", "discard settings not unmap")
def verifyGuestState(self, diskcount): def verifyGuestState(self, diskcount):
ssh = self.virtual_machine.get_ssh_client(reconnect=True) ssh = self.virtual_machine.get_ssh_client(reconnect=True)

56
test/integration/smoke/test_deploy_vm_root_resize.py Executable file → Normal file
View File

@ -53,8 +53,8 @@ class TestDeployVmRootSize(cloudstackTestCase):
cls.services = cls.testClient.getParsedTestDataConfig() cls.services = cls.testClient.getParsedTestDataConfig()
cls.services["mode"] = cls.zone.networktype cls.services["mode"] = cls.zone.networktype
cls._cleanup = [] cls._cleanup = []
cls.storageID = None
cls.updateclone = False cls.updateclone = False
cls.restartreq = False
cls.defaultdiskcontroller = "ide" cls.defaultdiskcontroller = "ide"
cls.template = get_template(cls.api_client, cls.zone.id) cls.template = get_template(cls.api_client, cls.zone.id)
if cls.template == FAILED: if cls.template == FAILED:
@ -70,7 +70,8 @@ class TestDeployVmRootSize(cloudstackTestCase):
list_pool_resp = list_storage_pools(cls.api_client, list_pool_resp = list_storage_pools(cls.api_client,
account=cls.account.name, account=cls.account.name,
domainid=cls.domain.id) domainid=cls.domain.id)
#Identify the storage pool type and set vmware fullclone to
# Identify the storage pool type and set vmware fullclone to
# true if storage is VMFS # true if storage is VMFS
if cls.hypervisor == 'vmware': if cls.hypervisor == 'vmware':
# please make sure url of templateregister dictionary in # please make sure url of templateregister dictionary in
@ -89,26 +90,13 @@ class TestDeployVmRootSize(cloudstackTestCase):
value="scsi") value="scsi")
cls.updateclone = True cls.updateclone = True
cls.restartreq = True
list_config_fullclone_global_response = list_configurations(
cls.api_client
, name=
"vmware.create.full.clone")
if list_config_fullclone_global_response[0].value=="false":
Configurations.update(cls.api_client,
"vmware.create.full.clone",
value="true")
cls.updateclone = True
cls.restartreq = True
for strpool in list_pool_resp: for strpool in list_pool_resp:
if strpool.type.lower() == "vmfs" or strpool.type.lower()== "networkfilesystem": if strpool.type.lower() == "vmfs" or strpool.type.lower()== "networkfilesystem":
list_config_storage_response = list_configurations( list_config_storage_response = list_configurations(
cls.api_client cls.api_client, name="vmware.create.full.clone",
, name= storageid=strpool.id)
"vmware.create.full.clone",storageid=strpool.id)
res = validateList(list_config_storage_response) res = validateList(list_config_storage_response)
if res[2]== INVALID_INPUT: if res[2]== INVALID_INPUT:
raise Exception("Failed to list configurations ") raise Exception("Failed to list configurations ")
@ -123,12 +111,16 @@ class TestDeployVmRootSize(cloudstackTestCase):
tags="scsi") tags="scsi")
cls.storageID = strpool.id cls.storageID = strpool.id
break break
if cls.restartreq:
cls.restartServer()
#Giving 30 seconds to management to warm-up, list_config_fullclone_global_response = list_configurations(
#Experienced failures when trying to deploy a VM exactly when management came up cls.api_client, name="vmware.create.full.clone")
time.sleep(30)
if list_config_fullclone_global_response[0].value=="false":
Configurations.update(cls.api_client,
"vmware.create.full.clone",
value="true")
cls.updateclone = True
#create a service offering #create a service offering
cls.service_offering = ServiceOffering.create( cls.service_offering = ServiceOffering.create(
@ -147,21 +139,17 @@ class TestDeployVmRootSize(cloudstackTestCase):
if cls.updateclone: if cls.updateclone:
Configurations.update(cls.api_client, Configurations.update(cls.api_client,
"vmware.create.full.clone", "vmware.root.disk.controller",
value="false",storageid=cls.storageID) value=cls.defaultdiskcontroller)
Configurations.update(cls.api_client, Configurations.update(cls.api_client,
"vmware.create.full.clone", "vmware.create.full.clone",
value="false") value="false")
Configurations.update(cls.api_client, Configurations.update(cls.api_client,
"vmware.root.disk.controller", "vmware.create.full.clone",
value=cls.defaultdiskcontroller) value="false", storageid=cls.storageID)
StoragePool.update(cls.api_client, id=cls.storageID, if cls.storageID:
tags="") StoragePool.update(cls.api_client, id=cls.storageID,
cls.restartServer() tags="")
#Giving 30 seconds to management to warm-up,
#Experienced failures when trying to deploy a VM exactly when management came up
time.sleep(30)
cleanup_resources(cls.api_client, cls._cleanup) cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e: except Exception as e:

0
test/integration/smoke/test_iso.py Executable file → Normal file
View File

0
test/integration/smoke/test_list_ids_parameter.py Executable file → Normal file
View File

0
test/integration/smoke/test_nested_virtualization.py Executable file → Normal file
View File

View File

@ -73,7 +73,7 @@ class TestSSVMs(cloudstackTestCase):
return list_host_response[0].state == 'Up', None return list_host_response[0].state == 'Up', None
return False, None return False, None
res, _ = wait_until(3, self.services["sleep"], checkRunningAgent) res, _ = wait_until(3, 300, checkRunningAgent)
if not res: if not res:
raise Exception("Failed to wait for SSVM agent to be Up") raise Exception("Failed to wait for SSVM agent to be Up")
@ -99,7 +99,7 @@ class TestSSVMs(cloudstackTestCase):
return ssvm_response.state == 'Running', ssvm_response return ssvm_response.state == 'Running', ssvm_response
return False, None return False, None
res, ssvm_response = wait_until(3, self.services["sleep"], checkRunningState) res, ssvm_response = wait_until(3, 300, checkRunningState)
if not res: if not res:
self.fail("Failed to reach systemvm state to Running") self.fail("Failed to reach systemvm state to Running")
return ssvm_response return ssvm_response

0
test/integration/smoke/test_vm_life_cycle.py Executable file → Normal file
View File

View File

@ -246,6 +246,8 @@ class TestCreateVolume(cloudstackTestCase):
ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz) ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz)
elif list_volume_response[0].hypervisor.lower() == "hyperv": elif list_volume_response[0].hypervisor.lower() == "hyperv":
ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz) ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz)
elif list_volume_response[0].hypervisor.lower() == "vmware":
ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sda",size_to_verify=vol_sz)
else: else:
ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz) ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz)
self.debug(" Volume Size Expected %s Actual :%s" %(vol_sz,ret[1])) self.debug(" Volume Size Expected %s Actual :%s" %(vol_sz,ret[1]))

View File

@ -20,18 +20,11 @@ set -e
set -x set -x
function configure_acpid() { function configure_acpid() {
grep /usr/local/sbin/power.sh /etc/acpi/events/power && return
mkdir -p /etc/acpi/events mkdir -p /etc/acpi/events
cat >> /etc/acpi/events/power << EOF cat > /etc/acpi/events/powerbtn <<EOF
event=button/power.* event=button[ /]power
action=/usr/local/sbin/power.sh "%e" action=/sbin/poweroff
EOF EOF
cat >> /usr/local/sbin/power.sh << EOF
#!/bin/bash
/sbin/poweroff
EOF
chmod a+x /usr/local/sbin/power.sh
} }
return 2>/dev/null || configure_acpid return 2>/dev/null || configure_acpid

View File

@ -20,14 +20,14 @@ set -e
set -x set -x
function configure_grub() { function configure_grub() {
cat <<EOF > /etc/default/grub cat > /etc/default/grub <<EOF
# If you change this file, run 'update-grub' afterwards to update # If you change this file, run 'update-grub' afterwards to update
# /boot/grub/grub.cfg. # /boot/grub/grub.cfg.
GRUB_DEFAULT=0 GRUB_DEFAULT=0
GRUB_TIMEOUT=0 GRUB_TIMEOUT=0
GRUB_DISTRIBUTOR=Debian GRUB_DISTRIBUTOR=Debian
GRUB_CMDLINE_LINUX_DEFAULT="" GRUB_CMDLINE_LINUX_DEFAULT="loglevel=4"
GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8 console=hvc0 earlyprintk=xen net.ifnames=0 biosdevname=0 debian-installer=en_US nomodeset" GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8 console=hvc0 earlyprintk=xen net.ifnames=0 biosdevname=0 debian-installer=en_US nomodeset"
GRUB_CMDLINE_XEN="com1=115200 console=com1" GRUB_CMDLINE_XEN="com1=115200 console=com1"
GRUB_TERMINAL="console serial" GRUB_TERMINAL="console serial"

View File

@ -52,6 +52,15 @@ function do_signature() {
echo "Cloudstack Release $CLOUDSTACK_RELEASE $(date)" > /etc/cloudstack-release echo "Cloudstack Release $CLOUDSTACK_RELEASE $(date)" > /etc/cloudstack-release
} }
function configure_issue() {
cat > /etc/issue <<EOF
__?.o/ Apache CloudStack SystemVM $CLOUDSTACK_RELEASE
( )# https://cloudstack.apache.org
(___(_) Debian GNU/Linux 9 \n \l
EOF
}
function configure_strongswan() { function configure_strongswan() {
# change the charon stroke timeout from 3 minutes to 30 seconds # change the charon stroke timeout from 3 minutes to 30 seconds
sed -i "s/# timeout = 0/timeout = 30000/" /etc/strongswan.d/charon/stroke.conf sed -i "s/# timeout = 0/timeout = 30000/" /etc/strongswan.d/charon/stroke.conf
@ -92,6 +101,7 @@ function configure_services() {
configure_apache2 configure_apache2
configure_strongswan configure_strongswan
configure_issue
} }
return 2>/dev/null || configure_services return 2>/dev/null || configure_services

View File

@ -53,10 +53,10 @@ function install_packages() {
fi fi
${apt_get} install grub-legacy \ ${apt_get} install grub-legacy \
rsyslog logrotate cron net-tools ifupdown tmux vim netbase iptables \ rsyslog logrotate cron net-tools ifupdown tmux vim htop netbase iptables \
openssh-server e2fsprogs tcpdump socat wget \ openssh-server e2fsprogs tcpdump socat wget \
python bzip2 sed gawk diffutils grep gzip less tar telnet ftp rsync traceroute psmisc lsof procps \ python bzip2 sed gawk diffutils grep gzip less tar telnet ftp rsync traceroute psmisc lsof procps \
inetutils-ping iputils-arping httping curl \ inetutils-ping iputils-arping httping curl \
dnsutils zip unzip ethtool uuid file iproute acpid sudo \ dnsutils zip unzip ethtool uuid file iproute acpid sudo \
sysstat python-netaddr \ sysstat python-netaddr \
apache2 ssl-cert \ apache2 ssl-cert \

View File

@ -57,13 +57,13 @@ d-i partman-auto/method string regular
d-i partman-auto/choose_recipe select atomic d-i partman-auto/choose_recipe select atomic
d-i partman-auto/expert_recipe string \ d-i partman-auto/expert_recipe string \
boot-root :: \ boot-root :: \
100 50 200 ext2 \ 50 50 100 ext2 \
$primary{ } $bootable{ } \ $primary{ } $bootable{ } \
method{ format } format{ } \ method{ format } format{ } \
use_filesystem{ } filesystem{ ext2 } \ use_filesystem{ } filesystem{ ext2 } \
mountpoint{ /boot } \ mountpoint{ /boot } \
. \ . \
1450 40 1600 ext4 \ 1300 40 1600 ext4 \
method{ format } format{ } \ method{ format } format{ } \
use_filesystem{ } filesystem{ ext4 } \ use_filesystem{ } filesystem{ ext4 } \
mountpoint{ / } \ mountpoint{ / } \
@ -78,7 +78,7 @@ d-i partman-auto/expert_recipe string \
use_filesystem{ } filesystem{ ext4 } \ use_filesystem{ } filesystem{ ext4 } \
mountpoint{ /tmp } \ mountpoint{ /tmp } \
. \ . \
100 100 1024 linux-swap \ 256 100 1024 linux-swap \
method{ swap } format{ } \ method{ swap } format{ } \
. .

View File

@ -844,7 +844,7 @@
'Released': 'off', 'Released': 'off',
'Destroy': 'off', 'Destroy': 'off',
'Shutdown': 'off', 'Shutdown': 'off',
'Setup': 'warning', 'Setup': 'on',
'Implemented': 'on' 'Implemented': 'on'
} }
} }