fix systemvm template for KVM, using Chiradeep's latest Debian template

edison 2010-10-05 20:34:58 -07:00
parent e67e7d0e7b
commit 139ff273ff
11 changed files with 195 additions and 305 deletions

View File

@@ -121,6 +121,8 @@ import com.cloud.agent.api.StartConsoleProxyAnswer;
import com.cloud.agent.api.StartConsoleProxyCommand;
import com.cloud.agent.api.StartRouterAnswer;
import com.cloud.agent.api.StartRouterCommand;
import com.cloud.agent.api.StartSecStorageVmAnswer;
import com.cloud.agent.api.StartSecStorageVmCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StopAnswer;
@@ -184,6 +186,7 @@ import com.cloud.utils.script.Script;
import com.cloud.vm.ConsoleProxyVO;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.DomainRouter;
import com.cloud.vm.SecondaryStorageVmVO;
import com.cloud.vm.State;
import com.cloud.vm.VirtualMachineName;
@@ -415,6 +418,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
protected String _hypervisorType;
protected String _hypervisorURI;
protected String _hypervisorPath;
protected String _sysvmISOPath;
protected String _privNwName;
protected String _privBridgeName;
protected String _linkLocalBridgeName;
@@ -424,6 +428,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
protected String _domrKernel;
protected String _domrRamdisk;
protected String _pool;
protected String _localGateway;
private boolean _can_bridge_firewall;
private Pair<String, String> _pifs;
private final Map<String, vmStats> _vmStats = new ConcurrentHashMap<String, vmStats>();
@@ -683,18 +688,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
_domrArch = "i686";
} else if (!"i686".equalsIgnoreCase(_domrArch) && !"x86_64".equalsIgnoreCase(_domrArch)) {
throw new ConfigurationException("Invalid architecture (domr.arch) -- needs to be i686 or x86_64");
}
_domrKernel = (String)params.get("domr.kernel");
if (_domrKernel == null ) {
_domrKernel = new File("/var/lib/libvirt/images/vmops-domr-kernel").getAbsolutePath();
}
_domrRamdisk = (String)params.get("domr.ramdisk");
if (_domrRamdisk == null ) {
_domrRamdisk = new File("/var/lib/libvirt/images/vmops-domr-initramfs").getAbsolutePath();
}
}
value = (String)params.get("host.reserved.mem.mb");
_dom0MinMem = NumbersUtil.parseInt(value, 0)*1024*1024;
@@ -731,6 +725,20 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
} catch (ClassNotFoundException e) {
throw new ConfigurationException("Unable to find class " + "com.cloud.storage.JavaStorageLayer");
}
_sysvmISOPath = (String)params.get("systemvm.iso.path");
if (_sysvmISOPath == null) {
String[] isoPaths = {"/usr/lib64/cloud/agent/vms/systemvm.iso", "/usr/lib/cloud/agent/vms/systemvm.iso"};
for (String isoPath : isoPaths) {
if (_storage.exists(isoPath)) {
_sysvmISOPath = isoPath;
break;
}
}
if (_sysvmISOPath == null) {
throw new ConfigurationException("Can't find system vm ISO");
}
}
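The lookup above falls back through the two stock packaging locations; a quick host-side check, useful before overriding systemvm.iso.path (a sketch, assuming the default install paths):
for p in /usr/lib64/cloud/agent/vms/systemvm.iso /usr/lib/cloud/agent/vms/systemvm.iso; do
    [ -f "$p" ] && echo "systemvm.iso found at $p"
done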
//_can_bridge_firewall = can_bridge_firewall();
@@ -779,6 +787,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
throw new ConfigurationException("Failed to get public nic name");
}
s_logger.debug("Found pif: " + _pifs.first() + " on " + _privBridgeName + ", pif: " + _pifs.second() + " on " + _publicBridgeName);
_localGateway = Script.runSimpleBashScript("ip route |grep default|awk '{print $3}'");
if (_localGateway == null) {
s_logger.debug("Failed to found the local gateway");
}
return true;
}
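The gateway probe is the same one-liner the agent shells out to above; run by hand it looks like this (output illustrative):
ip route | grep default | awk '{print $3}'
# e.g. 192.168.140.1 -- handed to system VMs as localgw=<addr>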
@@ -836,7 +850,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
String dataDiskPath = null;
for (diskDef disk : disks) {
if (disk.getDiskLabel().equalsIgnoreCase("hdb")) {
if (disk.getDiskLabel().equalsIgnoreCase("vdb")) {
dataDiskPath = disk.getDiskPath();
}
}
@@ -845,7 +859,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
patchSystemVm(cmd.getBootArgs(), dataDiskPath, vmName);
String uuid = UUID.nameUUIDFromBytes(vmName.getBytes()).toString();
String domXML = defineVMXML(cmd.getVmName(), uuid, router.getRamSize(), 1, _domrArch, nics, disks, router.getVncPassword(), "Fedora 12");
String domXML = defineVMXML(cmd.getVmName(), uuid, router.getRamSize(), 1, _domrArch, nics, disks, router.getVncPassword(), cmd.getGuestOSDescription());
s_logger.debug(domXML);
@@ -879,19 +893,24 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
ConsoleProxyVO console = cmd.getProxy();
List<interfaceDef> nics = null;
try {
nics = createConsoleVMNetworks(cmd);
nics = createSysVMNetworks(console.getGuestMacAddress(), console.getPrivateMacAddress(), console.getPublicMacAddress(), console.getVlanId());
List<diskDef> disks = createSystemVMDisk(cmd.getVolumes());
String dataDiskPath = null;
for (diskDef disk : disks) {
if (disk.getDiskLabel().equalsIgnoreCase("hdb")) {
if (disk.getDiskLabel().equalsIgnoreCase("vdb")) {
dataDiskPath = disk.getDiskPath();
}
}
String bootArgs = cmd.getBootArgs() + " zone=" + _dcId;
bootArgs += " pod=" + _pod;
bootArgs += " guid=Proxy." + console.getId();
bootArgs += " proxy_vm=" + console.getId();
bootArgs += " localgw=" + _localGateway;
String vmName = cmd.getVmName();
patchSystemVm(cmd.getBootArgs(), dataDiskPath, vmName);
patchSystemVm(bootArgs, dataDiskPath, vmName);
String uuid = UUID.nameUUIDFromBytes(vmName.getBytes()).toString();
String domXML = defineVMXML(cmd.getVmName(), uuid, console.getRamSize(), 1, _domrArch, nics, disks, console.getVncPassword(), "Fedora 12");
@@ -909,12 +928,50 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return null;
}
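For reference, the suffix assembled above is appended to cmd.getBootArgs() and lands on the data disk via patchSystemVm(); a sketch with illustrative values:
# zone=1 pod=1 guid=Proxy.3 proxy_vm=3 localgw=192.168.140.1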
protected String startSecStorageVM(StartSecStorageVmCommand cmd) {
SecondaryStorageVmVO secVm = cmd.getSecondaryStorageVmVO();
List<interfaceDef> nics = null;
try {
nics = createSysVMNetworks(secVm.getGuestMacAddress(), secVm.getPrivateMacAddress(), secVm.getPublicMacAddress(), secVm.getVlanId());
List<diskDef> disks = createSystemVMDisk(cmd.getVolumes());
String dataDiskPath = null;
for (diskDef disk : disks) {
if (disk.getDiskLabel().equalsIgnoreCase("vdb")) {
dataDiskPath = disk.getDiskPath();
}
}
String vmName = cmd.getVmName();
String bootArgs = cmd.getBootArgs();
bootArgs += " zone=" + _dcId;
bootArgs += " pod=" + _pod;
bootArgs += " localgw=" + _localGateway;
patchSystemVm(bootArgs, dataDiskPath, vmName);
String uuid = UUID.nameUUIDFromBytes(vmName.getBytes()).toString();
String domXML = defineVMXML(cmd.getVmName(), uuid, secVm.getRamSize(), 1, _domrArch, nics, disks, secVm.getVncPassword(), cmd.getGuestOSDescription());
s_logger.debug(domXML);
startDomain(vmName, domXML);
} catch (LibvirtException e) {
s_logger.debug("Failed to start secondary storage vm: " + e.toString());
return e.toString();
} catch (InternalErrorException e) {
s_logger.debug("Failed to start secondary storage vm: " + e.toString());
return e.toString();
}
return null;
}
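Once startDomain() succeeds, the new definition can be inspected with stock libvirt tooling; a sketch, with a hypothetical VM name:
virsh list --all | grep s-1-VM
virsh dumpxml s-1-VM | grep -E '<disk|<interface'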
private String defineVMXML(String vmName, String uuid, int memSize, int cpus, String arch, List<interfaceDef> nics, List<diskDef> disks, String vncPaswd, String guestOSType) {
LibvirtVMDef vm = new LibvirtVMDef();
vm.setHvsType(_hypervisorType);
vm.setDomainName(vmName);
vm.setDomUUID(uuid);
vm.setDomDescription(guestOSType);
vm.setDomDescription(KVMGuestOsMapper.getGuestOsName(guestOSType));
guestDef guest = new guestDef();
guest.setGuestType(guestDef.guestType.KVM);
@@ -1100,6 +1157,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return execute((StartRouterCommand)cmd);
} else if(cmd instanceof StartConsoleProxyCommand) {
return execute((StartConsoleProxyCommand)cmd);
} else if(cmd instanceof StartSecStorageVmCommand) {
return execute((StartSecStorageVmCommand)cmd);
} else if (cmd instanceof AttachIsoCommand) {
return execute((AttachIsoCommand) cmd);
} else if (cmd instanceof AttachVolumeCommand) {
@@ -1854,18 +1913,52 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
_vms.put(cmd.getVmName(), State.Starting);
}
try {
result = startConsoleProxy(cmd);
if (result != null) {
throw new ExecutionException(result, null);
}
result = _virtRouterResource.connect(router.getPrivateIpAddress(), cmd.getProxyCmdPort());
result = _virtRouterResource.connect(router.getGuestIpAddress(), cmd.getProxyCmdPort());
if (result != null) {
throw new ExecutionException(result, null);
}
state = State.Running;
return new StartConsoleProxyAnswer(cmd, router.getPrivateIpAddress(), router.getPrivateMacAddress());
return new StartConsoleProxyAnswer(cmd);
} catch (final ExecutionException e) {
return new Answer(cmd, false, e.getMessage());
} catch (final Throwable th) {
s_logger.warn("Exception while starting router.", th);
return createErrorAnswer(cmd, "Unable to start router", th);
} finally {
synchronized(_vms) {
_vms.put(cmd.getVmName(), state);
}
}
}
private Answer execute(StartSecStorageVmCommand cmd) {
final SecondaryStorageVmVO secVm = cmd.getSecondaryStorageVmVO();
String result = null;
State state = State.Stopped;
synchronized(_vms) {
_vms.put(cmd.getVmName(), State.Starting);
}
try {
result = startSecStorageVM(cmd);
if (result != null) {
throw new ExecutionException(result, null);
}
result = _virtRouterResource.connect(secVm.getGuestIpAddress(), cmd.getProxyCmdPort());
if (result != null) {
throw new ExecutionException(result, null);
}
state = State.Running;
return new StartSecStorageVmAnswer(cmd);
} catch (final ExecutionException e) {
return new Answer(cmd, false, e.getMessage());
} catch (final Throwable th) {
@@ -3247,25 +3340,34 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return nics;
}
private List<interfaceDef> createConsoleVMNetworks(StartConsoleProxyCommand cmd) {
private List<interfaceDef> createSysVMNetworks(String guestMac, String privMac, String pubMac, String vlanId) throws InternalErrorException {
List<interfaceDef> nics = new ArrayList<interfaceDef>();
ConsoleProxyVO console = cmd.getProxy();
String privateMac = console.getPrivateMacAddress();
String pubMac = console.getPublicMacAddress();
String brName;
interfaceDef pubNic = new interfaceDef();
interfaceDef privNic = new interfaceDef();
interfaceDef vnetNic = new interfaceDef();
/* guest network is a vnet: 0 is unused, 1 is link local, 2 is the public nic */
vnetNic.defPrivateNet("default", null, null, interfaceDef.nicModel.VIRTIO);
nics.add(vnetNic);
privNic.defPrivateNet(_privNwName, null, privateMac, interfaceDef.nicModel.VIRTIO);
nics.add(privNic);
pubNic.defBridgeNet(_publicBridgeName, null, pubMac, interfaceDef.nicModel.VIRTIO);
nics.add(pubNic);
/* nic 0: link local */
privNic.defPrivateNet(_privNwName, null, guestMac, interfaceDef.nicModel.VIRTIO);
nics.add(privNic);
/* nic 1: private network */
vnetNic.defBridgeNet(_privBridgeName, null, privMac, interfaceDef.nicModel.VIRTIO);
nics.add(vnetNic);
/* nic 2: public */
if ("untagged".equalsIgnoreCase(vlanId)) {
pubNic.defBridgeNet(_publicBridgeName, null, pubMac, interfaceDef.nicModel.VIRTIO);
} else {
String vnetId = getVnetId(vlanId);
brName = setVnetBrName(vnetId);
String vnetDev = "vtap" + vnetId;
createVnet(vnetId, _pifs.second());
pubNic.defBridgeNet(brName, null, pubMac, interfaceDef.nicModel.VIRTIO);
}
nics.add(pubNic);
return nics;
}
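Inside the guest the three interfaces come up in the order they are defined above; checking from the system VM (eth naming matches the eth0ip/eth1ip/eth2ip boot args):
ip addr show eth0   # nic 0: link local
ip addr show eth1   # nic 1: private/pod bridge
ip addr show eth2   # nic 2: public, on a VLAN bridge unless vlanId is "untagged"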
@@ -3293,13 +3395,18 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
String datadiskPath = tmplVol.getKey();
diskDef hda = new diskDef();
hda.defFileBasedDisk(rootkPath, "hda", diskDef.diskBus.IDE, diskDef.diskFmtType.QCOW2);
hda.defFileBasedDisk(rootkPath, "vda", diskDef.diskBus.VIRTIO, diskDef.diskFmtType.QCOW2);
disks.add(hda);
diskDef hdb = new diskDef();
hdb.defFileBasedDisk(datadiskPath, "hdb", diskDef.diskBus.IDE, diskDef.diskFmtType.RAW);
hdb.defFileBasedDisk(datadiskPath, "vdb", diskDef.diskBus.VIRTIO, diskDef.diskFmtType.RAW);
disks.add(hdb);
diskDef hdc = new diskDef();
hdc.defFileBasedDisk(_sysvmISOPath, "hdc", diskDef.diskBus.IDE, diskDef.diskFmtType.RAW);
hdc.setDeviceType(diskDef.deviceType.CDROM);
disks.add(hdc);
return disks;
}
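The system VM therefore boots with a virtio root, a virtio data disk, and an IDE CD-ROM; from inside the guest (the /dev/cdrom symlink is the usual udev default):
ls -l /dev/vda /dev/vdb /dev/cdrom
# vda: QCOW2 root; vdb: raw disk carrying cmdline and keys; cdrom: systemvm.iso with cloud-scripts.tgz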

View File

@@ -518,15 +518,15 @@
<target name="build-systemvm-patch" depends="-init">
<mkdir dir="${dist.dir}" />
<tar destfile="${dist.dir}/patch.tar">
<tarfileset dir="${base.dir}/patches/systemvm" filemode="755">
<tarfileset dir="${base.dir}/patches/systemvm/debian/config/" filemode="755">
<include name="**/*"/>
<exclude name="**/.classpath" />
<exclude name="**/.project" />
<exclude name="**/wscript_build" />
</tarfileset>
</tar>
<copy file="${base.dir}/patches/systemvm/root/.ssh/authorized_keys" todir="${dist.dir}/"/>
<gzip destfile="${dist.dir}/patch.tgz" src="${dist.dir}/patch.tar"/>
<copy file="${base.dir}/patches/systemvm/debian/config/root/.ssh/authorized_keys" todir="${dist.dir}/"/>
<gzip destfile="${dist.dir}/cloud-scripts.tgz" src="${dist.dir}/patch.tar"/>
<delete file="${dist.dir}/patch.tar"/>
</target>

View File

@@ -14,12 +14,9 @@ do
CP=${CP}:$file
done
keyvalues=
if [ -f /mnt/cmdline ]
then
CMDLINE=$(cat /mnt/cmdline)
else
CMDLINE=$(cat /proc/cmdline)
fi
CMDLINE=$(cat /var/cache/cloud/cmdline)
#CMDLINE="graphical utf8 eth0ip=0.0.0.0 eth0mask=255.255.255.0 eth1ip=192.168.140.40 eth1mask=255.255.255.0 eth2ip=172.24.0.50 eth2mask=255.255.0.0 gateway=172.24.0.1 dns1=72.52.126.11 template=domP dns2=72.52.126.12 host=192.168.1.142 port=8250 mgmtcidr=192.168.1.0/24 localgw=192.168.140.1 zone=5 pod=5"
for i in $CMDLINE
do

View File

@@ -491,7 +491,7 @@ public class VirtualRoutingResource implements Manager {
_startTimeout = NumbersUtil.parseInt(value, 360) * 1000;
value = (String)params.get("ssh.sleep");
_sleep = NumbersUtil.parseInt(value, 5) * 1000;
_sleep = NumbersUtil.parseInt(value, 10) * 1000;
value = (String)params.get("ssh.retry");
_retry = NumbersUtil.parseInt(value, 24);

View File

@@ -15,12 +15,7 @@
ENABLED=0
[ -e /etc/default/cloud ] && . /etc/default/cloud
if [ -f /mnt/cmdline ]
then
CMDLINE=$(cat /mnt/cmdline)
else
CMDLINE=$(cat /proc/cmdline)
fi
CMDLINE=$(cat /var/cache/cloud/cmdline)
TYPE="router"
for i in $CMDLINE

View File

@@ -26,13 +26,35 @@ EOF
patch() {
local PATCH_MOUNT=/media/cdrom
local EXTRA_MOUNT=/media/extra
local patchfile=$PATCH_MOUNT/cloud-scripts.tgz
local md5file=/var/cache/cloud/cloud-scripts-signature
local privkey=$PATCH_MOUNT/authorized_keys
local shouldpatch=false
local cdrom_dev=
mkdir -p $PATCH_MOUNT
if [ -e /dev/vdb ]; then
# KVM needs to mount another disk, to get cmdline
mkdir -p $EXTRA_MOUNT
mount /dev/vdb $EXTRA_MOUNT
cp -f $EXTRA_MOUNT/cmdline /var/cache/cloud/cmdline
cp -f $EXTRA_MOUNT/authorized_keys /var/cache/cloud/authorized_keys
privkey=/var/cache/cloud/authorized_keys
umount $EXTRA_MOUNT
else
cat /proc/cmdline > /var/cache/cloud/cmdline
fi
if [ -e /dev/xvdd ]; then
mount -o ro /dev/xvdd $PATCH_MOUNT
cdrom_dev=/dev/xvdd
elif [ -e /dev/cdrom ]; then
# For KVM
cdrom_dev=/dev/cdrom
fi
if [ -n "$cdrom_dev" ]; then
mount -o ro $cdrom_dev $PATCH_MOUNT
[ -f $privkey ] && cp -f $privkey /root/.ssh/ && chmod go-rwx /root/.ssh/authorized_keys
local oldmd5=
[ -f ${md5file} ] && oldmd5=$(cat ${md5file})
@@ -43,10 +65,9 @@ patch() {
then
shouldpatch=true
logger -t cloud "Patching scripts"
tar xzf $patchfile -C ${path}
tar xzf $patchfile -C /
echo ${newmd5} > ${md5file}
fi
cat /proc/cmdline > /var/cache/cloud/cmdline
logger -t cloud "Patching cloud service"
/opt/cloud/bin/patchsystemvm.sh $PATCH_MOUNT
umount $PATCH_MOUNT
@@ -57,9 +78,6 @@ patch() {
reboot
fi
fi
if [ -f /mnt/cmdline ]; then
cat /mnt/cmdline > /var/cache/cloud/cmdline
fi
return 0
}
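Condensed, the KVM path of patch() is: lift cmdline and keys off the extra virtio disk, then unpack the scripts from the CD-ROM; a sketch under the same device assumptions the script makes:
mount /dev/vdb /media/extra
cp -f /media/extra/cmdline /var/cache/cloud/cmdline
umount /media/extra
mount -o ro /dev/cdrom /media/cdrom
tar xzf /media/cdrom/cloud-scripts.tgz -C /
umount /media/cdrom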
@@ -256,6 +274,7 @@ EOF
start() {
patch
parse_cmd_line
case $TYPE in
router)
[ "$NAME" == "" ] && NAME=router
@@ -282,14 +301,9 @@
}
disable_hvc
if [ -f /mnt/cmdline ]
then
CMDLINE=$(cat /mnt/cmdline)
else
CMDLINE=$(cat /proc/cmdline)
fi
parse_cmd_line() {
CMDLINE=$(cat /var/cache/cloud/cmdline)
TYPE="unknown"
BOOTPROTO="static"
@@ -352,6 +366,7 @@ for i in $CMDLINE
;;
esac
done
}
case "$1" in

View File

@@ -36,6 +36,8 @@ secstorage_svcs() {
chkconfig cloud-passwd-srvr off
chkconfig haproxy off ;
chkconfig dnsmasq off
chkconfig portmap on
chkconfig nfs-common on
chkconfig ssh on
chkconfig apache2 off
echo "cloud postinit ssh nfs-common portmap" > /var/cache/cloud/enabled_svcs

View File

@@ -4,15 +4,12 @@ bld.substitute("*/**",name="patchsubst")
for virttech in Utils.to_list(bld.path.ant_glob("*",dir=True)):
if virttech in ["shared","wscript_build"]: continue
patchfiles = bld.path.ant_glob('shared/** %s/**'%virttech,src=False,bld=True,dir=False,flat=True)
patchfiles = bld.path.ant_glob('shared/** %s/debian/config/**'%virttech,src=False,bld=True,dir=False,flat=True)
tgen = bld(
features = 'tar',#Utils.tar_up,
source = patchfiles,
target = '%s-patch.tgz'%virttech,
name = '%s-patch_tgz'%virttech,
root = os.path.join("patches",virttech),
target = 'cloud-scripts.tgz',
name = 'cloud-scripts_tgz',
root = os.path.join("patches", virttech + "/debian/config"),
rename = lambda x: re.sub(".subst$","",x),
)
if virttech != "xenserver":
# xenserver uses the patch.tgz file later to make an ISO, so we do not need to install it
bld.install_as("${AGENTLIBDIR}/scripts/vm/hypervisor/%s/patch.tgz"%virttech, "%s-patch.tgz"%virttech)

View File

@@ -12,69 +12,6 @@ mntpath() {
echo "/mnt/$vmname"
}
NBD=kvm-nbd
mount_local() {
local vmname=$1
local disk=$2
local path=$(mntpath $vmname)
lsmod | grep nbd &> /dev/null
if [ $? -ne 0 ]
then
modprobe nbd max_part=8 &> /dev/null
if [ $? -ne 0 ]
then
printf "No nbd module installed, failed to mount qcow2 image\n"
return 1
fi
fi
$NBD -c /dev/nbd0 $disk &> /dev/null
if [ $? -ne 0 ]
then
printf "failed to create /dev/nbd0\n"
return 2
fi
mkdir -p ${path}
retry=5
while [ $retry -gt 0 ]
do
sleep 10
mount -o sync /dev/nbd0p1 ${path} &> /dev/null
if [ $? -eq 0 ]
then
break
fi
retry=$(($retry-1))
done
if [ $retry -eq 0 ]
then
$NBD -d /dev/nbd0p1 &> /dev/null
sleep 2
$NBD -d /dev/nbd0 &> /dev/null
printf "Faild to mount qcow2 image\n"
return 3
fi
return $?
}
umount_local() {
local vmname=$1
local path=$(mntpath $vmname)
umount $path
$NBD -d /dev/nbd0p1
sleep 2
$NBD -d /dev/nbd0
local ret=$?
rm -rf $path
return $ret
}
mount_raw_disk() {
local vmname=$1
local datadisk=$2
@@ -123,121 +60,21 @@ umount_raw_disk() {
return $?
}
get_kernel() {
local vmname=$1
local rootdisk=$2
local path=$(mntpath $vmname)
local rootdiskFolder=`echo $rootdisk|sed 's/rootdisk//'`
if [ ! -f $rootdiskFolder/vmops-domr-kernel ]
then
cp $path/boot/vmlinuz* $rootdiskFolder/vmops-domr-kernel -f
fi
if [ ! -f $rootdiskFolder/vmops-domr-initramfs ]
then
cp $path/boot/initramfs* $rootdiskFolder/vmops-domr-initramfs -f
fi
}
patch() {
local vmname=$1
local patchfile=$2
local path=$(mntpath $vmname)
local oldmd5=
local md5file=${path}/md5sum
[ -f ${md5file} ] && oldmd5=$(cat ${md5file})
local newmd5=$(md5sum $patchfile | awk '{print $1}')
if [ "$oldmd5" != "$newmd5" ]
then
tar xzf $patchfile -C ${path}
echo ${newmd5} > ${md5file}
fi
return 0
}
#
# To use existing console proxy .zip-based package file
#
patch_console_proxy() {
local vmname=$1
local patchfile=$2
local path=$(mntpath $vmname)
local oldmd5=
if [ ! -d ${path}/usr/local/vmops/consoleproxy ]
then
mkdir -p ${path}/usr/local/vmops/consoleproxy
fi
local md5file=${path}/usr/local/vmops/consoleproxy/md5sum
[ -f ${md5file} ] && oldmd5=$(cat ${md5file})
local newmd5=$(md5sum $patchfile | awk '{print $1}')
if [ "$oldmd5" != "$newmd5" ]
then
echo "All" | unzip $patchfile -d ${path}/usr/local/vmops/consoleproxy >/dev/null 2>&1
chmod 555 ${path}/usr/local/vmops/consoleproxy/run.sh
echo ${newmd5} > ${md5file}
fi
return 0
}
patch_all() {
local vmname=$1
local domrpatch=$2
local domppatch=$3
local cmdline=$4
local datadisk=$5
local cmdline=$2
local datadisk=$3
local path=$(mntpath $vmname)
if [ ! -f $path/$domrpatch ]
then
cp $domrpatch $path/
fi
if [ ! -f $path/console-proxy.zip ]
then
cp $domppatch $path/console-proxy.zip
fi
if [ -f ~/.ssh/id_rsa.pub.cloud ]
then
cp ~/.ssh/id_rsa.pub.cloud $path/id_rsa.pub
cp ~/.ssh/id_rsa.pub.cloud $path/authorized_keys
fi
echo $cmdline > $path/cmdline
sed -i "s/,/\ /g" $path/cmdline
return 0
}
consoleproxy_svcs() {
local vmname=$1
local path=$(mntpath $vmname)
chroot ${path} /sbin/chkconfig vmops on
chroot ${path} /sbin/chkconfig domr_webserver off
chroot ${path} /sbin/chkconfig haproxy off ;
chroot ${path} /sbin/chkconfig dnsmasq off
chroot ${path} /sbin/chkconfig sshd off
chroot ${path} /sbin/chkconfig httpd off
chroot ${path} /sbin/chkconfig seteth1 on
cp ${path}/etc/sysconfig/iptables-domp ${path}/etc/sysconfig/iptables
}
routing_svcs() {
local vmname=$1
local path=$(mntpath $vmname)
chroot ${path} /sbin/chkconfig vmops off
chroot ${path} /sbin/chkconfig domr_webserver on ;
chroot ${path} /sbin/chkconfig haproxy on ;
chroot ${path} /sbin/chkconfig dnsmasq on
chroot ${path} /sbin/chkconfig sshd on
chroot ${path} /sbin/chkconfig seteth1 on
cp ${path}/etc/sysconfig/iptables-domr ${path}/etc/sysconfig/iptables
}
lflag=
dflag=
@@ -274,72 +111,12 @@ then
printf "Failed to mount $rootdisk"
exit $?
fi
cpfile=$(dirname $0)/../../../../vms/systemvm.zip
if [ -f $cpfile ]; then
patch_all $vmname $(dirname $0)/patch.tgz $cpfile $cmdline $rootdisk
fi
patch_all $vmname $cmdline $rootdisk
umount_raw_disk $vmname $rootdisk
exit $?
fi
mount_local $vmname $rootdisk
if [ $? -gt 0 ]
then
printf "Failed to mount disk $rootdisk for $vmname\n" >&2
exit 1
fi
if [ -f $(dirname $0)/patch.tgz ]
then
patch $vmname $(dirname $0)/patch.tgz
if [ $? -gt 0 ]
then
printf "Failed to apply patch patch.zip to $vmname\n" >&2
umount_local $vmname
exit 4
fi
fi
cpfile=$(dirname $0)/../../../../vms/systemvm.zip
if [ "$vmtype" = "domp" ] && [ -f $cpfile ]
then
patch_console_proxy $vmname $cpfile
if [ $? -gt 0 ]
then
printf "Failed to apply patch $cpfile to $vmname\n" >&2
umount_local $vmname
exit 5
fi
fi
get_kernel $vmname $rootdisk
if [ "$vmtype" = "domr" ]
then
routing_svcs $vmname
if [ $? -gt 0 ]
then
printf "Failed to execute routing_svcs\n" >&2
umount_local $vmname
exit 6
fi
fi
if [ "$vmtype" = "domp" ]
then
consoleproxy_svcs $vmname
if [ $? -gt 0 ]
then
printf "Failed to execute consoleproxy_svcs\n" >&2
umount_local $vmname
exit 7
fi
fi
umount_local $vmname
exit $?

View File

@@ -4,7 +4,7 @@ INSERT INTO `cloud`.`vm_template` (id, unique_name, name, public, created, type,
VALUES (2, 'centos53-x86_64', 'CentOS 5.3(x86_64) no GUI', 1, now(), 'builtin', 0, 64, 1, 'http://download.cloud.com/templates/builtin/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2', 'b63d854a9560c013142567bbae8d98cf', 0, 'CentOS 5.3(x86_64) no GUI', 'VHD', 11, 1, 1, 'Xenserver');
INSERT INTO `cloud`.`vm_template` (id, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type)
VALUES (3, 'routing_kvm', 'SystemVM Template (KVM)', 0, now(), 'system', 0, 64, 1, 'http://nfs1.lab.vmops.com/templates/routing/debian/systemvm.qcow2.bz2', '78c91a8d2b3441fc5d5129399fca2bb9', 0, 'SystemVM Template KVM', 'QCOW2', 15, 0, 1, 'KVM');
VALUES (3, 'routing_kvm', 'SystemVM Template (KVM)', 0, now(), 'system', 0, 64, 1, 'http://nfs1.lab.vmops.com/templates/routing/debian/systemvm.qcow2.bz2', 'f366fe5c31f267a407236878988ce7bd', 0, 'SystemVM Template KVM', 'QCOW2', 15, 0, 1, 'KVM');
INSERT INTO `cloud`.`vm_template` (id, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, display_text, enable_password, format, guest_os_id, featured, cross_zones, hypervisor_type)
VALUES (4, 'centos55-x86_64', 'CentOS 5.5(x86_64) no GUI', 1, now(), 'builtin', 0, 64, 1, 'http://download.cloud.com/templates/builtin/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2', '1da20ae69b54f761f3f733dce97adcc0', 'CentOS 5.5(x86_64) no GUI', 0, 'QCOW2', 112, 1, 1, 'KVM');
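The checksum column is compared against the downloaded file, so the new md5 can be verified by hand (filename taken from the URL above):
md5sum systemvm.qcow2.bz2
# expect: f366fe5c31f267a407236878988ce7bd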

View File

@@ -265,7 +265,7 @@ if bld.env.DISTRO not in ["Windows","Mac"]:
# systemvm.zip cannot be built on Windows or Mac because system deps do not exist there
tgen = bld(
rule = iso_up,
source = "patches/systemvm-patch.tgz target/oss/systemvm.zip patches/systemvm/root/.ssh/authorized_keys",
source = "patches/cloud-scripts.tgz target/oss/systemvm.zip patches/systemvm/debian/config/root/.ssh/authorized_keys",
target = 'target/oss/systemvm.iso',
name = 'systemvm_iso',
rename = lambda x: x.split(sep)[-1].replace('systemvm-',''),
@@ -275,7 +275,7 @@ if buildpremium:
if buildpremium:
tgen = bld(
rule = iso_up,
source = "patches/systemvm-patch.tgz target/premium/systemvm.zip patches/systemvm/root/.ssh/authorized_keys",
source = "patches/cloud-scripts.tgz target/premium/systemvm.zip patches/systemvm/debian/config/root/.ssh/authorized_keys",
target = 'target/premium/systemvm.iso',
name = 'systemvm-premium_iso',
rename = lambda x: x.split(sep)[-1].replace('systemvm-',''),