mirror of https://github.com/apache/cloudstack.git

add clouddev

commit 5f6387e113 (parent 3938c24b45)
@@ -27,6 +27,7 @@
    <import file="${base.dir}/build/build-marvin.xml" optional="true"/>
    <import file="${base.dir}/build/package.xml" optional="true"/>
    <import file="${base.dir}/build/developer.xml" optional="true"/>
    <import file="${base.dir}/build/build-clouddev.xml" optional="true"/>
    <import file="${base.dir}/build/build-usage.xml" optional="false"/>
    <import file="${base.dir}/build/build-aws-api.xml" optional="false"/>
</project>
build/build-clouddev.xml (new file, 93 lines)
@@ -0,0 +1,93 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->

<project name="CloudDev Targets" basedir=".">

    <condition property="port" value="${rport}" else="2222">
        <isset property="rport"/>
    </condition>

    <condition property="host" value="${rhost}" else="localhost">
        <isset property="rhost"/>
    </condition>

    <target name="deploydbIfSet" if="deploydb.is.set">
        <echo message="ant deploydb"/>
        <sshexec host="${host}" port="${port}" username="root" password="password" command="echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;killall java;cd /opt/incubator-cloudstack;ant deploycddb"/>
    </target>

    <target name="rdeploydb">
        <echo message="ant rdeploydb"/>
        <sshexec host="${host}" port="${port}" username="root" password="password" command="echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;killall java;cd /opt/incubator-cloudstack;ant deploycddb"/>
    </target>

    <target name="deploycddb" description="deploy specific db configuration for clouddev" depends="deploydb">
        <exec dir="${db.scripts.dir}" executable="bash">
            <arg value="deploy-db-clouddev.sh" />
        </exec>
    </target>

    <target name="rdebug-suspend">
        <echo message="ant debug-suspend"/>
        <sshexec host="${host}" port="${port}" username="root" password="password" command="killall java;sleep 1;echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;cd /opt/incubator-cloudstack;ant deploy-server;ant debug-suspend"/>
    </target>

    <target name="rdebug">
        <echo message="ant debug"/>
        <sshexec host="${host}" port="${port}" username="root" password="password" command="killall java;sleep 1;echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;cd /opt/incubator-cloudstack;ant deploy-server; ant debug"/>
    </target>

    <target name="rdeploy" description="deploy to remote">
        <condition property="zip.uptodate">
            <available file="${deploy.work.dir}/client.zip" type="file"/>
        </condition>

        <echo message="copying build folder to remote"/>
        <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/build">
            <fileset dir="build">
            </fileset>
        </scp>

        <echo message="copying deps folder to remote"/>
        <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/deps">
            <fileset dir="deps">
            </fileset>
        </scp>

        <echo message="copying target folder to remote"/>
        <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/target">
            <fileset dir="target">
            </fileset>
        </scp>

        <echo message="copying dist folder to remote"/>
        <scp trust="yes" port="${port}" todir="root:password@${host}:/opt/incubator-cloudstack/dist">
            <fileset dir="dist">
            </fileset>
        </scp>

        <sshexec host="${host}" port="${port}" username="root" password="password" command="echo $CATALINA_HOME; export CATALINA_HOME=/opt/apache-tomcat-6.0.32;cd /opt/incubator-cloudstack;ant deploy-server"/>
    </target>

</project>
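These targets are meant to be run from a developer workstation against a clouddev appliance VM reachable over SSH. A minimal sketch of how they might be invoked (the host address is illustrative; rhost and rport default to localhost and 2222):

    # copy the local build to the remote VM and redeploy the management server
    ant -Drhost=192.168.56.10 -Drport=22 rdeploy

    # redeploy, then start the management server suspended under the debugger
    ant -Drhost=192.168.56.10 -Drport=22 rdebug-suspend

    # re-run the clouddev database deployment on the remote VM
    ant rdeploydb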
@@ -59,6 +59,7 @@ import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.xen.resource.CitrixResourceBase;
import com.cloud.hypervisor.xen.resource.XcpOssResource;
import com.cloud.hypervisor.xen.resource.XcpServerResource;
import com.cloud.hypervisor.xen.resource.XenServer56FP1Resource;
import com.cloud.hypervisor.xen.resource.XenServer56Resource;

@@ -253,8 +254,15 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
        String hostAddr = record.address;

        String prodVersion = record.softwareVersion.get("product_version");
        if (prodVersion == null) {
            prodVersion = record.softwareVersion.get("platform_version");
        }
        String xenVersion = record.softwareVersion.get("xen");
        String hostOS = record.softwareVersion.get("product_brand");
        if (hostOS == null) {
            hostOS = record.softwareVersion.get("platform_name");
        }

        String hostOSVer = prodVersion;
        String hostKernelVer = record.softwareVersion.get("linux");

@@ -400,34 +408,49 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
    }

    protected CitrixResourceBase createServerResource(long dcId, Long podId, Host.Record record) {
        String prodBrand = record.softwareVersion.get("product_brand").trim();
        String prodVersion = record.softwareVersion.get("product_version").trim();

        if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") || prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") ))
            return new XcpServerResource();
        String prodBrand = record.softwareVersion.get("product_brand");
        if (prodBrand == null) {
            prodBrand = record.softwareVersion.get("platform_name").trim();
        } else {
            prodBrand = prodBrand.trim();
        }
        String prodVersion = record.softwareVersion.get("product_version");
        if (prodVersion == null) {
            prodVersion = record.softwareVersion.get("platform_version").trim();
        } else {
            prodVersion = prodVersion.trim();
        }

        if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0"))
            return new XenServer56Resource();

        if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.0"))
            return new XenServer600Resource();

        if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.2"))
            return new XenServer602Resource();
        if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") || prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") ))
            return new XcpServerResource();

        if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0"))
            return new XenServer56Resource();

        if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.0"))
            return new XenServer600Resource();

        if (prodBrand.equals("XenServer") && prodVersion.equals("6.0.2"))
            return new XenServer602Resource();

        if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
            String prodVersionTextShort = record.softwareVersion.get("product_version_text_short").trim();
            if("5.6 SP2".equals(prodVersionTextShort)) {
                return new XenServer56SP2Resource();
            } else if("5.6 FP1".equals(prodVersionTextShort)) {
                return new XenServer56FP1Resource();
            }
        }

        if (prodBrand.equals("XCP_Kronos")) {
            return new XcpOssResource();
        }

        String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6, XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2 but this one is " + prodBrand + " " + prodVersion;
        _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, msg, msg);
        s_logger.debug(msg);
        throw new RuntimeException(msg);

        if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.100")) {
            String prodVersionTextShort = record.softwareVersion.get("product_version_text_short").trim();
            if("5.6 SP2".equals(prodVersionTextShort)) {
                return new XenServer56SP2Resource();
            } else if("5.6 FP1".equals(prodVersionTextShort)) {
                return new XenServer56FP1Resource();
            }
        }

        String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6, XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2 but this one is " + prodBrand + " " + prodVersion;
        _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, msg, msg);
        s_logger.debug(msg);
        throw new RuntimeException(msg);
    }

    protected void serverConfig() {

@@ -457,8 +480,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
        Boolean.parseBoolean(value);

        value = _params.get("xen.check.hvm");
        _checkHvm = value == null ? true : Boolean.parseBoolean(value);

        _checkHvm = false;
        _connPool = XenServerConnectionPool.getInstance();

        _agentMgr.registerForHostEvents(this, true, false, true);

@@ -567,7 +589,10 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
            } else if("5.6 FP1".equals(prodVersionTextShort)) {
                resource = XenServer56FP1Resource.class.getName();
            }
        } else if (prodBrand.equals("XCP_Kronos")) {
            resource = XcpOssResource.class.getName();
        }

        if( resource == null ){
            String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6, 5.6 FP1, 5.6 SP2 and Xenserver 6.0 , 6.0.2 but this one is " + prodBrand + " " + prodVersion;
            s_logger.debug(msg);
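The null checks above exist because XCP hosts publish their version under different software-version keys than XenServer does: product_brand/product_version on XenServer, platform_name/platform_version on XCP. You can see which set a given host reports with the xe CLI (HOST_UUID is a placeholder):

    # dumps the software-version map the discoverer reads via record.softwareVersion
    xe host-param-get uuid=$HOST_UUID param-name=software-version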
@@ -286,7 +286,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
    protected List<VIF> _tmpDom0Vif = new ArrayList<VIF>();

    public enum SRType {
        NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT;
        NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT, FILE;

        String _str;

@@ -1066,7 +1066,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
    }

    protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException {

        if( _host.systemvmisouuid == null ) {
            Set<SR> srs = SR.getByNameLabel(conn, "XenServer Tools");
            if( srs.size() != 1 ) {

@@ -1100,8 +1100,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        cdromVBDR.type = Types.VbdType.CD;
        VBD cdromVBD = VBD.create(conn, cdromVBDR);
        cdromVBD.insert(conn, VDI.getByUuid(conn, _host.systemvmisouuid));

        return cdromVBD;

        return cdromVBD;
    }

    protected void destroyPatchVbd(Connection conn, String vmName) throws XmlRpcException, XenAPIException {

@@ -3870,7 +3870,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        Map<SR, SR.Record> map = SR.getAllRecords(conn);
        for (Map.Entry<SR, SR.Record> entry : map.entrySet()) {
            SR.Record srRec = entry.getValue();
            if (SRType.EXT.equals(srRec.type)) {
            if (SRType.FILE.equals(srRec.type) || SRType.EXT.equals(srRec.type)) {
                Set<PBD> pbds = srRec.PBDs;
                if (pbds == null) {
                    continue;

@@ -3902,6 +3902,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        SR lvmsr = getLocalLVMSR(conn);
        if (lvmsr != null) {
            try {
                _host.localSRuuid = lvmsr.getUuid(conn);

                String lvmuuid = lvmsr.getUuid(conn);
                long cap = lvmsr.getPhysicalSize(conn);
                if (cap > 0) {

@@ -3932,6 +3934,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        if (extsr != null) {
            try {
                String extuuid = extsr.getUuid(conn);
                _host.localSRuuid = extuuid;
                long cap = extsr.getPhysicalSize(conn);
                if (cap > 0) {
                    long avail = cap - extsr.getPhysicalUtilisation(conn);

@@ -3956,6 +3959,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
                s_logger.warn(msg);
            }
        }

        return null;
    }

@@ -4033,7 +4037,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
            break;
        }
        Host.Record hr = myself.getRecord(conn);
        _host.product_version = hr.softwareVersion.get("product_version").trim();

        _host.product_version = hr.softwareVersion.get("product_version");
        if (_host.product_version == null) {
            _host.product_version = hr.softwareVersion.get("platform_version");
        } else {
            _host.product_version = _host.product_version.trim();
        }

        XsLocalNetwork privateNic = getManagementNetwork(conn);
        _privateNetworkName = privateNic.getNetworkRecord(conn).nameLabel;

@@ -4493,8 +4503,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        } finally {
            sshConnection.close();
        }

        hr.tags.add("vmops-version-" + version);
        host.setTags(conn, hr.tags);

        return true;
    } catch (XenAPIException e) {
        String msg = "Xen setup failed due to " + e.toString();

@@ -5106,13 +5118,19 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        if (details == null) {
            details = new HashMap<String, String>();
        }
        details.put("product_brand", hr.softwareVersion.get("product_brand"));
        details.put("product_version", hr.softwareVersion.get("product_version"));

        String productBrand = hr.softwareVersion.get("product_brand");
        if (productBrand == null) {
            productBrand = hr.softwareVersion.get("platform_name");
        }
        details.put("product_brand", productBrand);
        details.put("product_version", _host.product_version);

        if( hr.softwareVersion.get("product_version_text_short") != null ) {
            details.put("product_version_text_short", hr.softwareVersion.get("product_version_text_short"));
            cmd.setHypervisorVersion(hr.softwareVersion.get("product_version_text_short"));
        }else{
            cmd.setHypervisorVersion(hr.softwareVersion.get("product_version"));
            cmd.setHypervisorVersion(_host.product_version);
        }
        if (_privateNetworkName != null) {
            details.put("private.network.device", _privateNetworkName);

@@ -5165,9 +5183,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
            cmd.setPrivateMacAddress(pifr.MAC);
            cmd.setPrivateNetmask(pifr.netmask);
        } else {
            String msg = "Private network " + _privateNetworkName + " doesn't have IP address, please check the host network configuration";
            s_logger.error(msg);
            throw new CloudRuntimeException(msg);
            cmd.setPrivateIpAddress(_host.ip);
            cmd.setPrivateMacAddress(pifr.MAC);
            cmd.setPrivateNetmask("255.255.255.0");
        }

        pif = PIF.getByUuid(conn, _host.storagePif1);

@@ -5330,7 +5348,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
            vdir.virtualSize = dskch.getSize();
            vdi = VDI.create(conn, vdir);
        }

        VDI.Record vdir;
        vdir = vdi.getRecord(conn);
        s_logger.debug("Successfully created VDI for " + cmd + ". Uuid = " + vdir.uuid);

@@ -6764,6 +6781,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
        public int speed;
        public int cpus;
        public String product_version;
        public String localSRuuid;

        @Override
        public String toString() {
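The FILE member added to SRType lets the resource recognize XCP's file-backed local storage alongside the existing EXT local SRs (see the local-SR hunks above). For reference, a file SR of the kind this code scans for would be created on the host roughly like this (the path is illustrative; the device-config key follows the FileSR driver):

    xe sr-create name-label=local-file type=file \
        device-config:location=/opt/cloud/primary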
@@ -0,0 +1,127 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance with
// the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package com.cloud.hypervisor.xen.resource;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import javax.ejb.Local;

import org.apache.log4j.Logger;
import org.apache.xmlrpc.XmlRpcException;

import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.NetworkUsageAnswer;
import com.cloud.agent.api.NetworkUsageCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.resource.ServerResource;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.Types;
import com.xensource.xenapi.VBD;
import com.xensource.xenapi.VDI;
import com.xensource.xenapi.VM;
import com.xensource.xenapi.Types.XenAPIException;

@Local(value=ServerResource.class)
public class XcpOssResource extends CitrixResourceBase {
    private final static Logger s_logger = Logger.getLogger(XcpServerResource.class);

    @Override
    protected List<File> getPatchFiles() {
        List<File> files = new ArrayList<File>();
        String patch = "scripts/vm/hypervisor/xenserver/xcposs/patch";
        String patchfilePath = Script.findScript("", patch);
        if (patchfilePath == null) {
            throw new CloudRuntimeException("Unable to find patch file " + patch);
        }
        File file = new File(patchfilePath);
        files.add(file);
        return files;
    }

    @Override
    protected void fillHostInfo(Connection conn, StartupRoutingCommand cmd) {
        super.fillHostInfo(conn, cmd);
        cmd.setCaps(cmd.getCapabilities() + " , hvm");
    }

    @Override
    protected String getGuestOsType(String stdType, boolean bootFromCD) {
        return CitrixHelper.getXcpGuestOsType(stdType);
    }

    protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException {
        if (_host.localSRuuid != null) {
            // create an iso vdi on it
            String result = callHostPlugin(conn, "vmops", "createISOVHD", "uuid", _host.localSRuuid);
            if (result == null || result.equalsIgnoreCase("Failed")) {
                throw new CloudRuntimeException("can not create systemvm vdi");
            }

            Set<VDI> vdis = VDI.getByNameLabel(conn, "systemvm-vdi");
            if (vdis.size() != 1) {
                throw new CloudRuntimeException("can not find systemvmiso");
            }
            VDI systemvmVDI = vdis.iterator().next();

            VBD.Record cdromVBDR = new VBD.Record();
            cdromVBDR.VM = vm;
            cdromVBDR.empty = false;
            cdromVBDR.bootable = false;
            cdromVBDR.userdevice = "3";
            cdromVBDR.mode = Types.VbdMode.RO;
            cdromVBDR.type = Types.VbdType.DISK;
            cdromVBDR.VDI = systemvmVDI;
            VBD cdromVBD = VBD.create(conn, cdromVBDR);
            return cdromVBD;
        } else {
            throw new CloudRuntimeException("can not find local sr");
        }
    }

    protected NetworkUsageAnswer execute(NetworkUsageCommand cmd) {
        try {
            Connection conn = getConnection();
            if (cmd.getOption() != null && cmd.getOption().equals("create")) {
                String result = networkUsage(conn, cmd.getPrivateIP(), "create", null);
                NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, result, 0L, 0L);
                return answer;
            }
            long[] stats = getNetworkStats(conn, cmd.getPrivateIP());
            NetworkUsageAnswer answer = new NetworkUsageAnswer(cmd, "", stats[0], stats[1]);
            return answer;
        } catch (Exception ex) {
            s_logger.warn("Failed to get network usage stats due to ", ex);
            return new NetworkUsageAnswer(cmd, ex);
        }
    }

    @Override
    public Answer executeRequest(Command cmd) {
        if (cmd instanceof NetworkUsageCommand) {
            return execute((NetworkUsageCommand) cmd);
        } else {
            return super.executeRequest(cmd);
        }
    }
}
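createPatchVbd above asks the vmops XenAPI plugin (installed to /usr/lib/xcp/plugins by the patch manifest later in this commit) to build the systemvm ISO VDI on the local SR. The equivalent call from the host shell would look something like this (both UUIDs are placeholders):

    xe host-call-plugin host-uuid=$HOST_UUID plugin=vmops \
        fn=createISOVHD args:uuid=$LOCAL_SR_UUID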
scripts/vm/hypervisor/xenserver/xcposs/NFSSR.py (new file, 258 lines)
@@ -0,0 +1,258 @@
#!/usr/bin/python
# Copyright 2012 Citrix Systems, Inc.  Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License.  Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012
# FileSR: local-file storage repository

import SR, VDI, SRCommand, FileSR, util
import errno
import os, re, sys, stat
import time
import xml.dom.minidom
import xs_errors
import nfs
import vhdutil
from lock import Lock
import cleanup

CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_CACHING", \
                "VDI_CREATE","VDI_DELETE","VDI_ATTACH","VDI_DETACH", \
                "VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE", \
                "VDI_RESIZE_ONLINE", "VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"]

CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \
                  [ 'serverpath', 'path on remote server (required)' ] ]

DRIVER_INFO = {
    'name': 'NFS VHD',
    'description': 'SR plugin which stores disks as VHD files on a remote NFS filesystem',
    'vendor': 'Citrix Systems Inc',
    'copyright': '(C) 2008 Citrix Systems Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
    }

# The mountpoint for the directory when performing an sr_probe.  All probes
PROBE_MOUNTPOINT = "probe"
NFSPORT = 2049
DEFAULT_TRANSPORT = "tcp"

class NFSSR(FileSR.FileSR):
    """NFS file-based storage repository"""
    def handles(type):
        return type == 'nfs'
    handles = staticmethod(handles)

    def load(self, sr_uuid):
        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        if not self.dconf.has_key('server'):
            raise xs_errors.XenError('ConfigServerMissing')
        self.remoteserver = self.dconf['server']
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)

        # Test for the optional 'nfsoptions' dconf attribute
        self.transport = DEFAULT_TRANSPORT
        if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
            self.transport = "udp"

    def validate_remotepath(self, scan):
        if not self.dconf.has_key('serverpath'):
            if scan:
                try:
                    self.scan_exports(self.dconf['server'])
                except:
                    pass
            raise xs_errors.XenError('ConfigServerPathMissing')
        if not self._isvalidpathstring(self.dconf['serverpath']):
            raise xs_errors.XenError('ConfigServerPathBad', \
                    opterr='serverpath is %s' % self.dconf['serverpath'])

    def check_server(self):
        try:
            nfs.check_server_tcp(self.remoteserver)
        except nfs.NfsException, exc:
            raise xs_errors.XenError('NFSVersion',
                    opterr=exc.errstr)

    def mount(self, mountpoint, remotepath):
        try:
            nfs.soft_mount(mountpoint, self.remoteserver, remotepath, self.transport)
        except nfs.NfsException, exc:
            raise xs_errors.XenError('NFSMount', opterr=exc.errstr)

    def attach(self, sr_uuid):
        self.validate_remotepath(False)
        #self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
        self.remotepath = self.dconf['serverpath']
        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
        self.mount_remotepath(sr_uuid)

    def mount_remotepath(self, sr_uuid):
        if not self._checkmount():
            self.check_server()
            self.mount(self.path, self.remotepath)

        return super(NFSSR, self).attach(sr_uuid)

    def probe(self):
        # Verify NFS target and port
        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')

        self.validate_remotepath(True)
        self.check_server()

        temppath = os.path.join(SR.MOUNT_BASE, PROBE_MOUNTPOINT)

        self.mount(temppath, self.dconf['serverpath'])
        try:
            return nfs.scan_srlist(temppath)
        finally:
            try:
                nfs.unmount(temppath, True)
            except:
                pass

    def detach(self, sr_uuid):
        """Detach the SR: Unmounts and removes the mountpoint"""
        if not self._checkmount():
            return
        util.SMlog("Aborting GC/coalesce")
        cleanup.abort(self.uuid)

        # Change directory to avoid unmount conflicts
        os.chdir(SR.MOUNT_BASE)

        try:
            nfs.unmount(self.path, True)
        except nfs.NfsException, exc:
            raise xs_errors.XenError('NFSUnMount', opterr=exc.errstr)

        return super(NFSSR, self).detach(sr_uuid)

    def create(self, sr_uuid, size):
        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
        self.validate_remotepath(True)
        if self._checkmount():
            raise xs_errors.XenError('NFSAttached')

        # Set the target path temporarily to the base dir
        # so that we can create the target SR directory
        self.remotepath = self.dconf['serverpath']
        try:
            self.mount_remotepath(sr_uuid)
        except Exception, exn:
            try:
                os.rmdir(self.path)
            except:
                pass
            raise exn

        #newpath = os.path.join(self.path, sr_uuid)
        #if util.ioretry(lambda: util.pathexists(newpath)):
        #    if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
        #        self.detach(sr_uuid)
        #        raise xs_errors.XenError('SRExists')
        #else:
        #    try:
        #        util.ioretry(lambda: util.makedirs(newpath))
        #    except util.CommandException, inst:
        #        if inst.code != errno.EEXIST:
        #            self.detach(sr_uuid)
        #            raise xs_errors.XenError('NFSCreate',
        #                opterr='remote directory creation error is %d'
        #                % inst.code)
        self.detach(sr_uuid)

    def delete(self, sr_uuid):
        # try to remove/delete non VDI contents first
        super(NFSSR, self).delete(sr_uuid)
        try:
            if self._checkmount():
                self.detach(sr_uuid)

            # Set the target path temporarily to the base dir
            # so that we can remove the target SR directory
            self.remotepath = self.dconf['serverpath']
            self.mount_remotepath(sr_uuid)
            newpath = os.path.join(self.path, sr_uuid)

            if util.ioretry(lambda: util.pathexists(newpath)):
                util.ioretry(lambda: os.rmdir(newpath))
            self.detach(sr_uuid)
        except util.CommandException, inst:
            self.detach(sr_uuid)
            if inst.code != errno.ENOENT:
                raise xs_errors.XenError('NFSDelete')

    def vdi(self, uuid, loadLocked = False):
        if not loadLocked:
            return NFSFileVDI(self, uuid)
        return NFSFileVDI(self, uuid)

    def _checkmount(self):
        return util.ioretry(lambda: util.pathexists(self.path)) \
                and util.ioretry(lambda: util.ismount(self.path))

    def scan_exports(self, target):
        util.SMlog("scanning2 (target=%s)" % target)
        dom = nfs.scan_exports(target)
        print >>sys.stderr,dom.toprettyxml()

class NFSFileVDI(FileSR.FileVDI):
    def attach(self, sr_uuid, vdi_uuid):
        try:
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
                    "vdi-type")
            self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
                    "storage-type")
            self.session.xenapi.VDI.add_to_xenstore_data(vdi_ref, \
                    "storage-type", "nfs")
        except:
            util.logException("NFSSR:attach")
            pass
        return super(NFSFileVDI, self).attach(sr_uuid, vdi_uuid)

    def get_mtime(self, path):
        st = util.ioretry_stat(lambda: os.stat(path))
        return st[stat.ST_MTIME]

    def clone(self, sr_uuid, vdi_uuid):
        timestamp_before = int(self.get_mtime(self.sr.path))
        ret = super(NFSFileVDI, self).clone(sr_uuid, vdi_uuid)
        timestamp_after = int(self.get_mtime(self.sr.path))
        if timestamp_after == timestamp_before:
            util.SMlog("SR dir timestamp didn't change, updating")
            timestamp_after += 1
            os.utime(self.sr.path, (timestamp_after, timestamp_after))
        return ret

if __name__ == '__main__':
    SRCommand.run(NFSSR, DRIVER_INFO)
else:
    SR.registerSR(NFSSR)
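This driver handles SR type "nfs" and is installed to /usr/lib/xcp/sm by the patch manifest later in this commit. Attaching NFS primary storage through it would look roughly like this on the host (server and export path are placeholders):

    xe sr-create name-label=cloud-primary type=nfs shared=true \
        device-config:server=nfs.example.org \
        device-config:serverpath=/export/primary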
@@ -0,0 +1,184 @@
#!/bin/bash
# Copyright 2012 Citrix Systems, Inc.  Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License.  Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012

#set -x

usage() {
  printf "Usage: %s [vhd file in secondary storage] [uuid of the source sr] [name label] \n" $(basename $0)
}

cleanup()
{
  if [ ! -z $localmp ]; then
    umount -fl $localmp
    if [ $? -eq 0 ]; then
      rmdir $localmp
    fi
  fi
}

if [ -z $1 ]; then
  usage
  echo "2#no mountpoint"
  exit 0
else
  mountpoint=${1%/*}
  vhdfilename=${1##*/}
fi

if [ -z $2 ]; then
  usage
  echo "3#no uuid of the source sr"
  exit 0
else
  sruuid=$2
fi

type=$(xe sr-param-get uuid=$sruuid param-name=type)
if [ $? -ne 0 ]; then
  echo "4#sr $sruuid doesn't exist"
  exit 0
fi

if [ -z $3 ]; then
  usage
  echo "3#no namelabel"
  exit 0
else
  namelabel=$3
fi

localmp=/var/run/cloud_mount/$(uuidgen -r)

mkdir -p $localmp
if [ $? -ne 0 ]; then
  echo "5#can't make dir $localmp"
  exit 0
fi

mount -o tcp,soft,ro,timeo=133,retrans=1 $mountpoint $localmp
if [ $? -ne 0 ]; then
  echo "6#can't mount $mountpoint to $localmp"
  exit 0
fi

vhdfile=$localmp/$vhdfilename
if [ ${vhdfile%.vhd} == ${vhdfile} ] ; then
  vhdfile=$(ls $vhdfile/*.vhd)
  if [ $? -ne 0 ]; then
    echo "7#There is no vhd file under $mountpoint"
    cleanup
    exit 0
  fi
fi

VHDUTIL="/usr/bin/vhd-util"

copyvhd()
{
  local desvhd=$1
  local srcvhd=$2
  local vsize=$3
  local type=$4
  local parent=`$VHDUTIL query -p -n $srcvhd`
  if [ $? -ne 0 ]; then
    echo "30#failed to query $srcvhd"
    cleanup
    exit 0
  fi
  if [ "${parent##*vhd has}" = " no parent" ]; then
    dd if=$srcvhd of=$desvhd bs=2M
    if [ $? -ne 0 ]; then
      echo "31#failed to dd $srcvhd to $desvhd"
      cleanup
      exit 0
    fi
    if [ $type != "nfs" -a $type != "ext" -a $type != "file" ]; then
      dd if=$srcvhd of=$desvhd bs=512 seek=$(($(($vsize/512))-1)) count=1
      $VHDUTIL modify -s $vsize -n $desvhd
      if [ $? -ne 0 ]; then
        echo "32#failed to set new vhd physical size for vdi $uuid"
        cleanup
        exit 0
      fi
    fi
  else
    copyvhd $desvhd $parent $vsize $type
    $VHDUTIL coalesce -p $desvhd -n $srcvhd
    if [ $? -ne 0 ]; then
      echo "32#failed to coalesce $desvhd to $srcvhd"
      cleanup
      exit 0
    fi
  fi
}

size=$($VHDUTIL query -v -n $vhdfile)
uuid=$(xe vdi-create sr-uuid=$sruuid virtual-size=${size}MiB type=user name-label=$namelabel)
if [ $? -ne 0 ]; then
  echo "9#can not create vdi in sr $sruuid"
  cleanup
  exit 0
fi

if [ $type == "nfs" -o $type == "ext" ]; then
  desvhd=/run/sr-mount/$sruuid/$uuid.vhd
  copyvhd $desvhd $vhdfile 0 $type
elif [ $type == "lvmoiscsi" -o $type == "lvm" -o $type == "lvmohba" ]; then
  lvsize=$(xe vdi-param-get uuid=$uuid param-name=physical-utilisation)
  if [ $? -ne 0 ]; then
    echo "12#failed to get physical size of vdi $uuid"
    cleanup
    exit 0
  fi
  desvhd=/dev/VG_XenStorage-$sruuid/VHD-$uuid
  lvchange -ay $desvhd
  if [ $? -ne 0 ]; then
    echo "10#lvm can not make VDI $uuid visible"
    cleanup
    exit 0
  fi
  copyvhd $desvhd $vhdfile $lvsize $type
elif [ $type == "file" ]; then
  pbd=`xe sr-param-list uuid=$sruuid |grep PBDs | awk '{print $3}'`
  path=`xe pbd-param-list uuid=$pbd |grep device-config |awk '{print $4}'`
  desvhd=$path/$uuid.vhd
  copyvhd $desvhd $vhdfile 0 $type
else
  echo "15#doesn't support sr type $type"
  cleanup
  exit 0
fi

$VHDUTIL set -n $desvhd -f "hidden" -v "0" > /dev/null
if [ $? -ne 0 ]; then
  echo "21#failed to set hidden to 0 $desvhd"
  cleanup
  exit 0
fi
xe sr-scan uuid=$sruuid
if [ $? -ne 0 ]; then
  echo "14#failed to scan sr $sruuid"
  cleanup
  exit 0
fi

echo "0#$uuid"
cleanup
exit 0
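Judging by its usage line and the patch manifest later in this commit, this script is copy_vhd_from_secondarystorage.sh. Like the other storage scripts here, it always exits 0 and reports status on stdout as "code#payload", with code 0 meaning success. A sketch of a manual invocation and of parsing the result (the NFS path and SR UUID are placeholders):

    out=$(./copy_vhd_from_secondarystorage.sh \
        10.1.1.20:/export/secondary/template.vhd $SR_UUID mytemplate)
    code=${out%%#*}
    payload=${out#*#}
    [ "$code" = "0" ] && echo "created VDI uuid: $payload"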
@@ -0,0 +1,126 @@
#!/bin/bash
# Copyright 2012 Citrix Systems, Inc.  Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License.  Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012

#set -x

usage() {
  printf "Usage: %s [mountpoint in secondary storage] [uuid of the source vdi] [uuid of the source sr]\n" $(basename $0)
}

cleanup()
{
  if [ ! -z $localmp ]; then
    umount $localmp
    if [ $? -eq 0 ]; then
      rmdir $localmp
    fi
  fi
}

if [ -z $1 ]; then
  usage
  echo "1#no mountpoint"
  exit 0
else
  mountpoint=$1
fi

if [ -z $2 ]; then
  usage
  echo "2#no uuid of the source vdi"
  exit 0
else
  vdiuuid=$2
fi

if [ -z $3 ]; then
  usage
  echo "3#no uuid of the source sr"
  exit 0
else
  sruuid=$3
fi

type=$(xe sr-param-get uuid=$sruuid param-name=type)
if [ $? -ne 0 ]; then
  echo "4#sr $sruuid doesn't exist"
  exit 0
fi

localmp=/var/run/cloud_mount/$(uuidgen -r)

mkdir -p $localmp
if [ $? -ne 0 ]; then
  echo "5#can't make dir $localmp"
  exit 0
fi

mount -o tcp,soft,timeo=133,retrans=1 $mountpoint $localmp
if [ $? -ne 0 ]; then
  echo "6#can't mount $mountpoint to $localmp"
  exit 0
fi

vhdfile=$localmp/${vdiuuid}.vhd

if [ $type == "nfs" -o $type == "ext" ]; then
  dd if=/var/run/sr-mount/$sruuid/${vdiuuid}.vhd of=$vhdfile bs=2M
  if [ $? -ne 0 ]; then
    rm -f $vhdfile
    echo "8#failed to copy /var/run/sr-mount/$sruuid/${vdiuuid}.vhd to secondarystorage"
    cleanup
    exit 0
  fi
elif [ $type == "lvmoiscsi" -o $type == "lvm" -o $type == "lvmohba" ]; then
  lvchange -ay /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid
  if [ $? -ne 0 ]; then
    echo "9#lvm can not make VDI $vdiuuid visible"
    cleanup
    exit 0
  fi
  size=$(vhd-util query -s -n /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid)
  if [ $? -ne 0 ]; then
    echo "10#can not get physical size of /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid"
    cleanup
    exit 0
  fi
  # in 2M units
  size=$((size>>21))
  size=$((size+1))
  dd if=/dev/VG_XenStorage-$sruuid/VHD-$vdiuuid of=$vhdfile bs=2M count=$size
  if [ $? -ne 0 ]; then
    rm -f $vhdfile
    echo "8#failed to copy /dev/VG_XenStorage-$sruuid/VHD-$vdiuuid to secondarystorage"
    cleanup
    exit 0
  fi
  # in byte units
  size=$((size<<21))
  vhd-util modify -s $size -n $vhdfile
  if [ $? -ne 0 ]; then
    rm -f $vhdfile
    echo "11#failed to change $vhdfile physical size"
    cleanup
    exit 0
  fi
else
  echo "15#doesn't support sr type $type"
  cleanup
  exit 0
fi

echo "0#$vdiuuid"
cleanup
exit 0
@@ -0,0 +1,134 @@
#!/bin/bash
# Copyright 2012 Citrix Systems, Inc.  Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License.  Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012

#set -x

usage() {
  printf "Usage: %s [vhd file in secondary storage] [template directory in secondary storage] [template local dir] \n" $(basename $0)
}
options='tcp,soft,timeo=133,retrans=1'
cleanup()
{
  if [ ! -z $snapshotdir ]; then
    umount $snapshotdir
    if [ $? -eq 0 ]; then
      rmdir $snapshotdir
    fi
  fi
  if [ ! -z $templatedir ]; then
    umount $templatedir
    if [ $? -eq 0 ]; then
      rmdir $templatedir
    fi
  fi
}

if [ -z $1 ]; then
  usage
  echo "2#no vhd file path"
  exit 0
else
  snapshoturl=${1%/*}
  vhdfilename=${1##*/}
fi

if [ -z $2 ]; then
  usage
  echo "3#no template path"
  exit 0
else
  templateurl=$2
fi

if [ -z $3 ]; then
  usage
  echo "3#no template local dir"
  exit 0
else
  tmpltLocalDir=$3
fi

snapshotdir=/run/cloud_mount/$(uuidgen -r)
mkdir -p $snapshotdir
if [ $? -ne 0 ]; then
  echo "4#can't make dir $snapshotdir"
  exit 0
fi

mount -o $options $snapshoturl $snapshotdir
if [ $? -ne 0 ]; then
  rmdir $snapshotdir
  echo "5#can not mount $snapshoturl to $snapshotdir"
  exit 0
fi

templatedir=/run/cloud_mount/$tmpltLocalDir
mkdir -p $templatedir
if [ $? -ne 0 ]; then
  templatedir=""
  cleanup
  echo "6#can't make dir $templatedir"
  exit 0
fi

mount -o $options $templateurl $templatedir
if [ $? -ne 0 ]; then
  rmdir $templatedir
  templatedir=""
  cleanup
  echo "7#can not mount $templateurl to $templatedir"
  exit 0
fi

VHDUTIL="vhd-util"

copyvhd()
{
  local desvhd=$1
  local srcvhd=$2
  local parent=
  parent=`$VHDUTIL query -p -n $srcvhd`
  if [ $? -ne 0 ]; then
    echo "30#failed to query $srcvhd"
    cleanup
    exit 0
  fi
  if [[ "${parent}" =~ " no parent" ]]; then
    dd if=$srcvhd of=$desvhd bs=2M
    if [ $? -ne 0 ]; then
      echo "31#failed to dd $srcvhd to $desvhd"
      cleanup
      exit 0
    fi
  else
    copyvhd $desvhd $parent
    $VHDUTIL coalesce -p $desvhd -n $srcvhd
    if [ $? -ne 0 ]; then
      echo "32#failed to coalesce $desvhd to $srcvhd"
      cleanup
      exit 0
    fi
  fi
}

templateuuid=$(uuidgen -r)
desvhd=$templatedir/$templateuuid.vhd
srcvhd=$snapshotdir/$vhdfilename
copyvhd $desvhd $srcvhd
virtualSize=`$VHDUTIL query -v -n $desvhd`
physicalSize=`ls -l $desvhd | awk '{print $5}'`
cleanup
echo "0#$templateuuid#$physicalSize#$virtualSize"
exit 0
scripts/vm/hypervisor/xenserver/xcposs/patch (new file, 49 lines)
@@ -0,0 +1,49 @@
# This file specifies the files that need
# to be transferred over to the XenServer.
# The format of this file is as follows:
# [Name of file]=[source path],[file permission],[destination path]
# [destination path] is required.
# If [file permission] is missing, 755 is assumed.
# If [source path] is missing, it looks in the same
# directory as the patch file.
# If [source path] starts with '/', then it is an absolute path.
# If [source path] starts with '~', then it is a path relative to the management server home directory.
# If [source path] does not start with '/' or '~', then it is a path relative to the location of the patch file.
NFSSR.py=/usr/lib/xcp/sm
vmops=.,0755,/usr/lib/xcp/plugins
ovsgre=..,0755,/usr/lib/xcp/plugins
ovstunnel=..,0755,/usr/lib/xcp/plugins
vmopsSnapshot=.,0755,/usr/lib/xcp/plugins
hostvmstats.py=..,0755,/usr/lib/xcp/sm
systemvm.iso=../../../../../vms,0644,/usr/share/xcp/packages/iso/
id_rsa.cloud=../../../systemvm,0600,/root/.ssh
network_info.sh=..,0755,/usr/lib/xcp/bin
setupxenserver.sh=..,0755,/usr/lib/xcp/bin
make_migratable.sh=..,0755,/usr/lib/xcp/bin
setup_iscsi.sh=..,0755,/usr/lib/xcp/bin
pingtest.sh=../../..,0755,/usr/lib/xcp/bin
dhcp_entry.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
ipassoc.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
vm_data.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
save_password_to_domr.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
networkUsage.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
call_firewall.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
call_loadbalancer.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
l2tp_vpn.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
cloud-setup-bonding.sh=..,0755,/usr/lib/xcp/bin
copy_vhd_to_secondarystorage.sh=.,0755,/usr/lib/xcp/bin
copy_vhd_from_secondarystorage.sh=.,0755,/usr/lib/xcp/bin
setup_heartbeat_sr.sh=..,0755,/usr/lib/xcp/bin
setup_heartbeat_file.sh=..,0755,/usr/lib/xcp/bin
check_heartbeat.sh=..,0755,/usr/lib/xcp/bin
xenheartbeat.sh=..,0755,/usr/lib/xcp/bin
launch_hb.sh=..,0755,/usr/lib/xcp/bin
vhd-util=..,0755,/usr/lib/xcp/bin
vmopspremium=.,0755,/usr/lib/xcp/plugins
create_privatetemplate_from_snapshot.sh=.,0755,/usr/lib/xcp/bin
upgrade_snapshot.sh=..,0755,/usr/lib/xcp/bin
cloud-clean-vlan.sh=..,0755,/usr/lib/xcp/bin
cloud-prepare-upgrade.sh=..,0755,/usr/lib/xcp/bin
getRouterStatus.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
bumpUpPriority.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
getDomRVersion.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin
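Reading one entry against the format comment above: a line such as

    dhcp_entry.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin

says to take dhcp_entry.sh from the network/domr directory four levels above the patch file and install it with mode 0755 into /usr/lib/xcp/bin on the host. A minimal sketch of splitting such an entry in shell (variable names are mine, not from CloudStack):

    entry='dhcp_entry.sh=../../../../network/domr/,0755,/usr/lib/xcp/bin'
    name=${entry%%=*}
    IFS=, read -r src perm dest <<< "${entry#*=}"
    echo "$name: $src -> $dest (mode ${perm:-0755})"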
scripts/vm/hypervisor/xenserver/xcposs/vmops (new file, 1519 lines)
File diff suppressed because it is too large

scripts/vm/hypervisor/xenserver/xcposs/vmopsSnapshot (new file, 552 lines)
@ -0,0 +1,552 @@
|
||||
#!/usr/bin/python
|
||||
# Version @VERSION@
|
||||
#
|
||||
# A plugin for executing script needed by vmops cloud
|
||||
|
||||
import os, sys, time
|
||||
import XenAPIPlugin
|
||||
sys.path.append("/usr/lib/xcp/sm/")
|
||||
import SR, VDI, SRCommand, util, lvutil
|
||||
from util import CommandException
|
||||
import vhdutil
|
||||
import shutil
|
||||
import lvhdutil
|
||||
import errno
|
||||
import subprocess
|
||||
import xs_errors
|
||||
import cleanup
|
||||
import stat
|
||||
import random
|
||||
|
||||
VHD_UTIL = 'vhd-util'
|
||||
VHD_PREFIX = 'VHD-'
|
||||
CLOUD_DIR = '/run/cloud_mount'
|
||||
|
||||
def echo(fn):
|
||||
def wrapped(*v, **k):
|
||||
name = fn.__name__
|
||||
util.SMlog("#### VMOPS enter %s ####" % name )
|
||||
res = fn(*v, **k)
|
||||
util.SMlog("#### VMOPS exit %s ####" % name )
|
||||
return res
|
||||
return wrapped
|
||||
|
||||
|
||||
@echo
|
||||
def create_secondary_storage_folder(session, args):
|
||||
local_mount_path = None
|
||||
|
||||
util.SMlog("create_secondary_storage_folder, args: " + str(args))
|
||||
|
||||
try:
|
||||
try:
|
||||
# Mount the remote resource folder locally
|
||||
remote_mount_path = args["remoteMountPath"]
|
||||
local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
|
||||
mount(remote_mount_path, local_mount_path)
|
||||
|
||||
# Create the new folder
|
||||
new_folder = local_mount_path + "/" + args["newFolder"]
|
||||
if not os.path.isdir(new_folder):
|
||||
current_umask = os.umask(0)
|
||||
os.makedirs(new_folder)
|
||||
os.umask(current_umask)
|
||||
except OSError, (errno, strerror):
|
||||
errMsg = "create_secondary_storage_folder failed: errno: " + str(errno) + ", strerr: " + strerror
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
except:
|
||||
errMsg = "create_secondary_storage_folder failed."
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
finally:
|
||||
if local_mount_path != None:
|
||||
# Unmount the local folder
|
||||
umount(local_mount_path)
|
||||
# Remove the local folder
|
||||
os.system("rmdir " + local_mount_path)
|
||||
|
||||
return "1"
|
||||
|
||||
@echo
|
||||
def delete_secondary_storage_folder(session, args):
|
||||
local_mount_path = None
|
||||
|
||||
util.SMlog("delete_secondary_storage_folder, args: " + str(args))
|
||||
|
||||
try:
|
||||
try:
|
||||
# Mount the remote resource folder locally
|
||||
remote_mount_path = args["remoteMountPath"]
|
||||
local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
|
||||
mount(remote_mount_path, local_mount_path)
|
||||
|
||||
# Delete the specified folder
|
||||
folder = local_mount_path + "/" + args["folder"]
|
||||
if os.path.isdir(folder):
|
||||
os.system("rm -f " + folder + "/*")
|
||||
os.system("rmdir " + folder)
|
||||
except OSError, (errno, strerror):
|
||||
errMsg = "delete_secondary_storage_folder failed: errno: " + str(errno) + ", strerr: " + strerror
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
except:
|
||||
errMsg = "delete_secondary_storage_folder failed."
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
finally:
|
||||
if local_mount_path != None:
|
||||
# Unmount the local folder
|
||||
umount(local_mount_path)
|
||||
# Remove the local folder
|
||||
os.system("rmdir " + local_mount_path)
|
||||
|
||||
return "1"
|
||||
|
||||
@echo
|
||||
def post_create_private_template(session, args):
|
||||
local_mount_path = None
|
||||
try:
|
||||
try:
|
||||
# get local template folder
|
||||
templatePath = args["templatePath"]
|
||||
local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
|
||||
mount(templatePath, local_mount_path)
|
||||
# Retrieve args
|
||||
filename = args["templateFilename"]
|
||||
name = args["templateName"]
|
||||
description = args["templateDescription"]
|
||||
checksum = args["checksum"]
|
||||
file_size = args["size"]
|
||||
virtual_size = args["virtualSize"]
|
||||
template_id = args["templateId"]
|
||||
|
||||
# Create the template.properties file
|
||||
template_properties_install_path = local_mount_path + "/template.properties"
|
||||
f = open(template_properties_install_path, "w")
|
||||
f.write("filename=" + filename + "\n")
|
||||
f.write("vhd=true\n")
|
||||
f.write("id=" + template_id + "\n")
|
||||
f.write("vhd.filename=" + filename + "\n")
|
||||
f.write("public=false\n")
|
||||
f.write("uniquename=" + name + "\n")
|
||||
f.write("vhd.virtualsize=" + virtual_size + "\n")
|
||||
f.write("virtualsize=" + virtual_size + "\n")
|
||||
f.write("checksum=" + checksum + "\n")
|
||||
f.write("hvm=true\n")
|
||||
f.write("description=" + description + "\n")
|
||||
f.write("vhd.size=" + str(file_size) + "\n")
|
||||
f.write("size=" + str(file_size) + "\n")
|
||||
f.close()
|
||||
util.SMlog("Created template.properties file")
|
||||
|
||||
# Set permissions
|
||||
permissions = stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH
|
||||
os.chmod(template_properties_install_path, permissions)
|
||||
util.SMlog("Set permissions on template and template.properties")
|
||||
|
||||
except:
|
||||
errMsg = "post_create_private_template failed."
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
|
||||
finally:
|
||||
if local_mount_path != None:
|
||||
# Unmount the local folder
|
||||
umount(local_mount_path)
|
||||
# Remove the local folder
|
||||
os.system("rmdir " + local_mount_path)
|
||||
return "1"
|
||||
|
||||
def isfile(path, isISCSI):
|
||||
errMsg = ''
|
||||
exists = True
|
||||
if isISCSI:
|
||||
exists = checkVolumeAvailablility(path)
|
||||
else:
|
||||
exists = os.path.isfile(path)
|
||||
|
||||
if not exists:
|
||||
errMsg = "File " + path + " does not exist."
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
return errMsg
|
||||
|
||||
def copyfile(fromFile, toFile, isISCSI):
|
||||
util.SMlog("Starting to copy " + fromFile + " to " + toFile)
|
||||
errMsg = ''
|
||||
try:
|
||||
cmd = ['dd', 'if=' + fromFile, 'of=' + toFile, 'bs=4M']
|
||||
txt = util.pread2(cmd)
|
||||
except:
|
||||
try:
|
||||
os.system("rm -f " + toFile)
|
||||
except:
|
||||
txt = ''
|
||||
txt = ''
|
||||
errMsg = "Error while copying " + fromFile + " to " + toFile + " in secondary storage"
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
|
||||
util.SMlog("Successfully copied " + fromFile + " to " + toFile)
|
||||
return errMsg
|
||||
|
||||
def chdir(path):
|
||||
try:
|
||||
os.chdir(path)
|
||||
except OSError, (errno, strerror):
|
||||
errMsg = "Unable to chdir to " + path + " because of OSError with errno: " + str(errno) + " and strerr: " + strerror
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
util.SMlog("Chdired to " + path)
|
||||
return
|
||||
|
||||
def scanParent(path):
|
||||
# Do a scan for the parent for ISCSI volumes
|
||||
# Note that the parent need not be visible on the XenServer
|
||||
parentUUID = ''
|
||||
try:
|
||||
lvName = os.path.basename(path)
|
||||
dirname = os.path.dirname(path)
|
||||
vgName = os.path.basename(dirname)
|
||||
vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
|
||||
parentUUID = vhdInfo.parentUuid
|
||||
except:
|
||||
errMsg = "Could not get vhd parent of " + path
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
return parentUUID
|
||||
|
||||
def getParent(path, isISCSI):
|
||||
parentUUID = ''
|
||||
try :
|
||||
if isISCSI:
|
||||
parentUUID = vhdutil.getParent(path, lvhdutil.extractUuid)
|
||||
else:
|
||||
parentUUID = vhdutil.getParent(path, cleanup.FileVDI.extractUuid)
|
||||
except:
|
||||
errMsg = "Could not get vhd parent of " + path
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
return parentUUID
|
||||
|
||||
def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
|
||||
snapshotVHD = getVHD(snapshotUuid, isISCSI)
|
||||
snapshotPath = os.path.join(primarySRPath, snapshotVHD)
|
||||
|
||||
baseCopyUuid = ''
|
||||
if isISCSI:
|
||||
checkVolumeAvailablility(snapshotPath)
|
||||
baseCopyUuid = scanParent(snapshotPath)
|
||||
else:
|
||||
baseCopyUuid = getParent(snapshotPath, isISCSI)
|
||||
|
||||
util.SMlog("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
|
||||
return baseCopyUuid
|
||||
|
||||
def setParent(parent, child):
|
||||
try:
|
||||
cmd = [VHD_UTIL, "modify", "-p", parent, "-n", child]
|
||||
txt = util.pread2(cmd)
|
||||
except:
|
||||
errMsg = "Unexpected error while trying to set parent of " + child + " to " + parent
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
util.SMlog("Successfully set parent of " + child + " to " + parent)
|
||||
return
|
||||
|
||||
def rename(originalVHD, newVHD):
|
||||
try:
|
||||
os.rename(originalVHD, newVHD)
|
||||
except OSError, (errno, strerror):
|
||||
errMsg = "OSError while renaming " + origiinalVHD + " to " + newVHD + "with errno: " + str(errno) + " and strerr: " + strerror
|
||||
util.SMlog(errMsg)
|
||||
raise xs_errors.XenError(errMsg)
|
||||
return
|
||||
|
||||
def makedirs(path):
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError, (errno, strerror):
            # The path may be a stale mount point; try unmounting it and re-check.
            umount(path)
            if os.path.isdir(path):
                return
            errMsg = "OSError while creating " + path + " with errno: " + str(errno) + " and strerr: " + strerror
            util.SMlog(errMsg)
            raise xs_errors.XenError(errMsg)
    return

def mount(remoteDir, localDir):
    makedirs(localDir)
    options = "soft,tcp,timeo=133,retrans=1"
    try:
        cmd = ['mount', '-o', options, remoteDir, localDir]
        txt = util.pread2(cmd)
    except:
        errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir
        util.SMlog(errMsg)
        raise xs_errors.XenError(errMsg)
    util.SMlog("Successfully mounted " + remoteDir + " to " + localDir)

    return

def umount(localDir):
    try:
        cmd = ['umount', localDir]
        util.pread2(cmd)
    except CommandException:
        errMsg = "CommandException raised while trying to umount " + localDir
        util.SMlog(errMsg)
        raise xs_errors.XenError(errMsg)

    util.SMlog("Successfully unmounted " + localDir)
    return

def mountSnapshotsDir(secondaryStorageMountPath, relativeDir, dcId, accountId, instanceId):
    # The aim is to mount secondaryStorageMountPath on the local mount point
    # and create the <accountId>/<instanceId> dir on it, if it doesn't exist already.
    # Assuming that secondaryStorageMountPath exists remotely.

    # Alex's suggestion and currently implemented:
    # Just mount secondaryStorageMountPath/<relativeDir> every time.
    # Never unmount.
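    # For example (assuming CLOUD_DIR is /var/run/sr-mount, as the comments in
    # backupSnapshot below suggest), a call with relativeDir="snapshots" mounts
    # the remote snapshots dir on:
    #   /var/run/sr-mount/<dcId>/snapshots
    # and returns the backups dir:
    #   /var/run/sr-mount/<dcId>/snapshots/<accountId>/<instanceId>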
    snapshotsDir = os.path.join(secondaryStorageMountPath, relativeDir)

    # Mkdir local mount point dir, if it doesn't exist.
    localMountPointPath = os.path.join(CLOUD_DIR, dcId)
    localMountPointPath = os.path.join(localMountPointPath, relativeDir)

    makedirs(localMountPointPath)
    # If something is not mounted already on localMountPointPath,
    # mount secondaryStorageMountPath on localMountPointPath.
    if os.path.ismount(localMountPointPath):
        # There is only one secondary storage per zone,
        # and we are mounting each sec storage under a zone-specific directory,
        # so two secondary storage snapshot dirs will never get mounted on the same point on the same XenServer.
        util.SMlog("The remote snapshots directory has already been mounted on " + localMountPointPath)
    else:
        mount(snapshotsDir, localMountPointPath)

    # Create the accountId/instanceId dir on localMountPointPath, if it doesn't exist.
    backupsDir = os.path.join(localMountPointPath, accountId)
    backupsDir = os.path.join(backupsDir, instanceId)
    makedirs(backupsDir)
    return backupsDir

@echo
def unmountSnapshotsDir(session, args):
    dcId = args['dcId']
    localMountPointPath = os.path.join(CLOUD_DIR, dcId)
    localMountPointPath = os.path.join(localMountPointPath, "snapshots")
    try:
        umount(localMountPointPath)
    except:
        util.SMlog("Ignoring the error while trying to unmount the snapshots dir.")

    return "1"

def getPrimarySRPath(session, primaryStorageSRUuid, isISCSI):
    sr = session.xenapi.SR.get_by_uuid(primaryStorageSRUuid)
    srrec = session.xenapi.SR.get_record(sr)
    srtype = srrec["type"]
    if srtype == "file":
        pbd = session.xenapi.SR.get_PBDs(sr)[0]
        pbdrec = session.xenapi.PBD.get_record(pbd)
        primarySRPath = pbdrec["device_config"]["location"]
        return primarySRPath
    if isISCSI:
        primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid
        return os.path.join(lvhdutil.VG_LOCATION, primarySRDir)
    else:
        return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid)

def getBackupVHD(UUID):
    return UUID + '.' + SR.DEFAULT_TAP

def getVHD(UUID, isISCSI):
    if isISCSI:
        return VHD_PREFIX + UUID
    else:
        return UUID + '.' + SR.DEFAULT_TAP

def getIsTrueString(stringValue):
    booleanValue = False
    if stringValue and stringValue == 'true':
        booleanValue = True
    return booleanValue

def makeUnavailable(uuid, primarySRPath, isISCSI):
    if not isISCSI:
        return
    VHD = getVHD(uuid, isISCSI)
    path = os.path.join(primarySRPath, VHD)
    manageAvailability(path, '-an')
    return

def manageAvailability(path, value):
    if "/var/run/sr-mount" in path:
        return
    util.SMlog("Setting availability of " + path + " to " + value)
    try:
        cmd = ['/usr/sbin/lvchange', value, path]
        util.pread2(cmd)
    except: # CommandException, (rc, cmdListStr, stderr):
        # errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr
        errMsg = "Unexpected exception thrown by lvchange"
        util.SMlog(errMsg)
        if value == "-ay":
            # Raise an error only if we are trying to make it available.
            # Just warn if we are trying to make it unavailable after the
            # snapshot operation is done.
            raise xs_errors.XenError(errMsg)
    return


def checkVolumeAvailablility(path):
    try:
        if not isVolumeAvailable(path):
            # The VHD file is not available on XenServer. The volume is probably
            # inactive or detached.
            # Do lvchange -ay to make it available on XenServer.
            manageAvailability(path, '-ay')
    except:
        errMsg = "Could not determine status of ISCSI path: " + path
        util.SMlog(errMsg)
        raise xs_errors.XenError(errMsg)

    success = False
    i = 0
    while i < 6:
        i = i + 1
        # Check if the vhd is actually visible by checking for the link.
        success = isVolumeAvailable(path)
        if success:
            util.SMlog("Made vhd: " + path + " available and confirmed that it is visible")
            break

        # Sleep for 10 seconds before checking again.
        time.sleep(10)

    # If not visible within 1 minute (6 attempts, 10 seconds apart), fail.
    if not success:
        util.SMlog("Could not make vhd: " + path + " available despite waiting for 1 minute. Does it exist?")

    return success

def isVolumeAvailable(path):
    # Check if the iscsi volume is available on this XenServer.
    status = "0"
    try:
        p = subprocess.Popen(["/bin/bash", "-c", "if [ -L " + path + " ]; then echo 1; else echo 0; fi"], stdout=subprocess.PIPE)
        status = p.communicate()[0].strip("\n")
    except:
        errMsg = "Could not determine status of ISCSI path: " + path
        util.SMlog(errMsg)
        raise xs_errors.XenError(errMsg)

    return (status == "1")
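
# Note: the bash "[ -L path ]" test above simply checks whether the LV's device
# node exists as a symlink. A pure-Python sketch of the same check (an
# alternative illustration, not what this plugin ships) would be:
#
#   def isVolumeAvailable(path):
#       return os.path.islink(path)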

def getVhdParent(session, args):
    util.SMlog("getVhdParent called with " + str(args))
    primaryStorageSRUuid = args['primaryStorageSRUuid']
    snapshotUuid = args['snapshotUuid']
    isISCSI = getIsTrueString(args['isISCSI'])

    primarySRPath = getPrimarySRPath(session, primaryStorageSRUuid, isISCSI)
    util.SMlog("primarySRPath: " + primarySRPath)

    baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)

    return baseCopyUuid

def backupSnapshot(session, args):
    util.SMlog("Called backupSnapshot with " + str(args))
    primaryStorageSRUuid = args['primaryStorageSRUuid']
    dcId = args['dcId']
    accountId = args['accountId']
    volumeId = args['volumeId']
    secondaryStorageMountPath = args['secondaryStorageMountPath']
    snapshotUuid = args['snapshotUuid']
    prevBackupUuid = args['prevBackupUuid']
    backupUuid = args['backupUuid']
    isISCSI = getIsTrueString(args['isISCSI'])

    primarySRPath = getPrimarySRPath(session, primaryStorageSRUuid, isISCSI)
    util.SMlog("primarySRPath: " + primarySRPath)

    baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
    baseCopyVHD = getVHD(baseCopyUuid, isISCSI)
    baseCopyPath = os.path.join(primarySRPath, baseCopyVHD)
    util.SMlog("Base copy path: " + baseCopyPath)

    # Mount secondary storage mount path on XenServer along the path
    # /var/run/sr-mount/<dcId>/snapshots/ and create the <accountId>/<volumeId>
    # dir on it.
    backupsDir = mountSnapshotsDir(secondaryStorageMountPath, "snapshots", dcId, accountId, volumeId)
    util.SMlog("Backups dir " + backupsDir)

    # Check existence of the snapshot on primary storage.
    isfile(baseCopyPath, isISCSI)
    if prevBackupUuid:
        # Check existence of prevBackupFile.
        prevBackupVHD = getBackupVHD(prevBackupUuid)
        prevBackupFile = os.path.join(backupsDir, prevBackupVHD)
        isfile(prevBackupFile, False)

    # Copy baseCopyPath to backupsDir with the new uuid.
    backupVHD = getBackupVHD(backupUuid)
    backupFile = os.path.join(backupsDir, backupVHD)
    util.SMlog("Back up " + baseCopyUuid + " to Secondary Storage as " + backupUuid)
    copyfile(baseCopyPath, backupFile, isISCSI)
    vhdutil.setHidden(backupFile, False)

    # Because the primary storage is always scanned, the parent of this base copy is always the first base copy.
    # We don't want that; we want a chain of VHDs, each of which is a delta from the previous one.
    # So set the parent of the current backupVHD to prevBackupVHD.
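    # For example (hypothetical UUIDs), after three incremental backups the
    # files on secondary storage form the chain
    #   <backup1>.vhd <- <backup2>.vhd <- <backup3>.vhd
    # where each arrow is the VHD parent link rewritten by setParent() below.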
    if prevBackupUuid:
        # If there was a previous snapshot.
        setParent(prevBackupFile, backupFile)

    txt = "1#" + backupUuid
    return txt

@echo
def deleteSnapshotBackup(session, args):
    util.SMlog("Calling deleteSnapshotBackup with " + str(args))
    dcId = args['dcId']
    accountId = args['accountId']
    volumeId = args['volumeId']
    secondaryStorageMountPath = args['secondaryStorageMountPath']
    backupUUID = args['backupUUID']

    backupsDir = mountSnapshotsDir(secondaryStorageMountPath, "snapshots", dcId, accountId, volumeId)
    # chdir to the backupsDir for convenience.
    chdir(backupsDir)

    backupVHD = getBackupVHD(backupUUID)
    util.SMlog("checking existence of " + backupVHD)

    # The backupVHD is on secondary storage, which is NFS and not ISCSI.
    if not os.path.isfile(backupVHD):
        util.SMlog("backupVHD " + backupVHD + " does not exist. Not trying to delete it")
        return "1"
    util.SMlog("backupVHD " + backupVHD + " exists.")

    # Just delete the backupVHD.
    try:
        os.remove(backupVHD)
    except OSError, (errno, strerror):
        errMsg = "OSError while removing " + backupVHD + " with errno: " + str(errno) + " and strerr: " + strerror
        util.SMlog(errMsg)
        raise xs_errors.XenError(errMsg)

    return "1"

if __name__ == "__main__":
    XenAPIPlugin.dispatch({"getVhdParent": getVhdParent,
                           "create_secondary_storage_folder": create_secondary_storage_folder,
                           "delete_secondary_storage_folder": delete_secondary_storage_folder,
                           "post_create_private_template": post_create_private_template,
                           "backupSnapshot": backupSnapshot,
                           "deleteSnapshotBackup": deleteSnapshotBackup,
                           "unmountSnapshotsDir": unmountSnapshotsDir})
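
# These entry points are invoked from dom0 via the standard XenServer plugin
# mechanism. A minimal sketch of a call (hypothetical host UUID and args;
# plugin= must match the name this file is installed under on the host):
#
#   xe host-call-plugin host-uuid=<host-uuid> plugin=<plugin-name> \
#       fn=getVhdParent args:primaryStorageSRUuid=<sr-uuid> \
#       args:snapshotUuid=<snap-uuid> args:isISCSI=false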

129
scripts/vm/hypervisor/xenserver/xcposs/vmopspremium
Normal file
@ -0,0 +1,129 @@
#!/usr/bin/python
# Version @VERSION@
#
# A plugin for executing scripts needed by the vmops cloud

import os, sys, time
import XenAPIPlugin
sys.path.append("/usr/lib/xcp/sm/")
import util
import socket

def echo(fn):
    # Decorator that logs entry into and exit from each plugin call.
    def wrapped(*v, **k):
        name = fn.__name__
        util.SMlog("#### VMOPS enter %s ####" % name)
        res = fn(*v, **k)
        util.SMlog("#### VMOPS exit %s ####" % name)
        return res
    return wrapped

@echo
def forceShutdownVM(session, args):
    domId = args['domId']
    try:
        cmd = ["/usr/lib/xcp/debug/xenops", "destroy_domain", "-domid", domId]
        txt = util.pread2(cmd)
    except:
        txt = '10#failed'
    return txt


@echo
def create_privatetemplate_from_snapshot(session, args):
    templatePath = args['templatePath']
    snapshotPath = args['snapshotPath']
    tmpltLocalDir = args['tmpltLocalDir']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/create_privatetemplate_from_snapshot.sh", snapshotPath, templatePath, tmpltLocalDir]
        txt = util.pread2(cmd)
    except:
        txt = '10#failed'
    return txt

@echo
def upgrade_snapshot(session, args):
    templatePath = args['templatePath']
    snapshotPath = args['snapshotPath']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/upgrate_snapshot.sh", snapshotPath, templatePath]
        txt = util.pread2(cmd)
    except:
        txt = '10#failed'
    return txt

@echo
def copy_vhd_to_secondarystorage(session, args):
    mountpoint = args['mountpoint']
    vdiuuid = args['vdiuuid']
    sruuid = args['sruuid']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/copy_vhd_to_secondarystorage.sh", mountpoint, vdiuuid, sruuid]
        txt = util.pread2(cmd)
    except:
        txt = '10#failed'
    return txt

@echo
def copy_vhd_from_secondarystorage(session, args):
    mountpoint = args['mountpoint']
    sruuid = args['sruuid']
    namelabel = args['namelabel']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/copy_vhd_from_secondarystorage.sh", mountpoint, sruuid, namelabel]
        txt = util.pread2(cmd)
    except:
        txt = '10#failed'
    return txt

@echo
def setup_heartbeat_sr(session, args):
    host = args['host']
    sr = args['sr']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/setup_heartbeat_sr.sh", host, sr]
        txt = util.pread2(cmd)
    except:
        txt = ''
    return txt

@echo
def setup_heartbeat_file(session, args):
    host = args['host']
    sr = args['sr']
    add = args['add']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/setup_heartbeat_file.sh", host, sr, add]
        txt = util.pread2(cmd)
    except:
        txt = ''
    return txt

@echo
def check_heartbeat(session, args):
    host = args['host']
    interval = args['interval']
    try:
        cmd = ["bash", "/usr/lib/xcp/bin/check_heartbeat.sh", host, interval]
        txt = util.pread2(cmd)
    except:
        txt = ''
    return txt


@echo
def heartbeat(session, args):
    # The heartbeat body below is disabled (kept inside a string literal);
    # the call is currently a no-op that just reports success.
    '''
    host = args['host']
    interval = args['interval']
    try:
        cmd = ["/bin/bash", "/usr/lib/xcp/bin/launch_hb.sh", host, interval]
        txt = util.pread2(cmd)
    except:
        txt = 'fail'
    '''
    return '> DONE <'

if __name__ == "__main__":
    XenAPIPlugin.dispatch({"forceShutdownVM": forceShutdownVM,
                           "upgrade_snapshot": upgrade_snapshot,
                           "create_privatetemplate_from_snapshot": create_privatetemplate_from_snapshot,
                           "copy_vhd_to_secondarystorage": copy_vhd_to_secondarystorage,
                           "copy_vhd_from_secondarystorage": copy_vhd_from_secondarystorage,
                           "setup_heartbeat_sr": setup_heartbeat_sr,
                           "setup_heartbeat_file": setup_heartbeat_file,
                           "check_heartbeat": check_heartbeat,
                           "heartbeat": heartbeat})
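
# As with the snapshot plugin above, these functions are reached from dom0 via
# the XenServer plugin mechanism. A minimal sketch (hypothetical UUID/domid;
# the plugin name matches this file's name, vmopspremium):
#
#   xe host-call-plugin host-uuid=<host-uuid> plugin=vmopspremium \
#       fn=forceShutdownVM args:domId=<domid>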
@ -1508,7 +1508,9 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
                throw new ConfigurationException(msg);
            }
        } else {
            _serviceOffering = new ServiceOfferingVO("System Offering For Console Proxy", 1, DEFAULT_PROXY_VM_RAMSIZE, DEFAULT_PROXY_VM_CPUMHZ, 0, 0, false, null, useLocalStorage, true, null, true, VirtualMachine.Type.ConsoleProxy, true);
            int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE);
            int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ);
            _serviceOffering = new ServiceOfferingVO("System Offering For Console Proxy", 1, ramSize, cpuFreq, 0, 0, false, null, useLocalStorage, true, null, true, VirtualMachine.Type.ConsoleProxy, true);
            _serviceOffering.setUniqueName(ServiceOffering.consoleProxyDefaultOffUniqueName);
            _serviceOffering = _offeringDao.persistSystemServiceOffering(_serviceOffering);

@ -603,8 +603,8 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian
        final Map<String, String> configs = _configDao.getConfiguration("AgentManager", params);

        _mgmt_host = configs.get("host");
        _routerRamSize = NumbersUtil.parseInt(configs.get("router.ram.size"), DEFAULT_ROUTER_VM_RAMSIZE);
        _routerCpuMHz = NumbersUtil.parseInt(configs.get("router.cpu.mhz"), DEFAULT_ROUTER_CPU_MHZ);
        _routerRamSize = NumbersUtil.parseInt(_configDao.getValue("router.ram.size"), DEFAULT_ROUTER_VM_RAMSIZE);
        _routerCpuMHz = NumbersUtil.parseInt(_configDao.getValue("router.cpu.mhz"), DEFAULT_ROUTER_CPU_MHZ);

        _routerExtraPublicNics = NumbersUtil.parseInt(_configDao.getValue(Config.RouterExtraPublicNics.key()), 2);

@ -841,8 +841,10 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V
                throw new ConfigurationException(msg);
            }
        } else {
            int ramSize = NumbersUtil.parseInt(_configDao.getValue("ssvm.ram.size"), DEFAULT_SS_VM_RAMSIZE);
            int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("ssvm.cpu.mhz"), DEFAULT_SS_VM_CPUMHZ);
            _useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key()));
            _serviceOffering = new ServiceOfferingVO("System Offering For Secondary Storage VM", 1, DEFAULT_SS_VM_RAMSIZE, DEFAULT_SS_VM_CPUMHZ, null, null, false, null, _useLocalStorage, true, null, true, VirtualMachine.Type.SecondaryStorageVm, true);
            _serviceOffering = new ServiceOfferingVO("System Offering For Secondary Storage VM", 1, ramSize, cpuFreq, null, null, false, null, _useLocalStorage, true, null, true, VirtualMachine.Type.SecondaryStorageVm, true);
            _serviceOffering.setUniqueName(ServiceOffering.ssvmDefaultOffUniqueName);
            _serviceOffering = _offeringDao.persistSystemServiceOffering(_serviceOffering);

@ -330,6 +330,8 @@ public class DatabaseConfig {
        s_defaultConfigurationValues.put("system.vm.use.local.storage", "false");
        s_defaultConfigurationValues.put("use.local.storage", "false");
        s_defaultConfigurationValues.put("init", "false");
        s_defaultConfigurationValues.put("cpu.overprovisioning.factor", "1");
        s_defaultConfigurationValues.put("mem.overprovisioning.factor", "1");
    }

    protected DatabaseConfig() {

33
setup/db/clouddev.sql
Normal file
@ -0,0 +1,33 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements. See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership. The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied. See the License for the
-- specific language governing permissions and limitations
-- under the License.


UPDATE `cloud`.`configuration` SET value = 'true' WHERE name = 'use.local.storage';
UPDATE `cloud`.`configuration` SET value = 'true' WHERE name = 'system.vm.use.local.storage';
INSERT INTO `cloud`.`disk_offering` (id, name, uuid, display_text, created, use_local_storage, type) VALUES (17, 'tinyOffering', UUID(), 'tinyOffering', NOW(), 1, 'Service');
INSERT INTO `cloud`.`service_offering` (id, cpu, speed, ram_size) VALUES (17, 1, 100, 100);
INSERT INTO `cloud`.`disk_offering` (id, name, uuid, display_text, created, type, disk_size) VALUES (18, 'tinyDiskOffering', UUID(), 'tinyDiskOffering', NOW(), 'Disk', 1073741824);
INSERT INTO `cloud`.`configuration` (name, value) VALUES ('router.ram.size', '100');
INSERT INTO `cloud`.`configuration` (name, value) VALUES ('router.cpu.mhz', '100');
INSERT INTO `cloud`.`configuration` (name, value) VALUES ('console.ram.size', '100');
INSERT INTO `cloud`.`configuration` (name, value) VALUES ('console.cpu.mhz', '100');
INSERT INTO `cloud`.`configuration` (name, value) VALUES ('ssvm.ram.size', '100');
INSERT INTO `cloud`.`configuration` (name, value) VALUES ('ssvm.cpu.mhz', '100');
UPDATE `cloud`.`configuration` SET value = '10' WHERE name = 'storage.overprovisioning.factor';
UPDATE `cloud`.`configuration` SET value = '10' WHERE name = 'cpu.overprovisioning.factor';
UPDATE `cloud`.`configuration` SET value = '10' WHERE name = 'mem.overprovisioning.factor';
UPDATE `cloud`.`vm_template` SET unique_name = "tiny Linux", name = "tiny Linux", url = "http://nfs1.lab.vmops.com/templates/ttylinux_pv.vhd", checksum = "046e134e642e6d344b34648223ba4bc1", display_text = "tiny Linux" WHERE id = 5;
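
-- A quick way to sanity-check the overrides above (a read-only example query,
-- not part of the deployment itself):
--   SELECT name, value FROM `cloud`.`configuration`
--   WHERE name LIKE '%overprovisioning%' OR name LIKE '%.ram.size' OR name LIKE '%.cpu.mhz';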

23
setup/db/deploy-db-clouddev.sh
Normal file
@ -0,0 +1,23 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


mysql --user=cloud --password=cloud < clouddev.sql
if [ $? -ne 0 ]; then
    printf "failed to init clouddev db\n"
    exit 1
fi