From 33b3e02c17418d02df8f5e2a3cf64263a729e86e Mon Sep 17 00:00:00 2001
From: anthony
Date: Tue, 9 Aug 2011 19:47:52 -0700
Subject: [PATCH] bug 10695: add XCP 1.0 support

status 10695: resolved fixed
---
 .../hypervisor/xen/resource/CitrixHelper.java |  149 +-
 .../xen/resource/CitrixResourceBase.java      |    4 +-
 .../xen/resource/XcpServerResource.java       |   38 +
 .../hypervisor/xenserver/xcpserver/NFSSR.py   |   19 +-
 .../hypervisor/xenserver/xcpserver/cleanup.py | 2280 -----------------
 .../vm/hypervisor/xenserver/xcpserver/patch   |   36 +-
 .../xenserver/xcpserver/scsiutil.py           |  468 ----
 .../ConfigurationManagerImpl.java             |    4 +-
 .../xen/discoverer/XcpServerDiscoverer.java   |    8 +-
 .../src/com/cloud/server/StatsCollector.java  |    2 +-
 10 files changed, 154 insertions(+), 2854 deletions(-)
 delete mode 100755 scripts/vm/hypervisor/xenserver/xcpserver/cleanup.py
 delete mode 100755 scripts/vm/hypervisor/xenserver/xcpserver/scsiutil.py

diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java
index 2f64866aaca..5a5c54ffbe4 100644
--- a/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java
+++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java
@@ -39,67 +39,79 @@ public class CitrixHelper {
 
     static {
-        _xcpGuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5");
-        _xcpGuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6");
-        _xcpGuestOsMap.put("CentOS 4.7 (32-bit)", "CentOS 4.7");
-        _xcpGuestOsMap.put("CentOS 4.8 (32-bit)", "CentOS 4.8");
-        _xcpGuestOsMap.put("CentOS 5.0 (32-bit)", "CentOS 5.0");
-        _xcpGuestOsMap.put("CentOS 5.0 (64-bit)", "CentOS 5.0 x64");
-        _xcpGuestOsMap.put("CentOS 5.1 (32-bit)", "CentOS 5.1");
-        _xcpGuestOsMap.put("CentOS 5.1 (64-bit)", "CentOS 5.1 x64");
-        _xcpGuestOsMap.put("CentOS 5.2 (32-bit)", "CentOS 5.2");
-        _xcpGuestOsMap.put("CentOS 5.2 (64-bit)", "CentOS 5.2 x64");
-        _xcpGuestOsMap.put("CentOS 5.3 (32-bit)", "CentOS 5.3");
-        _xcpGuestOsMap.put("CentOS 5.3 (64-bit)", "CentOS 5.3 x64");
-        _xcpGuestOsMap.put("CentOS 5.4 (32-bit)", "CentOS 5.4");
-        _xcpGuestOsMap.put("CentOS 5.4 (64-bit)", "CentOS 5.4 x64");
-        _xcpGuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5.0");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.0 (64-bit)", "Oracle Enterprise Linux 5.0 x64");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.1 (32-bit)", "Oracle Enterprise Linux 5.1");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.1 (64-bit)", "Oracle Enterprise Linux 5.1 x64");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.2 (32-bit)", "Oracle Enterprise Linux 5.2");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.2 (64-bit)", "Oracle Enterprise Linux 5.2 x64");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.3 (32-bit)", "Oracle Enterprise Linux 5.3");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.3 (64-bit)", "Oracle Enterprise Linux 5.3 x64");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.4 (32-bit)", "Oracle Enterprise Linux 5.4");
-        _xcpGuestOsMap.put("Oracle Enterprise Linux 5.4 (64-bit)", "Oracle Enterprise Linux 5.4 x64");
-        _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5");
-        _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6");
-        _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7");
-        _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.8 (32-bit)", "Red Hat Enterprise Linux 4.8");
_xcpGuestOsMap.put("Red Hat Enterprise Linux 5.0 (32-bit)", "Red Hat Enterprise Linux 5.0"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.0 (64-bit)", "Red Hat Enterprise Linux 5.0 x64"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.1 (32-bit)", "Red Hat Enterprise Linux 5.1"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.1 (64-bit)", "Red Hat Enterprise Linux 5.1 x64"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.2 (32-bit)", "Red Hat Enterprise Linux 5.2"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.2 (64-bit)", "Red Hat Enterprise Linux 5.2 x64"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.3 (32-bit)", "Red Hat Enterprise Linux 5.3"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.3 (64-bit)", "Red Hat Enterprise Linux 5.3 x64"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.4 (32-bit)", "Red Hat Enterprise Linux 5.4"); - _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.4 (64-bit)", "Red Hat Enterprise Linux 5.4 x64"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 9 SP4"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 x64"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (32-bit)", "SUSE Linux Enterprise Server 10 SP2"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (64-bit)", "SUSE Linux Enterprise Server 10 SP2 x64"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (64-bit)", "Other install media"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 11 (32-bit)", "SUSE Linux Enterprise Server 11"); - _xcpGuestOsMap.put("SUSE Linux Enterprise Server 11 (64-bit)", "SUSE Linux Enterprise Server 11 x64"); - _xcpGuestOsMap.put("Windows 7 (32-bit)", "Windows 7"); - _xcpGuestOsMap.put("Windows 7 (64-bit)", "Windows 7 x64"); - _xcpGuestOsMap.put("Windows Server 2003 (32-bit)", "Windows Server 2003"); - _xcpGuestOsMap.put("Windows Server 2003 (64-bit)", "Windows Server 2003 x64"); - _xcpGuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008"); - _xcpGuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 x64"); - _xcpGuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 x64"); - _xcpGuestOsMap.put("Windows 2000 SP4 (32-bit)", "Windows 2000 SP4"); - _xcpGuestOsMap.put("Windows Vista (32-bit)", "Windows Vista"); - _xcpGuestOsMap.put("Windows XP SP2 (32-bit)", "Windows XP SP2"); - _xcpGuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3"); - _xcpGuestOsMap.put("Other install media", "Other install media"); - _xcpGuestOsMap.put("Other PV (32-bit)", "CentOS 5.5 (32-bit)"); - _xcpGuestOsMap.put("Other PV (64-bit)", "CentOS 5.5 (64-bit)"); + _xcpGuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)"); + _xcpGuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)"); + _xcpGuestOsMap.put("CentOS 4.7 (32-bit)", "CentOS 4.7 (32-bit)"); + _xcpGuestOsMap.put("CentOS 4.8 (32-bit)", "CentOS 4.8 (32-bit)"); + _xcpGuestOsMap.put("CentOS 5.0 (32-bit)", "CentOS 5 (32-bit)"); + _xcpGuestOsMap.put("CentOS 5.0 (64-bit)", "CentOS 5 (64-bit)"); + _xcpGuestOsMap.put("CentOS 5.1 (32-bit)", "CentOS 5 (32-bit)"); + _xcpGuestOsMap.put("CentOS 5.1 (64-bit)", "CentOS 5 (64-bit)"); + _xcpGuestOsMap.put("CentOS 5.2 (32-bit)", "CentOS 5 (32-bit)"); + _xcpGuestOsMap.put("CentOS 5.2 (64-bit)", "CentOS 5 (64-bit)"); + _xcpGuestOsMap.put("CentOS 5.3 (32-bit)", "CentOS 5 (32-bit)"); + 
_xcpGuestOsMap.put("CentOS 5.3 (64-bit)", "CentOS 5 (64-bit)"); + _xcpGuestOsMap.put("CentOS 5.4 (32-bit)", "CentOS 5 (32-bit)"); + _xcpGuestOsMap.put("CentOS 5.4 (64-bit)", "CentOS 5 (64-bit)"); + _xcpGuestOsMap.put("CentOS 5.5 (32-bit)", "CentOS 5 (32-bit)"); + _xcpGuestOsMap.put("CentOS 5.5 (64-bit)", "CentOS 5 (64-bit)"); + _xcpGuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)"); + _xcpGuestOsMap.put("Debian GNU/Linux 6(32-bit)", "Debian Squeeze 6.0 (32-bit)"); + _xcpGuestOsMap.put("Debian GNU/Linux 6(64-bit)", "Debian Squeeze 6.0 (64-bit) (experimental)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.0 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.1 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.1 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.2 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.2 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.3 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.3 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.4 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.4 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.5 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Oracle Enterprise Linux 5.5 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 4.8 (32-bit)", "Red Hat Enterprise Linux 4.8 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.0 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.0 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.1 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.1 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.2 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.2 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.3 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.3 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.4 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.4 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.5 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 5.5 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xcpGuestOsMap.put("Red Hat Enterprise Linux 6.0 (32-bit)", "Red Hat Enterprise Linux 6 (32-bit) (experimental)"); + 
_xcpGuestOsMap.put("Red Hat Enterprise Linux 6.0 (64-bit)", "Red Hat Enterprise Linux 6 (64-bit) (experimental)"); + ; + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 9 SP4"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 x64"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (32-bit)", "SUSE Linux Enterprise Server 10 SP2"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (64-bit)", "SUSE Linux Enterprise Server 10 SP2 x64"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (64-bit)", "Other install media"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 11 (32-bit)", "SUSE Linux Enterprise Server 11"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 11 (64-bit)", "SUSE Linux Enterprise Server 11 x64"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 11 SP1 (32-bit)", "SUSE Linux Enterprise Server 11 SP1 (32-bit)"); + _xcpGuestOsMap.put("SUSE Linux Enterprise Server 11 SP1 (64-bit)", "SUSE Linux Enterprise Server 11 SP1 (64-bit)"); + _xcpGuestOsMap.put("Windows 7 (32-bit)", "Windows 7 (32-bit)"); + _xcpGuestOsMap.put("Windows 7 (64-bit)", "Windows 7 (64-bit)"); + _xcpGuestOsMap.put("Windows Server 2003 (32-bit)", "Windows Server 2003 (32-bit)"); + _xcpGuestOsMap.put("Windows Server 2003 (64-bit)", "Windows Server 2003 (64-bit)"); + _xcpGuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008 (32-bit)"); + _xcpGuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 (64-bit)"); + _xcpGuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 (64-bit)"); + _xcpGuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)"); + _xcpGuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)"); + _xcpGuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit) (experimental)"); + _xcpGuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit) (experimental)"); + _xcpGuestOsMap.put("Other install media", "Other install media"); + _xcpGuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); + _xcpGuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } static { @@ -164,8 +177,6 @@ public class CitrixHelper { _xenServerGuestOsMap.put("Other install media", "Other install media"); _xenServerGuestOsMap.put("Other PV (32-bit)", "CentOS 5.4 (32-bit)"); _xenServerGuestOsMap.put("Other PV (64-bit)", "CentOS 5.4 (64-bit)"); -// _xenServerGuestOsMap.put("Ubuntu 10.04 (32-bit)", "CentOS 5.3 (32-bit"); -// _xenServerGuestOsMap.put("Ubuntu 10.04 (64-bit)", "CentOS 5.3 (64-bit"); } static { @@ -239,8 +250,8 @@ public class CitrixHelper { _xenServer56FP1GuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit) (experimental)"); _xenServer56FP1GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit) (experimental)"); _xenServer56FP1GuestOsMap.put("Other install media", "Other install media"); - _xenServer56FP1GuestOsMap.put("Other PV (32-bit)", "CentOS 5.5 (32-bit)"); - _xenServer56FP1GuestOsMap.put("Other PV (64-bit)", "CentOS 5.5 (64-bit)"); + _xenServer56FP1GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP1GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } @@ -315,8 +326,8 @@ public class CitrixHelper { _xenServer56FP2GuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit) (experimental)"); 
_xenServer56FP2GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit) (experimental)"); _xenServer56FP2GuestOsMap.put("Other install media", "Other install media"); - _xenServer56FP2GuestOsMap.put("Other PV (32-bit)", "CentOS 5.5 (32-bit)"); - _xenServer56FP2GuestOsMap.put("Other PV (64-bit)", "CentOS 5.5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } @@ -408,8 +419,8 @@ public class CitrixHelper { _xenServer60GuestOsMap.put("Ubuntu 10.10 (32-bit)", "Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)"); _xenServer60GuestOsMap.put("Ubuntu 10.10 (64-bit)", "Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)"); _xenServer60GuestOsMap.put("Other install media", "Other install media"); - _xenServer60GuestOsMap.put("Other PV (32-bit)", "CentOS 5.5 (32-bit)"); - _xenServer60GuestOsMap.put("Other PV (64-bit)", "CentOS 5.5 (64-bit)"); + _xenServer60GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); + _xenServer60GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index b939f7ae874..eefd4cc9dec 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -333,7 +333,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } protected boolean isRefNull(XenAPIObject object) { - return (object == null || object.toWireString().equals("OpaqueRef:NULL")); + return (object == null || object.toWireString().equals("OpaqueRef:NULL") || object.toWireString().equals("")); } @Override @@ -2578,7 +2578,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe final State state = convertToState(ps); Host host = record.residentOn; String host_uuid = null; - if( host != null ) { + if( ! 
diff --git a/core/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java b/core/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java
index 57a8a343c5f..f61acb581e3 100644
--- a/core/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java
+++ b/core/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java
@@ -17,9 +17,20 @@
  */
 package com.cloud.hypervisor.xen.resource;
 
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
 import javax.ejb.Local;
 
+import org.apache.xmlrpc.XmlRpcException;
+
 import com.cloud.resource.ServerResource;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.script.Script;
+import com.xensource.xenapi.Connection;
+import com.xensource.xenapi.VM;
+import com.xensource.xenapi.Types.XenAPIException;
 
 @Local(value=ServerResource.class)
 public class XcpServerResource extends CitrixResourceBase {
@@ -31,4 +42,31 @@ public class XcpServerResource extends CitrixResourceBase {
     protected String getGuestOsType(String stdType, boolean bootFromCD) {
         return CitrixHelper.getXcpGuestOsType(stdType);
     }
+
+    @Override
+    protected List<File> getPatchFiles() {
+        List<File> files = new ArrayList<File>();
+        String patch = "scripts/vm/hypervisor/xenserver/xcpserver/patch";
+        String patchfilePath = Script.findScript("", patch);
+        if (patchfilePath == null) {
+            throw new CloudRuntimeException("Unable to find patch file " + patch);
+        }
+        File file = new File(patchfilePath);
+        files.add(file);
+        return files;
+    }
+
+    @Override
+    protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException {
+
+        vm.setMemoryStaticMin(conn, 33554432L);
+        vm.setMemoryDynamicMin(conn, 33554432L);
+        vm.setMemoryDynamicMax(conn, 33554432L);
+        vm.setMemoryStaticMax(conn, 33554432L);
+
+        vm.setMemoryStaticMax(conn, memsize);
+        vm.setMemoryDynamicMax(conn, memsize);
+        vm.setMemoryDynamicMin(conn, memsize);
+        vm.setMemoryStaticMin(conn, memsize);
+    }
 }
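setMemory() above writes the four memory fields twice: first all of them down to a 32 MiB floor (33554432 bytes), then up to the requested size in static-max-first order. The likely reason, an assumption here rather than something stated in the patch, is that the hypervisor rejects any individual setter call that would leave the limits violating static_min <= dynamic_min <= dynamic_max <= static_max; collapsing everything to a common floor first makes the subsequent raises valid one call at a time, whether memsize is above or below the VM's current values. A toy model of that constraint:

    // Hypothetical model (not XenAPI code) of the per-call ordering constraint
    // assumed above: every assignment must leave
    //   static_min <= dynamic_min <= dynamic_max <= static_max.
    public class MemoryOrderSketch {
        long staticMin = 256 << 20, dynamicMin = 256 << 20,
             dynamicMax = 256 << 20, staticMax = 256 << 20;

        void check() {
            if (!(staticMin <= dynamicMin && dynamicMin <= dynamicMax && dynamicMax <= staticMax)) {
                throw new IllegalStateException("memory constraint violated");
            }
        }

        public static void main(String[] args) {
            MemoryOrderSketch vm = new MemoryOrderSketch();
            long floor = 33554432L, memsize = 512L << 20;
            // Phase 1: drop everything to the floor (min-first keeps the invariant).
            vm.staticMin = floor;   vm.check();
            vm.dynamicMin = floor;  vm.check();
            vm.dynamicMax = floor;  vm.check();
            vm.staticMax = floor;   vm.check();
            // Phase 2: raise to the target (max-first keeps the invariant).
            vm.staticMax = memsize;  vm.check();
            vm.dynamicMax = memsize; vm.check();
            vm.dynamicMin = memsize; vm.check();
            vm.staticMin = memsize;  vm.check();
            System.out.println("every step satisfied the constraint");
        }
    }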
diff --git a/scripts/vm/hypervisor/xenserver/xcpserver/NFSSR.py b/scripts/vm/hypervisor/xenserver/xcpserver/NFSSR.py
index 9f596aa9944..81b1a38dd76 100755
--- a/scripts/vm/hypervisor/xenserver/xcpserver/NFSSR.py
+++ b/scripts/vm/hypervisor/xenserver/xcpserver/NFSSR.py
@@ -24,9 +24,10 @@ import vhdutil
 from lock import Lock
 import cleanup
 
-CAPABILITIES = ["SR_PROBE","SR_UPDATE", \
+CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_CACHING", \
                 "VDI_CREATE","VDI_DELETE","VDI_ATTACH","VDI_DETACH", \
-                "VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE","VDI_RESIZE_ONLINE"]
+                "VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE", \
+                "VDI_RESIZE_ONLINE", "VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"]
 
 CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \
                   [ 'serverpath', 'path on remote server (required)' ] ]
@@ -59,6 +60,7 @@ class NFSSR(FileSR.FileSR):
 
     def load(self, sr_uuid):
+        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
         self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
         self.sr_vditype = SR.DEFAULT_TAP
         if not self.dconf.has_key('server'):
@@ -99,12 +101,10 @@ class NFSSR(FileSR.FileSR):
             raise xs_errors.XenError('NFSMount', opterr=exc.errstr)
 
 
-    @FileSR.locking("SRUnavailable")
     def attach(self, sr_uuid):
         self.validate_remotepath(False)
         #self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
         self.remotepath = self.dconf['serverpath']
-        util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
         self.mount_remotepath(sr_uuid)
 
@@ -117,7 +117,6 @@ class NFSSR(FileSR.FileSR):
 
         return super(NFSSR, self).attach(sr_uuid)
 
-    @FileSR.locking("SRUnavailable")
     def probe(self):
         # Verify NFS target and port
         util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
@@ -137,7 +136,6 @@ class NFSSR(FileSR.FileSR):
             pass
 
 
-    @FileSR.locking("SRUnavailable")
     def detach(self, sr_uuid):
         """Detach the SR: Unmounts and removes the mountpoint"""
         if not self._checkmount():
@@ -156,7 +154,6 @@ class NFSSR(FileSR.FileSR):
 
         return super(NFSSR, self).detach(sr_uuid)
 
-    @FileSR.locking("SRUnavailable")
    def create(self, sr_uuid, size):
         util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
         self.validate_remotepath(True)
@@ -191,7 +188,6 @@ class NFSSR(FileSR.FileSR):
 #                        % inst.code)
             self.detach(sr_uuid)
 
-    @FileSR.locking("SRUnavailable")
     def delete(self, sr_uuid):
         # try to remove/delete non VDI contents first
         super(NFSSR, self).delete(sr_uuid)
@@ -215,7 +211,7 @@ class NFSSR(FileSR.FileSR):
 
     def vdi(self, uuid, loadLocked = False):
         if not loadLocked:
-            return NFSFileVDINolock(self, uuid)
+            return NFSFileVDI(self, uuid)
         return NFSFileVDI(self, uuid)
 
     def _checkmount(self):
@@ -253,11 +249,6 @@ class NFSSR(FileSR.FileSR):
             os.utime(self.sr.path, (timestamp_after, timestamp_after))
         return ret
 
-class NFSFileVDINolock(NFSFileVDI):
-    def load(self, vdi_uuid):
-        self.lock = self.sr.lock
-        self._load(vdi_uuid)
-
 if __name__ == '__main__':
     SRCommand.run(NFSSR, DRIVER_INFO)
diff --git a/scripts/vm/hypervisor/xenserver/xcpserver/cleanup.py b/scripts/vm/hypervisor/xenserver/xcpserver/cleanup.py
deleted file mode 100755
index b17ec91823f..00000000000
--- a/scripts/vm/hypervisor/xenserver/xcpserver/cleanup.py
+++ /dev/null
@@ -1,2280 +0,0 @@
-#!/usr/bin/python
-# Copyright (C) 2006-2007 XenSource Ltd.
-# Copyright (C) 2008-2009 Citrix Ltd.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as published
-# by the Free Software Foundation; version 2.1 only.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Lesser General Public License for more details.
-#
-# Script to coalesce and garbage collect VHD-based SR's in the background
-#
-
-import os
-import sys
-import time
-import signal
-import subprocess
-import getopt
-import datetime
-import exceptions
-import traceback
-import base64
-import zlib
-
-import XenAPI
-import util
-import lvutil
-import vhdutil
-import lvhdutil
-import lvmcache
-import journaler
-import fjournaler
-import lock
-import atomicop
-from refcounter import RefCounter
-from ipc import IPCFlag
-from lvmanager import LVActivator
-
-# Disable automatic leaf-coalescing. Online leaf-coalesce is currently not
-# possible due to lvhd_stop_using_() not working correctly. However, we leave
-# this option available through the explicit LEAFCLSC_FORCE flag in the VDI
-# record for use by the offline tool (which makes the operation safe by pausing
-# the VM first)
-AUTO_ONLINE_LEAF_COALESCE_ENABLED = False
-
-LOG_FILE = "/var/log/SMlog"
-FLAG_TYPE_ABORT = "abort" # flag to request aborting of GC/coalesce
-
-# process "lock", used simply as an indicator that a process already exists
-# that is doing GC/coalesce on this SR (such a process holds the lock, and we
-# check for the fact by trying the lock).
-LOCK_TYPE_RUNNING = "running" -lockRunning = None - - -class AbortException(util.SMException): - pass - -################################################################################ -# -# Util -# -class Util: - RET_RC = 1 - RET_STDOUT = 2 - RET_STDERR = 4 - - PREFIX = {"G": 1024 * 1024 * 1024, "M": 1024 * 1024, "K": 1024} - - def log(text): - f = open(LOG_FILE, 'a') - f.write("<%d> %s\t%s\n" % (os.getpid(), datetime.datetime.now(), text)) - f.close() - log = staticmethod(log) - - def logException(tag): - info = sys.exc_info() - if info[0] == exceptions.SystemExit: - # this should not be happening when catching "Exception", but it is - sys.exit(0) - tb = reduce(lambda a, b: "%s%s" % (a, b), traceback.format_tb(info[2])) - Util.log("*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*") - Util.log(" ***********************") - Util.log(" * E X C E P T I O N *") - Util.log(" ***********************") - Util.log("%s: EXCEPTION %s, %s" % (tag, info[0], info[1])) - Util.log(tb) - Util.log("*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*") - logException = staticmethod(logException) - - def doexec(args, expectedRC, inputtext=None, ret=None, log=True): - "Execute a subprocess, then return its return code, stdout, stderr" - proc = subprocess.Popen(args, - stdin=subprocess.PIPE,\ - stdout=subprocess.PIPE,\ - stderr=subprocess.PIPE,\ - shell=True,\ - close_fds=True) - (stdout, stderr) = proc.communicate(inputtext) - stdout = str(stdout) - stderr = str(stderr) - rc = proc.returncode - if log: - Util.log("`%s`: %s" % (args, rc)) - if type(expectedRC) != type([]): - expectedRC = [expectedRC] - if not rc in expectedRC: - reason = stderr.strip() - if stdout.strip(): - reason = "%s (stdout: %s)" % (reason, stdout.strip()) - Util.log("Failed: %s" % reason) - raise util.CommandException(rc, args, reason) - - if ret == Util.RET_RC: - return rc - if ret == Util.RET_STDERR: - return stderr - return stdout - doexec = staticmethod(doexec) - - def runAbortable(func, ret, ns, abortTest, pollInterval, timeOut): - """execute func in a separate thread and kill it if abortTest signals - so""" - resultFlag = IPCFlag(ns) - pid = os.fork() - if pid: - startTime = time.time() - while True: - if resultFlag.test("success"): - Util.log(" Child process completed successfully") - resultFlag.clear("success") - return - if resultFlag.test("failure"): - resultFlag.clear("failure") - raise util.SMException("Child process exited with error") - if abortTest(): - os.killpg(pid, signal.SIGKILL) - raise AbortException("Aborting due to signal") - if timeOut and time.time() - startTime > timeOut: - os.killpg(pid, signal.SIGKILL) - raise util.SMException("Timed out") - time.sleep(pollInterval) - else: - os.setpgrp() - try: - if func() == ret: - resultFlag.set("success") - else: - resultFlag.set("failure") - except: - resultFlag.set("failure") - os._exit(0) - runAbortable = staticmethod(runAbortable) - - def num2str(number): - for prefix in ("G", "M", "K"): - if number >= Util.PREFIX[prefix]: - return "%.2f%s" % (float(number) / Util.PREFIX[prefix], prefix) - return "%s" % number - num2str = staticmethod(num2str) - - def numBits(val): - count = 0 - while val: - count += val & 1 - val = val >> 1 - return count - numBits = staticmethod(numBits) - - def countBits(bitmap1, bitmap2): - """return bit count in the bitmap produced by ORing the two bitmaps""" - len1 = len(bitmap1) - len2 = len(bitmap2) - lenLong = len1 - lenShort = len2 - bitmapLong = bitmap1 - if len2 > len1: - lenLong = len2 - lenShort = len1 - bitmapLong = bitmap2 - - count = 0 - for i in 
range(lenShort): - val = ord(bitmap1[i]) | ord(bitmap2[i]) - count += Util.numBits(val) - - for i in range(i + 1, lenLong): - val = ord(bitmapLong[i]) - count += Util.numBits(val) - return count - countBits = staticmethod(countBits) - - def getThisScript(): - thisScript = util.get_real_path(__file__) - if thisScript.endswith(".pyc"): - thisScript = thisScript[:-1] - return thisScript - getThisScript = staticmethod(getThisScript) - - def getThisHost(): - uuid = None - f = open("/etc/xensource-inventory", 'r') - for line in f.readlines(): - if line.startswith("INSTALLATION_UUID"): - uuid = line.split("'")[1] - f.close() - return uuid - getThisHost = staticmethod(getThisHost) - - -################################################################################ -# -# XAPI -# -class XAPI: - USER = "root" - PLUGIN_ON_SLAVE = "on-slave" - PLUGIN_PAUSE_VDIS = "atomicop.py" - - CONFIG_SM = 0 - CONFIG_OTHER = 1 - - class LookupError(util.SMException): - pass - - def getSession(): - session = XenAPI.xapi_local() - session.xenapi.login_with_password(XAPI.USER, '') - return session - getSession = staticmethod(getSession) - - def __init__(self, session, srUuid): - self.sessionPrivate = False - self.session = session - if self.session == None: - self.session = self.getSession() - self.sessionPrivate = True - self._srRef = self.session.xenapi.SR.get_by_uuid(srUuid) - self.srRecord = self.session.xenapi.SR.get_record(self._srRef) - self.hostUuid = Util.getThisHost() - self._hostRef = self.session.xenapi.host.get_by_uuid(self.hostUuid) - - def __del__(self): - if self.sessionPrivate: - self.session.xenapi.session.logout() - - def isInvalidHandleError(exception): - return exception.details[0] == "HANDLE_INVALID" - isInvalidHandleError = staticmethod(isInvalidHandleError) - - def isPluggedHere(self): - pbds = self.getAttachedPBDs() - for pbdRec in pbds: - if pbdRec["host"] == self._hostRef: - return True - return False - - def isMaster(self): - if self.srRecord["shared"]: - pool = self.session.xenapi.pool.get_all_records().values()[0] - return pool["master"] == self._hostRef - else: - pbds = self.getAttachedPBDs() - if len(pbds) < 1: - raise util.SMException("Local SR not attached") - elif len(pbds) > 1: - raise util.SMException("Local SR multiply attached") - return pbds[0]["host"] == self._hostRef - - def getAttachedPBDs(self): - """Return PBD records for all PBDs of this SR that are currently - attached""" - attachedPBDs = [] - pbds = self.session.xenapi.PBD.get_all_records() - for pbdRec in pbds.values(): - if pbdRec["SR"] == self._srRef and pbdRec["currently_attached"]: - attachedPBDs.append(pbdRec) - return attachedPBDs - - def getOnlineHosts(self): - onlineHosts = [] - hosts = self.session.xenapi.host.get_all_records() - for hostRef, hostRecord in hosts.iteritems(): - metricsRef = hostRecord["metrics"] - metrics = self.session.xenapi.host_metrics.get_record(metricsRef) - if metrics["live"]: - onlineHosts.append(hostRef) - return onlineHosts - - def ensureInactive(self, hostRef, args): - text = self.session.xenapi.host.call_plugin( \ - hostRef, self.PLUGIN_ON_SLAVE, "multi", args) - Util.log("call-plugin returned: '%s'" % text) - - def getRefVDI(self, vdi): - return self.session.xenapi.VDI.get_by_uuid(vdi.uuid) - - def singleSnapshotVDI(self, vdi): - return self.session.xenapi.VDI.snapshot(vdi.getRef(), {"type":"single"}) - - def forgetVDI(self, vdiUuid): - """Forget the VDI, but handle the case where the VDI has already been - forgotten (i.e. 
ignore errors)""" - try: - vdiRef = self.session.xenapi.VDI.get_by_uuid(vdiUuid) - self.session.xenapi.VDI.forget(vdiRef) - except XenAPI.Failure: - pass - - def getConfigVDI(self, kind, vdi): - if kind == self.CONFIG_SM: - return self.session.xenapi.VDI.get_sm_config(vdi.getRef()) - elif kind == self.CONFIG_OTHER: - return self.session.xenapi.VDI.get_other_config(vdi.getRef()) - assert(False) - - def setConfigVDI(self, kind, vdi, map): - if kind == self.CONFIG_SM: - self.session.xenapi.VDI.set_sm_config(vdi.getRef(), map) - elif kind == self.CONFIG_OTHER: - self.session.xenapi.VDI.set_other_config(vdi.getRef(), map) - else: - assert(False) - - def srUpdate(self): - Util.log("Starting asynch srUpdate for SR %s" % self.srRecord["uuid"]) - abortFlag = IPCFlag(self.srRecord["uuid"]) - task = self.session.xenapi.Async.SR.update(self._srRef) - for i in range(60): - status = self.session.xenapi.task.get_status(task) - if not status == "pending": - Util.log("SR.update_asynch status changed to [%s]" % status) - return - if abortFlag.test(FLAG_TYPE_ABORT): - Util.log("Abort signalled during srUpdate, cancelling task...") - try: - self.session.xenapi.task.cancel(task) - Util.log("Task cancelled") - except: - pass - return - time.sleep(1) - Util.log("Asynch srUpdate still running, but timeout exceeded.") - - def atomicOp(self, vdiList, op, args, mustExist = False): - vdiRefs = [] - for vdi in vdiList: - Util.log("atomicOp: will pause %s" % vdi.toString()) - try: - vdiRefs.append(vdi.getRef()) - except XenAPI.Failure: - Util.log("atomicOp: can't find %s" % vdi.toString()) - if mustExist: - raise - if len(vdiRefs) == 0: - Util.log("atomicOp: no VDIs found in DB, not pausing anything") - fn = getattr(atomicop, op) - ret = fn(self.session, args) - else: - ret = self.session.xenapi.SR.lvhd_stop_using_these_vdis_and_call_script(\ - vdiRefs, self.PLUGIN_PAUSE_VDIS, op, args) - Util.log("Plugin returned: %s" % ret) - if ret == atomicop.RET_EXCEPTION: - raise util.SMException("Exception in atomic %s" % op) - if ret == atomicop.RET_SUCCESS: - return True - return False - - -################################################################################ -# -# VDI -# -class VDI: - """Object representing a VDI of a VHD-based SR""" - - POLL_INTERVAL = 1 - POLL_TIMEOUT = 30 - DEVICE_MAJOR = 202 - DRIVER_NAME_VHD = "vhd" - - # config keys & values - DB_VHD_PARENT = "vhd-parent" - DB_VDI_TYPE = "vdi_type" - DB_VHD_BLOCKS = "vhd-blocks" - DB_LEAFCLSC = "leaf-coalesce" # config key - LEAFCLSC_DISABLED = "no" # set by user; means do not leaf-coalesce - LEAFCLSC_FORCE = "force" # set by user; means skip snap-coalesce - LEAFCLSC_OFFLINE = "offline" # set here for informational purposes: means - # no space to snap-coalesce or unable to keep - # up with VDI - CONFIG_TYPE = { - DB_VHD_PARENT: XAPI.CONFIG_SM, - DB_VDI_TYPE: XAPI.CONFIG_SM, - DB_VHD_BLOCKS: XAPI.CONFIG_SM, - DB_LEAFCLSC: XAPI.CONFIG_OTHER } - - LIVE_LEAF_COALESCE_MAX_SIZE = 100 * 1024 * 1024 # bytes - LIVE_LEAF_COALESCE_TIMEOUT = 10 # seconds - - JRN_RELINK = "relink" # journal entry type for relinking children - JRN_COALESCE = "coalesce" # to communicate which VDI is being coalesced - JRN_LEAF = "leaf" # used in coalesce-leaf - - PRINT_INDENTATION = 4 - - def __init__(self, sr, uuid): - self.sr = sr - self.scanError = True - self.uuid = uuid - self.parentUuid = "" - self.sizeVirt = -1 - self._sizeVHD = -1 - self.hidden = False - self.parent = None - self.children = [] - self._vdiRef = None - self._config = {} - self._configDirty = {} - self._clearRef() - - 
def load(self): - """Load VDI info""" - pass # abstract - - def getDriverName(self): - return self.DRIVER_NAME_VHD - - def getRef(self): - if self._vdiRef == None: - self._vdiRef = self.sr.xapi.getRefVDI(self) - return self._vdiRef - - def getConfig(self, key, default = None): - kind = self.CONFIG_TYPE[key] - self._configLazyInit(kind) - if self._config[kind].get(key): - return self._config[kind][key] - return default - - def setConfig(self, key, val): - kind = self.CONFIG_TYPE[key] - self._configLazyInit(kind) - self._config[kind][key] = val - self._configDirty[kind] = True - - def delConfig(self, key): - kind = self.CONFIG_TYPE[key] - self._configLazyInit(kind) - if self._config[kind].get(key): - del self._config[kind][key] - self._configDirty[kind] = True - - def updateConfig(self): - for kind in self._config.keys(): - if self._configDirty[kind]: - self.sr.xapi.setConfigVDI(kind, self, self._config[kind]) - self._configDirty[kind] = False - - def setConfigUpdate(self, key, val): - self.setConfig(key, val) - self.updateConfig() - - def delConfigUpdate(self, key): - self.delConfig(key) - self.updateConfig() - - def getVHDBlocks(self): - val = self.getConfig(VDI.DB_VHD_BLOCKS) - if not val: - self.updateBlockInfo() - val = self.getConfig(VDI.DB_VHD_BLOCKS) - bitmap = zlib.decompress(base64.b64decode(val)) - return bitmap - - def isCoalesceable(self): - """A VDI is coalesceable if it has no siblings and is not a leaf""" - return not self.scanError and \ - self.parent and \ - len(self.parent.children) == 1 and \ - self.hidden and \ - len(self.children) > 0 - - def isLeafCoalesceable(self): - """A VDI is leaf-coalesceable if it has no siblings and is a leaf""" - return not self.scanError and \ - self.parent and \ - len(self.parent.children) == 1 and \ - not self.hidden and \ - len(self.children) == 0 - - def canLiveCoalesce(self): - """Can we stop-and-leaf-coalesce this VDI? The VDI must be - isLeafCoalesceable() already""" - return self.getSizeVHD() <= self.LIVE_LEAF_COALESCE_MAX_SIZE or \ - self.getConfig(self.DB_LEAFCLSC) == self.LEAFCLSC_FORCE - - def getAllPrunable(self): - if len(self.children) == 0: # base case - # it is possible to have a hidden leaf that was recently coalesced - # onto its parent, its children already relinked but not yet - # reloaded - in which case it may not be garbage collected yet: - # some tapdisks could still be using the file. 
- if self.sr.journaler.get(self.JRN_RELINK, self.uuid): - return [] - if not self.scanError and self.hidden: - return [self] - return [] - - thisPrunable = True - vdiList = [] - for child in self.children: - childList = child.getAllPrunable() - vdiList.extend(childList) - if child not in childList: - thisPrunable = False - - if not self.scanError and thisPrunable: - vdiList.append(self) - return vdiList - - def getSizeVHD(self): - return self._sizeVHD - - def getTreeRoot(self): - "Get the root of the tree that self belongs to" - root = self - while root.parent: - root = root.parent - return root - - def getTreeHeight(self): - "Get the height of the subtree rooted at self" - if len(self.children) == 0: - return 1 - - maxChildHeight = 0 - for child in self.children: - childHeight = child.getTreeHeight() - if childHeight > maxChildHeight: - maxChildHeight = childHeight - - return maxChildHeight + 1 - - def updateBlockInfo(self): - val = base64.b64encode(self._queryVHDBlocks()) - self.setConfigUpdate(VDI.DB_VHD_BLOCKS, val) - - def rename(self, uuid): - "Rename the VDI file" - assert(not self.sr.vdis.get(uuid)) - self._clearRef() - oldUuid = self.uuid - self.uuid = uuid - self.children = [] - # updating the children themselves is the responsiblity of the caller - del self.sr.vdis[oldUuid] - self.sr.vdis[self.uuid] = self - - def delete(self): - "Physically delete the VDI" - lock.Lock.cleanup(self.uuid, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) - self._clear() - - def printTree(self, indentSize = 0): - indent = " " * indentSize - Util.log("%s%s" % (indent, self.toString())) - for child in self.children: - child.printTree(indentSize + VDI.PRINT_INDENTATION) - - def toString(self): - strHidden = "" - if self.hidden: - strHidden = "*" - strSizeVHD = "" - if self._sizeVHD > 0: - strSizeVHD = Util.num2str(self._sizeVHD) - - return "%s%s(%s/%s)" % (strHidden, self.uuid[0:8], - Util.num2str(self.sizeVirt), strSizeVHD) - - def validate(self): - if not vhdutil.check(self.path): - raise util.SMException("VHD %s corrupted" % self.toString()) - - def _clear(self): - self.uuid = "" - self.path = "" - self.parentUuid = "" - self.parent = None - self._clearRef() - - def _clearRef(self): - self._vdiRef = None - for kind in [XAPI.CONFIG_SM, XAPI.CONFIG_OTHER]: - self._config[kind] = None - self._configDirty[kind] = False - - def _configLazyInit(self, kind): - if self._config[kind] == None: - self._config[kind] = self.sr.xapi.getConfigVDI(kind, self) - - def _coalesceBegin(self): - """Coalesce self onto parent. Only perform the actual coalescing of - VHD, but not the subsequent relinking. 
We'll do that as the next step, - after reloading the entire SR in case things have changed while we - were coalescing""" - self.validate() - self.parent.validate() - if self.sr.journaler.get(self.JRN_RELINK, self.uuid): - # this means we had done the actual coalescing already and just - # need to finish relinking and/or refreshing the children - Util.log("==> Coalesce apparently already done: skipping") - else: - self.parent._increaseSizeVirt(self.sizeVirt) - self._coalesceVHD(0) - self.parent.validate() - #self._verifyContents(0) - self.parent.updateBlockInfo() - - def _verifyContents(self, timeOut): - Util.log(" Coalesce verification on %s" % self.toString()) - abortTest = lambda:IPCFlag(self.sr.uuid).test(FLAG_TYPE_ABORT) - Util.runAbortable(lambda: self._runTapdiskDiff(), True, - self.sr.uuid, abortTest, VDI.POLL_INTERVAL, timeOut) - Util.log(" Coalesce verification succeeded") - - def _runTapdiskDiff(self): - cmd = "tapdisk-diff -n %s:%s -m %s:%s" % \ - (self.getDriverName(), self.path, \ - self.parent.getDriverName(), self.parent.path) - Util.doexec(cmd, 0) - return True - - def _coalesceVHD(self, timeOut): - Util.log(" Running VHD coalesce on %s" % self.toString()) - abortTest = lambda:IPCFlag(self.sr.uuid).test(FLAG_TYPE_ABORT) - Util.runAbortable(lambda: vhdutil.coalesce(self.path), None, - self.sr.uuid, abortTest, VDI.POLL_INTERVAL, timeOut) - util.fistpoint.activate("LVHDRT_coalescing_VHD_data",self.sr.uuid) - - def _relinkSkip(self): - """Relink children of this VDI to point to the parent of this VDI""" - abortFlag = IPCFlag(self.sr.uuid) - for child in self.children: - if abortFlag.test(FLAG_TYPE_ABORT): - raise AbortException("Aborting due to signal") - Util.log(" Relinking %s from %s to %s" % (child.toString(), \ - self.toString(), self.parent.toString())) - util.fistpoint.activate("LVHDRT_relinking_grandchildren",self.sr.uuid) - child._setParent(self.parent) - self.children = [] - - def _reloadChildren(self, vdiSkip): - """Pause & unpause all VDIs in the subtree to cause blktap to reload - the VHD metadata for this file in any online VDI""" - abortFlag = IPCFlag(self.sr.uuid) - for child in self.children: - if child == vdiSkip: - continue - if abortFlag.test(FLAG_TYPE_ABORT): - raise AbortException("Aborting due to signal") - Util.log(" Reloading VDI %s" % child.toString()) - child._reload() - - def _reload(self): - """Pause & unpause to cause blktap to reload the VHD metadata""" - for child in self.children: - child._reload() - - # only leaves can be attached - if len(self.children) == 0: - try: - self.sr.xapi.atomicOp([self], "noop", {}) - except XenAPI.Failure, e: - if self.sr.xapi.isInvalidHandleError(e): - Util.log("VDI %s appears to have been deleted, ignoring" % \ - self.toString()) - else: - raise - - def _loadInfoParent(self): - ret = vhdutil.getParent(self.path, lvhdutil.extractUuid) - if ret: - self.parentUuid = ret - - def _setParent(self, parent): - vhdutil.setParent(self.path, parent.path, False) - self.parent = parent - self.parentUuid = parent.uuid - parent.children.append(self) - try: - self.setConfigUpdate(self.DB_VHD_PARENT, self.parentUuid) - Util.log("Updated the vhd-parent field for child %s with %s" % \ - (self.uuid, self.parentUuid)) - except: - Util.log("Failed to update %s with vhd-parent field %s" % \ - (self.uuid, self.parentUuid)) - - def _loadInfoHidden(self): - hidden = vhdutil.getHidden(self.path) - self.hidden = (hidden != 0) - - def _setHidden(self, hidden = True): - vhdutil.setHidden(self.path, hidden) - self.hidden = hidden - - def 
_increaseSizeVirt(self, size, atomic = True): - """ensure the virtual size of 'self' is at least 'size'. Note that - resizing a VHD must always be offline and atomically: the file must - not be open by anyone and no concurrent operations may take place. - Thus we use the Agent API call for performing paused atomic - operations. If the caller is already in the atomic context, it must - call with atomic = False""" - if self.sizeVirt >= size: - return - Util.log(" Expanding VHD virt size for VDI %s: %s -> %s" % \ - (self.toString(), Util.num2str(self.sizeVirt), \ - Util.num2str(size))) - - msize = vhdutil.getMaxResizeSize(self.path) * 1024 * 1024 - if (size <= msize): - vhdutil.setSizeVirtFast(self.path, size) - else: - if atomic: - args = self._resizeArgs(size) - vdiList = self._getAllSubtree() - if not self.sr.xapi.atomicOp(vdiList, "resize", args): - raise util.SMException("Failed to resize atomically") - else: - self._setSizeVirt(size) - - self.sizeVirt = vhdutil.getSizeVirt(self.path) - - def _setSizeVirt(self, size): - """WARNING: do not call this method directly unless all VDIs in the - subtree are guaranteed to be unplugged (and remain so for the duration - of the operation): this operation is only safe for offline VHDs""" - jFile = os.path.join(vhdutil.VHD_JOURNAL_LOCATION, self.uuid) - vhdutil.setSizeVirt(self.path, size, jFile) - - def _queryVHDBlocks(self): - return vhdutil.getBlockBitmap(self.path) - - def _getCoalescedSizeData(self): - """Get the data size of the resulting VHD if we coalesce self onto - parent. We calculate the actual size by using the VHD block allocation - information (as opposed to just adding up the two VHD sizes to get an - upper bound)""" - # make sure we don't use stale BAT info from vdi_rec since the child - # was writable all this time - self.delConfigUpdate(VDI.DB_VHD_BLOCKS) - blocksChild = self.getVHDBlocks() - blocksParent = self.parent.getVHDBlocks() - numBlocks = Util.countBits(blocksChild, blocksParent) - Util.log("Num combined blocks = %d" % numBlocks) - sizeData = numBlocks * vhdutil.VHD_BLOCK_SIZE - assert(sizeData <= self.sizeVirt) - return sizeData - - def _calcExtraSpaceForCoalescing(self): - sizeData = self._getCoalescedSizeData() - sizeCoalesced = sizeData + vhdutil.calcOverheadBitmap(sizeData) + \ - vhdutil.calcOverheadEmpty(self.sizeVirt) - Util.log("Coalesced size = %s" % Util.num2str(sizeCoalesced)) - return sizeCoalesced - self.parent.getSizeVHD() - - def _calcExtraSpaceForLeafCoalescing(self): - """How much extra space in the SR will be required to - [live-]leaf-coalesce this VDI""" - # the space requirements are the same as for inline coalesce - return self._calcExtraSpaceForCoalescing() - - def _calcExtraSpaceForSnapshotCoalescing(self): - """How much extra space in the SR will be required to - snapshot-coalesce this VDI""" - return self._calcExtraSpaceForCoalescing() + \ - vhdutil.calcOverheadEmpty(self.sizeVirt) # extra snap leaf - - def _getAllSubtree(self): - """Get self and all VDIs in the subtree of self as a flat list""" - vdiList = [self] - for child in self.children: - vdiList.extend(child._getAllSubtree()) - return vdiList - - def _resizeArgs(self, size): - args = { - "type": self.sr.TYPE, - "uuid": self.uuid, - "path": self.path, - "size": str(size), - "srUuid": self.sr.uuid - } - return args - - -class FileVDI(VDI): - """Object representing a VDI in a file-based SR (EXT or NFS)""" - - FILE_SUFFIX = ".vhd" - - def extractUuid(path): - path = os.path.basename(path.strip()) - if not path.endswith(FileVDI.FILE_SUFFIX): - 
return None - uuid = path.replace(FileVDI.FILE_SUFFIX, "") - # TODO: validate UUID format - return uuid - extractUuid = staticmethod(extractUuid) - - def load(self, info = None): - if not info: - if not util.pathexists(self.path): - raise util.SMException("%s not found" % self.path) - try: - info = vhdutil.getVHDInfo(self.path, self.extractUuid) - except util.SMException: - Util.log(" [VDI %s: failed to read VHD metadata]" % self.uuid) - return - self.parent = None - self.children = [] - self.parentUuid = info.parentUuid - self.sizeVirt = info.sizeVirt - self._sizeVHD = info.sizePhys - self.hidden = info.hidden - self.scanError = False - self.path = os.path.join(self.sr.path, "%s%s" % \ - (self.uuid, self.FILE_SUFFIX)) - - def rename(self, uuid): - oldPath = self.path - VDI.rename(self, uuid) - fileName = "%s%s" % (self.uuid, self.FILE_SUFFIX) - self.path = os.path.join(self.sr.path, fileName) - assert(not util.pathexists(self.path)) - Util.log("Renaming %s -> %s" % (oldPath, self.path)) - os.rename(oldPath, self.path) - - def delete(self): - if len(self.children) > 0: - raise util.SMException("VDI %s has children, can't delete" % \ - self.uuid) - try: - self.sr.lock() - try: - os.unlink(self.path) - finally: - self.sr.unlock() - except OSError: - raise util.SMException("os.unlink(%s) failed" % self.path) - self.sr.xapi.forgetVDI(self.uuid) - VDI.delete(self) - - -class LVHDVDI(VDI): - """Object representing a VDI in an LVHD SR""" - - JRN_ZERO = "zero" # journal entry type for zeroing out end of parent - DRIVER_NAME_RAW = "aio" - - def load(self, vdiInfo): - self.parent = None - self.children = [] - self._sizeVHD = -1 - self.scanError = vdiInfo.scanError - self.raw = vdiInfo.vdiType == lvhdutil.VDI_TYPE_RAW - self.sizeLV = vdiInfo.sizeLV - self.sizeVirt = vdiInfo.sizeVirt - self.lvName = vdiInfo.lvName - self.lvActive = vdiInfo.lvActive - self.lvReadonly = vdiInfo.lvReadonly - self.hidden = vdiInfo.hidden - self.parentUuid = vdiInfo.parentUuid - self.path = os.path.join(self.sr.path, self.lvName) - - def getDriverName(self): - if self.raw: - return self.DRIVER_NAME_RAW - return self.DRIVER_NAME_VHD - - def inflate(self, size): - """inflate the LV containing the VHD to 'size'""" - if self.raw: - return - self._activate() - self.sr.lock() - try: - lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, size) - util.fistpoint.activate("LVHDRT_inflating_the_parent",self.sr.uuid) - finally: - self.sr.unlock() - self.sizeLV = self.sr.lvmCache.getSize(self.lvName) - self._sizeVHD = -1 - - def deflate(self): - """deflate the LV containing the VHD to minimum""" - if self.raw: - return - self._activate() - self.sr.lock() - try: - lvhdutil.deflate(self.sr.lvmCache, self.lvName, self.getSizeVHD()) - finally: - self.sr.unlock() - self.sizeLV = self.sr.lvmCache.getSize(self.lvName) - self._sizeVHD = -1 - - def inflateFully(self): - self.inflate(lvhdutil.calcSizeVHDLV(self.sizeVirt)) - - def inflateParentForCoalesce(self): - """Inflate the parent only as much as needed for the purposes of - coalescing""" - if self.parent.raw: - return - inc = self._calcExtraSpaceForCoalescing() - if inc > 0: - util.fistpoint.activate("LVHDRT_coalescing_before_inflate_grandparent",self.sr.uuid) - self.parent.inflate(self.parent.sizeLV + inc) - - def updateBlockInfo(self): - if not self.raw: - VDI.updateBlockInfo(self) - - def rename(self, uuid): - oldUuid = self.uuid - oldLVName = self.lvName - VDI.rename(self, uuid) - self.lvName = lvhdutil.LV_PREFIX[lvhdutil.VDI_TYPE_VHD] + self.uuid - if self.raw: - self.lvName = 
lvhdutil.LV_PREFIX[lvhdutil.VDI_TYPE_RAW] + self.uuid - self.path = os.path.join(self.sr.path, self.lvName) - assert(not self.sr.lvmCache.checkLV(self.lvName)) - - self.sr.lvmCache.rename(oldLVName, self.lvName) - if self.sr.lvActivator.get(oldUuid, False): - self.sr.lvActivator.replace(oldUuid, self.uuid, self.lvName, False) - - ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid - (cnt, bcnt) = RefCounter.check(oldUuid, ns) - RefCounter.set(self.uuid, cnt, bcnt, ns) - RefCounter.reset(oldUuid, ns) - - def delete(self): - if len(self.children) > 0: - raise util.SMException("VDI %s has children, can't delete" % \ - self.uuid) - self.sr.lock() - try: - self.sr.lvmCache.remove(self.lvName) - finally: - self.sr.unlock() - RefCounter.reset(self.uuid, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) - self.sr.xapi.forgetVDI(self.uuid) - VDI.delete(self) - - def getSizeVHD(self): - if self._sizeVHD == -1: - self._loadInfoSizeVHD() - return self._sizeVHD - - def _loadInfoSizeVHD(self): - """Get the physical utilization of the VHD file. We do it individually - (and not using the VHD batch scanner) as an optimization: this info is - relatively expensive and we need it only for VDI's involved in - coalescing.""" - if self.raw: - return - self._activate() - self._sizeVHD = vhdutil.getSizePhys(self.path) - if self._sizeVHD <= 0: - raise util.SMException("phys size of %s = %d" % \ - (self.toString(), self._sizeVHD)) - - def _loadInfoHidden(self): - if self.raw: - self.hidden = self.sr.lvmCache.getHidden(self.lvName) - else: - VDI._loadInfoHidden(self) - - def _setHidden(self, hidden = True): - if self.raw: - self.sr.lvmCache.setHidden(self.lvName, hidden) - self.hidden = hidden - else: - VDI._setHidden(self, hidden) - - def toString(self): - strType = "VHD" - if self.raw: - strType = "RAW" - strHidden = "" - if self.hidden: - strHidden = "*" - strSizeVHD = "" - if self._sizeVHD > 0: - strSizeVHD = Util.num2str(self._sizeVHD) - strActive = "n" - if self.lvActive: - strActive = "a" - return "%s%s[%s](%s/%s/%s|%s)" % (strHidden, self.uuid[0:8], strType, - Util.num2str(self.sizeVirt), strSizeVHD, - Util.num2str(self.sizeLV), strActive) - - def validate(self): - if not self.raw: - VDI.validate(self) - - def _coalesceBegin(self): - """LVHD parents must first be activated, inflated, and made writable""" - try: - self._activateChain() - self.sr.lvmCache.setReadonly(self.parent.lvName, False) - self.parent.validate() - self.inflateParentForCoalesce() - VDI._coalesceBegin(self) - finally: - self.parent._loadInfoSizeVHD() - self.parent.deflate() - self.sr.lvmCache.setReadonly(self.lvName, True) - - def _setParent(self, parent): - self._activate() - if self.lvReadonly: - self.sr.lvmCache.setReadonly(self.lvName, False) - - try: - vhdutil.setParent(self.path, parent.path, parent.raw) - finally: - if self.lvReadonly: - self.sr.lvmCache.setReadonly(self.lvName, True) - self._deactivate() - self.parent = parent - self.parentUuid = parent.uuid - parent.children.append(self) - try: - self.setConfigUpdate(self.DB_VHD_PARENT, self.parentUuid) - Util.log("Updated the vhd-parent field for child %s with %s" % \ - (self.uuid, self.parentUuid)) - except: - Util.log("Failed to update the vhd-parent with %s for child %s" % \ - (self.parentUuid, self.uuid)) - - def _activate(self): - self.sr.lvActivator.activate(self.uuid, self.lvName, False) - - def _activateChain(self): - vdi = self - while vdi: - vdi._activate() - vdi = vdi.parent - - def _deactivate(self): - self.sr.lvActivator.deactivate(self.uuid, False) - - def _increaseSizeVirt(self, size, 
atomic = True): - "ensure the virtual size of 'self' is at least 'size'" - self._activate() - if not self.raw: - VDI._increaseSizeVirt(self, size, atomic) - return - - # raw VDI case - offset = self.sizeLV - if self.sizeVirt < size: - oldSize = self.sizeLV - self.sizeLV = util.roundup(lvutil.LVM_SIZE_INCREMENT, size) - Util.log(" Growing %s: %d->%d" % (self.path, oldSize, self.sizeLV)) - self.sr.lvmCache.setSize(self.lvName, self.sizeLV) - offset = oldSize - unfinishedZero = False - jval = self.sr.journaler.get(self.JRN_ZERO, self.uuid) - if jval: - unfinishedZero = True - offset = int(jval) - length = self.sizeLV - offset - if not length: - return - - if unfinishedZero: - Util.log(" ==> Redoing unfinished zeroing out") - else: - self.sr.journaler.create(self.JRN_ZERO, self.uuid, \ - str(offset)) - Util.log(" Zeroing %s: from %d, %dB" % (self.path, offset, length)) - abortTest = lambda:IPCFlag(self.sr.uuid).test(FLAG_TYPE_ABORT) - func = lambda: util.zeroOut(self.path, offset, length) - Util.runAbortable(func, True, self.sr.uuid, abortTest, - VDI.POLL_INTERVAL, 0) - self.sr.journaler.remove(self.JRN_ZERO, self.uuid) - - def _setSizeVirt(self, size): - """WARNING: do not call this method directly unless all VDIs in the - subtree are guaranteed to be unplugged (and remain so for the duration - of the operation): this operation is only safe for offline VHDs""" - self._activate() - jFile = lvhdutil.createVHDJournalLV(self.sr.lvmCache, self.uuid, - vhdutil.MAX_VHD_JOURNAL_SIZE) - try: - lvhdutil.setSizeVirt(self.sr.journaler, self.sr.uuid, self.uuid, - size, jFile) - finally: - lvhdutil.deleteVHDJournalLV(self.sr.lvmCache, self.uuid) - - def _queryVHDBlocks(self): - self._activate() - return VDI._queryVHDBlocks(self) - - def _calcExtraSpaceForCoalescing(self): - if self.parent.raw: - return 0 # raw parents are never deflated in the first place - sizeCoalesced = lvhdutil.calcSizeVHDLV(self._getCoalescedSizeData()) - Util.log("Coalesced size = %s" % Util.num2str(sizeCoalesced)) - return sizeCoalesced - self.parent.sizeLV - - def _calcExtraSpaceForLeafCoalescing(self): - """How much extra space in the SR will be required to - [live-]leaf-coalesce this VDI""" - # we can deflate the leaf to minimize the space requirements - deflateDiff = self.sizeLV - lvhdutil.calcSizeLV(self.getSizeVHD()) - return self._calcExtraSpaceForCoalescing() - deflateDiff - - def _calcExtraSpaceForSnapshotCoalescing(self): - return self._calcExtraSpaceForCoalescing() + \ - lvhdutil.calcSizeLV(self.getSizeVHD()) - - def _resizeArgs(self, size): - args = VDI._resizeArgs(self, size) - args["vgName"] = self.sr.vgName - args["lvSize"] = str(self.sizeLV) - return args - - - -################################################################################ -# -# SR -# -class SR: - TYPE_FILE = "file" - TYPE_LVHD = "lvhd" - TYPES = [TYPE_LVHD, TYPE_FILE] - - LOCK_RETRY_INTERVAL = 3 - LOCK_RETRY_ATTEMPTS = 20 - LOCK_RETRY_ATTEMPTS_LOCK = 100 - - SCAN_RETRY_ATTEMPTS = 3 - - JRN_CLONE = "clone" # journal entry type for the clone operation (from SM) - TMP_RENAME_PREFIX = "OLD_" - - KEY_OFFLINE_COALESCE_NEEDED = "leaf_coalesce_need_offline" - KEY_OFFLINE_COALESCE_OVERRIDE = "leaf_coalesce_offline_override" - - def getInstance(uuid, xapiSession): - xapi = XAPI(xapiSession, uuid) - type = normalizeType(xapi.srRecord["type"]) - if type == SR.TYPE_FILE: - return FileSR(uuid, xapi) - elif type == SR.TYPE_LVHD: - return LVHDSR(uuid, xapi) - raise util.SMException("SR type %s not recognized" % type) - getInstance = staticmethod(getInstance) - - 
def __init__(self, uuid, xapi): - self.uuid = uuid - self.path = "" - self.name = "" - self.vdis = {} - self.vdiTrees = [] - self.journaler = None - self.xapi = xapi - self._locked = 0 - self._srLock = lock.Lock(vhdutil.LOCK_TYPE_SR, self.uuid) - self.name = unicode(self.xapi.srRecord["name_label"]).encode("utf-8", "replace") - self._failedCoalesceTargets = [] - - if not self.xapi.isPluggedHere(): - raise util.SMException("SR %s not attached on this host" % uuid) - - if not self.xapi.isMaster(): - raise util.SMException("This host is NOT master, will not run") - - def scan(self, force = False): - """Scan the SR and load VDI info for each VDI. If called repeatedly, - update VDI objects if they already exist""" - pass # abstract - - def scanLocked(self, force = False): - self.lock() - try: - self.scan(force) - finally: - self.unlock() - - def getVDI(self, uuid): - return self.vdis.get(uuid) - - def hasWork(self): - if len(self.findGarbage()) > 0: - return True - if self.findCoalesceable(): - return True - if self.findLeafCoalesceable(): - return True - if self.needUpdateBlockInfo(): - return True - return False - - def findCoalesceable(self): - """Find a coalesceable VDI. Return a vdi that should be coalesced - (choosing one among all coalesceable candidates according to some - criteria) or None if there is no VDI that could be coalesced""" - # finish any VDI for which a relink journal entry exists first - journals = self.journaler.getAll(VDI.JRN_RELINK) - for uuid in journals.iterkeys(): - vdi = self.getVDI(uuid) - if vdi and vdi not in self._failedCoalesceTargets: - return vdi - - candidates = [] - for vdi in self.vdis.values(): - if vdi.isCoalesceable() and vdi not in self._failedCoalesceTargets: - candidates.append(vdi) - - # pick one in the tallest tree - treeHeight = dict() - for c in candidates: - height = c.getTreeRoot().getTreeHeight() - if treeHeight.get(height): - treeHeight[height].append(c) - else: - treeHeight[height] = [c] - - freeSpace = self.getFreeSpace() - heights = treeHeight.keys() - heights.sort(reverse=True) - for h in heights: - for c in treeHeight[h]: - spaceNeeded = c._calcExtraSpaceForCoalescing() - if spaceNeeded <= freeSpace: - Util.log("Coalesce candidate: %s (tree height %d)" % \ - (c.toString(), h)) - return c - else: - Util.log("No space to coalesce %s (free space: %d)" % \ - (c.toString(), freeSpace)) - return None - - def findLeafCoalesceable(self): - """Find leaf-coalesceable VDIs in each VHD tree""" - candidates = [] - for vdi in self.vdis.values(): - if not vdi.isLeafCoalesceable(): - continue - if vdi in self._failedCoalesceTargets: - continue - if vdi.getConfig(vdi.DB_LEAFCLSC) == vdi.LEAFCLSC_DISABLED: - Util.log("Leaf-coalesce disabled for %s" % vdi.toString()) - continue - if not (AUTO_ONLINE_LEAF_COALESCE_ENABLED or \ - vdi.getConfig(vdi.DB_LEAFCLSC) == vdi.LEAFCLSC_FORCE): - continue - candidates.append(vdi) - - freeSpace = self.getFreeSpace() - for candidate in candidates: - # check the space constraints to see if leaf-coalesce is actually - # feasible for this candidate - spaceNeeded = candidate._calcExtraSpaceForSnapshotCoalescing() - spaceNeededLive = spaceNeeded - if spaceNeeded > freeSpace: - spaceNeededLive = candidate._calcExtraSpaceForLeafCoalescing() - if candidate.canLiveCoalesce(): - spaceNeeded = spaceNeededLive - if spaceNeeded <= freeSpace: - Util.log("Leaf-coalesce candidate: %s" % candidate.toString()) - return candidate - else: - Util.log("No space to leaf-coalesce %s (free space: %d)" % \ - (candidate.toString(), freeSpace)) - 
if spaceNeededLive <= freeSpace: - Util.log("...but enough space if skip snap-coalesce") - candidate.setConfigUpdate(VDI.DB_LEAFCLSC, - VDI.LEAFCLSC_OFFLINE) - - return None - - def coalesce(self, vdi, dryRun): - """Coalesce vdi onto parent""" - Util.log("Coalescing %s -> %s" % \ - (vdi.toString(), vdi.parent.toString())) - if dryRun: - return - - try: - self._coalesce(vdi) - except util.SMException, e: - if isinstance(e, AbortException): - self.cleanup() - raise - else: - self._failedCoalesceTargets.append(vdi) - Util.logException("coalesce") - Util.log("Coalesce failed, skipping") - self.cleanup() - - def coalesceLeaf(self, vdi, dryRun): - """Leaf-coalesce vdi onto parent""" - Util.log("Leaf-coalescing %s -> %s" % \ - (vdi.toString(), vdi.parent.toString())) - if dryRun: - return - - try: - try: - self._coalesceLeaf(vdi) - finally: - vdi.delConfigUpdate(vdi.DB_LEAFCLSC) - except (util.SMException, XenAPI.Failure), e: - if isinstance(e, AbortException): - self.cleanup() - raise - else: - self._failedCoalesceTargets.append(vdi) - Util.logException("leaf-coalesce") - Util.log("Leaf-coalesce failed, skipping") - self.cleanup() - - def garbageCollect(self, dryRun = False): - vdiList = self.findGarbage() - Util.log("Found %d VDIs for deletion:" % len(vdiList)) - for vdi in vdiList: - Util.log(" %s" % vdi.toString()) - if not dryRun: - self.deleteVDIs(vdiList) - self.cleanupJournals(dryRun) - - def findGarbage(self): - vdiList = [] - for vdi in self.vdiTrees: - vdiList.extend(vdi.getAllPrunable()) - return vdiList - - def deleteVDIs(self, vdiList): - for vdi in vdiList: - if IPCFlag(self.uuid).test(FLAG_TYPE_ABORT): - raise AbortException("Aborting due to signal") - Util.log("Deleting unlinked VDI %s" % vdi.toString()) - self.deleteVDI(vdi) - - def deleteVDI(self, vdi): - assert(len(vdi.children) == 0) - del self.vdis[vdi.uuid] - if vdi.parent: - vdi.parent.children.remove(vdi) - if vdi in self.vdiTrees: - self.vdiTrees.remove(vdi) - vdi.delete() - - def getFreeSpace(self): - return 0 - - def cleanup(self): - Util.log("In cleanup") - return - - def toString(self): - if self.name: - ret = "%s ('%s')" % (self.uuid[0:4], self.name) - else: - ret = "%s" % self.uuid - return ret - - def printVDIs(self): - Util.log("-- SR %s has %d VDIs (%d VHD trees) --" % \ - (self.toString(), len(self.vdis), len(self.vdiTrees))) - for vdi in self.vdiTrees: - vdi.printTree() - - def lock(self): - """Acquire the SR lock. Nested acquire()'s are ok. 
Check for Abort - signal to avoid deadlocking (trying to acquire the SR lock while the - lock is held by a process that is trying to abort us)""" - self._locked += 1 - if self._locked > 1: - return - - abortFlag = IPCFlag(self.uuid) - for i in range(SR.LOCK_RETRY_ATTEMPTS_LOCK): - if self._srLock.acquireNoblock(): - return - if abortFlag.test(FLAG_TYPE_ABORT): - raise AbortException("Abort requested") - time.sleep(SR.LOCK_RETRY_INTERVAL) - raise util.SMException("Unable to acquire the SR lock") - - def unlock(self): - assert(self._locked > 0) - self._locked -= 1 - if self._locked == 0: - self._srLock.release() - - def needUpdateBlockInfo(self): - for vdi in self.vdis.values(): - if vdi.scanError or len(vdi.children) == 0: - continue - if not vdi.getConfig(vdi.DB_VHD_BLOCKS): - return True - return False - - def updateBlockInfo(self): - for vdi in self.vdis.values(): - if vdi.scanError or len(vdi.children) == 0: - continue - if not vdi.getConfig(vdi.DB_VHD_BLOCKS): - vdi.updateBlockInfo() - - def cleanupCoalesceJournals(self): - """Remove stale coalesce VDI indicators""" - entries = self.journaler.getAll(VDI.JRN_COALESCE) - for uuid, jval in entries.iteritems(): - self.journaler.remove(VDI.JRN_COALESCE, uuid) - - def cleanupJournals(self, dryRun): - """delete journal entries for non-existing VDIs""" - for t in [LVHDVDI.JRN_ZERO, VDI.JRN_RELINK, SR.JRN_CLONE]: - entries = self.journaler.getAll(t) - for uuid, jval in entries.iteritems(): - if self.getVDI(uuid): - continue - if t == SR.JRN_CLONE: - baseUuid, clonUuid = jval.split("_") - if self.getVDI(baseUuid): - continue - Util.log(" Deleting stale '%s' journal entry for %s " - "(%s)" % (t, uuid, jval)) - if not dryRun: - self.journaler.remove(t, uuid) - - def _coalesce(self, vdi): - # JRN_COALESCE is used to check which VDI is being coalesced in order - # to decide whether to abort the coalesce. We remove the journal as - # soon as the VHD coalesce step is done, because we don't expect the - # rest of the process to take long - self.journaler.create(vdi.JRN_COALESCE, vdi.uuid, "1") - vdi._coalesceBegin() - self.journaler.remove(vdi.JRN_COALESCE, vdi.uuid) - - util.fistpoint.activate("LVHDRT_before_create_relink_journal",self.uuid) - - # we now need to relink the children: lock the SR to prevent ops like - # SM.clone from manipulating the VDIs we'll be relinking and rescan the - # SR first in case the children changed since the last scan - if not self.journaler.get(vdi.JRN_RELINK, vdi.uuid): - self.journaler.create(vdi.JRN_RELINK, vdi.uuid, "1") - - self.lock() - try: - self.scan() - vdi._relinkSkip() - finally: - self.unlock() - - vdi.parent._reloadChildren(vdi) - self.journaler.remove(vdi.JRN_RELINK, vdi.uuid) - - def _coalesceLeaf(self, vdi): - """Leaf-coalesce VDI vdi. Return true if we succeed, false if we cannot - complete due to external changes, namely vdi_delete and vdi_snapshot - that alter leaf-coalescibility of vdi""" - while not vdi.canLiveCoalesce(): - prevSizeVHD = vdi.getSizeVHD() - if not self._snapshotCoalesce(vdi): - return False - if vdi.getSizeVHD() >= prevSizeVHD: - Util.log("Snapshot-coalesce did not help, abandoning attempts") - vdi.setConfigUpdate(vdi.DB_LEAFCLSC, vdi.LEAFCLSC_OFFLINE) - break - return self._liveLeafCoalesce(vdi) - - def _snapshotCoalesce(self, vdi): - # Note that because we are not holding any locks here, concurrent SM - # operations may change this tree under our feet. In particular, vdi - # can be deleted, or it can be snapshotted. 
- assert(AUTO_ONLINE_LEAF_COALESCE_ENABLED) - Util.log("Single-snapshotting %s" % vdi.toString()) - util.fistpoint.activate("LVHDRT_coaleaf_delay_1", self.uuid) - try: - ret = self.xapi.singleSnapshotVDI(vdi) - Util.log("Single-snapshot returned: %s" % ret) - except XenAPI.Failure, e: - if self.xapi.isInvalidHandleError(e): - Util.log("The VDI appears to have been concurrently deleted") - return False - raise - self.scanLocked() - tempSnap = vdi.parent - if not tempSnap.isCoalesceable(): - Util.log("The VDI appears to have been concurrently snapshotted") - return False - Util.log("Coalescing parent %s" % tempSnap.toString()) - util.fistpoint.activate("LVHDRT_coaleaf_delay_2", self.uuid) - self._coalesce(tempSnap) - self.deleteVDI(tempSnap) - if not vdi.isLeafCoalesceable(): - Util.log("The VDI tree appears to have been altered since") - return False - return True - - def _liveLeafCoalesce(self, vdi): - args = {"srUuid": self.uuid, "vdiUuid": vdi.uuid} - util.fistpoint.activate("LVHDRT_coaleaf_delay_3", self.uuid) - try: - if not self.xapi.atomicOp([vdi], "coalesce_leaf", args, True): - Util.log("%s is no longer leaf-coalesceable" % vdi.toString()) - return False - except XenAPI.Failure, e: - if self.xapi.isInvalidHandleError(e): - Util.log("The VDI appears to have been deleted meanwhile") - return False - self.scanLocked() - return True - - def _doCoalesceLeaf(self, vdi): - pass # abstract - - def _removeStaleVDIs(self, uuidsPresent): - for uuid in self.vdis.keys(): - if not uuid in uuidsPresent: - Util.log("VDI %s disappeared since last scan" % \ - self.vdis[uuid].toString()) - del self.vdis[uuid] - - def _buildTree(self, force): - self.vdiTrees = [] - for vdi in self.vdis.values(): - if vdi.parentUuid: - parent = self.getVDI(vdi.parentUuid) - if not parent: - if vdi.uuid.startswith(self.TMP_RENAME_PREFIX): - self.vdiTrees.append(vdi) - continue - if force: - Util.log("ERROR: Parent VDI %s not found! (for %s)" % \ - (vdi.parentUuid, vdi.uuid)) - self.vdiTrees.append(vdi) - continue - else: - raise util.SMException("Parent VDI %s of %s not " \ - "found" % (vdi.parentUuid, vdi.uuid)) - vdi.parent = parent - parent.children.append(vdi) - else: - self.vdiTrees.append(vdi) - - -class FileSR(SR): - TYPE = SR.TYPE_FILE - - def __init__(self, uuid, xapi): - SR.__init__(self, uuid, xapi) - self.path = "/var/run/sr-mount/%s" % self.uuid - self.journaler = fjournaler.Journaler(self.path) - - def scan(self, force = False): - if not util.pathexists(self.path): - raise util.SMException("directory %s not found!" 
% self.uuid) - vhds = self._scan(force) - for uuid, vhdInfo in vhds.iteritems(): - vdi = self.getVDI(uuid) - if not vdi: - Util.log("Found new VDI when scanning: %s" % uuid) - vdi = FileVDI(self, uuid) - self.vdis[uuid] = vdi - vdi.load(vhdInfo) - self._removeStaleVDIs(vhds.keys()) - self._buildTree(force) - self.printVDIs() - - def getFreeSpace(self): - return util.get_fs_size(self.path) - util.get_fs_utilisation(self.path) - - def findLeafCoalesceable(self): - return None # not implemented for FileSR - - def _scan(self, force): - for i in range(SR.SCAN_RETRY_ATTEMPTS): - error = False - pattern = os.path.join(self.path, "*%s" % FileVDI.FILE_SUFFIX) - vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid) - for uuid, vhdInfo in vhds.iteritems(): - if vhdInfo.error: - error = True - break - if not error: - return vhds - Util.log("Scan error on attempt %d" % i) - if force: - return vhds - raise util.SMException("Scan error") - - def deleteVDI(self, vdi): - self._checkSlaves(vdi) - SR.deleteVDI(self, vdi) - - def _checkSlaves(self, vdi): - onlineHosts = self.xapi.getOnlineHosts() - abortFlag = IPCFlag(self.uuid) - for pbdRecord in self.xapi.getAttachedPBDs(): - hostRef = pbdRecord["host"] - if hostRef == self.xapi._hostRef: - continue - if abortFlag.test(FLAG_TYPE_ABORT): - raise AbortException("Aborting due to signal") - try: - self._checkSlave(hostRef, vdi) - except util.CommandException: - if onlineHosts.__contains__(hostRef): - raise - - def _checkSlave(self, hostRef, vdi): - call = (hostRef, "nfs-on-slave", "check", { 'path': vdi.path }) - Util.log("Checking with slave: %s" % repr(call)) - _host = self.xapi.session.xenapi.host - text = _host.call_plugin(*call) - -class LVHDSR(SR): - TYPE = SR.TYPE_LVHD - SUBTYPES = ["lvhdoiscsi", "lvhdohba"] - - def __init__(self, uuid, xapi): - SR.__init__(self, uuid, xapi) - self.vgName = "%s%s" % (lvhdutil.VG_PREFIX, self.uuid) - self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgName) - self.lvmCache = lvmcache.LVMCache(self.vgName) - self.lvActivator = LVActivator(self.uuid, self.lvmCache) - self.journaler = journaler.Journaler(self.lvmCache) - - def deleteVDI(self, vdi): - if self.lvActivator.get(vdi.uuid, False): - self.lvActivator.deactivate(vdi.uuid, False) - self._checkSlaves(vdi) - SR.deleteVDI(self, vdi) - - def getFreeSpace(self): - stats = lvutil._getVGstats(self.vgName) - return stats['physical_size'] - stats['physical_utilisation'] - - def cleanup(self): - if not self.lvActivator.deactivateAll(): - Util.log("ERROR deactivating LVs while cleaning up") - - def needUpdateBlockInfo(self): - for vdi in self.vdis.values(): - if vdi.scanError or vdi.raw or len(vdi.children) == 0: - continue - if not vdi.getConfig(vdi.DB_VHD_BLOCKS): - return True - return False - - def updateBlockInfo(self): - for vdi in self.vdis.values(): - if vdi.scanError or vdi.raw or len(vdi.children) == 0: - continue - if not vdi.getConfig(vdi.DB_VHD_BLOCKS): - vdi.updateBlockInfo() - - def scan(self, force = False): - vdis = self._scan(force) - for uuid, vdiInfo in vdis.iteritems(): - vdi = self.getVDI(uuid) - if not vdi: - Util.log("Found new VDI when scanning: %s" % uuid) - vdi = LVHDVDI(self, uuid) - self.vdis[uuid] = vdi - vdi.load(vdiInfo) - self._removeStaleVDIs(vdis.keys()) - self._buildTree(force) - self.printVDIs() - self._handleInterruptedCoalesceLeaf() - - def _scan(self, force): - for i in range(SR.SCAN_RETRY_ATTEMPTS): - error = False - self.lvmCache.refresh() - vdis = lvhdutil.getVDIInfo(self.lvmCache) - for uuid, vdiInfo in vdis.iteritems(): - if 
vdiInfo.scanError: - error = True - break - if not error: - return vdis - Util.log("Scan error, retrying (%d)" % i) - if force: - return vdis - raise util.SMException("Scan error") - - def _liveLeafCoalesce(self, vdi): - """If the parent is raw and the child was resized (virt. size), then - we'll need to resize the parent, which can take a while due to zeroing - out of the extended portion of the LV. Do it before pausing the child - to avoid a protracted downtime""" - if vdi.parent.raw and vdi.sizeVirt > vdi.parent.sizeVirt: - self.lvmCache.setReadonly(vdi.parent.lvName, False) - vdi.parent._increaseSizeVirt(vdi.sizeVirt) - - parentUuid = vdi.parent.uuid - if not SR._liveLeafCoalesce(self, vdi): - return False - - # fix the activation records after the UUIDs have been changed - if self.lvActivator.get(parentUuid, False): - self.lvActivator.replace(parentUuid, vdi.uuid, vdi.lvName, False) - else: - self.lvActivator.remove(vdi.uuid, False) - return True - - def _doCoalesceLeaf(self, vdi): - """Actual coalescing of a leaf VDI onto parent. Must be called in an - offline/atomic context""" - vdi._activateChain() - self.journaler.create(VDI.JRN_LEAF, vdi.uuid, vdi.parent.uuid) - self.lvmCache.setReadonly(vdi.parent.lvName, False) - vdi.parent._setHidden(False) - vdi.deflate() - vdi.inflateParentForCoalesce() - vdi.parent._increaseSizeVirt(vdi.sizeVirt, False) - vdi.validate() - vdi.parent.validate() - util.fistpoint.activate("LVHDRT_coaleaf_before_coalesce", self.uuid) - timeout = vdi.LIVE_LEAF_COALESCE_TIMEOUT - if vdi.getConfig(vdi.DB_LEAFCLSC) == vdi.LEAFCLSC_FORCE: - Util.log("Leaf-coalesce forced, will not use timeout") - timeout = 0 - vdi._coalesceVHD(timeout) - util.fistpoint.activate("LVHDRT_coaleaf_after_coalesce", self.uuid) - vdi.parent.validate() - #vdi._verifyContents(timeout / 2) - - # rename - vdiUuid = vdi.uuid - oldNameLV = vdi.lvName - origParentUuid = vdi.parent.uuid - vdi.rename(self.TMP_RENAME_PREFIX + vdiUuid) - util.fistpoint.activate("LVHDRT_coaleaf_one_renamed", self.uuid) - vdi.parent.rename(vdiUuid) - util.fistpoint.activate("LVHDRT_coaleaf_both_renamed", self.uuid) - self._updateSlavesOnRename(vdi.parent, oldNameLV) - - # Note that "vdi.parent" is now the single remaining leaf and "vdi" is - # garbage - - # update the VDI record - vdi.parent.delConfig(VDI.DB_VHD_PARENT) - if vdi.parent.raw: - vdi.parent.setConfig(VDI.DB_VDI_TYPE, lvhdutil.VDI_TYPE_RAW) - vdi.parent.delConfig(VDI.DB_VHD_BLOCKS) - vdi.parent.updateConfig() - util.fistpoint.activate("LVHDRT_coaleaf_after_vdirec", self.uuid) - - # fix the refcounts: the remaining node should inherit the binary - # refcount from the leaf (because if it was online, it should remain - # refcounted as such), but the normal refcount from the parent (because - # this node is really the parent node) - minus 1 if it is online (since - # non-leaf nodes increment their normal counts when they are online and - # we are now a leaf, storing that 1 in the binary refcount). 
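A worked example may help with the refcount inheritance rule just described (a sketch with toy numbers, not the RefCounter API): the surviving node takes the leaf's binary count unchanged, and the parent's normal count minus that binary count, because a node that was online as a non-leaf carried one normal reference that now lives in the binary count instead.

    def inherit_refcounts(p_cnt, p_bcnt, c_cnt, c_bcnt):
        # (normal, binary) counts for the node that survives leaf-coalesce
        new_cnt = p_cnt - c_bcnt
        assert new_cnt >= 0
        return (new_cnt, c_bcnt)

    # online leaf (binary=1) over a parent that was online as a non-leaf
    # (normal=1): the survivor ends up (0, 1), i.e. an online leaf again
    print(inherit_refcounts(1, 0, 1, 1))  # -> (0, 1)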
- ns = lvhdutil.NS_PREFIX_LVM + self.uuid - cCnt, cBcnt = RefCounter.check(vdi.uuid, ns) - pCnt, pBcnt = RefCounter.check(vdi.parent.uuid, ns) - pCnt = pCnt - cBcnt - assert(pCnt >= 0) - RefCounter.set(vdi.parent.uuid, pCnt, cBcnt, ns) - - # delete the obsolete leaf & inflate the parent (in that order, to - # minimize free space requirements) - parent = vdi.parent - vdi._setHidden(True) - vdi.parent.children = [] - vdi.parent = None - util.fistpoint.activate("LVHDRT_coaleaf_before_delete", self.uuid) - self.deleteVDI(vdi) - util.fistpoint.activate("LVHDRT_coaleaf_after_delete", self.uuid) - self.xapi.forgetVDI(origParentUuid) - parent.inflateFully() - - util.fistpoint.activate("LVHDRT_coaleaf_before_remove_j", self.uuid) - self.journaler.remove(VDI.JRN_LEAF, vdiUuid) - - def _handleInterruptedCoalesceLeaf(self): - """An interrupted leaf-coalesce operation may leave the VHD tree in an - inconsistent state. If the old-leaf VDI is still present, we revert the - operation (in case the original error is persistent); otherwise we must - finish the operation""" - entries = self.journaler.getAll(VDI.JRN_LEAF) - for uuid, parentUuid in entries.iteritems(): - childLV = lvhdutil.LV_PREFIX[lvhdutil.VDI_TYPE_VHD] + uuid - tmpChildLV = lvhdutil.LV_PREFIX[lvhdutil.VDI_TYPE_VHD] + \ - self.TMP_RENAME_PREFIX + uuid - parentLV1 = lvhdutil.LV_PREFIX[lvhdutil.VDI_TYPE_VHD] + parentUuid - parentLV2 = lvhdutil.LV_PREFIX[lvhdutil.VDI_TYPE_RAW] + parentUuid - parentPresent = (self.lvmCache.checkLV(parentLV1) or \ - self.lvmCache.checkLV(parentLV2)) - if parentPresent or self.lvmCache.checkLV(tmpChildLV): - self._undoInterruptedCoalesceLeaf(uuid, parentUuid) - else: - self._finishInterruptedCoalesceLeaf(uuid, parentUuid) - self.journaler.remove(VDI.JRN_LEAF, uuid) - - def _undoInterruptedCoalesceLeaf(self, childUuid, parentUuid): - Util.log("*** UNDO LEAF-COALESCE") - parent = self.getVDI(parentUuid) - if not parent: - parent = self.getVDI(childUuid) - if not parent: - raise util.SMException("Neither %s nor %s found" % \ - (parentUuid, childUuid)) - Util.log("Renaming parent back: %s -> %s" % (childUuid, parentUuid)) - parent.rename(parentUuid) - util.fistpoint.activate("LVHDRT_coaleaf_undo_after_rename", self.uuid) - - child = self.getVDI(childUuid) - if not child: - child = self.getVDI(self.TMP_RENAME_PREFIX + childUuid) - if not child: - raise util.SMException("Neither %s nor %s found" % \ - (childUuid, self.TMP_RENAME_PREFIX + childUuid)) - Util.log("Renaming child back to %s" % childUuid) - child.rename(childUuid) - Util.log("Updating the VDI record") - child.setConfig(VDI.DB_VHD_PARENT, parentUuid) - child.setConfig(VDI.DB_VDI_TYPE, lvhdutil.VDI_TYPE_VHD) - child.updateConfig() - util.fistpoint.activate("LVHDRT_coaleaf_undo_after_rename2", self.uuid) - - # refcount (best effort - assume that it had succeeded if the - # second rename succeeded; if not, this adjustment will be wrong, - # leading to a non-deactivation of the LV) - ns = lvhdutil.NS_PREFIX_LVM + self.uuid - cCnt, cBcnt = RefCounter.check(child.uuid, ns) - pCnt, pBcnt = RefCounter.check(parent.uuid, ns) - pCnt = pCnt + cBcnt - RefCounter.set(parent.uuid, pCnt, 0, ns) - util.fistpoint.activate("LVHDRT_coaleaf_undo_after_refcount", self.uuid) - - parent.deflate() - child.inflateFully() - util.fistpoint.activate("LVHDRT_coaleaf_undo_after_deflate", self.uuid) - if child.hidden: - child._setHidden(False) - if not parent.hidden: - parent._setHidden(True) - if not parent.lvReadonly: - self.lvmCache.setReadonly(parent.lvName, True) - 
util.fistpoint.activate("LVHDRT_coaleaf_undo_end", self.uuid) - Util.log("*** leaf-coalesce undo successful") - if util.fistpoint.is_active("LVHDRT_coaleaf_stop_after_recovery"): - child.setConfigUpdate(VDI.DB_LEAFCLSC, VDI.LEAFCLSC_DISABLED) - - def _finishInterruptedCoalesceLeaf(self, childUuid, parentUuid): - Util.log("*** FINISH LEAF-COALESCE") - vdi = self.getVDI(childUuid) - if not vdi: - raise util.SMException("VDI %s not found" % childUuid) - vdi.inflateFully() - util.fistpoint.activate("LVHDRT_coaleaf_finish_after_inflate", self.uuid) - try: - self.xapi.forgetVDI(parentUuid) - except XenAPI.Failure: - pass - util.fistpoint.activate("LVHDRT_coaleaf_finish_end", self.uuid) - Util.log("*** finished leaf-coalesce successfully") - - def _checkSlaves(self, vdi): - """Confirm with all slaves in the pool that 'vdi' is not in use. We - try to check all slaves, including those that the Agent believes are - offline, but ignore failures for offline hosts. This is to avoid cases - where the Agent thinks a host is offline but the host is up.""" - args = {"vgName" : self.vgName, - "action1": "deactivateNoRefcount", - "lvName1": vdi.lvName, - "action2": "cleanupLock", - "uuid2" : vdi.uuid, - "ns2" : lvhdutil.NS_PREFIX_LVM + self.uuid} - onlineHosts = self.xapi.getOnlineHosts() - abortFlag = IPCFlag(self.uuid) - for pbdRecord in self.xapi.getAttachedPBDs(): - hostRef = pbdRecord["host"] - if hostRef == self.xapi._hostRef: - continue - if abortFlag.test(FLAG_TYPE_ABORT): - raise AbortException("Aborting due to signal") - Util.log("Checking with slave %s (path %s)" % (hostRef, vdi.path)) - try: - self.xapi.ensureInactive(hostRef, args) - except XenAPI.Failure: - if onlineHosts.__contains__(hostRef): - raise - - def _updateSlavesOnRename(self, vdi, oldNameLV): - activeVBDs = util.get_attached_VBDs(self.xapi.session, vdi.uuid) - slaves = util.get_hosts(self.xapi.session, activeVBDs) - pool = self.xapi.session.xenapi.pool.get_all()[0] - master = self.xapi.session.xenapi.pool.get_master(pool) - if master in slaves: - slaves.remove(master) - if len(slaves) == 0: - Util.log("VDI %s not attached on any slave" % vdi.toString()) - return - - util.SMlog("Updating %s to %s on %d slaves:" % \ - (oldNameLV, vdi.lvName, len(slaves))) - for slave in slaves: - util.SMlog("Updating slave %s" % slave) - args = {"vgName" : self.vgName, - "action1": "deactivateNoRefcount", - "lvName1": oldNameLV, - "action2": "refresh", - "lvName2": vdi.lvName} - text = self.xapi.session.xenapi.host.call_plugin( \ - slave, self.xapi.PLUGIN_ON_SLAVE, "multi", args) - util.SMlog("call-plugin returned: '%s'" % text) - - -################################################################################ -# -# Helpers -# -def daemonize(): - pid = os.fork() - if pid: - os.waitpid(pid, 0) - Util.log("New PID [%d]" % pid) - return False - os.chdir("/") - os.setsid() - pid = os.fork() - if pid: - Util.log("Will finish as PID [%d]" % pid) - os._exit(0) - for fd in [0, 1, 2]: - try: - os.close(fd) - except OSError: - pass - # we need to fill those special fd numbers or pread won't work - sys.stdin = open("/dev/null", 'r') - sys.stderr = open("/dev/null", 'w') - sys.stdout = open("/dev/null", 'w') - return True - -def normalizeType(type): - if type in LVHDSR.SUBTYPES: - type = SR.TYPE_LVHD - if type in ["lvm", "lvmoiscsi", "lvmohba"]: - # temporary while LVHD is symlinked as LVM - type = SR.TYPE_LVHD - if type in ["ext", "nfs"]: - type = SR.TYPE_FILE - if not type in SR.TYPES: - raise util.SMException("Unsupported SR type: %s" % type) - return type 
- -def _gcLoop(sr, dryRun): - failedCandidates = [] - while True: - if not sr.xapi.isPluggedHere(): - Util.log("SR no longer attached, exiting") - break - sr.scanLocked() - if not sr.hasWork(): - Util.log("No work, exiting") - break - - if not lockRunning.acquireNoblock(): - Util.log("Another instance already running, exiting") - break - try: - sr.cleanupCoalesceJournals() - sr.scanLocked() - sr.updateBlockInfo() - - if len(sr.findGarbage()) > 0: - sr.garbageCollect(dryRun) - sr.xapi.srUpdate() - continue - - candidate = sr.findCoalesceable() - if candidate: - util.fistpoint.activate("LVHDRT_finding_a_suitable_pair",sr.uuid) - sr.coalesce(candidate, dryRun) - sr.xapi.srUpdate() - continue - - candidate = sr.findLeafCoalesceable() - if candidate: - sr.coalesceLeaf(candidate, dryRun) - sr.xapi.srUpdate() - continue - - Util.log("No work left") - finally: - lockRunning.release() - -def _gc(session, srUuid, dryRun): - init(srUuid) - sr = SR.getInstance(srUuid, session) - - try: - _gcLoop(sr, dryRun) - finally: - sr.cleanup() - Util.log("Final SR state:") - Util.log(sr.toString()) - sr.printVDIs() - -def _abort(srUuid): - """If successful, we return holding lockRunning; otherwise exception - raised.""" - Util.log("=== SR %s: abort ===" % (srUuid)) - init(srUuid) - if not lockRunning.acquireNoblock(): - gotLock = False - Util.log("Aborting currently-running instance (SR %s)" % srUuid) - abortFlag = IPCFlag(srUuid) - abortFlag.set(FLAG_TYPE_ABORT) - for i in range(SR.LOCK_RETRY_ATTEMPTS): - gotLock = lockRunning.acquireNoblock() - if gotLock: - break - time.sleep(SR.LOCK_RETRY_INTERVAL) - abortFlag.clear(FLAG_TYPE_ABORT) - if not gotLock: - raise util.SMException("SR %s: error aborting existing process" % \ - srUuid) - -def coalesceLeafAtomic(session, srUuid, vdiUuid): - """Coalesce a leaf node onto its parent. This should be invoked in the - stop_using_these_vdis_() context to ensure that the leaf is offline. It is - dangerous to invoke this function otherwise. 
Return True on success, False - if the target VDI is no longer leaf-coalesceable""" - Util.log("=== SR %s: coalesceLeafAtomic for VDI %s ===" % (srUuid, vdiUuid)) - init(srUuid) - sr = SR.getInstance(srUuid, session) - sr.lock() - try: - sr.scan() - vdi = sr.getVDI(vdiUuid) - if not vdi.isLeafCoalesceable(): - return False - try: - sr._doCoalesceLeaf(vdi) - except: - Util.logException("_doCoalesceLeaf") - sr._handleInterruptedCoalesceLeaf() - raise - finally: - sr.cleanup() - sr.unlock() - Util.log("final SR state:") - Util.log(sr.toString()) - sr.printVDIs() - return True - -def init(srUuid): - global lockRunning - if not lockRunning: - lockRunning = lock.Lock(LOCK_TYPE_RUNNING, srUuid) - -def usage(): - output = """Garbage collect and/or coalesce VHDs in a VHD-based SR - -Parameters: - -u --uuid UUID SR UUID - and one of: - -g --gc garbage collect, coalesce, and repeat while there is work - -G --gc_force garbage collect once, aborting any current operations - -a --abort abort any currently running operation (GC or coalesce) - -q --query query the current state (GC'ing, coalescing or not running) - -x --disable disable GC/coalesce (will be in effect until you exit) - -Options: - -b --background run in background (return immediately) (valid for -g only) - -f --force continue in the presence of VHDs with errors (when doing - GC, this might cause removal of any such VHDs) (only valid - for -G) (DANGEROUS) - """ - #-d --dry-run don't actually perform any SR-modifying operations - #-t perform a custom operation (for testing) - print output - Util.log("(Invalid usage)") - sys.exit(1) - - -############################################################################## -# -# API -# -def abort(srUuid): - """Abort GC/coalesce if we are currently GC'ing or coalescing a VDI pair. - """ - _abort(srUuid) - Util.log("abort: releasing the process lock") - lockRunning.release() - -def gc(session, srUuid, inBackground, dryRun = False): - """Garbage collect all deleted VDIs in SR "srUuid". Fork & return - immediately if inBackground=True. - - The following algorithm is used: - 1. If we are already GC'ing in this SR, return - 2. If we are already coalescing a VDI pair: - a. Scan the SR and determine if the VDI pair is GC'able - b. If the pair is not GC'able, return - c. If the pair is GC'able, abort coalesce - 3. Scan the SR - 4. If there is nothing to collect, nor to coalesce, return - 5. If there is something to collect, GC all, then goto 3 - 6. If there is something to coalesce, coalesce one pair, then goto 3 - """ - Util.log("=== SR %s: gc ===" % srUuid) - if inBackground: - if daemonize(): - # we are now running in the background. Catch & log any errors - # because there is no other way to propagate them back at this - # point - - try: - _gc(None, srUuid, dryRun) - except AbortException: - Util.log("Aborted") - except Exception: - Util.logException("gc") - Util.log("* * * * * SR %s: ERROR\n" % srUuid) - os._exit(0) - else: - _gc(session, srUuid, dryRun) - -def gc_force(session, srUuid, force = False, dryRun = False): - """Garbage collect all deleted VDIs in SR "srUuid". - The following algorithm is used: - 1. If we are already GC'ing or coalescing a VDI pair, abort GC/coalesce - 2. Scan the SR - 3. GC - 4. 
return - """ - Util.log("=== SR %s: gc_force ===" % srUuid) - init(srUuid) - sr = SR.getInstance(srUuid, session) - if not lockRunning.acquireNoblock(): - _abort(srUuid) - else: - Util.log("Nothing was running, clear to proceed") - - if force: - Util.log("FORCED: will continue even if there are VHD errors") - sr.scanLocked(force) - sr.cleanupCoalesceJournals() - - try: - sr.garbageCollect(dryRun) - finally: - sr.cleanup() - Util.log("final SR state:") - Util.log(sr.toString()) - sr.printVDIs() - lockRunning.release() - -def get_state(srUuid): - """Return whether GC/coalesce is currently running or not. The information - is not guaranteed for any length of time if the call is not protected by - locking. - """ - init(srUuid) - if lockRunning.acquireNoblock(): - lockRunning.release() - return False - return True - -def should_preempt(session, srUuid): - sr = SR.getInstance(srUuid, session) - entries = sr.journaler.getAll(VDI.JRN_COALESCE) - if len(entries) == 0: - return False - elif len(entries) > 1: - raise util.SMException("More than one coalesce entry: " + entries) - sr.scanLocked() - coalescedUuid = entries.popitem()[0] - garbage = sr.findGarbage() - for vdi in garbage: - if vdi.uuid == coalescedUuid: - return True - return False - -def get_coalesceable_leaves(session, srUuid, vdiUuids): - coalesceable = [] - sr = SR.getInstance(srUuid, session) - sr.scanLocked() - for uuid in vdiUuids: - vdi = sr.getVDI(uuid) - if not vdi: - raise util.SMException("VDI %s not found" % uuid) - if vdi.isLeafCoalesceable(): - coalesceable.append(uuid) - return coalesceable - -############################################################################## -# -# CLI -# -def main(): - action = "" - uuid = "" - background = False - force = False - dryRun = False - test = False - shortArgs = "gGaqxu:bfdt" - longArgs = ["gc", "gc_force", "abort", "query", "disable", - "uuid", "background", "force", "dry-run", "test"] - - try: - opts, args = getopt.getopt(sys.argv[1:], shortArgs, longArgs) - except getopt.GetoptError: - usage() - for o, a in opts: - if o in ("-g", "--gc"): - action = "gc" - if o in ("-G", "--gc_force"): - action = "gc_force" - if o in ("-a", "--abort"): - action = "abort" - if o in ("-q", "--query"): - action = "query" - if o in ("-x", "--disable"): - action = "disable" - if o in ("-u", "--uuid"): - uuid = a - if o in ("-b", "--background"): - background = True - if o in ("-f", "--force"): - force = True - if o in ("-d", "--dry-run"): - Util.log("Dry run mode") - dryRun = True - if o in ("-t"): - action = "test" - - if not action or not uuid: - usage() - elif action != "query": - print "All output goes in %s" % LOG_FILE - - if action == "gc": - gc(None, uuid, background, dryRun) - elif action == "gc_force": - gc_force(None, uuid, force, dryRun) - elif action == "abort": - abort(uuid) - elif action == "query": - print "Currently running: %s" % get_state(uuid) - elif action == "disable": - print "Disabling GC/coalesce for %s" % uuid - _abort(uuid) - raw_input("Press enter to re-enable...") - print "GC/coalesce re-enabled" - lockRunning.release() - elif action == "test": - Util.log("Test operation") - pass - - -if __name__ == '__main__': - main() diff --git a/scripts/vm/hypervisor/xenserver/xcpserver/patch b/scripts/vm/hypervisor/xenserver/xcpserver/patch index 4b9146c78cb..887ff6a77e7 100644 --- a/scripts/vm/hypervisor/xenserver/xcpserver/patch +++ b/scripts/vm/hypervisor/xenserver/xcpserver/patch @@ -4,36 +4,44 @@ # [Name of file]=[source path],[file permission],[destination path] # [destination 
path] is required. # If [file permission] is missing, 755 is assumed. -# If [source path] is missing, it looks in the same directory as the patch file. +# If [source path] is missing, it looks in the same +# directory as the patch file. # If [source path] starts with '/', then it is absolute path. # If [source path] starts with '~', then it is path relative to management server home directory. # If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file. -scsiutil.py=/opt/xensource/sm -cleanup.py=/opt/xensource/sm NFSSR.py=/opt/xensource/sm -ISCSISR.py=/opt/xensource/sm -LUNperVDI.py=/opt/xensource/sm nfs.py=/opt/xensource/sm -patch.tgz=..,0775,/opt/xensource/bin vmops=..,0755,/etc/xapi.d/plugins +ovsgre=..,0755,/etc/xapi.d/plugins +ovstunnel=..,0755,/etc/xapi.d/plugins vmopsSnapshot=..,0755,/etc/xapi.d/plugins -systemvm-premium.zip=../../../../../vms,0755,/opt/xensource/bin hostvmstats.py=..,0755,/opt/xensource/sm +systemvm.iso=../../../../../vms,0644,/opt/xensource/packages/iso +id_rsa.cloud=../../../systemvm,0600,/root/.ssh network_info.sh=..,0755,/opt/xensource/bin -prepsystemvm.sh=..,0755,/opt/xensource/bin setupxenserver.sh=..,0755,/opt/xensource/bin make_migratable.sh=..,0755,/opt/xensource/bin -networkUsage.sh=..,0755,/opt/xensource/bin -find_bond.sh=..,0755,/opt/xensource/bin setup_iscsi.sh=..,0755,/opt/xensource/bin -setup_heartbeat_sr.sh=..,0755,/opt/xensource/bin -check_heartbeat.sh=..,0755,/opt/xensource/bin -xenheartbeat.sh=..,0755,/opt/xensource/bin -launch_hb.sh=..,0755,/opt/xensource/bin pingtest.sh=../../..,0755,/opt/xensource/bin dhcp_entry.sh=../../../../network/domr/,0755,/opt/xensource/bin ipassoc.sh=../../../../network/domr/,0755,/opt/xensource/bin vm_data.sh=../../../../network/domr/,0755,/opt/xensource/bin save_password_to_domr.sh=../../../../network/domr/,0755,/opt/xensource/bin +networkUsage.sh=../../../../network/domr/,0755,/opt/xensource/bin call_firewall.sh=../../../../network/domr/,0755,/opt/xensource/bin call_loadbalancer.sh=../../../../network/domr/,0755,/opt/xensource/bin +l2tp_vpn.sh=../../../../network/domr/,0755,/opt/xensource/bin +cloud-setup-bonding.sh=..,0755,/opt/xensource/bin +copy_vhd_to_secondarystorage.sh=..,0755,/opt/xensource/bin +copy_vhd_from_secondarystorage.sh=..,0755,/opt/xensource/bin +setup_heartbeat_sr.sh=..,0755,/opt/xensource/bin +setup_heartbeat_file.sh=..,0755,/opt/xensource/bin +check_heartbeat.sh=..,0755,/opt/xensource/bin +xenheartbeat.sh=..,0755,/opt/xensource/bin +launch_hb.sh=..,0755,/opt/xensource/bin +vhd-util=..,0755,/opt/xensource/bin +vmopspremium=..,0755,/etc/xapi.d/plugins +create_privatetemplate_from_snapshot.sh=..,0755,/opt/xensource/bin +upgrade_snapshot.sh=..,0755,/opt/xensource/bin +cloud-clean-vlan.sh=..,0755,/opt/xensource/bin +cloud-prepare-upgrade.sh=..,0755,/opt/xensource/bin diff --git a/scripts/vm/hypervisor/xenserver/xcpserver/scsiutil.py b/scripts/vm/hypervisor/xenserver/xcpserver/scsiutil.py deleted file mode 100755 index 1d4506ef3fb..00000000000 --- a/scripts/vm/hypervisor/xenserver/xcpserver/scsiutil.py +++ /dev/null @@ -1,468 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2006-2007 XenSource Ltd. -# Copyright (C) 2008-2009 Citrix Ltd. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation; version 2.1 only. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# Miscellaneous scsi utility functions -# - -import util, SR -import os -import re -import xs_errors -import base64 -import time -import errno -import glob - -PREFIX_LEN = 4 -SUFFIX_LEN = 12 -SECTOR_SHIFT = 9 - -def gen_hash(st, len): - hs = 0 - for i in st: - hs = ord(i) + (hs << 6) + (hs << 16) - hs - return str(hs)[0:len] - -def gen_uuid_from_serial(iqn, serial): - if len(serial) < SUFFIX_LEN: - raise util.CommandException(1) - prefix = gen_hash(iqn, PREFIX_LEN) - suffix = gen_hash(serial, SUFFIX_LEN) - str = prefix.encode("hex") + suffix.encode("hex") - return str[0:8]+'-'+str[8:12]+'-'+str[12:16]+'-'+str[16:20]+'-'+str[20:32] - -def gen_serial_from_uuid(iqn, uuid): - str = uuid.replace('-','') - prefix = gen_hash(iqn, PREFIX_LEN) - if str[0:(PREFIX_LEN * 2)].decode("hex") != prefix: - raise util.CommandException(1) - return str[(PREFIX_LEN * 2):].decode("hex") - -def getsize(path): - dev = getdev(path) - sysfs = os.path.join('/sys/block',dev,'size') - size = 0 - if os.path.exists(sysfs): - try: - f=open(sysfs, 'r') - size = (long(f.readline()) << SECTOR_SHIFT) - f.close() - except: - pass - return size - -def getuniqueserial(path): - dev = getdev(path) - output = gen_rdmfile() - try: - cmd = ["md5sum"] - txt = util.pread3(cmd, getSCSIid(path)) - return txt.split(' ')[0] - except: - return '' - -def gen_uuid_from_string(str): - if len(str) < (PREFIX_LEN + SUFFIX_LEN): - raise util.CommandException(1) - return str[0:8]+'-'+str[8:12]+'-'+str[12:16]+'-'+str[16:20]+'-'+str[20:32] - -def SCSIid_sanitise(str): - text = re.sub("^\s+","",str) - return re.sub("\s+","_",text) - -def getSCSIid(path): - dev = rawdev(path) - cmd = ["scsi_id", "-g", "-s", "/block/%s" % dev] - return SCSIid_sanitise(util.pread2(cmd)[:-1]) - -def compareSCSIid_2_6_18(SCSIid, path): - serial = getserial(path) - len_serial = len(serial) - if (len_serial == 0 ) or (len_serial > (len(SCSIid) - 1)): - return False - list_SCSIid = list(SCSIid) - list_serial = list_SCSIid[1:(len_serial + 1)] - serial_2_6_18 = ''.join(list_serial) - if (serial == serial_2_6_18): - return True - else: - return False - -def getserial(path): - dev = os.path.join('/dev',getdev(path)) - try: - cmd = ["sginfo", "-s", dev] - text = re.sub("\s+","",util.pread2(cmd)) - except: - raise xs_errors.XenError('EIO', \ - opterr='An error occured querying device serial number [%s]' \ - % dev) - try: - return text.split("'")[1] - except: - return '' - -def getmanufacturer(path): - cmd = ["sginfo", "-M", path] - try: - for line in filter(match_vendor, util.pread2(cmd).split('\n')): - return line.replace(' ','').split(':')[-1] - except: - return '' - -def cacheSCSIidentifiers(): - SCSI = {} - SYS_PATH = "/dev/disk/by-scsibus/*" - for node in glob.glob(SYS_PATH): - dev = os.path.realpath(node) - HBTL = os.path.basename(node).split("-")[-1].split(":") - line = "NONE %s %s %s %s 0 %s" % \ - (HBTL[0],HBTL[1],HBTL[2],HBTL[3],dev) - ids = line.split() - SCSI[ids[6]] = ids - return SCSI - -def scsi_dev_ctrl(ids, cmd): - f = -1 - for i in range(0,10): - try: - str = "scsi %s-single-device %s %s %s %s" % \ - (cmd, ids[1],ids[2],ids[3],ids[4]) - util.SMlog(str) - f=open('/proc/scsi/scsi', 'w') - print >>f, str - f.close() - return - except IOError, e: - util.SMlog("SCSI_DEV_CTRL: Failure, %s [%d]" % 
(e.strerror,e.errno)) - if f >= 0: - f.close() - if e.errno == errno.ENXIO: - util.SMlog("Device has disappeared already") - return - f = -1 - time.sleep(6) - continue - raise xs_errors.XenError('EIO', \ - opterr='An error occured during the scsi operation') - -def getdev(path): - realpath = os.path.realpath(path) - if match_dm(realpath): - newpath = realpath.replace("/dev/mapper/","/dev/disk/by-id/scsi-") - else: - newpath = path - return os.path.realpath(newpath).split('/')[-1] - -def rawdev(dev): - return re.sub("[0-9]*$","",getdev(dev)) - -def getSessionID(path): - for line in filter(match_session, util.listdir(path)): - return line.split('-')[-1] - -def match_session(s): - regex = re.compile("^SESSIONID-") - return regex.search(s, 0) - -def match_vendor(s): - regex = re.compile("^Vendor:") - return regex.search(s, 0) - -def match_dm(s): - regex = re.compile("mapper/") - return regex.search(s, 0) - -def match_sd(s): - regex = re.compile("/dev/sd") - return regex.search(s, 0) - -def _isSCSIdev(dev): - if match_dm(dev): - path = dev.replace("/dev/mapper/","/dev/disk/by-id/scsi-") - else: - path = dev - return match_sd(os.path.realpath(path)) - -def gen_rdmfile(): - return "/tmp/%s" % util.gen_uuid() - -def add_serial_record(session, sr_ref, devstring): - try: - conf = session.xenapi.SR.get_sm_config(sr_ref) - conf['devserial'] = devstring - session.xenapi.SR.set_sm_config(sr_ref, conf) - except: - pass - -def get_serial_record(session, sr_ref): - try: - conf = session.xenapi.SR.get_sm_config(sr_ref) - return conf['devserial'] - except: - return "" - -def devlist_to_serialstring(devlist): - serial = '' - for dev in devlist: - try: - devserial = "scsi-%s" % getSCSIid(dev) - if not len(devserial) > 0: - continue - if len(serial): - serial += ',' - serial += devserial - except: - pass - - return serial - -def gen_synthetic_page_data(uuid): - # For generating synthetic page data for non-raw LUNs - # we set the vendor ID to XENSRC - # Note that the Page 80 serial number must be limited - # to 16 characters - page80 = "" - page80 += "\x00\x80" - page80 += "\x00\x12" - page80 += uuid[0:16] - page80 += " " - - page83 = "" - page83 += "\x00\x83" - page83 += "\x00\x31" - page83 += "\x02\x01\x00\x2d" - page83 += "XENSRC " - page83 += uuid - page83 += " " - return ["",base64.b64encode(page80),base64.b64encode(page83)] - -def gen_raw_page_data(path): - default = "" - page80 = "" - page83 = "" - try: - cmd = ["sg_inq", "-r", path] - text = util.pread2(cmd) - default = base64.b64encode(text) - - cmd = ["sg_inq", "--page=0x80", "-r", path] - text = util.pread2(cmd) - page80 = base64.b64encode(text) - - cmd = ["sg_inq", "--page=0x83", "-r", path] - text = util.pread2(cmd) - page83 = base64.b64encode(text) - except: - pass - return [default,page80,page83] - -def update_XS_SCSIdata(session, vdi_ref, vdi_uuid, data): - try: - session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, "vdi-uuid") - except: - pass - - try: - session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, "scsi/0x12/default") - except: - pass - - try: - session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, "scsi/0x12/0x80") - except: - pass - - try: - session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, "scsi/0x12/0x83") - except: - pass - - try: - session.xenapi.VDI.add_to_xenstore_data(vdi_ref, "vdi-uuid", vdi_uuid) - if len(data[0]): - session.xenapi.VDI.add_to_xenstore_data(vdi_ref, "scsi/0x12/default", data[0]) - - if len(data[1]): - session.xenapi.VDI.add_to_xenstore_data(vdi_ref, "scsi/0x12/0x80", data[1]) - - if len(data[2]): - 
session.xenapi.VDI.add_to_xenstore_data(vdi_ref, "scsi/0x12/0x83", data[2]) - except: - pass - -def rescan(ids, scanstring='- - -'): - for id in ids: - refresh_HostID(id, scanstring) - -def _genArrayIdentifier(dev): - try: - cmd = ["sg_inq", "--page=0xc8", "-r", dev] - id = util.pread2(cmd) - return id.encode("hex")[180:212] - except: - return "" - - -def _genHostList(procname): - # loop through and check all adapters - ids = [] - try: - for dir in util.listdir('/sys/class/scsi_host'): - filename = os.path.join('/sys/class/scsi_host',dir,'proc_name') - if os.path.exists(filename): - f = open(filename, 'r') - if f.readline().find(procname) != -1: - ids.append(dir.replace("host","")) - f.close() - except: - pass - return ids - -def _genReverseSCSIidmap(SCSIid, pathname="scsibus"): - util.SMlog("map_by_scsibus: sid=%s" % SCSIid) - - devices = [] - for link in glob.glob('/dev/disk/by-id/scsi-%s' % SCSIid): - devices.append(os.path.realpath(link)) - return devices - -def _genReverseSCSidtoLUNidmap(SCSIid): - devices = [] - for link in glob.glob('/dev/disk/by-scsibus/%s-*' % SCSIid): - devices.append(link.split('-')[-1]) - return devices - -def _dosgscan(): - regex=re.compile("([^:]*):\s+scsi([0-9]+)\s+channel=([0-9]+)\s+id=([0-9]+)\s+lun=([0-9]+)") - scan=util.pread2(["/usr/bin/sg_scan"]).split('\n') - sgs=[] - for line in scan: - m=regex.match(line) - if m: - device=m.group(1) - host=m.group(2) - channel=m.group(3) - sid=m.group(4) - lun=m.group(5) - sgs.append([device,host,channel,sid,lun]) - return sgs - -def refresh_HostID(HostID, scanstring): - LUNs = glob.glob('/sys/class/scsi_disk/%s*' % HostID) - li = [] - for l in LUNs: - chan = re.sub(":[0-9]*$",'',os.path.basename(l)) - if chan not in li: - li.append(chan) - - fullrescan = True - if len(li) and scanstring == "- - -": - fullrescan = False - for c in li: - if not refresh_scsi_channel(c): - fullrescan = True - - if fullrescan: - util.SMlog("Rescanning HostID %s with %s" % (HostID, scanstring)) - path = '/sys/class/scsi_host/host%s/scan' % HostID - if os.path.exists(path): - try: - f=open(path, 'w') - f.write('%s\n' % scanstring) - f.close() - except: - pass - # Host Bus scan issued, now try to detect channels - if util.wait_for_path("/sys/class/scsi_disk/%s*" % HostID, 5): - # At least one LUN is mapped - LUNs = glob.glob('/sys/class/scsi_disk/%s*' % HostID) - li = [] - for l in LUNs: - chan = re.sub(":[0-9]*$",'',os.path.basename(l)) - if chan not in li: - li.append(chan) - for c in li: - refresh_scsi_channel(c) - - -def refresh_scsi_channel(channel): - DEV_WAIT = 5 - util.SMlog("Refreshing channel %s" % channel) - util.wait_for_path('/dev/disk/by-scsibus/*-%s*' % channel, DEV_WAIT) - LUNs = glob.glob('/dev/disk/by-scsibus/*-%s*' % channel) - try: - rootdevs = util.dom0_disks() - except: - util.SMlog("Failed to query root disk, failing operation") - return False - - # a) Find a LUN to issue a Query LUNs command - li = [] - Query = False - for lun in LUNs: - try: - hbtl = lun.split('-')[-1] - h = hbtl.split(':') - l=util.pread2(["/usr/bin/sg_luns","-q",lun]).split('\n') - li = [] - for i in l: - if len(i): - li.append(int(i[0:4], 16)) - util.SMlog("sg_luns query returned %s" % li) - Query = True - break - except: - pass - if not Query: - util.SMlog("Failed to detect or query LUN on Channel %s" % channel) - return False - - # b) Remove stale LUNs - current = glob.glob('/dev/disk/by-scsibus/*-%s:%s:%s*' % (h[0],h[1],h[2])) - for cur in current: - lunID = int(cur.split(':')[-1]) - newhbtl = ['',h[0],h[1],h[2],str(lunID)] - if 
os.path.realpath(cur) in rootdevs: - # Don't touch the rootdev - if lunID in li: li.remove(lunID) - continue - - # Check if LUN is stale, and remove it - if not lunID in li: - util.SMlog("Stale LUN detected. Removing HBTL: %s" % newhbtl) - scsi_dev_ctrl(newhbtl,"remove") - util.wait_for_nopath(cur, DEV_WAIT) - continue - else: - li.remove(lunID) - - # Query SCSIid, check it matches, if not, re-probe - cur_SCSIid = os.path.basename(cur).split("-%s:%s:%s" % (h[0],h[1],h[2]))[0] - real_SCSIid = getSCSIid(cur) - if cur_SCSIid != real_SCSIid: - util.SMlog("HBTL %s does not match, re-probing" % newhbtl) - scsi_dev_ctrl(newhbtl,"remove") - util.wait_for_nopath(cur, DEV_WAIT) - scsi_dev_ctrl(newhbtl,"add") - util.wait_for_path('/dev/disk/by-scsibus/%s-%s' % (real_SCSIid,hbtl), DEV_WAIT) - pass - - # c) Probe for any LUNs that are not present in the system - for l in li: - newhbtl = ['',h[0],h[1],h[2],str(l)] - util.SMlog("Probing new HBTL: %s" % newhbtl) - scsi_dev_ctrl(newhbtl,"add") - util.wait_for_path('/dev/disk/by-scsibus/*-%s' % hbtl, DEV_WAIT) - - return True diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 1a161e31b04..04707ecba85 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1573,8 +1573,8 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura } Long memory = cmd.getMemory(); - if ((memory == null) || (memory.intValue() <= 0) || (memory.intValue() > 2147483647)) { - throw new InvalidParameterValueException("Failed to create service offering " + name + ": specify the memory value between 1 and 2147483647"); + if ((memory == null) || (memory.intValue() < 32) || (memory.intValue() > 2147483647)) { + throw new InvalidParameterValueException("Failed to create service offering " + name + ": specify the memory value between 32 and 2147483647 MB"); } // check if valid domain diff --git a/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index 7af0df28511..7ccbd768960 100755 --- a/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -380,7 +380,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L String prodBrand = record.softwareVersion.get("product_brand").trim(); String prodVersion = record.softwareVersion.get("product_version").trim(); - if(prodBrand.equals("XenCloudPlatform") && prodVersion.equals("0.1.1")) + if(prodBrand.equals("XCP") && prodVersion.equals("1.0.0")) return new XcpServerResource(); if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) @@ -398,7 +398,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } } - String msg = "Only support XCP 0.1.1, XenServer 5.6, XenServer 5.6 FP1 and XenServer 5.6 SP2, but this one is " + prodBrand + " " + prodVersion; + String msg = "Only support XCP 1.0.0, XenServer 5.6, XenServer 5.6 FP1 and XenServer 5.6 SP2, but this one is " + prodBrand + " " + prodVersion; _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, msg, msg); s_logger.debug(msg); throw new RuntimeException(msg); @@ -525,7 +525,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L String prodBrand = details.get("product_brand").trim(); 
String prodVersion = details.get("product_version").trim(); - if(prodBrand.equals("XenCloudPlatform") && prodVersion.equals("0.1.1")) { + if(prodBrand.equals("XCP") && prodVersion.equals("1.0.0")) { resource = XcpServerResource.class.getName(); } else if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) { resource = XenServer56Resource.class.getName(); @@ -540,7 +540,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } } if( resource == null ){ - String msg = "Only support XCP 0.1.1, XenServer 5.6, XenServer 5.6 FP1 and XenServer 5.6 SP2, but this one is " + prodBrand + " " + prodVersion; + String msg = "Only support XCP 1.0.0, XenServer 5.6, XenServer 5.6 FP1 and XenServer 5.6 SP2, but this one is " + prodBrand + " " + prodVersion; s_logger.debug(msg); throw new RuntimeException(msg); } diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index 95243cb47a8..df2c705e242 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -275,7 +275,7 @@ public class StatsCollector { GetStorageStatsCommand command = new GetStorageStatsCommand(host.getStorageUrl()); HostVO ssAhost = _agentMgr.getSSAgent(host); if (ssAhost == null) { - s_logger.warn("There is no secondary storage VM for secondary storage host " + host.getName()); + s_logger.debug("There is no secondary storage VM for secondary storage host " + host.getName()); continue; } long hostId = host.getId();
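Taken together, the XcpServerDiscoverer hunks above amount to a (product_brand, product_version) dispatch with one resource class per supported platform, which this patch retargets from XenCloudPlatform 0.1.1 to XCP 1.0.0. A sketch of that dispatch (Python rather than the Java source; only the two pairs visible in these hunks are listed, the XenServer 5.6 FP1/SP2 entries fall in elided context):

    SUPPORTED = {
        ("XCP", "1.0.0"): "XcpServerResource",
        ("XenServer", "5.6.0"): "XenServer56Resource",
    }

    def resource_for(brand, version):
        cls = SUPPORTED.get((brand.strip(), version.strip()))
        if cls is None:
            raise RuntimeError(
                "Only support XCP 1.0.0, XenServer 5.6, XenServer 5.6 FP1 "
                "and XenServer 5.6 SP2, but this one is %s %s"
                % (brand, version))
        return cls

    print(resource_for("XCP", "1.0.0"))  # -> XcpServerResource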