From 0648d000b243399bae0da8dddcd120f67e154543 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 13 May 2025 16:05:04 +0530 Subject: [PATCH] Support XenServer 8.4 / XCP 8.3 - make scripts python3 compatible (#10684) --- .../META-INF/db/schema-42000to42010.sql | 53 + plugins/hypervisors/ovm/pom.xml | 2 +- plugins/hypervisors/xenserver/pom.xml | 2 +- .../discoverer/XcpServerDiscoverer.java | 7 +- .../resource/CitrixResourceBase.java | 4 + .../resource/XcpServer83Resource.java | 25 + .../resource/XenServerConnectionPool.java | 10 +- .../resource/Xenserver84Resource.java | 24 + .../xenbase/CitrixStartCommandWrapper.java | 25 +- .../com/xensource/xenapi/ConnectionNew.java | 335 +++ pom.xml | 2 +- .../hypervisor/xenserver/xcpserver83/NFSSR.py | 278 +++ .../vm/hypervisor/xenserver/xcpserver83/patch | 65 + .../xenserver84/cloud-plugin-storage | 303 +++ .../xenserver84/cloudstack_pluginlib.py | 894 ++++++++ .../xenserver/xenserver84/ovs-vif-flows.py | 145 ++ .../vm/hypervisor/xenserver/xenserver84/patch | 70 + .../vm/hypervisor/xenserver/xenserver84/swift | 1884 +++++++++++++++++ .../vm/hypervisor/xenserver/xenserver84/vmops | 1607 ++++++++++++++ .../xenserver/xenserver84/vmopsSnapshot | 622 ++++++ .../xenserver/xenserver84/vmopspremium | 159 ++ .../storage/template/UploadManagerImpl.java | 14 +- .../smoke/test_deploy_vm_extra_config_data.py | 4 +- test/integration/smoke/test_guest_os.py | 5 +- 24 files changed, 6525 insertions(+), 14 deletions(-) create mode 100644 plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServer83Resource.java create mode 100644 plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver84Resource.java create mode 100644 plugins/hypervisors/xenserver/src/main/java/com/xensource/xenapi/ConnectionNew.java create mode 100755 scripts/vm/hypervisor/xenserver/xcpserver83/NFSSR.py create mode 100644 scripts/vm/hypervisor/xenserver/xcpserver83/patch create mode 100644 scripts/vm/hypervisor/xenserver/xenserver84/cloud-plugin-storage create mode 100644 scripts/vm/hypervisor/xenserver/xenserver84/cloudstack_pluginlib.py create mode 100644 scripts/vm/hypervisor/xenserver/xenserver84/ovs-vif-flows.py create mode 100644 scripts/vm/hypervisor/xenserver/xenserver84/patch create mode 100644 scripts/vm/hypervisor/xenserver/xenserver84/swift create mode 100755 scripts/vm/hypervisor/xenserver/xenserver84/vmops create mode 100644 scripts/vm/hypervisor/xenserver/xenserver84/vmopsSnapshot create mode 100755 scripts/vm/hypervisor/xenserver/xenserver84/vmopspremium diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql index b8c44c40c46..3dd6c18f57c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42000to42010.sql @@ -54,6 +54,59 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.storage_pool', 'used_iops', 'bigint -- Add reason column for op_ha_work CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.op_ha_work', 'reason', 'varchar(32) DEFAULT NULL COMMENT "Reason for the HA work"'); +-- Support for XCP-ng 8.3.0 and XenServer 8.4 by adding hypervisor capabilities +-- https://docs.xenserver.com/en-us/xenserver/8/system-requirements/configuration-limits.html +-- https://docs.xenserver.com/en-us/citrix-hypervisor/system-requirements/configuration-limits.html +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, 
hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) VALUES (UUID(), 'XenServer', '8.3.0', 1000, 254, 64, 1);
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) VALUES (UUID(), 'XenServer', '8.4.0', 1000, 240, 64, 1);
+
+-- Add missing and new Guest OS mappings
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'XenServer', '8.2.1', 'Debian Buster 10');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 15 (64-bit)', 'XenServer', '8.2.1', 'SUSE Linux Enterprise 15 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2022 (64-bit)', 'XenServer', '8.2.1', 'Windows Server 2022 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows 11 (64-bit)', 'XenServer', '8.2.1', 'Windows 11');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 20.04 LTS', 'XenServer', '8.2.1', 'Ubuntu Focal Fossa 20.04');
+
+-- Copy XS 8.2.1 hypervisor guest OS mappings to XS 8.3 and 8.3 mappings to 8.4
+INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.3.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.2.1';
+
+-- Add new and missing guest os mappings for XS 8.3
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Rocky Linux 9', 'XenServer', '8.3.0', 'Rocky Linux 9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Rocky Linux 8', 'XenServer', '8.3.0', 'Rocky Linux 8');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 9', 'XenServer', '8.3.0', 'AlmaLinux 9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 8', 'XenServer', '8.3.0', 'AlmaLinux 8');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 12 (64-bit)', 'XenServer', '8.3.0', 'Debian Bookworm 12');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 9', 'XenServer', '8.3.0', 'Oracle Linux 9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 8', 'XenServer', '8.3.0', 'Oracle Linux 8');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Red Hat Enterprise Linux 8.0', 'XenServer', '8.3.0', 'Red Hat Enterprise Linux 8');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Red Hat Enterprise Linux 9.0', 'XenServer', '8.3.0', 'Red Hat Enterprise Linux 9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 22.04 LTS', 'XenServer', '8.3.0', 'Ubuntu Jammy Jellyfish 22.04');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 12 SP5 (64-bit)', 'XenServer', '8.3.0', 'SUSE Linux Enterprise Server 12 SP5 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'NeoKylin Linux Server 7', 'XenServer', '8.3.0', 'NeoKylin Linux Server 7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 9', 'XenServer', '8.3.0', 'CentOS Stream 9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Scientific Linux 7', 'XenServer', '8.3.0', 'Scientific Linux 7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (7, 'Generic Linux UEFI', 'XenServer', '8.3.0', 'Generic Linux UEFI');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (7, 'Generic Linux BIOS', 'XenServer', '8.3.0', 'Generic Linux BIOS');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Gooroom Platform 2.0', 'XenServer', '8.3.0', 'Gooroom Platform 2.0');
+
+INSERT IGNORE INTO 
`cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.4.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.3.0';
+
+-- Add new guest os mappings for XS 8.4 and KVM
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2025', 'XenServer', '8.4.0', 'Windows Server 2025');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 24.04 LTS', 'XenServer', '8.4.0', 'Ubuntu Noble Numbat 24.04');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 10 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 11 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 11 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 12 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 12 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows 11 (64-bit)', 'KVM', 'default', 'Windows 11');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2025', 'KVM', 'default', 'Windows Server 2025');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 24.04 LTS', 'KVM', 'default', 'Ubuntu 24.04 LTS');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 10 (preview)', 'XenServer', '8.4.0', 'CentOS Stream 10 (preview)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 9', 'XenServer', '8.4.0', 'CentOS Stream 9');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Scientific Linux 7', 'XenServer', '8.4.0', 'Scientific Linux 7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'NeoKylin Linux Server 7', 'XenServer', '8.4.0', 'NeoKylin Linux Server 7');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 12 SP5 (64-bit)', 'XenServer', '8.4.0', 'SUSE Linux Enterprise Server 12 SP5 (64-bit)');
+CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Gooroom Platform 2.0', 'XenServer', '8.4.0', 'Gooroom Platform 2.0');
+
 -- Grant access to 2FA APIs for the "Read-Only User - Default" role
 CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW');
diff --git a/plugins/hypervisors/ovm/pom.xml b/plugins/hypervisors/ovm/pom.xml
index 2be84b612c0..fe106bcaae0 100644
--- a/plugins/hypervisors/ovm/pom.xml
+++ b/plugins/hypervisors/ovm/pom.xml
@@ -29,7 +29,7 @@
     <dependency>
-      <groupId>net.java.dev.vcc.thirdparty</groupId>
+      <groupId>com.citrix.hypervisor</groupId>
       <artifactId>xen-api</artifactId>
       <version>${cs.xapi.version}</version>
diff --git a/plugins/hypervisors/xenserver/pom.xml b/plugins/hypervisors/xenserver/pom.xml
index 846b97b1dbe..0d36206dc66 100644
--- a/plugins/hypervisors/xenserver/pom.xml
+++ b/plugins/hypervisors/xenserver/pom.xml
@@ -44,7 +44,7 @@
       <scope>compile</scope>
    </dependency>
    <dependency>
-      <groupId>net.java.dev.vcc.thirdparty</groupId>
+      <groupId>com.citrix.hypervisor</groupId>
       <artifactId>xen-api</artifactId>
       <version>${cs.xapi.version}</version>
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java
index 4fa7e788224..6d298629e7e 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java
@@ -30,6 +30,8 @@
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 import javax.persistence.EntityExistsException;
+import 
com.cloud.hypervisor.xenserver.resource.XcpServer83Resource; +import com.cloud.hypervisor.xenserver.resource.Xenserver84Resource; import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; @@ -265,7 +267,6 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } catch (Exception e) { logger.debug("Caught exception during logout", e); } - conn.dispose(); conn = null; } @@ -435,6 +436,10 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } } else if (prodBrand.equals("XCP_Kronos")) { return new XcpOssResource(); + } else if (prodBrand.equals("XenServer") && prodVersion.equals("8.4.0")) { + return new Xenserver84Resource(); + } else if (prodBrand.equals("XCP-ng") && (prodVersion.equals("8.3.0"))) { + return new XcpServer83Resource(); } else if (prodBrand.equals("XenServer") || prodBrand.equals("XCP-ng") || prodBrand.equals("Citrix Hypervisor")) { final String[] items = prodVersion.split("\\."); if ((Integer.parseInt(items[0]) > 6) || diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 6953441f34d..adbdbe01326 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -1699,6 +1699,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S nwr.nameLabel = newName; nwr.tags = new HashSet<>(); nwr.tags.add(generateTimeStamp()); + nwr.managed = true; vlanNetwork = Network.create(conn, nwr); vlanNic = getNetworkByName(conn, newName); if (vlanNic == null) { // Still vlanNic is null means we could not @@ -2004,6 +2005,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S // started otherConfig.put("assume_network_is_shared", "true"); rec.otherConfig = otherConfig; + rec.managed = true; nw = Network.create(conn, rec); logger.debug("### XenServer network for tunnels created:" + nwName); } else { @@ -4829,6 +4831,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S configs.put("netmask", NetUtils.getLinkLocalNetMask()); configs.put("vswitch-disable-in-band", "true"); rec.otherConfig = configs; + rec.managed = true; linkLocal = Network.create(conn, rec); } else { linkLocal = networks.iterator().next(); @@ -5017,6 +5020,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S if (networks.isEmpty()) { rec.nameDescription = "vswitch network for " + nwName; rec.nameLabel = nwName; + rec.managed = true; vswitchNw = Network.create(conn, rec); } else { vswitchNw = networks.iterator().next(); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServer83Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServer83Resource.java new file mode 100644 index 00000000000..65add312119 --- /dev/null +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServer83Resource.java @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.xenserver.resource; + +public class XcpServer83Resource extends XenServer650Resource { + + @Override + protected String getPatchFilePath() { + return "scripts/vm/hypervisor/xenserver/xcpserver83/patch"; + } +} diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java index 87b869ba3c6..74e8d2b045c 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java @@ -21,6 +21,7 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.APIVersion; import com.xensource.xenapi.Connection; +import com.xensource.xenapi.ConnectionNew; import com.xensource.xenapi.Host; import com.xensource.xenapi.Pool; import com.xensource.xenapi.Session; @@ -150,12 +151,12 @@ public class XenServerConnectionPool { } public Connection getConnect(String ip, String username, Queue password) { - Connection conn = new Connection(getURL(ip), 10, _connWait); + Connection conn = new ConnectionNew(getURL(ip), 10, _connWait); try { loginWithPassword(conn, username, password, APIVersion.latest().toString()); } catch (Types.HostIsSlave e) { String maddress = e.masterIPAddress; - conn = new Connection(getURL(maddress), 10, _connWait); + conn = new ConnectionNew(getURL(maddress), 10, _connWait); try { loginWithPassword(conn, username, password, APIVersion.latest().toString()); } catch (Exception e1) { @@ -221,7 +222,7 @@ public class XenServerConnectionPool { if ( mConn == null ) { try { - Connection conn = new Connection(getURL(ipAddress), 5, _connWait); + Connection conn = new ConnectionNew(getURL(ipAddress), 5, _connWait); Session sess = loginWithPassword(conn, username, password, APIVersion.latest().toString()); Host host = sess.getThisHost(conn); Boolean hostenabled = host.getEnabled(conn); @@ -231,7 +232,6 @@ public class XenServerConnectionPool { } catch (Exception e) { LOGGER.debug("Caught exception during logout", e); } - conn.dispose(); } if (!hostenabled) { String msg = "Unable to create master connection, due to master Host " + ipAddress + " is not enabled"; @@ -412,7 +412,7 @@ public class XenServerConnectionPool { return s_instance; } - public class XenServerConnection extends Connection { + public class XenServerConnection extends ConnectionNew { long _interval; int _retries; String _ip; diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver84Resource.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver84Resource.java
new file mode 100644
index 00000000000..b1a370b0db8
--- /dev/null
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver84Resource.java
@@ -0,0 +1,24 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.xenserver.resource;
+
+public class Xenserver84Resource extends XenServer650Resource {
+    @Override
+    protected String getPatchFilePath() {
+        return "scripts/vm/hypervisor/xenserver/xenserver84/patch";
+    }
+}
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java
index 2f3fb049339..9aca3fed599 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java
@@ -96,7 +96,15 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand, Answer, CitrixResourceBase> {
+
+    private boolean isVersionEqualOrHigher(final String version, final String baseVersion) {
+        final String[] parts1 = version.split("\\.");
+        final String[] parts2 = baseVersion.split("\\.");
+        for (int i = 0; i < Math.min(parts1.length, parts2.length); i++) {
+            final int num1 = Integer.parseInt(parts1[i]);
+            final int num2 = Integer.parseInt(parts2[i]);
+            if (num1 > num2) return true;
+            if (num1 < num2) return false;
+        }
+        return true; // versions are equal
+    }
 }
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/xensource/xenapi/ConnectionNew.java b/plugins/hypervisors/xenserver/src/main/java/com/xensource/xenapi/ConnectionNew.java
new file mode 100644
index 00000000000..656915d0132
--- /dev/null
+++ b/plugins/hypervisors/xenserver/src/main/java/com/xensource/xenapi/ConnectionNew.java
@@ -0,0 +1,335 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License. 
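
The CitrixStartCommandWrapper hunk above boils down to a plain numeric, component-wise comparison of dotted version strings (the num1/num2 loop visible in the diff). A minimal standalone sketch of the same comparison, in Python for reference; the function name and the sample versions here are illustrative only, not part of the patch:

    def version_equal_or_higher(version, base):
        # Compare dotted version strings component by component, left to
        # right, as the num1/num2 loop in the Java hunk does.
        v1 = [int(part) for part in version.split(".")]
        v2 = [int(part) for part in base.split(".")]
        for num1, num2 in zip(v1, v2):
            if num1 > num2:
                return True
            if num1 < num2:
                return False
        return True  # all compared components are equal

    assert version_equal_or_higher("8.4.0", "8.2.1")
    assert not version_equal_or_higher("8.2.1", "8.4.0")
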
+ +package com.xensource.xenapi; + + +import org.apache.ws.commons.util.NamespaceContextImpl; +import org.apache.xmlrpc.XmlRpcException; +import org.apache.xmlrpc.client.XmlRpcClient; +import org.apache.xmlrpc.client.XmlRpcClientConfigImpl; +import org.apache.xmlrpc.client.XmlRpcHttpClientConfig; +import org.apache.xmlrpc.common.TypeFactory; +import org.apache.xmlrpc.common.TypeFactoryImpl; +import org.apache.xmlrpc.common.XmlRpcStreamConfig; +import org.apache.xmlrpc.parser.MapParser; +import org.apache.xmlrpc.parser.RecursiveTypeParserImpl; +import org.apache.xmlrpc.parser.TypeParser; +import org.xml.sax.Attributes; +import org.xml.sax.SAXException; +import org.xml.sax.SAXParseException; + +import javax.xml.namespace.QName; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; +import java.util.TimeZone; + +public class ConnectionNew extends Connection { + /** + * The version of the bindings that this class belongs to. + * + * @deprecated This field is not used any more. + */ + private APIVersion apiVersion; + + + /** + * Updated when Session.login_with_password() is called. + */ + @Override + public APIVersion getAPIVersion() + { + return apiVersion; + } + + /** + * The opaque reference to the session used by this connection + */ + private String sessionReference; + + /** + * As seen by the xmlrpc library. From our point of view it's a server. + */ + private final XmlRpcClient client; + + /** + * Creates a connection to a particular server using a given url. This object can then be passed + * in to any other API calls. + * + * Note this constructor does NOT call Session.loginWithPassword; the programmer is responsible for calling it, + * passing the Connection as a parameter. No attempt to connect to the server is made until login is called. + * + * When this constructor is used, a call to dispose() will do nothing. The programmer is responsible for manually + * logging out the Session. 
+ * + * @param url The URL of the server to connect to + * @param replyTimeout The reply timeout for xml-rpc calls in seconds + * @param connTimeout The connection timeout for xml-rpc calls in seconds + */ + public ConnectionNew(URL url, int replyTimeout, int connTimeout) + { + super(url, replyTimeout, connTimeout); + this.client = getClientFromURL(url, replyTimeout, connTimeout); + } + + private XmlRpcClientConfigImpl config = new XmlRpcClientConfigImpl(); + + @Override + public XmlRpcClientConfigImpl getConfig() + { + return config; + } + + static class CustomMapParser extends RecursiveTypeParserImpl { + + private int level = 0; + private StringBuffer nameBuffer = new StringBuffer(); + private Object nameObject; + private Map map; + private boolean inName; + private boolean inValue; + private boolean doneValue; + + public CustomMapParser(XmlRpcStreamConfig pConfig, NamespaceContextImpl pContext, TypeFactory pFactory) { + super(pConfig, pContext, pFactory); + } + + protected void addResult(Object pResult) throws SAXException { + if (this.inName) { + this.nameObject = pResult; + } else { + if (this.nameObject == null) { + throw new SAXParseException("Invalid state: Expected name", this.getDocumentLocator()); + } + + this.map.put(this.nameObject, pResult); + } + + } + + public void startDocument() throws SAXException { + super.startDocument(); + this.level = 0; + this.map = new HashMap(); + this.inValue = this.inName = false; + } + + public void characters(char[] pChars, int pOffset, int pLength) throws SAXException { + if (this.inName && !this.inValue) { + this.nameBuffer.append(pChars, pOffset, pLength); + } else { + super.characters(pChars, pOffset, pLength); + } + + } + + public void ignorableWhitespace(char[] pChars, int pOffset, int pLength) throws SAXException { + if (this.inName) { + this.characters(pChars, pOffset, pLength); + } else { + super.ignorableWhitespace(pChars, pOffset, pLength); + } + + } + + public void startElement(String pURI, String pLocalName, String pQName, Attributes pAttrs) throws SAXException { + switch (this.level++) { + case 0: + if (!"".equals(pURI) || !"struct".equals(pLocalName)) { + throw new SAXParseException("Expected struct, got " + new QName(pURI, pLocalName), this.getDocumentLocator()); + } + break; + case 1: + if (!"".equals(pURI) || !"member".equals(pLocalName)) { + throw new SAXParseException("Expected member, got " + new QName(pURI, pLocalName), this.getDocumentLocator()); + } + + this.doneValue = this.inName = this.inValue = false; + this.nameObject = null; + this.nameBuffer.setLength(0); + break; + case 2: + if (this.doneValue) { + throw new SAXParseException("Expected /member, got " + new QName(pURI, pLocalName), this.getDocumentLocator()); + } + + if ("".equals(pURI) && "name".equals(pLocalName)) { + if (this.nameObject != null) { + throw new SAXParseException("Expected value, got " + new QName(pURI, pLocalName), this.getDocumentLocator()); + } + + this.inName = true; + } else if ("".equals(pURI) && "value".equals(pLocalName)) { + if (this.nameObject == null) { + throw new SAXParseException("Expected name, got " + new QName(pURI, pLocalName), this.getDocumentLocator()); + } + + this.inValue = true; + this.startValueTag(); + } + break; + case 3: + if (this.inName && "".equals(pURI) && "value".equals(pLocalName)) { + if (!this.cfg.isEnabledForExtensions()) { + throw new SAXParseException("Expected /name, got " + new QName(pURI, pLocalName), this.getDocumentLocator()); + } + + this.inValue = true; + this.startValueTag(); + } else { + 
super.startElement(pURI, pLocalName, pQName, pAttrs); + } + break; + default: + super.startElement(pURI, pLocalName, pQName, pAttrs); + } + + } + + public void endElement(String pURI, String pLocalName, String pQName) throws SAXException { + switch (--this.level) { + case 0: + this.setResult(this.map); + case 1: + break; + case 2: + if (this.inName) { + this.inName = false; + if (this.nameObject == null) { + this.nameObject = this.nameBuffer.toString(); + } else { + for(int i = 0; i < this.nameBuffer.length(); ++i) { + if (!Character.isWhitespace(this.nameBuffer.charAt(i))) { + throw new SAXParseException("Unexpected non-whitespace character in member name", this.getDocumentLocator()); + } + } + } + } else if (this.inValue) { + this.endValueTag(); + this.doneValue = true; + } + break; + case 3: + if (this.inName && this.inValue && "".equals(pURI) && "value".equals(pLocalName)) { + this.endValueTag(); + } else { + super.endElement(pURI, pLocalName, pQName); + } + break; + default: + super.endElement(pURI, pLocalName, pQName); + } + + } + } + + private XmlRpcClient getClientFromURL(URL url, int replyWait, int connWait) + { + config.setTimeZone(TimeZone.getTimeZone("UTC")); + config.setServerURL(url); + config.setReplyTimeout(replyWait * 1000); + config.setConnectionTimeout(connWait * 1000); + XmlRpcClient client = new XmlRpcClient(); + client.setConfig(config); + client.setTypeFactory(new TypeFactoryImpl(client) { + @Override + public TypeParser getParser(XmlRpcStreamConfig pConfig, NamespaceContextImpl pContext, String pURI, String pLocalName) { + TypeParser parser = super.getParser(pConfig, pContext, pURI, pLocalName); + if (parser instanceof MapParser) { + return new CustomMapParser(pConfig, pContext, this); + } + return parser; + } + }); + return client; + } + + @Override + public String getSessionReference() + { + return this.sessionReference; + } + + @Override + protected Map dispatch(String methodCall, Object[] methodParams) throws XmlRpcException, Types.XenAPIException + { + Map response = (Map) client.execute(methodCall, methodParams); + + if (methodCall.equals("session.login_with_password") && + response.get("Status").equals("Success")) + { + Session session = Types.toSession(response.get("Value")); + sessionReference = session.ref; + setAPIVersion(session); + } + else if (methodCall.equals("session.slave_local_login_with_password") && + response.get("Status").equals("Success")) + { + sessionReference = Types.toSession(response.get("Value")).ref; + apiVersion = APIVersion.latest(); + } + else if (methodCall.equals("session.logout")) + { + // Work around a bug in XenServer 5.0 and below. + // session.login_with_password should have rejected us with + // HOST_IS_SLAVE, but instead we don't find out until later. + // We don't want to leak the session, so we need to log out + // this session from the master instead. 
+ if (response.get("Status").equals("Failure")) + { + Object[] error = (Object[]) response.get("ErrorDescription"); + if (error.length == 2 && error[0].equals("HOST_IS_SLAVE")) + { + try + { + XmlRpcHttpClientConfig clientConfig = (XmlRpcHttpClientConfig)client.getClientConfig(); + URL client_url = clientConfig.getServerURL(); + URL masterUrl = new URL(client_url.getProtocol(), (String)error[1], client_url.getPort(), client_url.getFile()); + + Connection tmp_conn = new Connection(masterUrl, sessionReference, clientConfig.getReplyTimeout(), clientConfig.getConnectionTimeout()); + + Session.logout(tmp_conn); + } + catch (Exception ex) + { + // Ignore + } + } + } + + this.sessionReference = null; + } + + return Types.checkResponse(response); + } + + + private void setAPIVersion(Session session) throws Types.XenAPIException, XmlRpcException + { + try + { + long major = session.getThisHost(this).getAPIVersionMajor(this); + long minor = session.getThisHost(this).getAPIVersionMinor(this); + apiVersion = APIVersion.fromMajorMinor(major, minor); + } + catch (Types.BadServerResponse exn) + { + apiVersion = APIVersion.UNKNOWN; + } + } +} diff --git a/pom.xml b/pom.xml index 1f163c6b1fb..d2c13ed7bcd 100644 --- a/pom.xml +++ b/pom.xml @@ -184,7 +184,7 @@ build-217-jenkins-27 8.0 0.5.0 - 6.2.0-3.1 + 8.1.0 3.1.3 1.4.20 5.3.26 diff --git a/scripts/vm/hypervisor/xenserver/xcpserver83/NFSSR.py b/scripts/vm/hypervisor/xenserver/xcpserver83/NFSSR.py new file mode 100755 index 00000000000..45237e59895 --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/xcpserver83/NFSSR.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# FileSR: local-file storage repository + +import SR, VDI, SRCommand, FileSR, util +import errno +import os, re, sys, stat +import time +import xml.dom.minidom +import xs_errors +import nfs +import vhdutil +from lock import Lock +import cleanup + +CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_CACHING", \ + "VDI_CREATE","VDI_DELETE","VDI_ATTACH","VDI_DETACH", \ + "VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE", \ + "VDI_RESIZE_ONLINE", "VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"] + +CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \ + [ 'serverpath', 'path on remote server (required)' ] ] + + +DRIVER_INFO = { + 'name': 'NFS VHD', + 'description': 'SR plugin which stores disks as VHD files on a remote NFS filesystem', + 'vendor': 'The Apache Software Foundation', + 'copyright': 'Copyright (c) 2012 The Apache Software Foundation', + 'driver_version': '1.0', + 'required_api_version': '1.0', + 'capabilities': CAPABILITIES, + 'configuration': CONFIGURATION + } + + +# The mountpoint for the directory when performing an sr_probe. 
All probes +PROBE_MOUNTPOINT = "probe" +NFSPORT = 2049 +DEFAULT_TRANSPORT = "tcp" + + +class NFSSR(FileSR.FileSR): + """NFS file-based storage repository""" + def handles(type): + return type == 'nfs' + handles = staticmethod(handles) + + + def load(self, sr_uuid): + self.ops_exclusive = FileSR.OPS_EXCLUSIVE + self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) + self.sr_vditype = SR.DEFAULT_TAP + if 'server' not in self.dconf: + raise xs_errors.XenError('ConfigServerMissing') + self.remoteserver = self.dconf['server'] + self.path = os.path.join(SR.MOUNT_BASE, sr_uuid) + + # Test for the optional 'nfsoptions' dconf attribute + self.transport = DEFAULT_TRANSPORT + if 'useUDP' in self.dconf and self.dconf['useUDP'] == 'true': + self.transport = "udp" + + + def validate_remotepath(self, scan): + if 'serverpath' not in self.dconf: + if scan: + try: + self.scan_exports(self.dconf['server']) + except: + pass + raise xs_errors.XenError('ConfigServerPathMissing') + if not self._isvalidpathstring(self.dconf['serverpath']): + raise xs_errors.XenError('ConfigServerPathBad', \ + opterr='serverpath is %s' % self.dconf['serverpath']) + + def check_server(self): + try: + nfs.check_server_tcp(self.remoteserver, self.transport) + except nfs.NfsException as exc: + raise xs_errors.XenError('NFSVersion', + opterr=exc.errstr) + + + def mount(self, mountpoint, remotepath): + try: + nfs.soft_mount(mountpoint, self.remoteserver, remotepath, self.transport) + except nfs.NfsException as exc: + raise xs_errors.XenError('NFSMount', opterr=exc.errstr) + + + def attach(self, sr_uuid): + self.validate_remotepath(False) + #self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid) + self.remotepath = self.dconf['serverpath'] + util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget') + self.mount_remotepath(sr_uuid) + + + def mount_remotepath(self, sr_uuid): + if not self._checkmount(): + self.check_server() + self.mount(self.path, self.remotepath) + + return super(NFSSR, self).attach(sr_uuid) + + + def probe(self): + # Verify NFS target and port + util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget') + + self.validate_remotepath(True) + self.check_server() + + temppath = os.path.join(SR.MOUNT_BASE, PROBE_MOUNTPOINT) + + self.mount(temppath, self.dconf['serverpath']) + try: + return nfs.scan_srlist(temppath) + finally: + try: + nfs.unmount(temppath, True) + except: + pass + + + def detach(self, sr_uuid): + """Detach the SR: Unmounts and removes the mountpoint""" + if not self._checkmount(): + return + util.SMlog("Aborting GC/coalesce") + cleanup.abort(self.uuid) + + # Change directory to avoid unmount conflicts + os.chdir(SR.MOUNT_BASE) + + try: + nfs.unmount(self.path, True) + except nfs.NfsException as exc: + raise xs_errors.XenError('NFSUnMount', opterr=exc.errstr) + + return super(NFSSR, self).detach(sr_uuid) + + + def create(self, sr_uuid, size): + util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget') + self.validate_remotepath(True) + if self._checkmount(): + raise xs_errors.XenError('NFSAttached') + + # Set the target path temporarily to the base dir + # so that we can create the target SR directory + self.remotepath = self.dconf['serverpath'] + try: + self.mount_remotepath(sr_uuid) + except Exception as exn: + try: + os.rmdir(self.path) + except: + pass + raise exn + + #newpath = os.path.join(self.path, sr_uuid) + #if util.ioretry(lambda: util.pathexists(newpath)): + # if len(util.ioretry(lambda: util.listdir(newpath))) != 0: + # self.detach(sr_uuid) + # raise xs_errors.XenError('SRExists') + 
#else:
+        #    try:
+        #        util.ioretry(lambda: util.makedirs(newpath))
+        #    except util.CommandException, inst:
+        #        if inst.code != errno.EEXIST:
+        #            self.detach(sr_uuid)
+        #            raise xs_errors.XenError('NFSCreate',
+        #                        opterr='remote directory creation error is %d'
+        #                        % inst.code)
+        self.detach(sr_uuid)
+
+    def delete(self, sr_uuid):
+        # try to remove/delete non VDI contents first
+        super(NFSSR, self).delete(sr_uuid)
+        try:
+            if self._checkmount():
+                self.detach(sr_uuid)
+
+            # Set the target path temporarily to the base dir
+            # so that we can remove the target SR directory
+            self.remotepath = self.dconf['serverpath']
+            self.mount_remotepath(sr_uuid)
+            newpath = os.path.join(self.path, sr_uuid)
+
+            if util.ioretry(lambda: util.pathexists(newpath)):
+                util.ioretry(lambda: os.rmdir(newpath))
+            self.detach(sr_uuid)
+        except util.CommandException as inst:
+            self.detach(sr_uuid)
+            if inst.code != errno.ENOENT:
+                raise xs_errors.XenError('NFSDelete')
+
+    def vdi(self, uuid, loadLocked = False):
+        if not loadLocked:
+            return NFSFileVDI(self, uuid)
+        return NFSFileVDI(self, uuid)
+
+    def _checkmount(self):
+        return util.ioretry(lambda: util.pathexists(self.path)) \
+            and util.ioretry(lambda: util.ismount(self.path))
+
+    def scan_exports(self, target):
+        util.SMlog("scanning2 (target=%s)" % target)
+        dom = nfs.scan_exports(target)
+        print(dom.toprettyxml(), file=sys.stderr)
+
+    def _isvalidpathstring(self, path):
+        if not path.startswith("/"):
+            return False
+        l = self._splitstring(path)
+        for char in l:
+            if char.isalpha():
+                continue
+            elif char.isdigit():
+                continue
+            elif char in ['/','-','_','.',':']:
+                continue
+            else:
+                return False
+        return True
+
+class NFSFileVDI(FileSR.FileVDI):
+    def attach(self, sr_uuid, vdi_uuid):
+        try:
+            vdi_ref = self.sr.srcmd.params['vdi_ref']
+            self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
+                    "vdi-type")
+            self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
+                    "storage-type")
+            self.session.xenapi.VDI.add_to_xenstore_data(vdi_ref, \
+                    "storage-type", "nfs")
+        except:
+            util.logException("NFSSR:attach")
+            pass
+        return super(NFSFileVDI, self).attach(sr_uuid, vdi_uuid)
+
+    def get_mtime(self, path):
+        st = util.ioretry_stat(path)
+        return st[stat.ST_MTIME]
+
+    def clone(self, sr_uuid, vdi_uuid):
+        timestamp_before = int(self.get_mtime(self.sr.path))
+        ret = super(NFSFileVDI, self).clone(sr_uuid, vdi_uuid)
+        timestamp_after = int(self.get_mtime(self.sr.path))
+        if timestamp_after == timestamp_before:
+            util.SMlog("SR dir timestamp didn't change, updating")
+            timestamp_after += 1
+            os.utime(self.sr.path, (timestamp_after, timestamp_after))
+        return ret
+
+
+
+if __name__ == '__main__':
+    SRCommand.run(NFSSR, DRIVER_INFO)
+else:
+    SR.registerSR(NFSSR)
diff --git a/scripts/vm/hypervisor/xenserver/xcpserver83/patch b/scripts/vm/hypervisor/xenserver/xcpserver83/patch
new file mode 100644
index 00000000000..4d90c0cf266
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xcpserver83/patch
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This file specifies the files that need +# +# to be transferred over to the XenServer. +# The format of this file is as follows: +# [Name of file]=[source path],[file permission],[destination path] +# [destination path] is required. +# If [file permission] is missing, 755 is assumed. +# If [source path] is missing, it looks in the same +# directory as the patch file. +# If [source path] starts with '/', then it is absolute path. +# If [source path] starts with '~', then it is path relative to management server home directory. +# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file. +NFSSR.py=/opt/xensource/sm +vmops=../xenserver84/,0755,/etc/xapi.d/plugins +ovstunnel=..,0755,/etc/xapi.d/plugins +vmopsSnapshot=../xenserver84/,0755,/etc/xapi.d/plugins +agent.zip=../../../../../vms,0644,/opt/xensource/packages/resources/ +cloud-scripts.tgz=../../../../../vms,0644,/opt/xensource/packages/resources/ +patch-sysvms.sh=../../../../../vms,0644,/opt/xensource/packages/resources/ +id_rsa.cloud=../../../systemvm,0600,/root/.ssh +network_info.sh=..,0755,/opt/cloud/bin +setupxenserver.sh=..,0755,/opt/cloud/bin +make_migratable.sh=..,0755,/opt/cloud/bin +setup_iscsi.sh=..,0755,/opt/cloud/bin +pingtest.sh=../../..,0755,/opt/cloud/bin +router_proxy.sh=../../../../network/domr/,0755,/opt/cloud/bin +cloud-setup-bonding.sh=..,0755,/opt/cloud/bin +copy_vhd_to_secondarystorage.sh=..,0755,/opt/cloud/bin +copy_vhd_from_secondarystorage.sh=..,0755,/opt/cloud/bin +setup_heartbeat_sr.sh=..,0755,/opt/cloud/bin +setup_heartbeat_file.sh=..,0755,/opt/cloud/bin +check_heartbeat.sh=..,0755,/opt/cloud/bin +xenheartbeat.sh=..,0755,/opt/cloud/bin +launch_hb.sh=..,0755,/opt/cloud/bin +vhd-util=..,0755,/opt/cloud/bin +vmopspremium=../xenserver84/,0755,/etc/xapi.d/plugins +create_privatetemplate_from_snapshot.sh=..,0755,/opt/cloud/bin +upgrade_snapshot.sh=..,0755,/opt/cloud/bin +cloud-clean-vlan.sh=..,0755,/opt/cloud/bin +cloud-prepare-upgrade.sh=..,0755,/opt/cloud/bin +add_to_vcpus_params_live.sh=..,0755,/opt/cloud/bin +cloud-plugin-storage=../xenserver84/,0755,/etc/xapi.d/plugins + +###add cloudstack plugin script for XCP +cloudstack_plugins.conf=..,0644,/etc/xensource +cloudstack_pluginlib.py=../xenserver84/,0755,/etc/xapi.d/plugins +cloudlog=..,0644,/etc/logrotate.d +update_host_passwd.sh=../..,0755,/opt/cloud/bin diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/cloud-plugin-storage b/scripts/vm/hypervisor/xenserver/xenserver84/cloud-plugin-storage new file mode 100644 index 00000000000..670949f8c06 --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/xenserver84/cloud-plugin-storage @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Version @VERSION@ +# +# A plugin for executing script needed by vmops cloud + +import os, sys, time +import XenAPIPlugin +if os.path.exists("/opt/xensource/sm"): + sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"]) +if os.path.exists("/usr/lib/xcp/sm"): + sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"]) + +import SR, VDI, SRCommand, util, lvutil +from util import CommandException +import vhdutil +import shutil +import lvhdutil +import errno +import subprocess +import xs_errors +import cleanup +import stat +import random +import cloudstack_pluginlib as lib +import logging + +lib.setup_logging("/var/log/cloud/cloud.log") + +VHDUTIL = "vhd-util" +VHD_PREFIX = 'VHD-' +CLOUD_DIR = '/var/run/cloud_mount' + +def echo(fn): + def wrapped(*v, **k): + name = fn.__name__ + logging.debug("#### CLOUD enter %s ####" % name ) + res = fn(*v, **k) + logging.debug("#### CLOUD exit %s ####" % name ) + return res + return wrapped + +def getPrimarySRPath(primaryStorageSRUuid, isISCSI): + if isISCSI: + primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid + return os.path.join(lvhdutil.VG_LOCATION, primarySRDir) + else: + return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid) + +def getBackupVHD(UUID): + return UUID + '.' + SR.DEFAULT_TAP + +def getVHD(UUID, isISCSI): + if isISCSI: + return VHD_PREFIX + UUID + else: + return UUID + '.' + SR.DEFAULT_TAP + +def getIsTrueString(stringValue): + booleanValue = False + if (stringValue and stringValue == 'true'): + booleanValue = True + return booleanValue + +def makeUnavailable(uuid, primarySRPath, isISCSI): + if not isISCSI: + return + VHD = getVHD(uuid, isISCSI) + path = os.path.join(primarySRPath, VHD) + manageAvailability(path, '-an') + return + +def manageAvailability(path, value): + if path.__contains__("/var/run/sr-mount"): + return + logging.debug("Setting availability of " + path + " to " + value) + try: + cmd = ['/usr/sbin/lvchange', value, path] + util.pread2(cmd) + except: #CommandException, (rc, cmdListStr, stderr): + #errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr + errMsg = "Unexpected exception thrown by lvchange" + logging.debug(errMsg) + if value == "-ay": + # Raise an error only if we are trying to make it available. + # Just warn if we are trying to make it unavailable after the + # snapshot operation is done. + raise xs_errors.XenError(errMsg) + return + + +def checkVolumeAvailability(path): + try: + if not isVolumeAvailable(path): + # The VHD file is not available on XenSever. The volume is probably + # inactive or detached. 
+            # Do lvchange -ay to make it available on XenServer
+            manageAvailability(path, '-ay')
+    except:
+        errMsg = "Could not determine status of ISCSI path: " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    success = False
+    i = 0
+    while i < 6:
+        i = i + 1
+        # Check if the vhd is actually visible by checking for the link
+        # set isISCSI to true
+        success = isVolumeAvailable(path)
+        if success:
+            logging.debug("Made vhd: " + path + " available and confirmed that it is visible")
+            break
+
+        # Sleep for 10 seconds before checking again.
+        time.sleep(10)
+
+    # If not visible within 1 min fail
+    if not success:
+        logging.debug("Could not make vhd: " + path + " available despite waiting for 1 minute. Does it exist?")
+
+    return success
+
+def isVolumeAvailable(path):
+    # Check if iscsi volume is available on this XenServer.
+    status = "0"
+    try:
+        # universal_newlines=True keeps communicate() returning str rather
+        # than bytes under python3, so strip("\n") below keeps working
+        p = subprocess.Popen(["/bin/bash", "-c", "if [ -L " + path + " ]; then echo 1; else echo 0;fi"], stdout=subprocess.PIPE, universal_newlines=True)
+        status = p.communicate()[0].strip("\n")
+    except:
+        errMsg = "Could not determine status of ISCSI path: " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    return (status == "1")
+
+def scanParent(path):
+    # Do a scan for the parent for ISCSI volumes
+    # Note that the parent need not be visible on the XenServer
+    parentUUID = ''
+    try:
+        lvName = os.path.basename(path)
+        dirname = os.path.dirname(path)
+        vgName = os.path.basename(dirname)
+        vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
+        parentUUID = vhdInfo.parentUuid
+    except:
+        errMsg = "Could not get vhd parent of " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return parentUUID
+
+def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
+    snapshotVHD = getVHD(snapshotUuid, isISCSI)
+    snapshotPath = os.path.join(primarySRPath, snapshotVHD)
+
+    baseCopyUuid = ''
+    if isISCSI:
+        checkVolumeAvailability(snapshotPath)
+        baseCopyUuid = scanParent(snapshotPath)
+    else:
+        baseCopyUuid = getParent(snapshotPath, isISCSI)
+
+    logging.debug("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
+    return baseCopyUuid
+
+def getParent(path, isISCSI):
+    parentUUID = ''
+    try:
+        if isISCSI:
+            parentUUID = vhdutil.getParent(path, lvhdutil.extractUuid)
+        else:
+            parentUUID = vhdutil.getParent(path, cleanup.FileVDI.extractUuid)
+    except:
+        errMsg = "Could not get vhd parent of " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return parentUUID
+
+def getVhdParent(session, args):
+    logging.debug("getParent with " + str(args))
+    try:
+        primaryStorageSRUuid = args['primaryStorageSRUuid']
+        snapshotUuid = args['snapshotUuid']
+        isISCSI = getIsTrueString(args['isISCSI'])
+
+        primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
+        logging.debug("primarySRPath: " + primarySRPath)
+
+        baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
+
+        return baseCopyUuid
+    except:
+        logging.debug('getVhdParent', exc_info=True)
+        raise xs_errors.XenError("Failed to getVhdParent")
+
+def makedirs(path):
+    if not os.path.isdir(path):
+        try:
+            os.makedirs(path)
+        except OSError as e:
+            umount(path)
+            if os.path.isdir(path):
+                return
+            errMsg = "OSError while creating " + path + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+    return
+
+def umount(localDir):
+    try:
+        cmd = ['umount', localDir]
+        util.pread2(cmd)
+    except CommandException:
+        errMsg = "CommandException raised while trying to 
umount " + localDir + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + + logging.debug("Successfully unmounted " + localDir) + return + +@echo +def mountNfsSecondaryStorage(session, args): + remoteDir = args['remoteDir'] + localDir = args['localDir'] + nfsVersion = args['nfsVersion'] + logging.debug("mountNfsSecondaryStorage with params: " + str(args)) + mounted = False + f = open("/proc/mounts", 'r') + for line in f: + tokens = line.split(" ") + if len(tokens) > 2 and tokens[0] == remoteDir and tokens[1] == localDir: + mounted = True + + if mounted: + return "true" + + makedirs(localDir) + options = "soft,tcp,timeo=133,retrans=1" + if nfsVersion: + options += ",vers=" + nfsVersion + try: + cmd = ['mount', '-o', options, remoteDir, localDir] + txt = util.pread2(cmd) + except: + txt = '' + errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + logging.debug("Successfully mounted " + remoteDir + " to " + localDir) + + return "true" + +@echo +def umountNfsSecondaryStorage(session, args): + localDir = args['localDir'] + try: + cmd = ['umount', localDir] + util.pread2(cmd) + except CommandException: + errMsg = "CommandException raised while trying to umount " + localDir + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + try: + os.system("rmdir " + localDir) + except: + pass + logging.debug("Successfully unmounted " + localDir) + return "true" + +@echo +def makeDirectory(session, args): + path = args['path'] + if not os.path.isdir(path): + try: + os.makedirs(path) + except OSError as e: + if os.path.isdir(path): + return "true" + errMsg = "OSError while creating " + path + " with errno: " + str(e.errno) + " and strerr: " + e.strerror + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + return "true" + +if __name__ == "__main__": + XenAPIPlugin.dispatch({"getVhdParent":getVhdParent, "mountNfsSecondaryStorage":mountNfsSecondaryStorage, + "umountNfsSecondaryStorage":umountNfsSecondaryStorage, + "makeDirectory":makeDirectory}) diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/cloudstack_pluginlib.py b/scripts/vm/hypervisor/xenserver/xenserver84/cloudstack_pluginlib.py new file mode 100644 index 00000000000..effe7055e59 --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/xenserver84/cloudstack_pluginlib.py @@ -0,0 +1,894 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
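
The shared plugin library that follows reads its logging settings from /etc/xensource/cloudstack_plugins.conf through Python 3's configparser module (the renamed ConfigParser of Python 2). Below is a minimal sketch of the [LOGGING] section that setup_logging() further down understands; the option values shown are illustrative, not shipped defaults:

    import configparser

    # Options recognised by setup_logging(): debug, verbose, format,
    # date_format and file.
    SAMPLE = (
        "[LOGGING]\n"
        "debug = false\n"
        "verbose = true\n"
        "file = /var/log/cloudstack_plugins.log\n"
    )

    config = configparser.ConfigParser()
    config.read_string(SAMPLE)
    assert config.getboolean("LOGGING", "verbose") is True
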
+
+# Common function for Cloudstack's XenAPI plugins
+
+import configparser
+import logging
+import os
+import subprocess
+try:
+    import simplejson as json
+except ImportError:
+    import json
+import copy
+
+from time import localtime, asctime
+
+DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+DEFAULT_LOG_FILE = "/var/log/cloudstack_plugins.log"
+
+PLUGIN_CONFIG_PATH = "/etc/xensource/cloudstack_plugins.conf"
+OVSDB_PID_PATH = "/var/run/openvswitch/ovsdb-server.pid"
+OVSDB_DAEMON_PATH = "ovsdb-server"
+OVS_PID_PATH = "/var/run/openvswitch/ovs-vswitchd.pid"
+OVS_DAEMON_PATH = "ovs-vswitchd"
+VSCTL_PATH = "/usr/bin/ovs-vsctl"
+OFCTL_PATH = "/usr/bin/ovs-ofctl"
+XE_PATH = "/opt/xensource/bin/xe"
+
+# OpenFlow tables set in a pipeline processing fashion for the bridge created for VPCs that are enabled for
+# distributed routing.
+# L2 path (intra-tier traffic) CLASSIFIER-> L2 lookup -> L2 flooding tables
+# L3 path (inter-tier traffic) CLASSIFIER-> EGRESS ACL -> L3 lookup -> INGRESS ACL-> L2 lookup -> L2 flooding tables
+
+# Classifier table has the rules to separate broadcast/multi-cast traffic, inter-tier traffic, intra-tier traffic
+CLASSIFIER_TABLE=0
+# Lookup table to determine the output port (vif/tunnel port) based on the MAC address
+L2_LOOKUP_TABLE=1
+# flooding table has the rules to flood on ports (both VIF, tunnel ports) except on the port on which packet arrived
+L2_FLOOD_TABLE=2
+# table has flow rules derived from egress ACL's
+EGRESS_ACL_TABLE=3
+# Lookup table to determine the output port (vif/tunnel port) based on the IP address
+L3_LOOKUP_TABLE=4
+# table has flow rules derived from ingress ACL's
+INGRESS_ACL_TABLE=5
+
+class PluginError(Exception):
+    """Base Exception class for all plugin errors."""
+    def __init__(self, *args):
+        Exception.__init__(self, *args)
+
+
+def setup_logging(log_file=None):
+    debug = False
+    verbose = False
+    log_format = DEFAULT_LOG_FORMAT
+    log_date_format = DEFAULT_LOG_DATE_FORMAT
+    # initialise so the fallback below cannot hit an unbound name when the
+    # configuration file is absent or has no 'file' option
+    log_file_2 = None
+    # try to read plugin configuration file
+    if os.path.exists(PLUGIN_CONFIG_PATH):
+        config = configparser.ConfigParser()
+        config.read(PLUGIN_CONFIG_PATH)
+        try:
+            options = config.options('LOGGING')
+            if 'debug' in options:
+                debug = config.getboolean('LOGGING', 'debug')
+            if 'verbose' in options:
+                verbose = config.getboolean('LOGGING', 'verbose')
+            if 'format' in options:
+                log_format = config.get('LOGGING', 'format')
+            if 'date_format' in options:
+                log_date_format = config.get('LOGGING', 'date_format')
+            if 'file' in options:
+                log_file_2 = config.get('LOGGING', 'file')
+        except ValueError:
+            # configuration file contained invalid attributes
+            # ignore them
+            pass
+        except configparser.NoSectionError:
+            # Missing 'LOGGING' section in configuration file
+            pass
+
+    root_logger = logging.root
+    if debug:
+        root_logger.setLevel(logging.DEBUG)
+    elif verbose:
+        root_logger.setLevel(logging.INFO)
+    else:
+        root_logger.setLevel(logging.WARNING)
+    formatter = logging.Formatter(log_format, log_date_format)
+
+    log_filename = log_file or log_file_2 or DEFAULT_LOG_FILE
+
+    logfile_handler = logging.FileHandler(log_filename)
+    logfile_handler.setFormatter(formatter)
+    root_logger.addHandler(logfile_handler)
+
+
+def do_cmd(cmd):
+    """Abstracts out the basics of issuing system commands. If the command
+    returns anything in stderr, a PluginError is raised with that information.
+    Otherwise, the output from stdout is returned. 
+    """
+
+    pipe = subprocess.PIPE
+    logging.debug("Executing:%s", cmd)
+    # universal_newlines=True makes stdout/stderr str rather than bytes
+    # under python3, so the string handling below keeps working
+    proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
+                            stderr=pipe, close_fds=True, universal_newlines=True)
+    ret_code = proc.wait()
+    err = proc.stderr.read()
+    if ret_code:
+        logging.debug("The command exited with the error code: " +
+                      "%s (stderr output:%s)" % (ret_code, err))
+        raise PluginError(err)
+    output = proc.stdout.read()
+    if output.endswith('\n'):
+        output = output[:-1]
+    return output
+
+
+def _is_process_run(pidFile, name):
+    try:
+        fpid = open(pidFile, "r")
+        pid = fpid.readline()
+        fpid.close()
+    except IOError as e:
+        return -1
+
+    pid = pid[:-1]
+    ps = os.popen("ps -ae")
+    for l in ps:
+        if pid in l and name in l:
+            ps.close()
+            return 0
+
+    ps.close()
+    return -2
+
+
+def _is_tool_exist(name):
+    if os.path.exists(name):
+        return 0
+    return -1
+
+
+def check_switch():
+    global result
+
+    ret = _is_process_run(OVSDB_PID_PATH, OVSDB_DAEMON_PATH)
+    if ret < 0:
+        if ret == -1:
+            return "NO_DB_PID_FILE"
+        if ret == -2:
+            return "DB_NOT_RUN"
+
+    ret = _is_process_run(OVS_PID_PATH, OVS_DAEMON_PATH)
+    if ret < 0:
+        if ret == -1:
+            return "NO_SWITCH_PID_FILE"
+        if ret == -2:
+            return "SWITCH_NOT_RUN"
+
+    if _is_tool_exist(VSCTL_PATH) < 0:
+        return "NO_VSCTL"
+
+    if _is_tool_exist(OFCTL_PATH) < 0:
+        return "NO_OFCTL"
+
+    return "SUCCESS"
+
+
+def _build_flow_expr(**kwargs):
+    is_delete_expr = kwargs.get('delete', False)
+    flow = ""
+    if not is_delete_expr:
+        flow = "hard_timeout=%s,idle_timeout=%s,priority=%s" \
+            % (kwargs.get('hard_timeout', '0'),
+               kwargs.get('idle_timeout', '0'),
+               kwargs.get('priority', '1'))
+    in_port = 'in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or ''
+    dl_type = 'dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or ''
+    dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
+    dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
+    nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
+    nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
+    table = 'table' in kwargs and ",table=%s" % kwargs['table'] or ''
+    cookie = 'cookie' in kwargs and ",cookie=%s" % kwargs['cookie'] or ''
+    proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
+    ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
+    flow = (flow + cookie + in_port + dl_type + dl_src + dl_dst +
+            (ip or proto) + nw_src + nw_dst + table)
+    return flow
+
+
+def add_flow(bridge, **kwargs):
+    """
+    Builds a flow expression for **kwargs and adds the flow entry
+    to an Open vSwitch instance
+    """
+    flow = _build_flow_expr(**kwargs)
+    actions = 'actions' in kwargs and ",actions=%s" % kwargs['actions'] or ''
+    flow = flow + actions
+    addflow = [OFCTL_PATH, "add-flow", bridge, flow]
+    do_cmd(addflow)
+
+
+def del_flows(bridge, **kwargs):
+    """
+    Removes flows according to criteria passed as keyword. 
+ """ + flow = _build_flow_expr(delete=True, **kwargs) + # out_port condition does not exist for all flow commands + out_port = ("out_port" in kwargs and + ",out_port=%s" % kwargs['out_port'] or '') + flow = flow + out_port + delFlow = [OFCTL_PATH, 'del-flows', bridge, flow] + do_cmd(delFlow) + + +def del_all_flows(bridge): + delFlow = [OFCTL_PATH, "del-flows", bridge] + do_cmd(delFlow) + + normalFlow = "priority=0 idle_timeout=0 hard_timeout=0 actions=normal" + add_flow(bridge, normalFlow) + + +def del_port(bridge, port): + delPort = [VSCTL_PATH, "del-port", bridge, port] + do_cmd(delPort) + +def get_network_id_for_vif(vif_name): + domain_id, device_id = vif_name[3:len(vif_name)].split(".") + hostname = do_cmd(["/bin/bash", "-c", "hostname"]) + this_host_uuid = do_cmd([XE_PATH, "host-list", "hostname=%s" % hostname, "--minimal"]) + dom_uuid = do_cmd([XE_PATH, "vm-list", "dom-id=%s" % domain_id, "resident-on=%s" %this_host_uuid, "--minimal"]) + vif_uuid = do_cmd([XE_PATH, "vif-list", "vm-uuid=%s" % dom_uuid, "device=%s" % device_id, "--minimal"]) + vnet = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=other-config", + "param-key=cloudstack-network-id"]) + return vnet + +def get_network_id_for_tunnel_port(tunnelif_name): + vnet = do_cmd([VSCTL_PATH, "get", "interface", tunnelif_name, "options:cloudstack-network-id"]) + return vnet + +def clear_flooding_rules_for_port(bridge, ofport): + del_flows(bridge, in_port=ofport, table=L2_FLOOD_TABLE) + +def clear_flooding_rules_for_all_ports(bridge): + del_flows(bridge, cookie=111, table=L2_FLOOD_TABLE) + +def add_flooding_rules_for_port(bridge, in_ofport, out_ofports): + action = "".join("output:%s," %ofport for ofport in out_ofports)[:-1] + add_flow(bridge, cookie=111, priority=1100, in_port=in_ofport, table=L2_FLOOD_TABLE, actions=action) + +def get_ofport_for_vif(vif_name): + return do_cmd([VSCTL_PATH, "get", "interface", vif_name, "ofport"]) + +def get_macaddress_of_vif(vif_name): + domain_id, device_id = vif_name[3:len(vif_name)].split(".") + dom_uuid = do_cmd([XE_PATH, "vm-list", "dom-id=%s" % domain_id, "--minimal"]) + vif_uuid = do_cmd([XE_PATH, "vif-list", "vm-uuid=%s" % dom_uuid, "device=%s" % device_id, "--minimal"]) + mac = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=MAC"]) + return mac + +def get_vif_name_from_macaddress(macaddress): + vif_uuid = do_cmd([XE_PATH, "vif-list", "MAC=%s" % macaddress, "--minimal"]) + vif_device_id = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=device"]) + vm_uuid = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=vm-uuid"]) + vm_domain_id = do_cmd([XE_PATH, "vm-param-get", "uuid=%s" % vm_uuid, "param-name=dom-id"]) + return "vif"+vm_domain_id+"."+vif_device_id + +def add_mac_lookup_table_entry(bridge, mac_address, out_of_port): + action = "output=%s" %out_of_port + add_flow(bridge, priority=1100, dl_dst=mac_address, table=L2_LOOKUP_TABLE, actions=action) + +def delete_mac_lookup_table_entry(bridge, mac_address): + del_flows(bridge, dl_dst=mac_address, table=L2_LOOKUP_TABLE) + +def add_ip_lookup_table_entry(bridge, ip, dst_tier_gateway_mac, dst_vm_mac): + action_str = "mod_dl_src:%s" % dst_tier_gateway_mac + ",mod_dl_dst:%s" % dst_vm_mac + ",resubmit(,%s)"%INGRESS_ACL_TABLE + action_str = "table=%s"%L3_LOOKUP_TABLE + ", ip, nw_dst=%s" % ip + ", actions=%s" %action_str + addflow = [OFCTL_PATH, "add-flow", bridge, action_str] + do_cmd(addflow) + +def get_vpc_vms_on_host(vpc, host_id): + all_vms = vpc.vms + vms_on_host = [] + 
+
+
+def get_vpc_vms_on_host(vpc, host_id):
+    all_vms = vpc.vms
+    vms_on_host = []
+    for vm in all_vms:
+        if str(vm.hostid) == str(host_id):
+            vms_on_host.append(vm)
+    return vms_on_host
+
+
+def get_network_details(vpc, network_uuid):
+    tiers = vpc.tiers
+    for tier in tiers:
+        if str(tier.networkuuid) == str(network_uuid):
+            return tier
+    return None
+
+
+class jsonLoader(object):
+    def __init__(self, obj):
+        for k in obj:
+            v = obj[k]
+            if isinstance(v, dict):
+                setattr(self, k, jsonLoader(v))
+            elif isinstance(v, (list, tuple)):
+                if len(v) > 0 and isinstance(v[0], dict):
+                    setattr(self, k, [jsonLoader(elem) for elem in v])
+                else:
+                    setattr(self, k, v)
+            else:
+                setattr(self, k, v)
+
+    def __getattr__(self, val):
+        if val in self.__dict__:
+            return self.__dict__[val]
+        else:
+            return None
+
+    def __repr__(self):
+        # dict.iteritems() no longer exists under python3; items() works on both
+        return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v)
+                                      in self.__dict__.items()))
+
+    def __str__(self):
+        return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v)
+                                      in self.__dict__.items()))
+
+
+def get_acl(vpcconfig, required_acl_id):
+    acls = vpcconfig.acls
+    for acl in acls:
+        if acl.id == required_acl_id:
+            return acl
+    return None
+
+
+def check_tunnel_exists(bridge, tunnel_name):
+    try:
+        res = do_cmd([VSCTL_PATH, "port-to-br", tunnel_name])
+        return res == bridge
+    except PluginError:
+        # ovs-vsctl exits non-zero when the port does not exist
+        return False
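+
+# Illustrative sketch (not part of the original plugin): jsonLoader wraps a
+# parsed JSON dict so that nested keys read as attributes, which is how the
+# VPC topology/policy payloads further below are consumed.
+#
+#   >>> cfg = jsonLoader(json.loads('{"vpc": {"cidr": "10.1.0.0/16"}}'))
+#   >>> cfg.vpc.cidr
+#   '10.1.0.0/16'
+#   >>> cfg.vpc.missing is None    # unknown attributes resolve to None
+#   True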
+
+
+def create_tunnel(bridge, remote_ip, gre_key, src_host, dst_host, network_uuid):
+
+    logging.debug("Creating tunnel from host %s to host %s with GRE key %s"
+                  % (src_host, dst_host, gre_key))
+
+    res = check_switch()
+    if res != "SUCCESS":
+        logging.debug("Openvswitch running: NO")
+        return "FAILURE:%s" % res
+
+    # We need to keep the name below 14 characters
+    # src and target are enough - consider a fixed length hash
+    name = "t%s-%s-%s" % (gre_key, src_host, dst_host)
+
+    # Verify the xapi bridge to be created
+    # NOTE: Timeout should not be necessary anymore
+    wait = [VSCTL_PATH, "--timeout=30", "wait-until", "bridge",
+            bridge, "--", "get", "bridge", bridge, "name"]
+    res = do_cmd(wait)
+    if bridge not in res:
+        logging.debug("WARNING:Can't find bridge %s for creating tunnel!"
+                      % bridge)
+        return "FAILURE:NO_BRIDGE"
+    logging.debug("bridge %s for creating tunnel - VERIFIED" % bridge)
+    tunnel_setup = False
+    drop_flow_setup = False
+    try:
+        # Create a port and configure the tunnel interface for it
+        add_tunnel = [VSCTL_PATH, "add-port", bridge,
+                      name, "--", "set", "interface",
+                      name, "type=gre", "options:key=%s" % gre_key,
+                      "options:remote_ip=%s" % remote_ip]
+        do_cmd(add_tunnel)
+        tunnel_setup = True
+        # verify port
+        verify_port = [VSCTL_PATH, "get", "port", name, "interfaces"]
+        res = do_cmd(verify_port)
+        # Expecting python-style list as output
+        iface_list = []
+        if len(res) > 2:
+            iface_list = res.strip()[1:-1].split(',')
+        if len(iface_list) != 1:
+            logging.debug("WARNING: Unexpected output while verifying " +
+                          "port %s on bridge %s" % (name, bridge))
+            return "FAILURE:VERIFY_PORT_FAILED"
+
+        # verify interface
+        iface_uuid = iface_list[0]
+        verify_interface_key = [VSCTL_PATH, "get", "interface",
+                                iface_uuid, "options:key"]
+        verify_interface_ip = [VSCTL_PATH, "get", "interface",
+                               iface_uuid, "options:remote_ip"]
+
+        key_validation = do_cmd(verify_interface_key)
+        ip_validation = do_cmd(verify_interface_ip)
+
+        if gre_key not in key_validation or remote_ip not in ip_validation:
+            logging.debug("WARNING: Unexpected output while verifying " +
+                          "interface %s on bridge %s" % (name, bridge))
+            return "FAILURE:VERIFY_INTERFACE_FAILED"
+        # log the validated remote_ip value rather than the command list
+        logging.debug("Tunnel interface validated:%s" % ip_validation)
+        cmd_tun_ofport = [VSCTL_PATH, "get", "interface",
+                          iface_uuid, "ofport"]
+        tun_ofport = do_cmd(cmd_tun_ofport)
+        # Ensure no trailing LF
+        if tun_ofport.endswith('\n'):
+            tun_ofport = tun_ofport[:-1]
+        # find the xs network for this bridge, verify whether it is used for an ovs tunnel network
+        xs_nw_uuid = do_cmd([XE_PATH, "network-list",
+                             "bridge=%s" % bridge, "--minimal"])
+
+        ovs_tunnel_network = is_regular_tunnel_network(xs_nw_uuid)
+        ovs_vpc_distributed_vr_network = is_vpc_network_with_distributed_routing(xs_nw_uuid)
+
+        if ovs_tunnel_network == 'True':
+            # add flow entries for dropping broadcast coming in from the gre tunnel
+            add_flow(bridge, priority=1000, in_port=tun_ofport,
+                     dl_dst='ff:ff:ff:ff:ff:ff', actions='drop')
+            add_flow(bridge, priority=1000, in_port=tun_ofport,
+                     nw_dst='224.0.0.0/24', actions='drop')
+            drop_flow_setup = True
+            logging.debug("Broadcast drop rules added")
+
+        if ovs_vpc_distributed_vr_network == 'True':
+            # add flow rules for dropping broadcast coming in from tunnel ports
+            add_flow(bridge, priority=1000, in_port=tun_ofport, table=0,
+                     dl_dst='ff:ff:ff:ff:ff:ff', actions='drop')
+            add_flow(bridge, priority=1000, in_port=tun_ofport, table=0,
+                     nw_dst='224.0.0.0/24', actions='drop')
+
+            # add flow rule to send the traffic from tunnel ports to the L2 switching table only
+            add_flow(bridge, priority=1100, in_port=tun_ofport, table=0, actions='resubmit(,1)')
+
+            # mark the tunnel interface with the network id for which this tunnel was created
+            do_cmd([VSCTL_PATH, "set", "interface", name, "options:cloudstack-network-id=%s" % network_uuid])
+            update_flooding_rules_on_port_plug_unplug(bridge, name, 'online', network_uuid)
+
+        logging.debug("Successfully created tunnel from host %s to host %s with GRE key %s"
+                      % (src_host, dst_host, gre_key))
+        return "SUCCESS:%s" % name
+    except:
+        logging.debug("An unexpected error occurred. Rolling back")
+        if tunnel_setup:
+            logging.debug("Deleting GRE interface")
+            # Destroy GRE port and interface
+            del_port(bridge, name)
+        if drop_flow_setup:
+            # Delete flows
+            logging.debug("Deleting flow entries from GRE interface")
+            del_flows(bridge, in_port=tun_ofport)
+        # This will not cancel the original exception
+        raise
Rolling back") + if tunnel_setup: + logging.debug("Deleting GRE interface") + # Destroy GRE port and interface + del_port(bridge, name) + if drop_flow_setup: + # Delete flows + logging.debug("Deleting flow entries from GRE interface") + del_flows(bridge, in_port=tun_ofport) + # This will not cancel the original exception + raise + +# Configures the bridge created for a VPC that is enabled for distributed routing. Management server sends VPC +# physical topology details (which VM from which tier running on which host etc). Based on the VPC physical topology L2 +# lookup table and L3 lookup tables are updated by this function. +def configure_vpc_bridge_for_network_topology(bridge, this_host_id, json_config, sequence_no): + + vpconfig = jsonLoader(json.loads(json_config)).vpc + if vpconfig is None: + logging.debug("WARNING:Can't find VPC topology information in the json configuration file") + return "FAILURE:IMPROPER_JSON_CONFG_FILE" + + try: + if not os.path.exists('/var/run/cloud'): + os.makedirs('/var/run/cloud') + + # create a temporary file to store OpenFlow rules corresponding to L2 and L3 lookup table updates + ofspec_filename = "/var/run/cloud/" + bridge + sequence_no + ".ofspec" + ofspec = open(ofspec_filename, 'w+') + + # get the list of VM's in all the tiers of VPC running in this host from the JSON config + this_host_vms = get_vpc_vms_on_host(vpconfig, this_host_id) + + for vm in this_host_vms: + for nic in vm.nics: + mac_addr = nic.macaddress + ip = nic.ipaddress + vif_name = get_vif_name_from_macaddress(mac_addr) + of_port = get_ofport_for_vif(vif_name) + network = get_network_details(vpconfig, nic.networkuuid) + + # Add OF rule in L2 look up table, if packet's destination mac matches MAC of the VM's nic + # then send packet on the found OFPORT + ofspec.write("table=%s" %L2_LOOKUP_TABLE + " priority=1100 dl_dst=%s " %mac_addr + + " actions=output:%s" %of_port + "\n") + + # Add OF rule in L3 look up table: if packet's destination IP matches VM's IP then modify the packet + # to set DST MAC = VM's MAC, SRC MAC= destination tier gateway MAC and send to egress table. This step + # emulates steps VPC virtual router would have done on the current host itself. 
+ action_str = " mod_dl_src:%s"%network.gatewaymac + ",mod_dl_dst:%s" % mac_addr \ + + ",resubmit(,%s)"%INGRESS_ACL_TABLE + action_str = "table=%s"%L3_LOOKUP_TABLE + " ip nw_dst=%s"%ip + " actions=%s" %action_str + ofspec.write(action_str + "\n") + + # Add OF rule to send intra-tier traffic from this nic of the VM to L2 lookup path (L2 switching) + action_str = "table=%s" %CLASSIFIER_TABLE + " priority=1200 in_port=%s " %of_port + \ + " ip nw_dst=%s " %network.cidr + " actions=resubmit(,%s)" %L2_LOOKUP_TABLE + ofspec.write(action_str + "\n") + + # Add OF rule to send inter-tier traffic from this nic of the VM to egress ACL table(L3 lookup path) + action_str = "table=%s "%CLASSIFIER_TABLE + " priority=1100 in_port=%s " %of_port + \ + " ip dl_dst=%s " %network.gatewaymac + " nw_dst=%s " %vpconfig.cidr + \ + " actions=resubmit(,%s)" %EGRESS_ACL_TABLE + ofspec.write(action_str + "\n") + + # get the list of hosts on which VPC spans from the JSON config + vpc_spanning_hosts = vpconfig.hosts + + for host in vpc_spanning_hosts: + if str(this_host_id) == str(host.hostid): + continue + + other_host_vms = get_vpc_vms_on_host(vpconfig, str(host.hostid)) + + for vm in other_host_vms: + for nic in vm.nics: + mac_addr = nic.macaddress + ip = nic.ipaddress + network = get_network_details(vpconfig, nic.networkuuid) + gre_key = network.grekey + + # generate tunnel name as per the tunnel naming convention + tunnel_name = "t%s-%s-%s" % (gre_key, this_host_id, host.hostid) + + # check if tunnel exists already, if not create a tunnel from this host to remote host + if not check_tunnel_exists(bridge, tunnel_name): + create_tunnel(bridge, str(host.ipaddress), str(gre_key), this_host_id, + host.hostid, network.networkuuid) + + of_port = get_ofport_for_vif(tunnel_name) + + # Add flow rule in L2 look up table, if packet's destination mac matches MAC of the VM's nic + # on the remote host then send packet on the found OFPORT corresponding to the tunnel + ofspec.write("table=%s" %L2_LOOKUP_TABLE + " priority=1100 dl_dst=%s " %mac_addr + + " actions=output:%s" %of_port + "\n") + + # Add flow rule in L3 look up table. if packet's destination IP matches VM's IP then modify the + # packet to set DST MAC = VM's MAC, SRC MAC=tier gateway MAC and send to ingress table. This step + # emulates steps VPC virtual router would have done on the current host itself. + action_str = "mod_dl_src:%s"%network.gatewaymac + ",mod_dl_dst:%s" % mac_addr + \ + ",resubmit(,%s)"%INGRESS_ACL_TABLE + action_str = "table=%s"%L3_LOOKUP_TABLE + " ip nw_dst=%s"%ip + " actions=%s" %action_str + ofspec.write(action_str + "\n") + + # add a default rule in L2_LOOKUP_TABLE to send unknown mac address to L2 flooding table + ofspec.write("table=%s "%L2_LOOKUP_TABLE + " priority=0 " + " actions=resubmit(,%s)"%L2_FLOOD_TABLE + "\n") + + # add a default rule in L3 lookup table to forward (unknown destination IP) packets to L2 lookup table. 
+        ofspec.write("table=%s " % L3_LOOKUP_TABLE + " priority=0 " + " actions=resubmit(,%s)" % L2_LOOKUP_TABLE + "\n")
+
+        # First flush the current L2_LOOKUP_TABLE & L3_LOOKUP_TABLE before re-applying the L2 & L3 lookup entries
+        del_flows(bridge, table=L2_LOOKUP_TABLE)
+        del_flows(bridge, table=L3_LOOKUP_TABLE)
+
+        ofspec.seek(0)
+        logging.debug("Adding below flow rules in the L2 & L3 lookup tables:\n" + ofspec.read())
+        ofspec.close()
+
+        # update the bridge with the flow rules for L2 and L3 lookup from the file in one attempt
+        do_cmd([OFCTL_PATH, 'add-flows', bridge, ofspec_filename])
+
+        # now that the bridge is updated with the flow rules, delete the file.
+        os.remove(ofspec_filename)
+
+        return "SUCCESS: successfully configured bridge as per the VPC topology update with sequence no: %s" % sequence_no
+
+    except Exception as e:
+        error_message = "An unexpected error occurred while configuring bridge " + bridge + \
+                        " as per latest VPC topology update with sequence no: %s" % sequence_no
+        logging.debug(error_message + " due to " + str(e))
+        if os.path.isfile(ofspec_filename):
+            os.remove(ofspec_filename)
+        # python3 does not allow raising a plain string; wrap it in PluginError
+        raise PluginError(error_message)
+
+
+# Configures the bridge created for a VPC that is enabled for distributed firewall. The management server sends the
+# VPC's routing policy details (the network ACLs applied on the tiers, etc.). Based on the VPC routing policies, this
+# function updates the ingress ACL and egress ACL tables.
+def configure_vpc_bridge_for_routing_policies(bridge, json_config, sequence_no):
+
+    vpconfig = jsonLoader(json.loads(json_config)).vpc
+    if vpconfig is None:
+        logging.debug("WARNING: Can't find VPC routing policies info in the json config file")
+        return "FAILURE:IMPROPER_JSON_CONFG_FILE"
+
+    try:
+
+        if not os.path.exists('/var/run/cloud'):
+            os.makedirs('/var/run/cloud')
+
+        # create a temporary file to store OpenFlow rules corresponding to ingress and egress ACL table updates
+        ofspec_filename = "/var/run/cloud/" + bridge + sequence_no + ".ofspec"
+        ofspec = open(ofspec_filename, 'w+')
+
+        tiers = vpconfig.tiers
+        for tier in tiers:
+            tier_cidr = tier.cidr
+            acl = get_acl(vpconfig, tier.aclid)
+            acl_items = acl.aclitems
+
+            for acl_item in acl_items:
+                number = acl_item.number
+                action = acl_item.action
+                direction = acl_item.direction
+                source_port_start = acl_item.sourceportstart
+                source_port_end = acl_item.sourceportend
+                protocol = acl_item.protocol
+                if protocol == "all":
+                    protocol = "*"
+                elif protocol == "tcp":
+                    protocol = "6"
+                elif protocol == "udp":
+                    # assignment, not '==': the original comparison left the
+                    # protocol names unmapped for udp and icmp
+                    protocol = "17"
+                elif protocol == "icmp":
+                    protocol = "1"
+                source_cidrs = acl_item.sourcecidrs
+                acl_priority = 1000 + number
+                if direction == "ingress":
+                    matching_table = INGRESS_ACL_TABLE
+                    resubmit_table = L2_LOOKUP_TABLE
+                elif direction == "egress":
+                    matching_table = EGRESS_ACL_TABLE
+                    resubmit_table = L3_LOOKUP_TABLE
+
+                for source_cidr in source_cidrs:
+                    if source_port_start is None and source_port_end is None:
+                        if source_cidr.startswith('0.0.0.0'):
+                            if action == "deny":
+                                if direction == "ingress":
+                                    ofspec.write("table=%s " % matching_table + " priority=%s " % acl_priority + " ip " +
+                                                 " nw_dst=%s " % tier_cidr + " nw_proto=%s " % protocol +
+                                                 " actions=drop" + "\n")
+                                else:
+                                    ofspec.write("table=%s " % matching_table + " priority=%s " % acl_priority + " ip " +
+                                                 " nw_src=%s " % tier_cidr + " nw_proto=%s " % protocol +
+                                                 " actions=drop" + "\n")
"%matching_table + " priority=%s " %acl_priority + " ip " + + " nw_dst=%s " %tier_cidr + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " nw_src=%s " %tier_cidr + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + else: + if action == "deny": + if direction == "ingress": + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " nw_src=%s " %source_cidr + " nw_dst=%s " %tier_cidr + + " nw_proto=%s " %protocol + " actions=drop" + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " nw_src=%s " %tier_cidr + " nw_dst=%s " %source_cidr + + " nw_proto=%s " %protocol + " actions=drop" + "\n") + if action == "allow": + if direction == "ingress": + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " nw_src=%s "%source_cidr + " nw_dst=%s " %tier_cidr + + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " nw_src=%s "%tier_cidr + " nw_dst=%s " %source_cidr + + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + continue + + # add flow rule to do action (allow/deny) for flows where source IP of the packet is in + # source_cidr and destination ip is in tier_cidr + port = int(source_port_start) + while (port <= int(source_port_end)): + if source_cidr.startswith('0.0.0.0'): + if action == "deny": + if direction == "ingress": + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_dst=%s " %tier_cidr + + " nw_proto=%s " %protocol + " actions=drop" + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_src=%s " %tier_cidr + + " nw_proto=%s " %protocol + " actions=drop" + "\n") + if action == "allow": + if direction == "ingress": + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_dst=%s " %tier_cidr + + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_src=%s " %tier_cidr + + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + else: + if action == "deny": + if direction == "ingress": + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_src=%s " %source_cidr + + " nw_dst=%s " %tier_cidr + + " nw_proto=%s " %protocol + " actions=drop" + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_src=%s " %tier_cidr + + " nw_dst=%s " %source_cidr + + " nw_proto=%s " %protocol + " actions=drop" + "\n") + if action == "allow": + if direction == "ingress": + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_src=%s "%source_cidr + + " nw_dst=%s " %tier_cidr + + " nw_proto=%s " %protocol + + " actions=resubmit(,%s)"%resubmit_table + "\n") + else: + ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " + + " tp_dst=%s " %port + " nw_src=%s "%tier_cidr + + " nw_dst=%s " %source_cidr + + " nw_proto=%s " %protocol + + " 
actions=resubmit(,%s)"%resubmit_table + "\n") + port = port + 1 + + # add a default rule in egress table to allow packets (so forward packet to L3 lookup table) + ofspec.write("table=%s " %EGRESS_ACL_TABLE + " priority=0 actions=resubmit(,%s)" %L3_LOOKUP_TABLE + "\n") + + # add a default rule in ingress table to drop packets + ofspec.write("table=%s " %INGRESS_ACL_TABLE + " priority=0 actions=drop" + "\n") + + # First flush current ingress and egress ACL's before re-applying the ACL's + del_flows(bridge, table=EGRESS_ACL_TABLE) + del_flows(bridge, table=INGRESS_ACL_TABLE) + + ofspec.seek(0) + logging.debug("Adding below flows rules Ingress & Egress ACL tables:\n" + ofspec.read()) + ofspec.close() + + # update bridge with the flow-rules for ingress and egress ACL's added in the file in one attempt + do_cmd([OFCTL_PATH, 'add-flows', bridge, ofspec_filename]) + + # now that we updated the bridge with flow rules delete the file. + os.remove(ofspec_filename) + + return "SUCCESS: successfully configured bridge as per the latest routing policies update with " \ + "sequence no: %s"%sequence_no + + except Exception as e: + error_message = "An unexpected error occurred while configuring bridge " + bridge + \ + " as per latest VPC's routing policy update with sequence number %s." %sequence_no + logging.debug(error_message + " due to " + str(e)) + if os.path.isfile(ofspec_filename): + os.remove(ofspec_filename) + raise error_message + +# configures bridge L2 flooding rules stored in table=2. Single bridge is used for all the tiers of VPC. So controlled +# flooding is required to restrict the broadcast to only to the ports (vifs and tunnel interfaces) in the tier. Also +# packets arrived from the tunnel ports should not be flooded on the other tunnel ports. +def update_flooding_rules_on_port_plug_unplug(bridge, interface, command, if_network_id): + + class tier_ports: + tier_vif_ofports = [] + tier_tunnelif_ofports = [] + tier_all_ofports = [] + + logging.debug("Updating the flooding rules on bridge " + bridge + " as interface %s" %interface + + " is %s"%command + " now.") + try: + + if not os.path.exists('/var/run/cloud'): + os.makedirs('/var/run/cloud') + + # create a temporary file to store OpenFlow rules corresponding L2 flooding table + ofspec_filename = "/var/run/cloud/" + bridge + "-" +interface + "-" + command + ".ofspec" + ofspec = open(ofspec_filename, 'w+') + + all_tiers = dict() + + vsctl_output = do_cmd([VSCTL_PATH, 'list-ports', bridge]) + ports = vsctl_output.split('\n') + + for port in ports: + + if_ofport = do_cmd([VSCTL_PATH, 'get', 'Interface', port, 'ofport']) + + if port.startswith('vif'): + network_id = get_network_id_for_vif(port) + if network_id not in all_tiers.keys(): + all_tiers[network_id] = tier_ports() + tier_ports_info = all_tiers[network_id] + tier_ports_info.tier_vif_ofports.append(if_ofport) + tier_ports_info.tier_all_ofports.append(if_ofport) + all_tiers[network_id] = tier_ports_info + + if port.startswith('t'): + network_id = get_network_id_for_tunnel_port(port)[1:-1] + if network_id not in all_tiers.keys(): + all_tiers[network_id] = tier_ports() + tier_ports_info = all_tiers[network_id] + tier_ports_info.tier_tunnelif_ofports.append(if_ofport) + tier_ports_info.tier_all_ofports.append(if_ofport) + all_tiers[network_id] = tier_ports_info + + for network_id, tier_ports_info in all_tiers.items(): + if len(tier_ports_info.tier_all_ofports) == 1 : + continue + + # for a packet arrived from tunnel port, flood only on to VIF ports connected to bridge for this tier + for 
+
+
+# Configures the bridge's L2 flooding rules stored in table=2. A single bridge is used for all the tiers of a VPC, so
+# controlled flooding is required to restrict a broadcast to only the ports (VIFs and tunnel interfaces) in the tier.
+# Also, packets arriving from tunnel ports should not be flooded onto the other tunnel ports.
+def update_flooding_rules_on_port_plug_unplug(bridge, interface, command, if_network_id):
+
+    class tier_ports:
+        tier_vif_ofports = []
+        tier_tunnelif_ofports = []
+        tier_all_ofports = []
+
+    logging.debug("Updating the flooding rules on bridge " + bridge + " as interface %s" % interface +
+                  " is %s" % command + " now.")
+    try:
+
+        if not os.path.exists('/var/run/cloud'):
+            os.makedirs('/var/run/cloud')
+
+        # create a temporary file to store OpenFlow rules corresponding to the L2 flooding table
+        ofspec_filename = "/var/run/cloud/" + bridge + "-" + interface + "-" + command + ".ofspec"
+        ofspec = open(ofspec_filename, 'w+')
+
+        all_tiers = dict()
+
+        vsctl_output = do_cmd([VSCTL_PATH, 'list-ports', bridge])
+        ports = vsctl_output.split('\n')
+
+        for port in ports:
+
+            if_ofport = do_cmd([VSCTL_PATH, 'get', 'Interface', port, 'ofport'])
+
+            if port.startswith('vif'):
+                network_id = get_network_id_for_vif(port)
+                if network_id not in all_tiers.keys():
+                    all_tiers[network_id] = tier_ports()
+                tier_ports_info = all_tiers[network_id]
+                tier_ports_info.tier_vif_ofports.append(if_ofport)
+                tier_ports_info.tier_all_ofports.append(if_ofport)
+                all_tiers[network_id] = tier_ports_info
+
+            if port.startswith('t'):
+                network_id = get_network_id_for_tunnel_port(port)[1:-1]
+                if network_id not in all_tiers.keys():
+                    all_tiers[network_id] = tier_ports()
+                tier_ports_info = all_tiers[network_id]
+                tier_ports_info.tier_tunnelif_ofports.append(if_ofport)
+                tier_ports_info.tier_all_ofports.append(if_ofport)
+                all_tiers[network_id] = tier_ports_info
+
+        for network_id, tier_ports_info in all_tiers.items():
+            if len(tier_ports_info.tier_all_ofports) == 1:
+                continue
+
+            # for a packet arriving from a tunnel port, flood only onto the VIF ports connected to the bridge for this tier
+            for port in tier_ports_info.tier_tunnelif_ofports:
+                action = "".join("output:%s," % ofport for ofport in tier_ports_info.tier_vif_ofports)[:-1]
+                ofspec.write("table=%s " % L2_FLOOD_TABLE + " priority=1100 in_port=%s " % port +
+                             "actions=%s " % action + "\n")
+
+            # for a packet arriving from a VIF port, send it on all VIF and tunnel ports corresponding to the tier,
+            # excluding the port on which the packet arrived
+            for port in tier_ports_info.tier_vif_ofports:
+                tier_all_ofports_copy = copy.copy(tier_ports_info.tier_all_ofports)
+                tier_all_ofports_copy.remove(port)
+                action = "".join("output:%s," % ofport for ofport in tier_all_ofports_copy)[:-1]
+                ofspec.write("table=%s " % L2_FLOOD_TABLE + " priority=1100 in_port=%s " % port +
+                             "actions=%s " % action + "\n")
+
+        # add a default rule in the L2 flood table to drop the packet
+        ofspec.write("table=%s " % L2_FLOOD_TABLE + " priority=0 actions=drop")
+
+        # First flush the current L2 flooding table before re-populating it
+        del_flows(bridge, table=L2_FLOOD_TABLE)
+
+        ofspec.seek(0)
+        logging.debug("Adding below flow rules in the L2 flooding table: \n" + ofspec.read())
+        ofspec.close()
+
+        # update the bridge with the flooding rules from the file in one attempt
+        do_cmd([OFCTL_PATH, 'add-flows', bridge, ofspec_filename])
+
+        # now that the bridge is updated with the flow rules, delete the file.
+        os.remove(ofspec_filename)
+
+        logging.debug("successfully configured bridge %s as per the latest flooding rules" % bridge)
+
+    except Exception as e:
+        if os.path.isfile(ofspec_filename):
+            os.remove(ofspec_filename)
+        error_message = "An unexpected error occurred while updating the flooding rules for the bridge " + \
+                        bridge + " when interface %s" % interface + " is %s" % command
+        logging.debug(error_message + " due to " + str(e))
+        # python3 does not allow raising a plain string; wrap it in PluginError
+        raise PluginError(error_message)
+
+
+def is_regular_tunnel_network(xs_nw_uuid):
+    cmd = [XE_PATH, "network-param-get", "uuid=%s" % xs_nw_uuid, "param-name=other-config",
+           "param-key=is-ovs-tun-network", "--minimal"]
+    logging.debug("Executing:%s", cmd)
+    pipe = subprocess.PIPE
+    # universal_newlines=True keeps the output a str under python3
+    proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
+                            stderr=pipe, close_fds=True,
+                            universal_newlines=True)
+    ret_code = proc.wait()
+    if ret_code:
+        return False
+
+    output = proc.stdout.read()
+    if output.endswith('\n'):
+        output = output[:-1]
+    return output
+
+
+def is_vpc_network_with_distributed_routing(xs_nw_uuid):
+    cmd = [XE_PATH, "network-param-get", "uuid=%s" % xs_nw_uuid, "param-name=other-config",
+           "param-key=is-ovs-vpc-distributed-vr-network", "--minimal"]
+    logging.debug("Executing:%s", cmd)
+    pipe = subprocess.PIPE
+    # universal_newlines=True keeps the output a str under python3
+    proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
+                            stderr=pipe, close_fds=True,
+                            universal_newlines=True)
+    ret_code = proc.wait()
+    if ret_code:
+        return False
+
+    output = proc.stdout.read()
+    if output.endswith('\n'):
+        output = output[:-1]
+    return output
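+
+# Illustrative sketch (not part of the original plugin): the shape of the VPC
+# JSON payload consumed above through jsonLoader, inferred from the attribute
+# accesses in this module. Field values are examples only.
+#
+#   {"vpc": {
+#       "cidr": "10.1.0.0/16",
+#       "vms": [{"hostid": 1,
+#                "nics": [{"macaddress": "02:00:11:22:33:44",
+#                          "ipaddress": "10.1.1.10",
+#                          "networkuuid": "<tier-network-uuid>"}]}],
+#       "hosts": [{"hostid": 1, "ipaddress": "192.168.0.11"}],
+#       "tiers": [{"networkuuid": "<tier-network-uuid>",
+#                  "cidr": "10.1.1.0/24",
+#                  "gatewaymac": "02:00:55:66:77:88",
+#                  "grekey": 1001,
+#                  "aclid": 10}],
+#       "acls": [{"id": 10,
+#                 "aclitems": [{"number": 1, "action": "allow",
+#                               "direction": "ingress",
+#                               "sourceportstart": 80,
+#                               "sourceportend": 80,
+#                               "protocol": "tcp",
+#                               "sourcecidrs": ["0.0.0.0/0"]}]}]}}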
diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/ovs-vif-flows.py b/scripts/vm/hypervisor/xenserver/xenserver84/ovs-vif-flows.py
new file mode 100644
index 00000000000..b182b189aa8
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xenserver84/ovs-vif-flows.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# A simple script for enabling and disabling per-VIF and tunnel interface
+# rules that explicitly allow broadcast/multicast traffic from the tunnel
+# ports and on the port where the VIF is attached
+
+import copy
+import os
+import sys
+import logging
+
+import cloudstack_pluginlib as pluginlib
+
+pluginlib.setup_logging("/var/log/cloud/ovstunnel.log")
+
+
+def clear_flows(bridge, this_vif_ofport, vif_ofports):
+    action = "".join("output:%s," % ofport
+                     for ofport in vif_ofports)[:-1]
+    # Remove flow entries originating from the given ofport
+    pluginlib.del_flows(bridge, in_port=this_vif_ofport)
+    # The following will remove the port being deleted from the actions
+    pluginlib.add_flow(bridge, priority=1100,
+                       dl_dst='ff:ff:ff:ff:ff:ff', actions=action)
+    pluginlib.add_flow(bridge, priority=1100,
+                       nw_dst='224.0.0.0/24', actions=action)
+
+
+def apply_flows(bridge, this_vif_ofport, vif_ofports):
+    action = "".join("output:%s," % ofport
+                     for ofport in vif_ofports)[:-1]
+    # Ensure {b|m}casts sent from VIF ports are always allowed
+    pluginlib.add_flow(bridge, priority=1200,
+                       in_port=this_vif_ofport,
+                       dl_dst='ff:ff:ff:ff:ff:ff',
+                       actions='NORMAL')
+    pluginlib.add_flow(bridge, priority=1200,
+                       in_port=this_vif_ofport,
+                       nw_dst='224.0.0.0/24',
+                       actions='NORMAL')
+    # Ensure {b|m}casts are always propagated to VIF ports
+    pluginlib.add_flow(bridge, priority=1100,
+                       dl_dst='ff:ff:ff:ff:ff:ff', actions=action)
+    pluginlib.add_flow(bridge, priority=1100,
+                       nw_dst='224.0.0.0/24', actions=action)
+
+
+def clear_rules(vif):
+    try:
+        delcmd = "/sbin/ebtables -t nat -L PREROUTING | grep " + vif
+        delcmds = pluginlib.do_cmd(['/bin/bash', '-c', delcmd]).split('\n')
+        for cmd in delcmds:
+            try:
+                cmd = '/sbin/ebtables -t nat -D PREROUTING ' + cmd
+                pluginlib.do_cmd(['/bin/bash', '-c', cmd])
+            except pluginlib.PluginError:
+                # the rule may already be gone; keep deleting the rest
+                pass
+    except pluginlib.PluginError:
+        # grep exits non-zero when no rules reference this vif
+        pass
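+
+# Illustrative sketch (not part of the original script): the script is invoked
+# (e.g. from the xen-ovs-vif-flows.rules udev rule shipped alongside it) as
+#
+#   ovs-vif-flows.py online vif-5-0
+#
+# main() below splits the second argument on '-' and rebuilds the XenServer
+# interface name:
+#
+#   vif_name, dom_id, vif_index = "vif-5-0".split('-')     # ('vif', '5', '0')
+#   this_vif = "%s%s.%s" % (vif_name, dom_id, vif_index)   # "vif5.0"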
+
+
+def main(command, vif_raw):
+    if command not in ('online', 'offline'):
+        return
+
+    vif_name, dom_id, vif_index = vif_raw.split('-')
+    # validate vif and dom-id
+    this_vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
+    # Make sure the networking stack is not linux bridge!
+    net_stack = pluginlib.do_cmd(['cat', '/etc/xensource/network.conf'])
+    if net_stack.lower() == "bridge":
+        if command == 'offline':
+            clear_rules(this_vif)
+        # Nothing to do here!
+        return
+
+    bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'iface-to-br', this_vif])
+
+    # find the xs network for this bridge, verify whether it is used for an ovs tunnel network
+    xs_nw_uuid = pluginlib.do_cmd([pluginlib.XE_PATH, "network-list",
+                                   "bridge=%s" % bridge, "--minimal"])
+
+    ovs_tunnel_network = pluginlib.is_regular_tunnel_network(xs_nw_uuid)
+
+    # handle the case where the network is a regular tunnel network
+    if ovs_tunnel_network == 'True':
+        vlan = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'br-to-vlan', bridge])
+        if vlan != '0':
+            # We need the REAL bridge name
+            bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
+                                       'br-to-parent', bridge])
+        vsctl_output = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
+                                         'list-ports', bridge])
+        vifs = vsctl_output.split('\n')
+        vif_ofports = []
+        vif_other_ofports = []
+        for vif in vifs:
+            vif_ofport = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'get',
+                                           'Interface', vif, 'ofport'])
+            if this_vif == vif:
+                this_vif_ofport = vif_ofport
+            if vif.startswith('vif'):
+                vif_ofports.append(vif_ofport)
+
+        if command == 'offline':
+            vif_other_ofports = copy.copy(vif_ofports)
+            vif_other_ofports.remove(this_vif_ofport)
+            clear_flows(bridge, this_vif_ofport, vif_other_ofports)
+
+        if command == 'online':
+            apply_flows(bridge, this_vif_ofport, vif_ofports)
+
+    # handle the case where the bridge is set up for a VPC that is enabled for distributed routing
+    ovs_vpc_distributed_vr_network = pluginlib.is_vpc_network_with_distributed_routing(xs_nw_uuid)
+    if ovs_vpc_distributed_vr_network == 'True':
+        vlan = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'br-to-vlan', bridge])
+        if vlan != '0':
+            # We need the REAL bridge name
+            bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
+                                       'br-to-parent', bridge])
+        vif_network_id = pluginlib.get_network_id_for_vif(this_vif)
+        pluginlib.update_flooding_rules_on_port_plug_unplug(bridge, this_vif, command, vif_network_id)
+
+    return
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print("usage: {} [online|offline] vif-domid-idx".format(os.path.basename(sys.argv[0])))
+        sys.exit(1)
+    else:
+        command, vif_raw = sys.argv[1:3]
+        main(command, vif_raw)
diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/patch b/scripts/vm/hypervisor/xenserver/xenserver84/patch
new file mode 100644
index 00000000000..88a1a96765a
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xenserver84/patch
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This file specifies the files that need
+# to be transferred over to the XenServer.
+# The format of this file is as follows:
+# [Name of file]=[source path],[file permission],[destination path]
+# [destination path] is required.
+# If [file permission] is missing, 755 is assumed.
+# If [source path] is missing, it looks in the same
+# directory as the patch file.
+# If [source path] starts with '/', then it is absolute path. +# If [source path] starts with '~', then it is path relative to management server home directory. +# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file. +vmops=,0755,/etc/xapi.d/plugins +vmopspremium=,0755,/etc/xapi.d/plugins +vmopsSnapshot=,0755,/etc/xapi.d/plugins +xen-ovs-vif-flows.rules=..,0644,/etc/udev/rules.d +ovs-vif-flows.py=,0755,/etc/xapi.d/plugins +cloudstack_plugins.conf=..,0644,/etc/xensource +cloudstack_pluginlib.py=,0755,/etc/xapi.d/plugins +ovstunnel=..,0755,/etc/xapi.d/plugins +cloud-plugin-storage=,0755,/etc/xapi.d/plugins +agent.zip=../../../../../vms,0644,/opt/xensource/packages/resources/ +cloud-scripts.tgz=../../../../../vms,0644,/opt/xensource/packages/resources/ +patch-sysvms.sh=../../../../../vms,0644,/opt/xensource/packages/resources/ +id_rsa.cloud=../../../systemvm,0600,/root/.ssh +network_info.sh=..,0755,/opt/cloud/bin +setupxenserver.sh=..,0755,/opt/cloud/bin +make_migratable.sh=..,0755,/opt/cloud/bin +setup_iscsi.sh=..,0755,/opt/cloud/bin +pingtest.sh=../../..,0755,/opt/cloud/bin +router_proxy.sh=../../../../network/domr/,0755,/opt/cloud/bin +cloud-setup-bonding.sh=..,0755,/opt/cloud/bin +kill_copy_process.sh=..,0755,/opt/cloud/bin +setup_heartbeat_sr.sh=..,0755,/opt/cloud/bin +setup_heartbeat_file.sh=..,0755,/opt/cloud/bin +check_heartbeat.sh=..,0755,/opt/cloud/bin +xenheartbeat.sh=..,0755,/opt/cloud/bin +launch_hb.sh=..,0755,/opt/cloud/bin +upgrade_snapshot.sh=..,0755,/opt/cloud/bin +cloud-clean-vlan.sh=..,0755,/opt/cloud/bin +cloud-prepare-upgrade.sh=..,0755,/opt/cloud/bin +swift=,0755,/opt/cloud/bin +swiftxenserver=..,0755,/etc/xapi.d/plugins +s3xenserver=..,0755,/etc/xapi.d/plugins +add_to_vcpus_params_live.sh=..,0755,/opt/cloud/bin +ovs-pvlan=..,0755,/etc/xapi.d/plugins +ovs-pvlan-dhcp-host.sh=../../../network,0755,/opt/cloud/bin +ovs-pvlan-vm.sh=../../../network,0755,/opt/cloud/bin +ovs-pvlan-cleanup.sh=../../../network,0755,/opt/cloud/bin +ovs-get-dhcp-iface.sh=..,0755,/opt/cloud/bin +ovs-get-bridge.sh=..,0755,/opt/cloud/bin +cloudlog=..,0644,/etc/logrotate.d +update_host_passwd.sh=../..,0755,/opt/cloud/bin +logrotate=..,0755,/etc/cron.hourly diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/swift b/scripts/vm/hypervisor/xenserver/xenserver84/swift new file mode 100644 index 00000000000..d2304a30a7b --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/xenserver84/swift @@ -0,0 +1,1884 @@ +#!/usr/bin/env python3 +# Copyright (c) 2010-2011 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from errno import EEXIST, ENOENT
+try:
+    # python2-only md5 module; under python3 hashlib.md5() provides the
+    # equivalent of the old md5.md5() constructor (best-effort shim)
+    import md5
+except ImportError:
+    import hashlib as md5
+from optparse import OptionParser
+from os import environ, listdir, makedirs, utime
+from os.path import basename, dirname, getmtime, getsize, isdir, join
+try:
+    from Queue import Empty, Queue    # python2
+except ImportError:
+    from queue import Empty, Queue    # python3
+from sys import argv, exc_info, exit, stderr, stdout
+from threading import enumerate as threading_enumerate, Thread
+from time import sleep
+from traceback import format_exception
+
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Inclusion of swift.common.client for convenience of single file distribution
+
+import socket
+try:
+    from cStringIO import StringIO    # python2
+except ImportError:
+    from io import StringIO           # python3
+from re import compile, DOTALL
+from tokenize import generate_tokens, STRING, NAME, OP
+try:
+    from urllib import quote as _quote, unquote    # python2
+    from urlparse import urlparse, urlunparse
+except ImportError:
+    from urllib.parse import quote as _quote, unquote, \
+        urlparse, urlunparse                       # python3
+
+try:
+    from eventlet.green.httplib import HTTPException, HTTPSConnection
+except ImportError:
+    try:
+        from httplib import HTTPException, HTTPSConnection        # python2
+    except ImportError:
+        from http.client import HTTPException, HTTPSConnection    # python3
+
+try:
+    from eventlet import sleep
+except ImportError:
+    from time import sleep
+
+try:
+    from swift.common.bufferedhttp \
+        import BufferedHTTPConnection as HTTPConnection
+except ImportError:
+    try:
+        from eventlet.green.httplib import HTTPConnection
+    except ImportError:
+        try:
+            from httplib import HTTPConnection        # python2
+        except ImportError:
+            from http.client import HTTPConnection    # python3
+
+try:
+    unicode
+except NameError:
+    # python3 has no separate unicode type; all str are unicode
+    unicode = str
+
+
+def quote(value, safe='/'):
+    """
+    Patched version of urllib.quote that encodes utf8 strings before quoting
+    """
+    if isinstance(value, unicode):
+        value = value.encode('utf8')
+    return _quote(value, safe)
+
+
+# look for a real json parser first
+try:
+    # simplejson is popular and pretty good
+    from simplejson import loads as json_loads
+except ImportError:
+    try:
+        # 2.6 will have a json module in the stdlib
+        from json import loads as json_loads
+    except ImportError:
+        # fall back on local parser otherwise
+        comments = compile(r'/\*.*\*/|//[^\r\n]*', DOTALL)
+
+        def json_loads(string):
+            '''
+            Fairly competent json parser exploiting the python tokenizer and
+            eval().
-- From python-cloudfiles + + _loads(serialized_json) -> object + ''' + try: + res = [] + consts = {'true': True, 'false': False, 'null': None} + string = '(' + comments.sub('', string) + ')' + for type, val, _junk, _junk, _junk in \ + generate_tokens(StringIO(string).readline): + if (type == OP and val not in '[]{}:,()-') or \ + (type == NAME and val not in consts): + raise AttributeError() + elif type == STRING: + res.append('u') + res.append(val.replace('\\/', '/')) + else: + res.append(val) + return eval(''.join(res), {}, consts) + except Exception: + raise AttributeError() + + +class ClientException(Exception): + + def __init__(self, msg, http_scheme='', http_host='', http_port='', + http_path='', http_query='', http_status=0, http_reason='', + http_device=''): + Exception.__init__(self, msg) + self.msg = msg + self.http_scheme = http_scheme + self.http_host = http_host + self.http_port = http_port + self.http_path = http_path + self.http_query = http_query + self.http_status = http_status + self.http_reason = http_reason + self.http_device = http_device + + def __str__(self): + a = self.msg + b = '' + if self.http_scheme: + b += '%s://' % self.http_scheme + if self.http_host: + b += self.http_host + if self.http_port: + b += ':%s' % self.http_port + if self.http_path: + b += self.http_path + if self.http_query: + b += '?%s' % self.http_query + if self.http_status: + if b: + b = '%s %s' % (b, self.http_status) + else: + b = str(self.http_status) + if self.http_reason: + if b: + b = '%s %s' % (b, self.http_reason) + else: + b = '- %s' % self.http_reason + if self.http_device: + if b: + b = '%s: device %s' % (b, self.http_device) + else: + b = 'device %s' % self.http_device + return b and '%s: %s' % (a, b) or a + + +def http_connection(url, proxy=None): + """ + Make an HTTPConnection or HTTPSConnection + + :param url: url to connect to + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :returns: tuple of (parsed url, connection object) + :raises ClientException: Unable to handle protocol scheme + """ + parsed = urlparse(url) + proxy_parsed = parsed + if proxy: + proxy_parsed = urlparse(proxy) + if parsed[0] == 'http': + conn = HTTPConnection(proxy_parsed[1]) + elif parsed[0] == 'https': + conn = HTTPSConnection(proxy_parsed[1]) + else: + raise ClientException('Cannot handle protocol scheme %s for url %s' % + (parsed[0], repr(url))) + if proxy: + conn._set_tunnel(parsed.hostname, parsed.port) + return parsed, conn + + +def get_auth(url, user, key, snet=False): + """ + Get authentication/authorization credentials. + + The snet parameter is used for Rackspace's ServiceNet internal network + implementation. In this function, it simply adds *snet-* to the beginning + of the host name for the returned storage URL. With Rackspace Cloud Files, + use of this network path causes no bandwidth charges but requires the + client to be running on Rackspace's ServiceNet network. 
+ + :param url: authentication/authorization URL + :param user: user to authenticate as + :param key: key or password for authorization + :param snet: use SERVICENET internal network (see above), default is False + :returns: tuple of (storage URL, auth token) + :raises ClientException: HTTP GET request to auth URL failed + """ + parsed, conn = http_connection(url) + conn.request('GET', parsed[2], '', + {'X-Auth-User': user, 'X-Auth-Key': key}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Auth GET failed', http_scheme=parsed[0], + http_host=conn.host, http_port=conn.port, + http_path=parsed[2], http_status=resp.status, + http_reason=resp.reason) + url = resp.getheader('x-storage-url') + if snet: + parsed = list(urlparse(url)) + # Second item in the list is the netloc + parsed[1] = 'snet-' + parsed[1] + url = urlunparse(parsed) + return url, resp.getheader('x-storage-token', + resp.getheader('x-auth-token')) + + +def get_account(url, token, marker=None, limit=None, prefix=None, + http_conn=None, full_listing=False): + """ + Get a listing of containers for the account. + + :param url: storage URL + :param token: auth token + :param marker: marker query + :param limit: limit query + :param prefix: prefix query + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param full_listing: if True, return a full listing, else returns a max + of 10000 listings + :returns: a tuple of (response headers, a list of containers) The response + headers will be a dict and all header names will be lowercase. + :raises ClientException: HTTP GET request failed + """ + if not http_conn: + http_conn = http_connection(url) + if full_listing: + rv = get_account(url, token, marker, limit, prefix, http_conn) + listing = rv[1] + while listing: + marker = listing[-1]['name'] + listing = \ + get_account(url, token, marker, limit, prefix, http_conn)[1] + if listing: + rv[1].extend(listing) + return rv + parsed, conn = http_conn + qs = 'format=json' + if marker: + qs += '&marker=%s' % quote(marker) + if limit: + qs += '&limit=%d' % limit + if prefix: + qs += '&prefix=%s' % quote(prefix) + conn.request('GET', '%s?%s' % (parsed[2], qs), '', + {'X-Auth-Token': token}) + resp = conn.getresponse() + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + if resp.status < 200 or resp.status >= 300: + resp.read() + raise ClientException('Account GET failed', http_scheme=parsed[0], + http_host=conn.host, http_port=conn.port, + http_path=parsed[2], http_query=qs, http_status=resp.status, + http_reason=resp.reason) + if resp.status == 204: + resp.read() + return resp_headers, [] + return resp_headers, json_loads(resp.read()) + + +def head_account(url, token, http_conn=None): + """ + Get account stats. 
+
+    :param url: storage URL
+    :param token: auth token
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :returns: a dict containing the response's headers (all header names will
+              be lowercase)
+    :raises ClientException: HTTP HEAD request failed
+    """
+    if http_conn:
+        parsed, conn = http_conn
+    else:
+        parsed, conn = http_connection(url)
+    conn.request('HEAD', parsed[2], '', {'X-Auth-Token': token})
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Account HEAD failed', http_scheme=parsed[0],
+                              http_host=conn.host, http_port=conn.port,
+                              http_path=parsed[2], http_status=resp.status,
+                              http_reason=resp.reason)
+    resp_headers = {}
+    for header, value in resp.getheaders():
+        resp_headers[header.lower()] = value
+    return resp_headers
+
+
+def post_account(url, token, headers, http_conn=None):
+    """
+    Update an account's metadata.
+
+    :param url: storage URL
+    :param token: auth token
+    :param headers: additional headers to include in the request
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :raises ClientException: HTTP POST request failed
+    """
+    if http_conn:
+        parsed, conn = http_conn
+    else:
+        parsed, conn = http_connection(url)
+    headers['X-Auth-Token'] = token
+    conn.request('POST', parsed[2], '', headers)
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        # was http_path=path, but no 'path' variable exists in this function
+        raise ClientException('Account POST failed',
+            http_scheme=parsed[0], http_host=conn.host,
+            http_port=conn.port, http_path=parsed[2], http_status=resp.status,
+            http_reason=resp.reason)
+
+
+def get_container(url, token, container, marker=None, limit=None,
+                  prefix=None, delimiter=None, http_conn=None,
+                  full_listing=False):
+    """
+    Get a listing of objects for the container.
+
+    :param url: storage URL
+    :param token: auth token
+    :param container: container name to get a listing for
+    :param marker: marker query
+    :param limit: limit query
+    :param prefix: prefix query
+    :param delimiter: string to delimit the queries on
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :param full_listing: if True, return a full listing, else returns a max
+                         of 10000 listings
+    :returns: a tuple of (response headers, a list of objects) The response
+              headers will be a dict and all header names will be lowercase.
+ :raises ClientException: HTTP GET request failed + """ + if not http_conn: + http_conn = http_connection(url) + if full_listing: + rv = get_container(url, token, container, marker, limit, prefix, + delimiter, http_conn) + listing = rv[1] + while listing: + if not delimiter: + marker = listing[-1]['name'] + else: + marker = listing[-1].get('name', listing[-1].get('subdir')) + listing = get_container(url, token, container, marker, limit, + prefix, delimiter, http_conn)[1] + if listing: + rv[1].extend(listing) + return rv + parsed, conn = http_conn + path = '%s/%s' % (parsed[2], quote(container)) + qs = 'format=json' + if marker: + qs += '&marker=%s' % quote(marker) + if limit: + qs += '&limit=%d' % limit + if prefix: + qs += '&prefix=%s' % quote(prefix) + if delimiter: + qs += '&delimiter=%s' % quote(delimiter) + conn.request('GET', '%s?%s' % (path, qs), '', {'X-Auth-Token': token}) + resp = conn.getresponse() + if resp.status < 200 or resp.status >= 300: + resp.read() + raise ClientException('Container GET failed', + http_scheme=parsed[0], http_host=conn.host, + http_port=conn.port, http_path=path, http_query=qs, + http_status=resp.status, http_reason=resp.reason) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + if resp.status == 204: + resp.read() + return resp_headers, [] + return resp_headers, json_loads(resp.read()) + + +def head_container(url, token, container, http_conn=None): + """ + Get container stats. + + :param url: storage URL + :param token: auth token + :param container: container name to get stats for + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed[2], quote(container)) + conn.request('HEAD', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container HEAD failed', + http_scheme=parsed[0], http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def put_container(url, token, container, headers=None, http_conn=None): + """ + Create a container + + :param url: storage URL + :param token: auth token + :param container: container name to create + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP PUT request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed[2], quote(container)) + if not headers: + headers = {} + headers['X-Auth-Token'] = token + conn.request('PUT', path, '', headers) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container PUT failed', + http_scheme=parsed[0], http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + + +def post_container(url, token, container, headers, http_conn=None): + """ + Update a container's metadata. 
+ + :param url: storage URL + :param token: auth token + :param container: container name to update + :param headers: additional headers to include in the request + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP POST request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed[2], quote(container)) + headers['X-Auth-Token'] = token + conn.request('POST', path, '', headers) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container POST failed', + http_scheme=parsed[0], http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + + +def delete_container(url, token, container, http_conn=None): + """ + Delete a container + + :param url: storage URL + :param token: auth token + :param container: container name to delete + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :raises ClientException: HTTP DELETE request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s' % (parsed[2], quote(container)) + conn.request('DELETE', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Container DELETE failed', + http_scheme=parsed[0], http_host=conn.host, + http_port=conn.port, http_path=path, http_status=resp.status, + http_reason=resp.reason) + + +def get_object(url, token, container, name, http_conn=None, + resp_chunk_size=None): + """ + Get an object + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: object name to get + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param resp_chunk_size: if defined, chunk size of data to read. NOTE: If + you specify a resp_chunk_size you must fully read + the object's contents before making another + request. + :returns: a tuple of (response headers, the object's contents) The response + headers will be a dict and all header names will be lowercase. 
+ :raises ClientException: HTTP GET request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed[2], quote(container), quote(name)) + conn.request('GET', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + if resp.status < 200 or resp.status >= 300: + resp.read() + raise ClientException('Object GET failed', http_scheme=parsed[0], + http_host=conn.host, http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason) + if resp_chunk_size: + + def _object_body(): + buf = resp.read(resp_chunk_size) + while buf: + yield buf + buf = resp.read(resp_chunk_size) + object_body = _object_body() + else: + object_body = resp.read() + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers, object_body + + +def head_object(url, token, container, name, http_conn=None): + """ + Get object info + + :param url: storage URL + :param token: auth token + :param container: container name that the object is in + :param name: object name to get info for + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :returns: a dict containing the response's headers (all header names will + be lowercase) + :raises ClientException: HTTP HEAD request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url) + path = '%s/%s/%s' % (parsed[2], quote(container), quote(name)) + conn.request('HEAD', path, '', {'X-Auth-Token': token}) + resp = conn.getresponse() + resp.read() + if resp.status < 200 or resp.status >= 300: + raise ClientException('Object HEAD failed', http_scheme=parsed[0], + http_host=conn.host, http_port=conn.port, http_path=path, + http_status=resp.status, http_reason=resp.reason) + resp_headers = {} + for header, value in resp.getheaders(): + resp_headers[header.lower()] = value + return resp_headers + + +def put_object(url, token=None, container=None, name=None, contents=None, + content_length=None, etag=None, chunk_size=65536, + content_type=None, headers=None, http_conn=None, proxy=None): + """ + Put an object + + :param url: storage URL + :param token: auth token; if None, no token will be sent + :param container: container name that the object is in; if None, the + container name is expected to be part of the url + :param name: object name to put; if None, the object name is expected to be + part of the url + :param contents: a string or a file like object to read object data from; + if None, a zero-byte put will be done + :param content_length: value to send as content-length header; also limits + the amount read from contents; if None, it will be + computed via the contents or chunked transfer + encoding will be used + :param etag: etag of contents; if None, no etag will be sent + :param chunk_size: chunk size of data to write; default 65536 + :param content_type: value to send as content-type header; if None, no + content-type will be set (remote end will likely try + to auto-detect it) + :param headers: additional headers to include in the request, if any + :param http_conn: HTTP connection object (If None, it will create the + conn object) + :param proxy: proxy to connect through, if any; None by default; str of the + format 'http://127.0.0.1:8888' to set one + :returns: etag from server response + :raises ClientException: HTTP PUT request failed + """ + if http_conn: + parsed, conn = http_conn + else: + parsed, conn = http_connection(url, 
proxy=proxy)
+    path = parsed[2]
+    if container:
+        path = '%s/%s' % (path.rstrip('/'), quote(container))
+    if name:
+        path = '%s/%s' % (path.rstrip('/'), quote(name))
+    if headers:
+        headers = dict(headers)
+    else:
+        headers = {}
+    if token:
+        headers['X-Auth-Token'] = token
+    if etag:
+        headers['ETag'] = etag.strip('"')
+    if content_length is not None:
+        headers['content-length'] = str(content_length)
+    else:
+        for n, v in headers.items():
+            if n.lower() == 'content-length':
+                content_length = int(v)
+    if content_type is not None:
+        headers['content-type'] = content_type
+    if not contents:
+        headers['content-length'] = '0'
+    if hasattr(contents, 'read'):
+        conn.putrequest('PUT', path)
+        for header, value in headers.items():
+            conn.putheader(header, value)
+        if content_length is None:
+            conn.putheader('Transfer-Encoding', 'chunked')
+            conn.endheaders()
+            chunk = contents.read(chunk_size)
+            while chunk:
+                if isinstance(chunk, str):
+                    # ensure bytes on the wire under python3
+                    chunk = chunk.encode('utf-8')
+                conn.send(b'%x\r\n%s\r\n' % (len(chunk), chunk))
+                chunk = contents.read(chunk_size)
+            conn.send(b'0\r\n\r\n')
+        else:
+            conn.endheaders()
+            left = content_length
+            while left > 0:
+                size = chunk_size
+                if size > left:
+                    size = left
+                chunk = contents.read(size)
+                conn.send(chunk)
+                left -= len(chunk)
+    else:
+        conn.request('PUT', path, contents, headers)
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Object PUT failed', http_scheme=parsed[0],
+            http_host=conn.host, http_port=conn.port, http_path=path,
+            http_status=resp.status, http_reason=resp.reason)
+    return resp.getheader('etag', '').strip('"')
+
+
+def post_object(url, token, container, name, headers, http_conn=None):
+    """
+    Update object metadata
+
+    :param url: storage URL
+    :param token: auth token
+    :param container: container name that the object is in
+    :param name: name of the object to update
+    :param headers: additional headers to include in the request
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :raises ClientException: HTTP POST request failed
+    """
+    if http_conn:
+        parsed, conn = http_conn
+    else:
+        parsed, conn = http_connection(url)
+    path = '%s/%s/%s' % (parsed[2], quote(container), quote(name))
+    headers['X-Auth-Token'] = token
+    conn.request('POST', path, '', headers)
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Object POST failed', http_scheme=parsed[0],
+            http_host=conn.host, http_port=conn.port, http_path=path,
+            http_status=resp.status, http_reason=resp.reason)
+
+
+def delete_object(url, token=None, container=None, name=None, http_conn=None,
+                  headers=None, proxy=None):
+    """
+    Delete object
+
+    :param url: storage URL
+    :param token: auth token; if None, no token will be sent
+    :param container: container name that the object is in; if None, the
+                      container name is expected to be part of the url
+    :param name: object name to delete; if None, the object name is expected
+                 to be part of the url
+    :param http_conn: HTTP connection object (If None, it will create the
+                      conn object)
+    :param headers: additional headers to include in the request
+    :param proxy: proxy to connect through, if any; None by default; str of
+                  the format 'http://127.0.0.1:8888' to set one
+    :raises ClientException: HTTP DELETE request failed
+    """
+    if http_conn:
+        parsed, conn = http_conn
+    else:
+        parsed, conn = http_connection(url, proxy=proxy)
+    path = parsed[2]
+    if container:
+        path = '%s/%s' % (path.rstrip('/'), quote(container))
+    if name:
+        path = '%s/%s' % (path.rstrip('/'), quote(name))
+    if headers:
+        headers = dict(headers)
+    else:
+        headers = {}
+    if token:
+        headers['X-Auth-Token'] = token
+    conn.request('DELETE', path, '', headers)
+    resp = conn.getresponse()
+    resp.read()
+    if resp.status < 200 or resp.status >= 300:
+        raise ClientException('Object DELETE failed',
+            http_scheme=parsed[0], http_host=conn.host,
+            http_port=conn.port, http_path=path, http_status=resp.status,
+            http_reason=resp.reason)
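+# Illustrative usage sketch (endpoint, token, and object names below are
+# placeholders, not part of this plugin): uploading a local file with
+# put_object. Passing a file-like object without content_length exercises
+# the chunked Transfer-Encoding path implemented above.
+#
+#   url = 'https://swift.example.com/v1/AUTH_test'
+#   with open('/tmp/disk.vhd', 'rb') as fp:
+#       etag = put_object(url, token='AUTH_tk_example',
+#                         container='templates', name='disk.vhd',
+#                         contents=fp)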
+
+
+class Connection(object):
+    """Convenience class to make requests that will also retry the request"""
+
+    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
+                 preauthtoken=None, snet=False, starting_backoff=1):
+        """
+        :param authurl: authentication URL
+        :param user: user name to authenticate as
+        :param key: key/password to authenticate with
+        :param retries: Number of times to retry the request before failing
+        :param preauthurl: storage URL (if you have already authenticated)
+        :param preauthtoken: authentication token (if you have already
+                             authenticated)
+        :param snet: use SERVICENET internal network; default is False
+        """
+        self.authurl = authurl
+        self.user = user
+        self.key = key
+        self.retries = retries
+        self.http_conn = None
+        self.url = preauthurl
+        self.token = preauthtoken
+        self.attempts = 0
+        self.snet = snet
+        self.starting_backoff = starting_backoff
+
+    def get_auth(self):
+        return get_auth(self.authurl, self.user, self.key, snet=self.snet)
+
+    def http_connection(self):
+        return http_connection(self.url)
+
+    def _retry(self, reset_func, func, *args, **kwargs):
+        self.attempts = 0
+        backoff = self.starting_backoff
+        while self.attempts <= self.retries:
+            self.attempts += 1
+            try:
+                if not self.url or not self.token:
+                    self.url, self.token = self.get_auth()
+                    self.http_conn = None
+                if not self.http_conn:
+                    self.http_conn = self.http_connection()
+                kwargs['http_conn'] = self.http_conn
+                rv = func(self.url, self.token, *args, **kwargs)
+                return rv
+            except (socket.error, HTTPException):
+                if self.attempts > self.retries:
+                    raise
+                self.http_conn = None
+            except ClientException as err:
+                if self.attempts > self.retries:
+                    raise
+                if err.http_status == 401:
+                    self.url = self.token = None
+                    if self.attempts > 1:
+                        raise
+                elif err.http_status == 408:
+                    self.http_conn = None
+                elif 500 <= err.http_status <= 599:
+                    pass
+                else:
+                    raise
+            sleep(backoff)
+            backoff *= 2
+            if reset_func:
+                reset_func(func, *args, **kwargs)
+
+    def head_account(self):
+        """Wrapper for :func:`head_account`"""
+        return self._retry(None, head_account)
+
+    def get_account(self, marker=None, limit=None, prefix=None,
+                    full_listing=False):
+        """Wrapper for :func:`get_account`"""
+        # TODO(unknown): With full_listing=True this will restart the entire
+        # listing with each retry. Need to make a better version that just
+        # retries where it left off.
+        return self._retry(None, get_account, marker=marker, limit=limit,
+                           prefix=prefix, full_listing=full_listing)
+
+    def post_account(self, headers):
+        """Wrapper for :func:`post_account`"""
+        return self._retry(None, post_account, headers)
+
+    def head_container(self, container):
+        """Wrapper for :func:`head_container`"""
+        return self._retry(None, head_container, container)
+
+    def get_container(self, container, marker=None, limit=None, prefix=None,
+                      delimiter=None, full_listing=False):
+        """Wrapper for :func:`get_container`"""
+        # TODO(unknown): With full_listing=True this will restart the entire
+        # listing with each retry. Need to make a better version that just
+        # retries where it left off.
+        return self._retry(None, get_container, container, marker=marker,
+                           limit=limit, prefix=prefix, delimiter=delimiter,
+                           full_listing=full_listing)
+
+    def put_container(self, container, headers=None):
+        """Wrapper for :func:`put_container`"""
+        return self._retry(None, put_container, container, headers=headers)
+
+    def post_container(self, container, headers):
+        """Wrapper for :func:`post_container`"""
+        return self._retry(None, post_container, container, headers)
+
+    def delete_container(self, container):
+        """Wrapper for :func:`delete_container`"""
+        return self._retry(None, delete_container, container)
+
+    def head_object(self, container, obj):
+        """Wrapper for :func:`head_object`"""
+        return self._retry(None, head_object, container, obj)
+
+    def get_object(self, container, obj, resp_chunk_size=None):
+        """Wrapper for :func:`get_object`"""
+        return self._retry(None, get_object, container, obj,
+                           resp_chunk_size=resp_chunk_size)
+
+    def put_object(self, container, obj, contents, content_length=None,
+                   etag=None, chunk_size=65536, content_type=None,
+                   headers=None):
+        """Wrapper for :func:`put_object`"""
+
+        def _default_reset(*args, **kwargs):
+            raise ClientException('put_object(%r, %r, ...) failure and no '
+                'ability to reset contents for reupload.' % (container, obj))
+
+        reset_func = _default_reset
+        tell = getattr(contents, 'tell', None)
+        seek = getattr(contents, 'seek', None)
+        if tell and seek:
+            orig_pos = tell()
+            reset_func = lambda *a, **k: seek(orig_pos)
+        elif not contents:
+            reset_func = lambda *a, **k: None
+
+        return self._retry(reset_func, put_object, container, obj, contents,
+            content_length=content_length, etag=etag, chunk_size=chunk_size,
+            content_type=content_type, headers=headers)
+
+    def post_object(self, container, obj, headers):
+        """Wrapper for :func:`post_object`"""
+        return self._retry(None, post_object, container, obj, headers)
+
+    def delete_object(self, container, obj):
+        """Wrapper for :func:`delete_object`"""
+        return self._retry(None, delete_object, container, obj)
+
+# End inclusion of swift.common.client
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+
+
+def mkdirs(path):
+    try:
+        makedirs(path)
+    except OSError as err:
+        if err.errno != EEXIST:
+            raise
+
+
+def put_errors_from_threads(threads, error_queue):
+    """
+    Places any errors from the threads into error_queue.
+
+    :param threads: A list of QueueFunctionThread instances.
+    :param error_queue: A queue to put error strings into.
+    :returns: True if any errors were found.
+    """
+    was_error = False
+    for thread in threads:
+        for info in thread.exc_infos:
+            was_error = True
+            if isinstance(info[1], ClientException):
+                error_queue.put(str(info[1]))
+            else:
+                error_queue.put(''.join(format_exception(*info)))
+    return was_error
+
+
+class QueueFunctionThread(Thread):
+
+    def __init__(self, queue, func, *args, **kwargs):
+        """
+        Calls func for each item in queue; func is called with a queued
+        item as the first arg followed by *args and **kwargs. Use the abort
+        attribute to have the thread empty the queue (without processing)
+        and exit.
+        """
+        Thread.__init__(self)
+        self.abort = False
+        self.queue = queue
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+        self.exc_infos = []
+
+    def run(self):
+        try:
+            while True:
+                try:
+                    item = self.queue.get_nowait()
+                    if not self.abort:
+                        self.func(item, *self.args, **self.kwargs)
+                except Empty:
+                    if self.abort:
+                        break
+                    sleep(0.01)
+        except Exception:
+            self.exc_infos.append(exc_info())
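+# Illustrative usage sketch of the retrying Connection wrapper (auth URL and
+# credentials are placeholders): failed calls re-authenticate and back off
+# exponentially via _retry() above before finally raising.
+#
+#   conn = Connection('https://auth.example.com/v1.0', 'user', 'key',
+#                     retries=3)
+#   conn.put_container('backups')
+#   headers, listing = conn.get_container('backups', full_listing=True)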
""" + Thread.__init__(self) + self.abort = False + self.queue = queue + self.func = func + self.args = args + self.kwargs = kwargs + self.exc_infos = [] + + def run(self): + try: + while True: + try: + item = self.queue.get_nowait() + if not self.abort: + self.func(item, *self.args, **self.kwargs) + #self.queue.task_done() + except Empty: + if self.abort: + break + sleep(0.01) + except Exception: + self.exc_infos.append(exc_info()) + + +st_delete_help = ''' +delete --all OR delete container [--leave-segments] [object] [object] ... + Deletes everything in the account (with --all), or everything in a + container, or a list of objects depending on the args given. Segments of + manifest objects will be deleted as well, unless you specify the + --leave-segments option.'''.strip('\n') + + +def st_delete(parser, args, print_queue, error_queue): + parser.add_option('-a', '--all', action='store_true', dest='yes_all', + default=False, help='Indicates that you really want to delete ' + 'everything in the account') + parser.add_option('', '--leave-segments', action='store_true', + dest='leave_segments', default=False, help='Indicates that you want ' + 'the segments of manifest objects left alone') + (options, args) = parse_args(parser, args) + args = args[1:] + if (not args and not options.yes_all) or (args and options.yes_all): + error_queue.put('Usage: %s [options] %s' % + (basename(argv[0]), st_delete_help)) + return + + def _delete_segment(container_obj, conn): + container, obj = container_obj + conn.delete_object(container, obj) + if options.verbose: + if conn.attempts > 2: + print_queue.put('%s/%s [after %d attempts]' % + (container, obj, conn.attempts)) + else: + print_queue.put('%s/%s' % (container, obj)) + + object_queue = Queue(10000) + + def _delete_object(container_obj, conn): + container, obj = container_obj + try: + old_manifest = None + if not options.leave_segments: + try: + old_manifest = conn.head_object(container, obj).get( + 'x-object-manifest') + except ClientException as err: + if err.http_status != 404: + raise + conn.delete_object(container, obj) + if old_manifest: + segment_queue = Queue(10000) + scontainer, sprefix = old_manifest.split('/', 1) + for delobj in conn.get_container(scontainer, + prefix=sprefix)[1]: + segment_queue.put((scontainer, delobj['name'])) + if not segment_queue.empty(): + segment_threads = [QueueFunctionThread(segment_queue, + _delete_segment, create_connection()) for _junk in + xrange(10)] + for thread in segment_threads: + thread.start() + while not segment_queue.empty(): + sleep(0.01) + for thread in segment_threads: + thread.abort = True + while thread.isAlive(): + thread.join(0.01) + put_errors_from_threads(segment_threads, error_queue) + if options.verbose: + path = options.yes_all and join(container, obj) or obj + if path[:1] in ('/', '\\'): + path = path[1:] + if conn.attempts > 1: + print_queue.put('%s [after %d attempts]' % + (path, conn.attempts)) + else: + print_queue.put(path) + except ClientException as err: + if err.http_status != 404: + raise + error_queue.put('Object %s not found' % + repr('%s/%s' % (container, obj))) + + container_queue = Queue(10000) + + def _delete_container(container, conn): + try: + marker = '' + while True: + objects = [o['name'] for o in + conn.get_container(container, marker=marker)[1]] + if not objects: + break + for obj in objects: + object_queue.put((container, obj)) + marker = objects[-1] + while not object_queue.empty(): + sleep(0.01) + attempts = 1 + while True: + try: + conn.delete_container(container) + 
+                    break
+                except ClientException as err:
+                    if err.http_status != 409:
+                        raise
+                    if attempts > 10:
+                        raise
+                    attempts += 1
+                    sleep(1)
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Container %s not found' % repr(container))
+
+    url, token = get_auth(options.auth, options.user, options.key,
+                          snet=options.snet)
+    create_connection = lambda: Connection(options.auth, options.user,
+        options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
+    object_threads = [QueueFunctionThread(object_queue, _delete_object,
+        create_connection()) for _junk in range(10)]
+    for thread in object_threads:
+        thread.start()
+    container_threads = [QueueFunctionThread(container_queue,
+        _delete_container, create_connection()) for _junk in range(10)]
+    for thread in container_threads:
+        thread.start()
+    if not args:
+        conn = create_connection()
+        try:
+            marker = ''
+            while True:
+                containers = \
+                    [c['name'] for c in conn.get_account(marker=marker)[1]]
+                if not containers:
+                    break
+                for container in containers:
+                    container_queue.put(container)
+                marker = containers[-1]
+            while not container_queue.empty():
+                sleep(0.01)
+            while not object_queue.empty():
+                sleep(0.01)
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Account not found')
+    elif len(args) == 1:
+        if '/' in args[0]:
+            print('WARNING: / in container name; you might have meant '
+                  '%r instead of %r.' % (args[0].replace('/', ' ', 1),
+                                         args[0]), file=stderr)
+        conn = create_connection()
+        _delete_container(args[0], conn)
+    else:
+        for obj in args[1:]:
+            object_queue.put((args[0], obj))
+    while not container_queue.empty():
+        sleep(0.01)
+    for thread in container_threads:
+        thread.abort = True
+        while thread.is_alive():
+            thread.join(0.01)
+    put_errors_from_threads(container_threads, error_queue)
+    while not object_queue.empty():
+        sleep(0.01)
+    for thread in object_threads:
+        thread.abort = True
+        while thread.is_alive():
+            thread.join(0.01)
+    put_errors_from_threads(object_threads, error_queue)
+
+
+st_download_help = '''
+download --all OR download container [options] [object] [object] ...
+    Downloads everything in the account (with --all), or everything in a
+    container, or a list of objects depending on the args given. For a single
+    object download, you may use the -o [--output] <filename> option to
+    redirect the output to a specific file or if "-" then just redirect to
+    stdout.'''.strip('\n')
+
+
+def st_download(parser, args, print_queue, error_queue):
+    parser.add_option('-a', '--all', action='store_true', dest='yes_all',
+        default=False, help='Indicates that you really want to download '
+        'everything in the account')
+    parser.add_option('-o', '--output', dest='out_file', help='For a single '
+        'file download, stream the output to an alternate location ')
+    (options, args) = parse_args(parser, args)
+    args = args[1:]
+    if options.out_file == '-':
+        options.verbose = 0
+    if options.out_file and len(args) != 2:
+        exit('-o option only allowed for single file downloads')
+    if (not args and not options.yes_all) or (args and options.yes_all):
+        error_queue.put('Usage: %s [options] %s' %
+                        (basename(argv[0]), st_download_help))
+        return
+
+    object_queue = Queue(10000)
+
+    def _download_object(queue_arg, conn):
+        if len(queue_arg) == 2:
+            container, obj = queue_arg
+            out_file = None
+        elif len(queue_arg) == 3:
+            container, obj, out_file = queue_arg
+        else:
+            raise Exception("Invalid queue_arg length of %s" % len(queue_arg))
+        try:
+            headers, body = \
+                conn.get_object(container, obj, resp_chunk_size=65536)
+            content_type = headers.get('content-type')
+            if 'content-length' in headers:
+                content_length = int(headers.get('content-length'))
+            else:
+                content_length = None
+            etag = headers.get('etag')
+            path = options.yes_all and join(container, obj) or obj
+            if path[:1] in ('/', '\\'):
+                path = path[1:]
+            md5sum = None
+            make_dir = out_file != "-"
+            if content_type.split(';', 1)[0] == 'text/directory':
+                if make_dir and not isdir(path):
+                    mkdirs(path)
+                read_length = 0
+                if 'x-object-manifest' not in headers:
+                    md5sum = md5()
+                for chunk in body:
+                    read_length += len(chunk)
+                    if md5sum:
+                        md5sum.update(chunk)
+            else:
+                dirpath = dirname(path)
+                if make_dir and dirpath and not isdir(dirpath):
+                    mkdirs(dirpath)
+                if out_file == "-":
+                    fp = stdout
+                elif out_file:
+                    fp = open(out_file, 'wb')
+                else:
+                    fp = open(path, 'wb')
+                read_length = 0
+                if 'x-object-manifest' not in headers:
+                    md5sum = md5()
+                for chunk in body:
+                    fp.write(chunk)
+                    read_length += len(chunk)
+                    if md5sum:
+                        md5sum.update(chunk)
+                fp.close()
+            if md5sum and md5sum.hexdigest() != etag:
+                error_queue.put('%s: md5sum != etag, %s != %s' %
+                                (path, md5sum.hexdigest(), etag))
+            if content_length is not None and read_length != content_length:
+                error_queue.put('%s: read_length != content_length, %d != %d' %
+                                (path, read_length, content_length))
+            if 'x-object-meta-mtime' in headers and not options.out_file:
+                mtime = float(headers['x-object-meta-mtime'])
+                utime(path, (mtime, mtime))
+            if options.verbose:
+                if conn.attempts > 1:
+                    print_queue.put('%s [after %d attempts]' %
+                                    (path, conn.attempts))
+                else:
+                    print_queue.put(path)
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Object %s not found' %
+                            repr('%s/%s' % (container, obj)))
+
+    container_queue = Queue(10000)
+
+    def _download_container(container, conn):
+        try:
+            marker = ''
+            while True:
+                objects = [o['name'] for o in
+                           conn.get_container(container, marker=marker)[1]]
+                if not objects:
+                    break
+                for obj in objects:
+                    object_queue.put((container, obj))
+                marker = objects[-1]
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Container %s not found' % repr(container))
+
+    url, token = get_auth(options.auth, options.user, options.key,
+                          snet=options.snet)
+    create_connection = lambda: Connection(options.auth, options.user,
+        options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
+    object_threads = [QueueFunctionThread(object_queue, _download_object,
+        create_connection()) for _junk in range(10)]
+    for thread in object_threads:
+        thread.start()
+    container_threads = [QueueFunctionThread(container_queue,
+        _download_container, create_connection()) for _junk in range(10)]
+    for thread in container_threads:
+        thread.start()
+    if not args:
+        conn = create_connection()
+        try:
+            marker = ''
+            while True:
+                containers = [c['name']
+                              for c in conn.get_account(marker=marker)[1]]
+                if not containers:
+                    break
+                for container in containers:
+                    container_queue.put(container)
+                marker = containers[-1]
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Account not found')
+    elif len(args) == 1:
+        if '/' in args[0]:
+            print('WARNING: / in container name; you might have meant '
+                  '%r instead of %r.' % (args[0].replace('/', ' ', 1),
+                                         args[0]), file=stderr)
+        _download_container(args[0], create_connection())
+    else:
+        if len(args) == 2:
+            obj = args[1]
+            object_queue.put((args[0], obj, options.out_file))
+        else:
+            for obj in args[1:]:
+                object_queue.put((args[0], obj))
+    while not container_queue.empty():
+        sleep(0.01)
+    for thread in container_threads:
+        thread.abort = True
+        while thread.is_alive():
+            thread.join(0.01)
+    put_errors_from_threads(container_threads, error_queue)
+    while not object_queue.empty():
+        sleep(0.01)
+    for thread in object_threads:
+        thread.abort = True
+        while thread.is_alive():
+            thread.join(0.01)
+    put_errors_from_threads(object_threads, error_queue)
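+# Illustrative sketch (auth URL and names are placeholders): streaming a
+# single object with get_object's resp_chunk_size generator. As the
+# get_object docstring notes, the body must be fully consumed before the
+# same connection is reused for another request.
+#
+#   conn = Connection('https://auth.example.com/v1.0', 'user', 'key')
+#   headers, body = conn.get_object('templates', 'disk.vhd',
+#                                   resp_chunk_size=65536)
+#   with open('/tmp/disk.vhd', 'wb') as out:
+#       for chunk in body:
+#           out.write(chunk)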
+
+
+st_list_help = '''
+list [options] [container]
+    Lists the containers for the account or the objects for a container. -p or
+    --prefix is an option that will only list items beginning with that
+    prefix. -d or --delimiter is an option (for container listings only) that
+    will roll up items with the given delimiter (see Cloud Files general
+    documentation for what this means).'''.strip('\n')
+
+
+def st_list(parser, args, print_queue, error_queue):
+    parser.add_option('-p', '--prefix', dest='prefix', help='Will only list '
+        'items beginning with the prefix')
+    parser.add_option('-d', '--delimiter', dest='delimiter', help='Will roll '
+        'up items with the given delimiter (see Cloud Files general '
+        'documentation for what this means)')
+    (options, args) = parse_args(parser, args)
+    args = args[1:]
+    if options.delimiter and not args:
+        exit('-d option only allowed for container listings')
+    if len(args) > 1:
+        error_queue.put('Usage: %s [options] %s' %
+                        (basename(argv[0]), st_list_help))
+        return
+    conn = Connection(options.auth, options.user, options.key,
+                      snet=options.snet)
+    try:
+        marker = ''
+        while True:
+            if not args:
+                items = \
+                    conn.get_account(marker=marker, prefix=options.prefix)[1]
+            else:
+                items = conn.get_container(args[0], marker=marker,
+                    prefix=options.prefix, delimiter=options.delimiter)[1]
+            if not items:
+                break
+            for item in items:
+                print_queue.put(item.get('name', item.get('subdir')))
+            marker = items[-1].get('name', items[-1].get('subdir'))
+    except ClientException as err:
+        if err.http_status != 404:
+            raise
+        if not args:
+            error_queue.put('Account not found')
+        else:
+            error_queue.put('Container %s not found' % repr(args[0]))
+
+
+st_stat_help = '''
+stat [container] [object]
+    Displays information for the account, container, or object depending on
+    the args given (if any).'''.strip('\n')
+
+
+def st_stat(parser, args, print_queue, error_queue):
+    (options, args) = parse_args(parser, args)
+    args = args[1:]
+    conn = Connection(options.auth, options.user, options.key)
+    if not args:
+        try:
+            headers = conn.head_account()
+            if options.verbose > 1:
+                print_queue.put('''
+StorageURL: %s
+Auth Token: %s
+'''.strip('\n') % (conn.url, conn.token))
+            container_count = int(headers.get('x-account-container-count', 0))
+            object_count = int(headers.get('x-account-object-count', 0))
+            bytes_used = int(headers.get('x-account-bytes-used', 0))
+            print_queue.put('''
+   Account: %s
+Containers: %d
+   Objects: %d
+     Bytes: %d'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], container_count,
+                                 object_count, bytes_used))
+            for key, value in headers.items():
+                if key.startswith('x-account-meta-'):
+                    print_queue.put('%10s: %s' % ('Meta %s' %
+                        key[len('x-account-meta-'):].title(), value))
+            for key, value in headers.items():
+                if not key.startswith('x-account-meta-') and key not in (
+                        'content-length', 'date', 'x-account-container-count',
+                        'x-account-object-count', 'x-account-bytes-used'):
+                    print_queue.put(
+                        '%10s: %s' % (key.title(), value))
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Account not found')
+    elif len(args) == 1:
+        if '/' in args[0]:
+            print('WARNING: / in container name; you might have meant '
+                  '%r instead of %r.' % (args[0].replace('/', ' ', 1),
+                                         args[0]), file=stderr)
+        try:
+            headers = conn.head_container(args[0])
+            object_count = int(headers.get('x-container-object-count', 0))
+            bytes_used = int(headers.get('x-container-bytes-used', 0))
+            print_queue.put('''
+  Account: %s
+Container: %s
+  Objects: %d
+    Bytes: %d
+ Read ACL: %s
+Write ACL: %s
+  Sync To: %s
+ Sync Key: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
+                                object_count, bytes_used,
+                                headers.get('x-container-read', ''),
+                                headers.get('x-container-write', ''),
+                                headers.get('x-container-sync-to', ''),
+                                headers.get('x-container-sync-key', '')))
+            for key, value in headers.items():
+                if key.startswith('x-container-meta-'):
+                    print_queue.put('%9s: %s' % ('Meta %s' %
+                        key[len('x-container-meta-'):].title(), value))
+            for key, value in headers.items():
+                if not key.startswith('x-container-meta-') and key not in (
+                        'content-length', 'date', 'x-container-object-count',
+                        'x-container-bytes-used', 'x-container-read',
+                        'x-container-write', 'x-container-sync-to',
+                        'x-container-sync-key'):
+                    print_queue.put(
+                        '%9s: %s' % (key.title(), value))
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Container %s not found' % repr(args[0]))
+    elif len(args) == 2:
+        try:
+            headers = conn.head_object(args[0], args[1])
+            print_queue.put('''
+       Account: %s
+     Container: %s
+        Object: %s
+  Content Type: %s'''.strip('\n') % (conn.url.rsplit('/', 1)[-1], args[0],
+                                     args[1], headers.get('content-type')))
+            if 'content-length' in headers:
+                print_queue.put('Content Length: %s' %
+                                headers['content-length'])
+            if 'last-modified' in headers:
+                print_queue.put(' Last Modified: %s' %
+                                headers['last-modified'])
+            if 'etag' in headers:
+                print_queue.put('          ETag: %s' % headers['etag'])
+            if 'x-object-manifest' in headers:
+                print_queue.put('      Manifest: %s' %
+                                headers['x-object-manifest'])
+            for key, value in headers.items():
+                if key.startswith('x-object-meta-'):
+                    print_queue.put('%14s: %s' % ('Meta %s' %
+                        key[len('x-object-meta-'):].title(), value))
+            for key, value in headers.items():
+                if not key.startswith('x-object-meta-') and key not in (
+                        'content-type', 'content-length', 'last-modified',
+                        'etag', 'date', 'x-object-manifest'):
+                    print_queue.put(
+                        '%14s: %s' % (key.title(), value))
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Object %s not found' %
+                            repr('%s/%s' % (args[0], args[1])))
+    else:
+        error_queue.put('Usage: %s [options] %s' %
+                        (basename(argv[0]), st_stat_help))
+
+
+st_post_help = '''
+post [options] [container] [object]
+    Updates meta information for the account, container, or object depending
+    on the args given. If the container is not found, it will be created
+    automatically; but this is not true for accounts and objects. Containers
+    also allow the -r (or --read-acl) and -w (or --write-acl) options. The
+    --storage-policy option sets a storage policy on the container if the
+    container does not yet exist. The -m or --meta option is allowed on all
+    and is used to define user metadata items to set in the form Name:Value.
+    This option can be repeated. Example:
+    post -m Color:Blue -m Size:Large'''.strip('\n')
+
+
+def st_post(parser, args, print_queue, error_queue):
+    parser.add_option('-r', '--read-acl', dest='read_acl', help='Sets the '
+        'Read ACL for containers. Quick summary of ACL syntax: .r:*, '
+        '.r:-.example.com, .r:www.example.com, account1, account2:user2')
+    parser.add_option('-w', '--write-acl', dest='write_acl', help='Sets the '
+        'Write ACL for containers. Quick summary of ACL syntax: account1, '
+        'account2:user2')
+    parser.add_option('-t', '--sync-to', dest='sync_to', help='Sets the '
+        'Sync To for containers, for multi-cluster replication.')
+    parser.add_option('-k', '--sync-key', dest='sync_key', help='Sets the '
+        'Sync Key for containers, for multi-cluster replication.')
+    parser.add_option('-m', '--meta', action='append', dest='meta', default=[],
+        help='Sets a meta data item with the syntax name:value. This option '
+        'may be repeated. Example: -m Color:Blue -m Size:Large')
+    parser.add_option('', '--storage-policy', action='store',
+        dest='storage_policy', help='Sets a storage policy to the container '
+        'if the container does not exist')
+    (options, args) = parse_args(parser, args)
+    args = args[1:]
+    if (options.read_acl or options.write_acl or options.sync_to or
+            options.sync_key) and not args:
+        exit('-r, -w, -t, and -k options only allowed for containers')
+    conn = Connection(options.auth, options.user, options.key)
+    if not args:
+        headers = {}
+        for item in options.meta:
+            split_item = item.split(':')
+            headers['X-Account-Meta-' + split_item[0]] = \
+                len(split_item) > 1 and split_item[1]
+        try:
+            conn.post_account(headers=headers)
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Account not found')
+    elif len(args) == 1:
+        if '/' in args[0]:
+            print('WARNING: / in container name; you might have meant '
+                  '%r instead of %r.' % (args[0].replace('/', ' ', 1),
+                                         args[0]), file=stderr)
+        headers = {}
+        for item in options.meta:
+            split_item = item.split(':')
+            headers['X-Container-Meta-' + split_item[0]] = \
+                len(split_item) > 1 and split_item[1]
+        if options.read_acl is not None:
+            headers['X-Container-Read'] = options.read_acl
+        if options.write_acl is not None:
+            headers['X-Container-Write'] = options.write_acl
+        if options.sync_to is not None:
+            headers['X-Container-Sync-To'] = options.sync_to
+        if options.sync_key is not None:
+            headers['X-Container-Sync-Key'] = options.sync_key
+        if options.storage_policy is not None:
+            headers['X-Storage-Policy'] = options.storage_policy
+        try:
+            conn.post_container(args[0], headers=headers)
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            conn.put_container(args[0], headers=headers)
+    elif len(args) == 2:
+        headers = {}
+        for item in options.meta:
+            split_item = item.split(':')
+            headers['X-Object-Meta-' + split_item[0]] = \
+                len(split_item) > 1 and split_item[1]
+        try:
+            conn.post_object(args[0], args[1], headers=headers)
+        except ClientException as err:
+            if err.http_status != 404:
+                raise
+            error_queue.put('Object %s not found' %
+                            repr('%s/%s' % (args[0], args[1])))
+    else:
+        error_queue.put('Usage: %s [options] %s' %
+                        (basename(argv[0]), st_post_help))
+
+
+st_upload_help = '''
+upload [options] container file_or_directory [file_or_directory] [...]
+    Uploads to the given container the files and directories specified by the
+    remaining args. -c or --changed is an option that will only upload files
+    that have changed since the last upload. -S or --segment-size and
+    --leave-segments are options as well (see --help for more).
+    --storage-policy sets a storage policy on the container if the container
+    does not exist.'''.strip('\n')
+
+
+def st_upload(parser, args, print_queue, error_queue):
+    parser.add_option('-c', '--changed', action='store_true', dest='changed',
+        default=False, help='Will only upload files that have changed since '
+        'the last upload')
+    parser.add_option('-S', '--segment-size', dest='segment_size', help='Will '
+        'upload files in segments no larger than <size> and then create a '
+        '"manifest" file that will download all the segments as if it were '
+        'the original file. The segments will be uploaded to a '
+        '<container>_segments container so as to not pollute the main '
+        '<container> listings.')
+    parser.add_option('', '--leave-segments', action='store_true',
+        dest='leave_segments', default=False, help='Indicates that you want '
+        'the older segments of manifest objects left alone (in the case of '
+        'overwrites)')
+    parser.add_option('', '--storage-policy', action='store',
+        dest='storage_policy', help='Sets a storage policy to the container '
+        'if the container does not exist')
+    (options, args) = parse_args(parser, args)
+    args = args[1:]
+    if len(args) < 2:
+        error_queue.put('Usage: %s [options] %s' %
+                        (basename(argv[0]), st_upload_help))
+        return
+    object_queue = Queue(10000)
+
+    def _segment_job(job, conn):
+        if job.get('delete', False):
+            conn.delete_object(job['container'], job['obj'])
+        else:
+            fp = open(job['path'], 'rb')
+            fp.seek(job['segment_start'])
+            conn.put_object(job.get('container', args[0] + '_segments'),
+                job['obj'], fp, content_length=job['segment_size'])
+        if options.verbose and 'log_line' in job:
+            if conn.attempts > 1:
+                print_queue.put('%s [after %d attempts]' %
+                                (job['log_line'], conn.attempts))
+            else:
+                print_queue.put(job['log_line'])
+
+    def _object_job(job, conn):
+        path = job['path']
+        container = job.get('container', args[0])
+        dir_marker = job.get('dir_marker', False)
+        try:
+            obj = path
+            if obj.startswith('./') or obj.startswith('.\\'):
+                obj = obj[2:]
+            if obj.startswith('/'):
+                obj = obj[1:]
+            put_headers = {'x-object-meta-mtime': str(getmtime(path))}
+            if dir_marker:
+                if options.changed:
+                    try:
+                        headers = conn.head_object(container, obj)
+                        ct = headers.get('content-type')
+                        cl = int(headers.get('content-length'))
+                        et = headers.get('etag')
+                        mt = headers.get('x-object-meta-mtime')
+                        if ct.split(';', 1)[0] == 'text/directory' and \
+                                cl == 0 and \
+                                et == 'd41d8cd98f00b204e9800998ecf8427e' and \
+                                mt == put_headers['x-object-meta-mtime']:
+                            return
+                    except ClientException as err:
+                        if err.http_status != 404:
+                            raise
+                conn.put_object(container, obj, '', content_length=0,
+                                content_type='text/directory',
+                                headers=put_headers)
+            else:
+                # We need to HEAD all objects now in case we're overwriting a
+                # manifest object and need to delete the old segments
+                # ourselves.
+                old_manifest = None
+                if options.changed or not options.leave_segments:
+                    try:
+                        headers = conn.head_object(container, obj)
+                        cl = int(headers.get('content-length'))
+                        mt = headers.get('x-object-meta-mtime')
+                        if options.changed and cl == getsize(path) and \
+                                mt == put_headers['x-object-meta-mtime']:
+                            return
+                        if not options.leave_segments:
+                            old_manifest = headers.get('x-object-manifest')
+                    except ClientException as err:
+                        if err.http_status != 404:
+                            raise
+                if options.segment_size and \
+                        getsize(path) > int(options.segment_size):
+                    full_size = getsize(path)
+                    segment_queue = Queue(10000)
+                    segment_threads = [QueueFunctionThread(segment_queue,
+                        _segment_job, create_connection()) for _junk in
+                        range(10)]
+                    for thread in segment_threads:
+                        thread.start()
+                    segment = 0
+                    segment_start = 0
+                    while segment_start < full_size:
+                        segment_size = int(options.segment_size)
+                        if segment_start + segment_size > full_size:
+                            segment_size = full_size - segment_start
+                        segment_queue.put({'path': path,
+                            'obj': '%s/%s/%s/%08d' % (obj,
+                                put_headers['x-object-meta-mtime'], full_size,
+                                segment),
+                            'segment_start': segment_start,
+                            'segment_size': segment_size,
+                            'log_line': '%s segment %s' % (obj, segment)})
+                        segment += 1
+                        segment_start += segment_size
+                    while not segment_queue.empty():
+                        sleep(0.01)
+                    for thread in segment_threads:
+                        thread.abort = True
+                        while thread.is_alive():
+                            thread.join(0.01)
+                    if put_errors_from_threads(segment_threads, error_queue):
+                        raise ClientException('Aborting manifest creation '
+                            'because not all segments could be uploaded. %s/%s'
+                            % (container, obj))
+                    new_object_manifest = '%s_segments/%s/%s/%s/' % (
+                        container, obj, put_headers['x-object-meta-mtime'],
+                        full_size)
+                    if old_manifest == new_object_manifest:
+                        old_manifest = None
+                    put_headers['x-object-manifest'] = new_object_manifest
+                    conn.put_object(container, obj, '', content_length=0,
+                                    headers=put_headers)
+                else:
+                    conn.put_object(container, obj, open(path, 'rb'),
+                        content_length=getsize(path), headers=put_headers)
+                if old_manifest:
+                    segment_queue = Queue(10000)
+                    scontainer, sprefix = old_manifest.split('/', 1)
+                    for delobj in conn.get_container(scontainer,
+                                                     prefix=sprefix)[1]:
+                        segment_queue.put({'delete': True,
+                            'container': scontainer, 'obj': delobj['name']})
+                    if not segment_queue.empty():
+                        segment_threads = [QueueFunctionThread(segment_queue,
+                            _segment_job, create_connection()) for _junk in
+                            range(10)]
+                        for thread in segment_threads:
+                            thread.start()
+                        while not segment_queue.empty():
+                            sleep(0.01)
+                        for thread in segment_threads:
+                            thread.abort = True
+                            while thread.is_alive():
+                                thread.join(0.01)
+                        put_errors_from_threads(segment_threads, error_queue)
+            if options.verbose:
+                if conn.attempts > 1:
+                    print_queue.put(
+                        '%s [after %d attempts]' % (obj, conn.attempts))
+                else:
+                    print_queue.put(obj)
+        except OSError as err:
+            if err.errno != ENOENT:
+                raise
+            error_queue.put('Local file %s not found' % repr(path))
+
+    def _upload_dir(path):
+        names = listdir(path)
+        if not names:
+            object_queue.put({'path': path, 'dir_marker': True})
+        else:
+            for name in names:
+                subpath = join(path, name)
+                if isdir(subpath):
+                    _upload_dir(subpath)
+                else:
+                    object_queue.put({'path': subpath})
+
+    url, token = get_auth(options.auth, options.user, options.key,
+                          snet=options.snet)
+    create_connection = lambda: Connection(options.auth, options.user,
+        options.key, preauthurl=url, preauthtoken=token, snet=options.snet)
+    object_threads = [QueueFunctionThread(object_queue, _object_job,
+        create_connection()) for _junk in range(10)]
+    for thread in object_threads:
+        thread.start()
+    conn = create_connection()
+    # Try to create the container, just in case it doesn't exist. If this
+    # fails, it might just be because the user doesn't have container PUT
+    # permissions, so we'll ignore any error. If there's really a problem,
+    # it'll surface on the first object PUT.
+    try:
+        container_headers = {}
+        if options.storage_policy is not None:
+            container_headers['X-Storage-Policy'] = options.storage_policy
+        conn.put_container(args[0], headers=container_headers)
+        if options.segment_size is not None:
+            conn.put_container(args[0] + '_segments',
+                               headers=container_headers)
+    except Exception:
+        pass
+    try:
+        for arg in args[1:]:
+            if isdir(arg):
+                _upload_dir(arg)
+            else:
+                object_queue.put({'path': arg})
+        while not object_queue.empty():
+            sleep(0.01)
+        for thread in object_threads:
+            thread.abort = True
+            while thread.is_alive():
+                thread.join(0.01)
+        put_errors_from_threads(object_threads, error_queue)
+    except ClientException as err:
+        if err.http_status != 404:
+            raise
+        error_queue.put('Account not found')
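+# Illustrative sketch of the segmentation scheme above (sizes hypothetical):
+# a file uploaded with -S is split into fixed-size pieces, each stored as
+# <obj>/<mtime>/<full_size>/<%08d> in the <container>_segments container,
+# and a zero-byte manifest object points at that prefix via the
+# x-object-manifest header.
+#
+#   def segment_ranges(full_size, segment_size):
+#       start = 0
+#       while start < full_size:
+#           yield start, min(segment_size, full_size - start)
+#           start += segment_size
+#
+#   list(segment_ranges(130 * 2**20, 50 * 2**20))
+#   # -> [(0, 52428800), (52428800, 52428800), (104857600, 31457280)]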
+
+
+def parse_args(parser, args, enforce_requires=True):
+    if not args:
+        args = ['-h']
+    (options, args) = parser.parse_args(args)
+    if enforce_requires and \
+            not (options.auth and options.user and options.key):
+        exit('''
+Requires ST_AUTH, ST_USER, and ST_KEY environment variables be set or
+overridden with -A, -U, or -K.'''.strip('\n'))
+    return options, args
+
+
+if __name__ == '__main__':
+    parser = OptionParser(version='%prog 1.0', usage='''
+Usage: %%prog <command> [options] [args]
+
+Commands:
+  %(st_stat_help)s
+  %(st_list_help)s
+  %(st_upload_help)s
+  %(st_post_help)s
+  %(st_download_help)s
+  %(st_delete_help)s
+
+Example:
+  %%prog -A https://auth.api.rackspacecloud.com/v1.0 -U user -K key stat
+'''.strip('\n') % globals())
+    parser.add_option('-s', '--snet', action='store_true', dest='snet',
+                      default=False, help='Use SERVICENET internal network')
+    parser.add_option('-v', '--verbose', action='count', dest='verbose',
+                      default=1, help='Print more info')
+    parser.add_option('-q', '--quiet', action='store_const', dest='verbose',
+                      const=0, default=1, help='Suppress status output')
+    parser.add_option('-A', '--auth', dest='auth',
+                      default=environ.get('ST_AUTH'),
+                      help='URL for obtaining an auth token')
+    parser.add_option('-U', '--user', dest='user',
+                      default=environ.get('ST_USER'),
+                      help='User name for obtaining an auth token')
+    parser.add_option('-K', '--key', dest='key',
+                      default=environ.get('ST_KEY'),
+                      help='Key for obtaining an auth token')
+    parser.disable_interspersed_args()
+    (options, args) = parse_args(parser, argv[1:], enforce_requires=False)
+    parser.enable_interspersed_args()
+
+    commands = ('delete', 'download', 'list', 'post', 'stat', 'upload')
+    if not args or args[0] not in commands:
+        parser.print_usage()
+        if args:
+            exit('no such command: %s' % args[0])
+        exit()
+
+    print_queue = Queue(10000)
+
+    def _print(item):
+        print(item)
+
+    print_thread = QueueFunctionThread(print_queue, _print)
+    print_thread.start()
+
+    error_queue = Queue(10000)
+
+    def _error(item):
+        print(item, file=stderr)
+
+    error_thread = QueueFunctionThread(error_queue, _error)
+    error_thread.start()
+
+    try:
+        parser.usage = globals()['st_%s_help' % args[0]]
+        try:
+            globals()['st_%s' % args[0]](parser, argv[1:], print_queue,
+                                         error_queue)
+        except (ClientException, HTTPException, socket.error) as err:
+            error_queue.put(str(err))
+        while not print_queue.empty():
+            sleep(0.01)
+        print_thread.abort = True
+        while print_thread.is_alive():
+            print_thread.join(0.01)
+        while not error_queue.empty():
+            sleep(0.01)
+        error_thread.abort = True
+        while error_thread.is_alive():
+            error_thread.join(0.01)
+    except (SystemExit, Exception):
+        for thread in threading_enumerate():
+            thread.abort = True
+        raise
diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/vmops b/scripts/vm/hypervisor/xenserver/xenserver84/vmops
new file mode 100755
index 00000000000..cf6e6325d68
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xenserver84/vmops
@@ -0,0 +1,1607 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Version @VERSION@
+#
+# A plugin for executing scripts needed by the vmops cloud
+
+import os, sys, time
+import XenAPIPlugin
+if os.path.exists("/opt/xensource/sm"):
+    sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"])
+if os.path.exists("/usr/lib/xcp/sm"):
+    sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
+import base64
+import socket
+import stat
+import tempfile
+import util
+import subprocess
+import zlib
+import cloudstack_pluginlib as lib
+import logging
+from util import CommandException
+
+lib.setup_logging("/var/log/cloud/cloud.log")
+
+def echo(fn):
+    def wrapped(*v, **k):
+        name = fn.__name__
+        # the command string is logged in SMlog, so logging method enter/exit
+        # there helps with debugging
+        util.SMlog("#### CLOUD enter %s ####" % name)
+        res = fn(*v, **k)
+        util.SMlog("#### CLOUD exit %s ####" % name)
+        return res
+    return wrapped
+
+@echo
+def add_to_VCPUs_params_live(session, args):
+    key = args['key']
+    value = args['value']
+    vmname = args['vmname']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/add_to_vcpus_params_live.sh", vmname, key, value]
+        txt = util.pread2(cmd)
+    except:
+        return 'false'
+    return 'true'
+
+@echo
+def setup_iscsi(session, args):
+    uuid = args['uuid']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/setup_iscsi.sh", uuid]
+        txt = util.pread2(cmd)
+    except:
+        txt = ''
+    return txt
+
+
+@echo
+def preparemigration(session, args):
+    uuid = args['uuid']
+    try:
+        cmd = ["/opt/cloud/bin/make_migratable.sh", uuid]
+        util.pread2(cmd)
+        txt = 'success'
+    except:
+        logging.debug("Caught exception while preparing migration")
+        txt = ''
+
+    return txt
+
+@echo
+def setIptables(session, args):
+    try:
+        cmd = ["/bin/bash", "/opt/cloud/bin/setupxenserver.sh"]
+        txt = util.pread2(cmd)
+        txt = 'success'
+    except:
+        logging.debug("setIptables execution failed")
+        txt = ''
+
+    return txt
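+# Illustrative sketch of how the management server invokes these functions
+# (host address and credentials are placeholders): XenAPI plugins are
+# dispatched by file name through host.call_plugin with a string-to-string
+# argument map, and each function returns a string.
+#
+#   import XenAPI
+#   session = XenAPI.Session('https://xenserver-host.example.com')
+#   session.xenapi.login_with_password('root', 'password')
+#   host = session.xenapi.host.get_all()[0]
+#   result = session.xenapi.host.call_plugin(host, 'vmops',
+#                                            'pingxenserver', {})
+#   # result == 'success'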
+@echo
+def pingdomr(session, args):
+    host = args['host']
+    port = args['port']
+    socket.setdefaulttimeout(3)
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    try:
+        s.connect((host, int(port)))
+        txt = 'success'
+    except:
+        txt = ''
+
+    s.close()
+
+    return txt
+
+@echo
+def kill_copy_process(session, args):
+    namelabel = args['namelabel']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/kill_copy_process.sh", namelabel]
+        txt = util.pread2(cmd)
+    except:
+        txt = 'false'
+    return txt
+
+@echo
+def pingxenserver(session, args):
+    txt = 'success'
+    return txt
+
+def pingtest(session, args):
+    sargs = args['args']
+    cmd = sargs.split(' ')
+    cmd.insert(0, "/opt/cloud/bin/pingtest.sh")
+    cmd.insert(0, "/bin/bash")
+    try:
+        txt = util.pread2(cmd)
+        txt = 'success'
+    except:
+        logging.debug("pingtest failed")
+        txt = ''
+
+    return txt
+
+@echo
+def setLinkLocalIP(session, args):
+    brName = args['brName']
+    try:
+        cmd = ["ip", "route", "del", "169.254.0.0/16"]
+        txt = util.pread2(cmd)
+    except:
+        txt = ''
+    try:
+        cmd = ["ifconfig", brName, "169.254.0.1", "netmask", "255.255.0.0"]
+        txt = util.pread2(cmd)
+    except:
+        try:
+            cmd = ['cat', '/etc/xensource/network.conf']
+            result = util.pread2(cmd)
+        except:
+            return 'can not cat network.conf'
+
+        if result.lower().strip() == "bridge":
+            try:
+                cmd = ["brctl", "addbr", brName]
+                txt = util.pread2(cmd)
+            except:
+                pass
+
+        else:
+            try:
+                cmd = ["ovs-vsctl", "add-br", brName]
+                txt = util.pread2(cmd)
+            except:
+                pass
+
+        try:
+            cmd = ["ifconfig", brName, "169.254.0.1", "netmask", "255.255.0.0"]
+            txt = util.pread2(cmd)
+        except:
+            pass
+    try:
+        cmd = ["ip", "route", "add", "169.254.0.0/16", "dev", brName, "src", "169.254.0.1"]
+        txt = util.pread2(cmd)
+    except:
+        txt = ''
+    txt = 'success'
+    return txt
+
+@echo
+def createFile(session, args):
+    file_path = args['filepath']
+    file_contents = args['filecontents']
+
+    try:
+        f = open(file_path, "w")
+        f.write(file_contents)
+        f.close()
+        txt = 'success'
+    except:
+        logging.debug("failed to create HA proxy cfg file")
+        txt = ''
+
+    return txt
+
+@echo
+def secureCopyToHost(session, args):
+    host_filepath = args['hostfilepath']
+    src_ip = args['srcip']
+    src_filepath = args['srcfilepath']
+    src_target = "root@" + src_ip + ":" + src_filepath
+    # Make any directories as needed
+    if not os.path.isdir(host_filepath):
+        try:
+            os.makedirs(host_filepath)
+        except OSError as e:
+            if not os.path.isdir(host_filepath):
+                errMsg = "OSError while creating " + host_filepath + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
+                logging.debug(errMsg)
+                return "fail# Cannot create the directory to copy file to " + host_filepath
+
+    # Copy file to created directory
+    txt = ""
+    try:
+        txt = util.pread2(['scp', '-P', '3922', '-q', '-o', 'StrictHostKeyChecking=no', '-i', '/root/.ssh/id_rsa.cloud', src_target, host_filepath])
+        util.pread2(['chmod', 'a+r', os.path.join(host_filepath, os.path.basename(src_filepath))])
+        txt = 'success#' + txt
+    except:
+        logging.error("failed to scp source target " + src_target + " to host at file path " + host_filepath)
+        txt = 'fail#' + txt
+    return txt
ip " + domrip) + txt = 'fail#' + txt + return txt + +@echo +def runPatchScriptInDomr(session, args): + domrip = args['domrip'] + txt="" + try: + target = "root@" + domrip + txt = util.pread2(['ssh','-p','3922','-i','/root/.ssh/id_rsa.cloud', target, "/bin/bash","/var/cache/cloud/patch-sysvms.sh"]) + txt = 'succ#' + txt + except: + logging.debug("failed to run patch script in systemVM with IP: " + domrip) + txt = 'fail#' + txt + return txt + +@echo +def deleteFile(session, args): + file_path = args["filepath"] + + try: + if os.path.isfile(file_path): + os.remove(file_path) + txt = 'success' + except: + logging.debug(" failed to remove HA proxy cfg file ") + txt = '' + + return txt + +#using all the iptables chain names length to 24 because cleanup_rules groups the vm chain excluding -def,-eg +#to avoid multiple iptables chains for single vm, there using length 24 +def chain_name(vm_name): + if vm_name.startswith('i-') or vm_name.startswith('r-'): + if vm_name.endswith('untagged'): + return '-'.join(vm_name.split('-')[:-1]) + if len(vm_name) > 25: + vm_name = vm_name[0:24] + return vm_name + +def chain_name_def(vm_name): + #iptables chain length max is 29 chars + if len(vm_name) > 25: + vm_name = vm_name[0:24] + + if vm_name.startswith('i-'): + if vm_name.endswith('untagged'): + return '-'.join(vm_name.split('-')[:-1]) + "-def" + return vm_name + "-def" + + if len(vm_name) > 28: + vm_name = vm_name[0:27] + return vm_name + +def egress_chain_name(vm_name): + #iptables chain length max is 29 chars + name = chain_name(vm_name) + name = name+"-eg" + return name + +#chain name length is 14 because it has protocol and ports appends +def chain_name_ipset(vm_name): + if vm_name.startswith('i-') or vm_name.startswith('r-'): + if vm_name.endswith('untagged'): + return ''.join(vm_name.split('')[:-1]) + if len(vm_name) > 14: + vm_name = vm_name[0:13] + return vm_name + +def egress_chain_name_ipset(vm_name): + name = chain_name_ipset(vm_name) + "-e" + return name + +def ingress_chain_name_ipset(vm_name): + name = chain_name_ipset(vm_name) + return name + +@echo +def can_bridge_firewall(session, args): + try: + util.pread2(['ebtables', '-V']) + util.pread2(['ipset', '-V']) + cmd = ['cat', '/etc/xensource/network.conf'] + result = util.pread2(cmd) + if result.lower().strip() != "bridge": + return 'false' + + except: + return 'false' + + try: + util.pread2(['iptables', '-N', 'BRIDGE-FIREWALL']) + util.pread2(['iptables', '-I', 'BRIDGE-FIREWALL', '-m', 'state', '--state', 'RELATED,ESTABLISHED', '-j', 'ACCEPT']) + util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '67', '--sport', '68', '-j', 'ACCEPT']) + util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '68', '--sport', '67', '-j', 'ACCEPT']) + util.pread2(['iptables', '-D', 'FORWARD', '-j', 'RH-Firewall-1-INPUT']) + except: + logging.debug('Chain BRIDGE-FIREWALL already exists') + + try: + util.pread2(['iptables', '-N', 'BRIDGE-DEFAULT-FIREWALL']) + util.pread2(['iptables', '-A', 'BRIDGE-DEFAULT-FIREWALL', '-m', 'state', '--state', 'RELATED,ESTABLISHED', '-j', 'ACCEPT']) + util.pread2(['iptables', '-A', 'BRIDGE-DEFAULT-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '67', '--sport', '68', '-j', 'ACCEPT']) + util.pread2(['iptables', '-A', 'BRIDGE-DEFAULT-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '68', '--sport', '67', '-j', 'ACCEPT']) + util.pread2(['iptables', '-I', 
+
+@echo
+def can_bridge_firewall(session, args):
+    try:
+        util.pread2(['ebtables', '-V'])
+        util.pread2(['ipset', '-V'])
+        cmd = ['cat', '/etc/xensource/network.conf']
+        result = util.pread2(cmd)
+        if result.lower().strip() != "bridge":
+            return 'false'
+
+    except:
+        return 'false'
+
+    try:
+        util.pread2(['iptables', '-N', 'BRIDGE-FIREWALL'])
+        util.pread2(['iptables', '-I', 'BRIDGE-FIREWALL', '-m', 'state', '--state', 'RELATED,ESTABLISHED', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '67', '--sport', '68', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '68', '--sport', '67', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-D', 'FORWARD', '-j', 'RH-Firewall-1-INPUT'])
+    except:
+        logging.debug('Chain BRIDGE-FIREWALL already exists')
+
+    try:
+        util.pread2(['iptables', '-N', 'BRIDGE-DEFAULT-FIREWALL'])
+        util.pread2(['iptables', '-A', 'BRIDGE-DEFAULT-FIREWALL', '-m', 'state', '--state', 'RELATED,ESTABLISHED', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-A', 'BRIDGE-DEFAULT-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '67', '--sport', '68', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-A', 'BRIDGE-DEFAULT-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '68', '--sport', '67', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-I', 'BRIDGE-FIREWALL', '-j', 'BRIDGE-DEFAULT-FIREWALL'])
+        util.pread2(['iptables', '-D', 'BRIDGE-FIREWALL', '-m', 'state', '--state', 'RELATED,ESTABLISHED', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-D', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '67', '--sport', '68', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-D', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '-p', 'udp', '--dport', '68', '--sport', '67', '-j', 'ACCEPT'])
+    except:
+        logging.debug('Chain BRIDGE-DEFAULT-FIREWALL already exists')
+
+    result = 'true'
+    try:
+        util.pread2(['/bin/bash', '-c', 'iptables -n -L FORWARD | grep BRIDGE-FIREWALL'])
+    except:
+        try:
+            util.pread2(['iptables', '-I', 'FORWARD', '-m', 'physdev', '--physdev-is-bridged', '-j', 'BRIDGE-FIREWALL'])
+            util.pread2(['iptables', '-A', 'FORWARD', '-j', 'DROP'])
+        except:
+            return 'false'
+    default_ebtables_rules()
+    allow_egress_traffic(session)
+    if not os.path.exists('/var/run/cloud'):
+        os.makedirs('/var/run/cloud')
+    if not os.path.exists('/var/cache/cloud'):
+        os.makedirs('/var/cache/cloud')
+
+    cleanup_rules_for_dead_vms(session)
+    cleanup_rules(session, args)
+
+    return result
+
+@echo
+def default_ebtables_rules():
+    try:
+        util.pread2(['ebtables', '-N', 'DEFAULT_EBTABLES'])
+        util.pread2(['ebtables', '-A', 'FORWARD', '-j', 'DEFAULT_EBTABLES'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '--ip-dst', '255.255.255.255', '--ip-proto', 'udp', '--ip-dport', '67', '-j', 'ACCEPT'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '--ip-dst', '255.255.255.255', '--ip-proto', 'udp', '--ip-dport', '68', '-j', 'ACCEPT'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'ARP', '--arp-op', 'Request', '-j', 'ACCEPT'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'ARP', '--arp-op', 'Reply', '-j', 'ACCEPT'])
+        # deny mac broadcast and multicast
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '-d', 'Broadcast', '-j', 'DROP'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '-d', 'Multicast', '-j', 'DROP'])
+        # deny ip broadcast and multicast
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '--ip-dst', '255.255.255.255', '-j', 'DROP'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '--ip-dst', '224.0.0.0/4', '-j', 'DROP'])
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv4', '-j', 'RETURN'])
+        # deny ipv6
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', 'IPv6', '-j', 'DROP'])
+        # deny vlan
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-p', '802_1Q', '-j', 'DROP'])
+        # deny all others (e.g., 802.1d, CDP)
+        util.pread2(['ebtables', '-A', 'DEFAULT_EBTABLES', '-j', 'DROP'])
+    except:
+        logging.debug('Chain DEFAULT_EBTABLES already exists')
+
+
+@echo
+def allow_egress_traffic(session):
+    devs = []
+    for pif in session.xenapi.PIF.get_all():
+        pif_rec = session.xenapi.PIF.get_record(pif)
+        dev = pif_rec.get('device')
+        devs.append(dev + "+")
+    for d in devs:
+        try:
+            util.pread2(['/bin/bash', '-c', "iptables -n -L FORWARD | grep '%s '" % d])
+        except:
+            try:
+                util.pread2(['iptables', '-I', 'FORWARD', '2', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', d, '-j', 'ACCEPT'])
+            except:
+                logging.debug("Failed to add FORWARD rule through to %s" % d)
+                return 'false'
+    return 'true'
+
+def getIpsetType():
+    try:
+        out = util.pread2(['/bin/bash', '-c', "ipset -v | awk '{print $5}'"])
+        out = out.replace(".", "")
+        if int(out) < 6:
+            return 'iptreemap'
+        else:
+            return 'nethash'
+    except:
+        return 'iptreemap'
+
+def ipset(ipsetname, proto, start, end, cidrs):
+    type = getIpsetType()
+    try:
+        util.pread2(['ipset', '-N', ipsetname, type])
+    except:
+        logging.debug("ipset chain already exists: " + ipsetname)
+
+    result = True
+    ipsettmp = ''.join(''.join(ipsetname.split('-')).split('_')) + str(int(time.time()) % 1000)
+
+    try:
+        util.pread2(['ipset', '-N', ipsettmp, type])
+    except:
+        logging.debug("Failed to create temp ipset, reusing old name= " + ipsettmp)
+        try:
+            util.pread2(['ipset', '-F', ipsettmp])
+        except:
+            logging.debug("Failed to clear old temp ipset name=" + ipsettmp)
+            return False
+    try:
+        for cidr in cidrs:
+            try:
+                util.pread2(['ipset', '-A', ipsettmp, cidr])
+            except CommandException as cex:
+                logging.debug("ipset cidr add failed due to: " + str(cex.reason))
+                if cex.reason.rfind('already in set') == -1:
+                    raise
+    except:
+        logging.debug("Failed to program ipset " + ipsetname)
+        util.pread2(['ipset', '-F', ipsettmp])
+        util.pread2(['ipset', '-X', ipsettmp])
+        return False
+
+    try:
+        util.pread2(['ipset', '-W', ipsettmp, ipsetname])
+    except:
+        logging.debug("Failed to swap ipset, trying to delete and swap ipset: " + ipsetname)
+        # the old ipset entry could be of iphash type, try to delete and recreate
+        try:
+            util.pread2(['ipset', '-X', ipsetname])
+            util.pread2(['ipset', '-N', ipsetname, type])
+            util.pread2(['ipset', '-W', ipsettmp, ipsetname])
+        except:
+            logging.debug("Failed to swap ipset " + ipsetname)
+            result = False
+        logging.debug("Succeeded in re-initializing and swapping ipset")
+
+    try:
+        util.pread2(['ipset', '-F', ipsettmp])
+        util.pread2(['ipset', '-X', ipsettmp])
+    except:
+        # if the temporary name clashes next time we'll just reuse it
+        logging.debug("Failed to delete temp ipset " + ipsettmp)
+
+    return result
+
+@echo
+def destroy_network_rules_for_vm(session, args):
+    vm_name = args.pop('vmName')
+    vmchain = chain_name(vm_name)
+    vmchain_egress = egress_chain_name(vm_name)
+    vmchain_default = chain_name_def(vm_name)
+
+    delete_rules_for_vm_in_bridge_firewall_chain(vm_name)
+    if vm_name.startswith('i-') or vm_name.startswith('r-') or vm_name.startswith('l-'):
+        try:
+            util.pread2(['iptables', '-F', vmchain_default])
+            util.pread2(['iptables', '-X', vmchain_default])
+        except:
+            logging.debug("Ignoring failure to delete chain " + vmchain_default)
+
+    destroy_ebtables_rules(vmchain)
+    destroy_arptables_rules(vmchain)
+
+    try:
+        util.pread2(['iptables', '-F', vmchain])
+        util.pread2(['iptables', '-X', vmchain])
+    except:
+        logging.debug("Ignoring failure to delete ingress chain " + vmchain)
+
+    try:
+        util.pread2(['iptables', '-F', vmchain_egress])
+        util.pread2(['iptables', '-X', vmchain_egress])
+    except:
+        logging.debug("Ignoring failure to delete egress chain " + vmchain_egress)
+
+    remove_rule_log_for_vm(vm_name)
+    remove_secip_log_for_vm(vm_name)
+
+    if 1 in [ vm_name.startswith(c) for c in ['r-', 's-', 'v-', 'l-'] ]:
+        return 'true'
+
+    try:
+        setscmd = "ipset --save | grep '%s' | grep -e '^-N' -e '^create' | awk '{print $2}'" % vmchain
+        ipset_names = filter(None, util.pread2(['/bin/bash', '-c', setscmd]).split('\n'))
+        for ipset_name in ipset_names:
+            if not ipset_name:
+                continue
+            util.pread2(['ipset', '-F', ipset_name])
+            util.pread2(['ipset', '-X', ipset_name])
+    except:
+        logging.debug("Failed to destroy ipsets for %s" % vm_name)
+
+    return 'true'
+@echo
+def destroy_ebtables_rules(vm_chain):
+    delcmd = "ebtables-save | grep '%s' | sed 's/-A/-D/'" % vm_chain
+    delcmds = util.pread2(['/bin/bash', '-c', delcmd]).split('\n')
+    for cmd in filter(None, delcmds):
+        try:
+            dc = 'ebtables ' + cmd
+            util.pread2(list(filter(None, dc.split(' '))))
+        except:
+            logging.debug("Ignoring failure to delete ebtables rules for vm " + vm_chain)
+    try:
+        util.pread2(['ebtables', '-F', vm_chain])
+        util.pread2(['ebtables', '-X', vm_chain])
+    except:
+        logging.debug("Ignoring failure to delete ebtables chain for vm " + vm_chain)
+
+@echo
+def destroy_arptables_rules(vm_chain):
+    delcmd = "arptables -vL FORWARD | grep '%s' | sed 's/-i any//' | sed 's/-o any//' | awk '{print $1,$2,$3,$4}' " % vm_chain
+    delcmds = util.pread2(['/bin/bash', '-c', delcmd]).split('\n')
+    for cmd in filter(None, delcmds):
+        try:
+            dc = 'arptables -D FORWARD ' + cmd
+            util.pread2(list(filter(None, dc.split(' '))))
+        except:
+            logging.debug("Ignoring failure to delete arptables rules for vm " + vm_chain)
+
+    try:
+        util.pread2(['arptables', '-F', vm_chain])
+        util.pread2(['arptables', '-X', vm_chain])
+    except:
+        logging.debug("Ignoring failure to delete arptables chain for vm " + vm_chain)
+
+@echo
+def default_ebtables_antispoof_rules(vm_chain, vifs, vm_ip, vm_mac):
+    if vm_mac == 'ff:ff:ff:ff:ff:ff':
+        logging.debug("Ignoring since mac address is not valid")
+        return 'true'
+
+    try:
+        util.pread2(['ebtables', '-N', vm_chain])
+    except:
+        try:
+            util.pread2(['ebtables', '-F', vm_chain])
+        except:
+            logging.debug("Failed to create ebtables antispoof chain, skipping")
+            return 'true'
+
+    # note all rules for packets into the bridge (-i) precede all output rules (-o)
+    # always start after the first rule in the FORWARD chain that jumps to DEFAULT_EBTABLES chain
+    try:
+        for vif in vifs:
+            util.pread2(['ebtables', '-I', 'FORWARD', '2', '-i', vif, '-j', vm_chain])
+            util.pread2(['ebtables', '-A', 'FORWARD', '-o', vif, '-j', vm_chain])
+    except:
+        logging.debug("Failed to program default ebtables FORWARD rules for %s" % vm_chain)
+        return 'false'
+
+    try:
+        for vif in vifs:
+            # only allow source mac that belongs to the vm
+            try:
+                util.pread2(['ebtables', '-t', 'nat', '-I', 'PREROUTING', '-i', vif, '-s', '!'
, vm_mac, '-j', 'DROP']) + except: + util.pread2(['ebtables', '-A', vm_chain, '-i', vif, '-s', '!', vm_mac, '-j', 'DROP']) + + # do not allow fake dhcp responses + util.pread2(['ebtables', '-A', vm_chain, '-i', vif, '-p', 'IPv4', '--ip-proto', 'udp', '--ip-dport', '68', '-j', 'DROP']) + # do not allow snooping of dhcp requests + util.pread2(['ebtables', '-A', vm_chain, '-o', vif, '-p', 'IPv4', '--ip-proto', 'udp', '--ip-dport', '67', '-j', 'DROP']) + except: + logging.debug("Failed to program default ebtables antispoof rules for %s" % vm_chain) + return 'false' + + return 'true' + +@echo +def default_arp_antispoof(vm_chain, vifs, vm_ip, vm_mac): + if vm_mac == 'ff:ff:ff:ff:ff:ff': + logging.debug("Ignoring since mac address is not valid") + return 'true' + + try: + util.pread2(['arptables', '-N', vm_chain]) + except: + try: + util.pread2(['arptables', '-F', vm_chain]) + except: + logging.debug("Failed to create arptables rule, skipping") + return 'true' + + # note all rules for packets into the bridge (-i) precede all output rules (-o) + try: + for vif in vifs: + util.pread2(['arptables', '-I', 'FORWARD', '-i', vif, '-j', vm_chain]) + util.pread2(['arptables', '-A', 'FORWARD', '-o', vif, '-j', vm_chain]) + except: + logging.debug("Failed to program default arptables rules in FORWARD chain vm=" + vm_chain) + return 'false' + + try: + for vif in vifs: + #accept arp replies into the bridge as long as the source mac and ips match the vm + util.pread2(['arptables', '-A', vm_chain, '-i', vif, '--opcode', 'Reply', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'ACCEPT']) + #accept any arp requests from this vm. In the future this can be restricted to deny attacks on hosts + #also important to restrict source ip and src mac in these requests as they can be used to update arp tables on destination + util.pread2(['arptables', '-A', vm_chain, '-i', vif, '--opcode', 'Request', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'RETURN']) + #accept any arp requests to this vm as long as the request is for this vm's ip + util.pread2(['arptables', '-A', vm_chain, '-o', vif, '--opcode', 'Request', '--destination-ip', vm_ip, '-j', 'ACCEPT']) + #accept any arp replies to this vm as long as the mac and ip matches + util.pread2(['arptables', '-A', vm_chain, '-o', vif, '--opcode', 'Reply', '--destination-mac', vm_mac, '--destination-ip', vm_ip, '-j', 'ACCEPT']) + util.pread2(['arptables', '-A', vm_chain, '-j', 'DROP']) + + except: + logging.debug("Failed to program default arptables rules") + return 'false' + + return 'true' + + +@echo +def network_rules_vmSecondaryIp(session, args): + vm_name = args.pop('vmName') + vm_mac = args.pop('vmMac') + ip_secondary = args.pop('vmSecIp') + action = args.pop('action') + logging.debug("vmMac = "+ vm_mac) + logging.debug("vmName = "+ vm_name) + #action = "-A" + logging.debug("action = "+ action) + try: + vm = session.xenapi.VM.get_by_name_label(vm_name) + if len(vm) != 1: + return 'false' + vm_rec = session.xenapi.VM.get_record(vm[0]) + vm_vifs = vm_rec.get('VIFs') + vifnums = [session.xenapi.VIF.get_record(vif).get('device') for vif in vm_vifs] + domid = vm_rec.get('domid') + except: + logging.debug("### Failed to get domid or vif list for vm ##" + vm_name) + return 'false' + + if domid == '-1': + logging.debug("### Failed to get domid for vm (-1): " + vm_name) + return 'false' + + vifs = ["vif" + domid + "." 
+ v for v in vifnums] + #vm_name = '-'.join(vm_name.split('-')[:-1]) + vmchain = chain_name(vm_name) + add_to_ipset(vmchain, [ip_secondary], action) + + #add arptables rules for the secondary ip + arp_rules_vmip(vmchain, vifs, [ip_secondary], vm_mac, action) + + return 'true' + +@echo +def default_network_rules_systemvm(session, args): + try: + util.pread2(['/bin/bash', '-c', 'iptables -n -L FORWARD | grep BRIDGE-FIREWALL']) + except: + can_bridge_firewall(session, args) + + vm_name = args.pop('vmName') + try: + vm = session.xenapi.VM.get_by_name_label(vm_name) + if len(vm) != 1: + return 'false' + vm_rec = session.xenapi.VM.get_record(vm[0]) + vm_vifs = vm_rec.get('VIFs') + vifnums = [session.xenapi.VIF.get_record(vif).get('device') for vif in vm_vifs] + domid = vm_rec.get('domid') + except: + logging.debug("### Failed to get domid or vif list for vm ##" + vm_name) + return 'false' + + if domid == '-1': + logging.debug("### Failed to get domid for vm (-1): " + vm_name) + return 'false' + + vifs = ["vif" + domid + "." + v for v in vifnums] + #vm_name = '-'.join(vm_name.split('-')[:-1]) + vmchain = chain_name(vm_name) + + + delete_rules_for_vm_in_bridge_firewall_chain(vm_name) + + try: + util.pread2(['iptables', '-N', vmchain]) + except: + util.pread2(['iptables', '-F', vmchain]) + + for vif in vifs: + try: + util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', vif, '-j', vmchain]) + util.pread2(['iptables', '-I', 'BRIDGE-FIREWALL', '2', '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', vif, '-j', vmchain]) + util.pread2(['iptables', '-I', vmchain, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', vif, '-j', 'RETURN']) + except: + logging.debug("Failed to program default rules") + return 'false' + + util.pread2(['iptables', '-A', vmchain, '-j', 'ACCEPT']) + + if write_rule_log_for_vm(vm_name, '-1', '_ignore_', domid, '_initial_', '-1') == False: + logging.debug("Failed to log default network rules for systemvm, ignoring") + return 'true' + +@echo +def create_ipset_forvm (ipsetname): + result = True + type = getIpsetType() + try: + logging.debug("Creating ipset chain .... " + ipsetname) + util.pread2(['ipset', '-F', ipsetname]) + util.pread2(['ipset', '-X', ipsetname]) + util.pread2(['ipset', '-N', ipsetname, type]) + except: + logging.debug("ipset chain not exists creating.... 
" + ipsetname) + util.pread2(['ipset', '-N', ipsetname, type]) + + return result + +@echo +def add_to_ipset(ipsetname, ips, action): + result = True + for ip in ips: + try: + logging.debug("vm ip " + ip) + util.pread2(['ipset', action, ipsetname, ip]) + except: + logging.debug("vm ip already in ip set" + ip) + continue + + return result + +@echo +def arp_rules_vmip (vm_chain, vifs, ips, vm_mac, action): + try: + if action == "-A": + action = "-I" + for vif in vifs: + for vm_ip in ips: + #accept any arp requests to this vm as long as the request is for this vm's ip + util.pread2(['arptables', action, vm_chain, '-o', vif, '--opcode', 'Request', '--destination-ip', vm_ip, '-j', 'ACCEPT']) + #accept any arp replies to this vm as long as the mac and ip matches + util.pread2(['arptables', action, vm_chain, '-o', vif, '--opcode', 'Reply', '--destination-mac', vm_mac, '--destination-ip', vm_ip, '-j', 'ACCEPT']) + #accept arp replies into the bridge as long as the source mac and ips match the vm + util.pread2(['arptables', action, vm_chain, '-i', vif, '--opcode', 'Reply', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'ACCEPT']) + #accept any arp requests from this vm. In the future this can be restricted to deny attacks on hosts + #also important to restrict source ip and src mac in these requests as they can be used to update arp tables on destination + util.pread2(['arptables', action, vm_chain, '-i', vif, '--opcode', 'Request', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'RETURN']) + except: + logging.debug("Failed to program arptables rules for ip") + return 'false' + + return 'true' + + +@echo +def default_network_rules(session, args): + vm_name = args.pop('vmName') + vm_ip = args.pop('vmIP') + vm_id = args.pop('vmID') + vm_mac = args.pop('vmMAC') + sec_ips = args.pop("secIps") + action = "-A" + + try: + vm = session.xenapi.VM.get_by_name_label(vm_name) + if len(vm) != 1: + logging.debug("### Failed to get record for vm " + vm_name) + return 'false' + vm_rec = session.xenapi.VM.get_record(vm[0]) + domid = vm_rec.get('domid') + except: + logging.debug("### Failed to get domid for vm " + vm_name) + return 'false' + if domid == '-1': + logging.debug("### Failed to get domid for vm (-1): " + vm_name) + return 'false' + + vif = "vif" + domid + ".0" + tap = "tap" + domid + ".0" + vifs = [vif] + try: + util.pread2(['ifconfig', tap]) + vifs.append(tap) + except: + pass + + delete_rules_for_vm_in_bridge_firewall_chain(vm_name) + + + vmchain = chain_name(vm_name) + vmchain_egress = egress_chain_name(vm_name) + vmchain_default = chain_name_def(vm_name) + + destroy_ebtables_rules(vmchain) + + try: + util.pread2(['iptables', '-N', vmchain]) + except: + util.pread2(['iptables', '-F', vmchain]) + + try: + util.pread2(['iptables', '-N', vmchain_egress]) + except: + util.pread2(['iptables', '-F', vmchain_egress]) + + try: + util.pread2(['iptables', '-N', vmchain_default]) + except: + util.pread2(['iptables', '-F', vmchain_default]) + + vmipset = vm_name + if len(vmipset) > 28: + vmipset = vmipset[0:27] + #create ipset and add vm ips to that ip set + if create_ipset_forvm(vmipset) == False: + logging.debug(" failed to create ipset for rule " + str(tokens)) + return 'false' + + #add primary nic ip to ipset + if add_to_ipset(vmipset, [vm_ip], action ) == False: + logging.debug(" failed to add vm " + vm_ip + " ip to set ") + return 'false' + + #add secondary nic ips to ipset + secIpSet = "1" + ips = sec_ips.split(';') + ips.pop() + if ips[0] == "0": + secIpSet = "0"; + + if secIpSet == "1": + 
logging.debug("Adding ipset for secondary ips") + add_to_ipset(vmipset, ips, action) + if write_secip_log_for_vm(vm_name, sec_ips, vm_id) == False: + logging.debug("Failed to log default network rules, ignoring") + + keyword = '--' + get_ipset_keyword() + + try: + for v in vifs: + util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-j', vmchain_default]) + util.pread2(['iptables', '-I', 'BRIDGE-FIREWALL', '2', '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-j', vmchain_default]) + + #don't let vm spoof its ip address + for v in vifs: + #util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '--source', vm_ip,'-p', 'udp', '--dport', '53', '-j', 'RETURN']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-m', 'set', keyword, vmipset, 'src', '-p', 'udp', '--dport', '53', '-j', 'RETURN']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-m', 'set', '!', keyword, vmipset, 'src', '-j', 'DROP']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-m', 'set', '!', keyword, vmipset, 'dst', '-j', 'DROP']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-m', 'set', keyword, vmipset, 'src', '-j', vmchain_egress]) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-j', vmchain]) + except: + logging.debug("Failed to program default rules for vm " + vm_name) + return 'false' + + default_arp_antispoof(vmchain, vifs, vm_ip, vm_mac) + #add default arp rules for secondary ips; + if secIpSet == "1": + logging.debug("Adding arp rules for sec ip") + arp_rules_vmip(vmchain, vifs, ips, vm_mac, action) + + default_ebtables_antispoof_rules(vmchain, vifs, vm_ip, vm_mac) + + if write_rule_log_for_vm(vm_name, vm_id, vm_ip, domid, '_initial_', '-1', vm_mac) == False: + logging.debug("Failed to log default network rules, ignoring") + + logging.debug("Programmed default rules for vm " + vm_name) + return 'true' + +@echo +def check_domid_changed(session, vmName): + curr_domid = '-1' + try: + vm = session.xenapi.VM.get_by_name_label(vmName) + if len(vm) != 1: + logging.debug("### Could not get record for vm ## " + vmName) + else: + vm_rec = session.xenapi.VM.get_record(vm[0]) + curr_domid = vm_rec.get('domid') + except: + logging.debug("### Failed to get domid for vm ## " + vmName) + + + logfilename = "/var/run/cloud/" + vmName +".log" + if not os.path.exists(logfilename): + return ['-1', curr_domid] + + lines = (line.rstrip() for line in open(logfilename)) + + [_vmName,_vmID,_vmIP,old_domid,_signature,_seqno, _vmMac] = ['_', '-1', '_', '-1', '_', '-1', 'ff:ff:ff:ff:ff:ff'] + for line in lines: + try: + [_vmName,_vmID,_vmIP,old_domid,_signature,_seqno,_vmMac] = line.split(',') + except ValueError as v: + [_vmName,_vmID,_vmIP,old_domid,_signature,_seqno] = line.split(',') + break + + return [curr_domid, old_domid] + +@echo +def delete_rules_for_vm_in_bridge_firewall_chain(vmName): + vm_name = vmName + vmchain = chain_name_def(vm_name) + + delcmd = "iptables-save | grep '\-A BRIDGE-FIREWALL' | grep '%s' | sed 's/-A/-D/'" % vmchain + delcmds = util.pread2(['/bin/bash', '-c', delcmd]).split('\n') + for cmd in filter(None, delcmds): + try: + dc = 'iptables ' + cmd + util.pread2(filter(None, dc.split(' 
'))) + except: + logging.debug("Ignoring failure to delete rules for vm " + vmName) + +@echo +def network_rules_for_rebooted_vm(session, vmName): + vm_name = vmName + [curr_domid, old_domid] = check_domid_changed(session, vm_name) + + if curr_domid == old_domid: + return True + + if old_domid == '-1': + return True + + if curr_domid == '-1': + return True + + logging.debug("Found a rebooted VM -- reprogramming rules for " + vm_name) + + delete_rules_for_vm_in_bridge_firewall_chain(vm_name) + if 1 in [ vm_name.startswith(c) for c in ['r-', 's-', 'v-', 'l-'] ]: + default_network_rules_systemvm(session, {"vmName":vm_name}) + return True + + vif = "vif" + curr_domid + ".0" + tap = "tap" + curr_domid + ".0" + vifs = [vif] + try: + util.pread2(['ifconfig', tap]) + vifs.append(tap) + except: + pass + vmchain = chain_name(vm_name) + vmchain_default = chain_name_def(vm_name) + + for v in vifs: + util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-j', vmchain_default]) + util.pread2(['iptables', '-I', 'BRIDGE-FIREWALL', '2', '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-j', vmchain_default]) + + #change antispoof rule in vmchain + try: + delcmd = "iptables-save | grep '\-A " + vmchain_default + "' | grep physdev-in | sed 's/!--set/! --set/' | sed 's/-A/-D/'" + delcmd2 = "iptables-save | grep '\-A " + vmchain_default + "' | grep physdev-out | sed 's/!--set/! --set/'| sed 's/-A/-D/'" + inscmd = "iptables-save | grep '\-A " + vmchain_default + "' | grep physdev-in | grep vif | sed -r 's/vif[0-9]+.0/" + vif + "/' | sed 's/!--set/! --set/'" + inscmd2 = "iptables-save| grep '\-A " + vmchain_default + "' | grep physdev-in | grep tap | sed -r 's/tap[0-9]+.0/" + tap + "/' | sed 's/!--set/! --set/'" + inscmd3 = "iptables-save | grep '\-A " + vmchain_default + "' | grep physdev-out | grep vif | sed -r 's/vif[0-9]+.0/" + vif + "/' | sed 's/!--set/! --set/'" + inscmd4 = "iptables-save| grep '\-A " + vmchain_default + "' | grep physdev-out | grep tap | sed -r 's/tap[0-9]+.0/" + tap + "/' | sed 's/!--set/! 
--set/'" + + ipts = [] + for cmd in [delcmd, delcmd2, inscmd, inscmd2, inscmd3, inscmd4]: + cmds = util.pread2(['/bin/bash', '-c', cmd]).split('\n') + cmds.pop() + for c in filter(None,cmds): + ipt = c.split(' ') + ipt.insert(0, 'iptables') + ipts.append(ipt) + + for ipt in ipts: + try: + util.pread2(filter(None,ipt)) + except: + logging.debug("Failed to rewrite antispoofing rules for vm " + vm_name) + except: + logging.debug("No rules found for vm " + vm_name) + + destroy_ebtables_rules(vmchain) + destroy_arptables_rules(vmchain) + [vm_ip, vm_mac] = get_vm_mac_ip_from_log(vmchain) + default_arp_antispoof(vmchain, vifs, vm_ip, vm_mac) + + #check whether the vm has secondary ips + if is_secondary_ips_set(vm_name) == True: + vmips = get_vm_sec_ips(vm_name) + #add arp rules for the secondaryp ip + for ip in vmips: + arp_rules_vmip(vmchain, vifs, [ip], vm_mac, "-A") + + + default_ebtables_antispoof_rules(vmchain, vifs, vm_ip, vm_mac) + rewrite_rule_log_for_vm(vm_name, curr_domid) + return True + + + +@echo +def get_vm_sec_ips(vm_name): + logfilename = "/var/run/cloud/" + vm_name +".ip" + + lines = (line.rstrip() for line in open(logfilename)) + for line in lines: + try: + [_vmName,_vmIP,_vmID] = line.split(',') + break + except ValueError as v: + [_vmName,_vmIP,_vmID] = line.split(',') + + _vmIPS = _vmIP.split(":")[:-1] + return _vmIPS + +@echo +def is_secondary_ips_set(vm_name): + logfilename = "/var/run/cloud/" + vm_name +".ip" + if not os.path.exists(logfilename): + return False + + return True + +@echo +def rewrite_rule_log_for_vm(vm_name, new_domid): + logfilename = "/var/run/cloud/" + vm_name +".log" + if not os.path.exists(logfilename): + return + lines = (line.rstrip() for line in open(logfilename)) + + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = ['_', '-1', '_', '-1', '_', '-1','ff:ff:ff:ff:ff:ff'] + for line in lines: + try: + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = line.split(',') + break + except ValueError as v: + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno] = line.split(',') + + write_rule_log_for_vm(_vmName, _vmID, _vmIP, new_domid, _signature, '-1', _vmMac) + +def get_rule_log_for_vm(session, vmName): + vm_name = vmName; + logfilename = "/var/run/cloud/" + vm_name +".log" + if not os.path.exists(logfilename): + return '' + + lines = (line.rstrip() for line in open(logfilename)) + + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = ['_', '-1', '_', '-1', '_', '-1', 'ff:ff:ff:ff:ff:ff'] + for line in lines: + try: + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = line.split(',') + break + except ValueError as v: + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno] = line.split(',') + + return ','.join([_vmName, _vmID, _vmIP, _domID, _signature, _seqno]) + +@echo +def get_vm_mac_ip_from_log(vm_name): + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = ['_', '-1', '0.0.0.0', '-1', '_', '-1','ff:ff:ff:ff:ff:ff'] + logfilename = "/var/run/cloud/" + vm_name +".log" + if not os.path.exists(logfilename): + return ['_', '_'] + + lines = (line.rstrip() for line in open(logfilename)) + for line in lines: + try: + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = line.split(',') + break + except ValueError as v: + [_vmName,_vmID,_vmIP,_domID,_signature,_seqno] = line.split(',') + + return [ _vmIP, _vmMac] + +@echo +def get_rule_logs_for_vms(session, args): + host_uuid = args.pop('host_uuid') + try: + thishost = session.xenapi.host.get_by_uuid(host_uuid) + hostrec = session.xenapi.host.get_record(thishost) + vms = 
hostrec.get('resident_VMs')
+    except:
+        logging.debug("Failed to get host from uuid " + host_uuid)
+        return ' '
+
+    result = []
+    try:
+        for name in [session.xenapi.VM.get_name_label(x) for x in vms]:
+            if 1 not in [ name.startswith(c) for c in ['r-', 's-', 'v-', 'i-', 'l-'] ]:
+                continue
+            network_rules_for_rebooted_vm(session, name)
+            if name.startswith('i-'):
+                log = get_rule_log_for_vm(session, name)
+                result.append(log)
+    except:
+        logging.debug("Failed to get rule logs, better luck next time!")
+
+    return ";".join(result)
+
+@echo
+def cleanup_rules_for_dead_vms(session):
+    try:
+        vms = session.xenapi.VM.get_all()
+        cleaned = 0
+        for vm_name in [session.xenapi.VM.get_name_label(x) for x in vms]:
+            if 1 in [ vm_name.startswith(c) for c in ['r-', 'i-', 's-', 'v-', 'l-'] ]:
+                vm = session.xenapi.VM.get_by_name_label(vm_name)
+                if len(vm) != 1:
+                    continue
+                vm_rec = session.xenapi.VM.get_record(vm[0])
+                state = vm_rec.get('power_state')
+                if state != 'Running' and state != 'Paused':
+                    logging.debug("vm " + vm_name + " is not running, cleaning up")
+                    destroy_network_rules_for_vm(session, {'vmName':vm_name})
+                    cleaned = cleaned + 1
+
+        logging.debug("Cleaned up rules for " + str(cleaned) + " vms")
+    except:
+        logging.debug("Failed to cleanup rules for dead vms!")
+
+
+@echo
+def cleanup_rules(session, args):
+    instance = args.get('instance')
+    if not instance:
+        instance = 'VM'
+    resident_vms = []
+    try:
+        hostname = util.pread2(['/bin/bash', '-c', 'hostname']).split('\n')
+        if len(hostname) < 1:
+            raise Exception('Could not find hostname of this host')
+        thishost = session.xenapi.host.get_by_name_label(hostname[0])
+        if len(thishost) < 1:
+            raise Exception("Could not find host record from hostname %s of this host" % hostname[0])
+        hostrec = session.xenapi.host.get_record(thishost[0])
+        vms = hostrec.get('resident_VMs')
+        resident_vms = [session.xenapi.VM.get_name_label(x) for x in vms]
+        util.SMlog('cleanup_rules: resident_vms= %s' % resident_vms)
+        util.SMlog('cleanup_rules: found %s resident vms on this host %s' % (len(resident_vms)-1, hostname[0]))
+
+        chainscmd = "iptables-save | grep '^:' | awk '{print $1}' | cut -d':' -f2 | sed 's/-def//'| sed 's/-eg//' | sort|uniq"
+        chains = util.pread2(['/bin/bash', '-c', chainscmd]).split('\n')
+        vmchains = [ch for ch in chains if 1 in [ ch.startswith(c) for c in ['r-', 'i-', 's-', 'v-', 'l-']]]
+        util.SMlog('cleanup_rules: vmchains= %s' % vmchains)
+        util.SMlog('cleanup_rules: found %s iptables chains for vms on this host %s' % (len(vmchains), hostname[0]))
+        cleaned = 0
+        cleanup = []
+        for chain in vmchains:
+            vmname = chain
+            vmpresent = False
+            # Looping intentionally instead of a plain membership test:
+            # vm names that exceed the iptables chain-name length limit are
+            # trimmed, so the chain name may only be a substring of the
+            # resident vm's name.
+            for rvm in resident_vms:
+                if vmname in rvm:
+                    vmpresent = True
+                    break
+
+            if vmpresent is False:
+                vmname = chain + "-untagged"
+                for rvm in resident_vms:
+                    if vmname in rvm:
+                        vmpresent = True
+                        break
+            # vm chain is present but the vm is not running on this host, so remove the rules
+            if vmpresent is False:
+                logging.debug("vm " + chain + " is not running on this host, cleaning up")
+                cleanup.append(chain)
+
+        for vm_name in cleanup:
+            destroy_network_rules_for_vm(session, {'vmName':vm_name})
+
+        logging.debug("Cleaned up rules for " + str(len(cleanup)) + " chains")
+        return str(len(cleanup))
+    except Exception as ex:
+        logging.debug("Failed to cleanup rules, reason= " + str(ex))
+        return '-1'
+
+@echo
+def check_rule_log_for_vm(vmName, vmID, vmIP, domID, signature, seqno):
+    vm_name = vmName
+    logfilename = "/var/run/cloud/" + vm_name + ".log"
+    if not os.path.exists(logfilename):
+        logging.debug("Failed to find logfile %s" % logfilename)
+        return [True, True, True]
+
+    lines = (line.rstrip() for line in open(logfilename))
+
+    [_vmName,_vmID,_vmIP,_domID,_signature,_seqno,_vmMac] = ['_', '-1', '_', '-1', '_', '-1', 'ff:ff:ff:ff:ff:ff']
+    try:
+        for line in lines:
+            try:
+                [_vmName,_vmID,_vmIP,_domID,_signature,_seqno, _vmMac] = line.split(',')
+            except ValueError as v:
+                [_vmName,_vmID,_vmIP,_domID,_signature,_seqno] = line.split(',')
+            break
+    except:
+        logging.debug("Failed to parse log file for vm " + vmName)
+        remove_rule_log_for_vm(vmName)
+        return [True, True, True]
+
+    reprogramDefault = False
+    if (domID != _domID) or (vmID != _vmID) or (vmIP != _vmIP):
+        logging.debug("Change in default info set of vm %s" % vmName)
+        return [True, True, True]
+    else:
+        logging.debug("No change in default info set of vm %s" % vmName)
+
+    reprogramChain = False
+    rewriteLog = True
+    if (int(seqno) > int(_seqno)):
+        if (_signature != signature):
+            reprogramChain = True
+            logging.debug("Seqno increased from %s to %s: reprogramming " \
+                          "ingress rules for vm %s" % (_seqno, seqno, vmName))
+        else:
+            logging.debug("Seqno increased from %s to %s: but no change " \
+                          "in signature for vm: skip programming ingress " \
+                          "rules %s" % (_seqno, seqno, vmName))
+    elif (int(seqno) < int(_seqno)):
+        logging.debug("Seqno decreased from %s to %s: ignoring these " \
+                      "ingress rules for vm %s" % (_seqno, seqno, vmName))
+        rewriteLog = False
+    elif (signature != _signature):
+        logging.debug("Seqno %s stayed the same but signature changed from " \
+                      "%s to %s for vm %s" % (seqno, _signature, signature, vmName))
+        rewriteLog = True
+        reprogramChain = True
+    else:
+        logging.debug("Seqno and signature stayed the same: %s : ignoring these " \
+                      "ingress rules for vm %s" % (seqno, vmName))
+        rewriteLog = False
+
+    return [reprogramDefault, reprogramChain, rewriteLog]
+
+@echo
+def write_secip_log_for_vm(vmName, secIps, vmId):
+    vm_name = vmName
+    logfilename = "/var/run/cloud/" + vm_name + ".ip"
+    logging.debug("Writing log to " + logfilename)
+    logf = open(logfilename, 'w')
+    output = ','.join([vmName, secIps, vmId])
+    result = True
+
+    try:
+        logf.write(output)
+        logf.write('\n')
+    except:
+        logging.debug("Failed to write to rule log file " + logfilename)
+        result = False
+
+    logf.close()
+
+    return result
+
+@echo
+def remove_secip_log_for_vm(vmName):
+    vm_name = vmName
+    logfilename = "/var/run/cloud/" + vm_name + ".ip"
+
+    result = True
+    try:
+        os.remove(logfilename)
+    except:
+        logging.debug("Failed to delete rule log file " + logfilename)
+        result = False
+
+    return result
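+# Decision summary for check_rule_log_for_vm above, comparing the incoming
+# seqno/signature against the values last logged for the vm (a restatement of
+# the branches, not new behavior):
+#   seqno >  logged, signature != logged  -> reprogram ingress chain, rewrite log
+#   seqno >  logged, signature == logged  -> skip programming, rewrite log
+#   seqno <  logged                       -> ignore the rules, keep old log
+#   seqno == logged, signature != logged  -> reprogram ingress chain, rewrite log
+#   seqno == logged, signature == logged  -> no-op
+# Any change in vmID, vmIP or domID short-circuits to reprogramming everything.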
+@echo
+def write_rule_log_for_vm(vmName, vmID, vmIP, domID, signature, seqno, vmMac='ff:ff:ff:ff:ff:ff'):
+    vm_name = vmName
+    logfilename = "/var/run/cloud/" + vm_name + ".log"
+    logging.debug("Writing log to " + logfilename)
+    logf = open(logfilename, 'w')
+    output = ','.join([vmName, vmID, vmIP, domID, signature, seqno, vmMac])
+    result = True
+    try:
+        logf.write(output)
+        logf.write('\n')
+    except:
+        logging.debug("Failed to write to rule log file " + logfilename)
+        result = False
+
+    logf.close()
+
+    return result
+
+@echo
+def remove_rule_log_for_vm(vmName):
+    vm_name = vmName
+    logfilename = "/var/run/cloud/" + vm_name + ".log"
+
+    result = True
+    try:
+        os.remove(logfilename)
+    except:
+        logging.debug("Failed to delete rule log file " + logfilename)
+        result = False
+
+    return result
+
+@echo
+def inflate_rules(zipped):
+    return zlib.decompress(base64.b64decode(zipped)).decode()
+
+@echo
+def cache_ipset_keyword():
+    type = getIpsetType()
+    tmpname = 'ipsetqzvxtmp'
+    try:
+        util.pread2(['ipset', '-N', tmpname, type])
+    except:
+        util.pread2(['ipset', '-F', tmpname])
+
+    try:
+        util.pread2(['iptables', '-A', 'INPUT', '-m', 'set', '--set', tmpname, 'src', '-j', 'ACCEPT'])
+        util.pread2(['iptables', '-D', 'INPUT', '-m', 'set', '--set', tmpname, 'src', '-j', 'ACCEPT'])
+        keyword = 'set'
+    except:
+        keyword = 'match-set'
+
+    try:
+        util.pread2(['ipset', '-X', tmpname])
+    except:
+        pass
+
+    cachefile = "/var/cache/cloud/ipset.keyword"
+    logging.debug("Writing ipset keyword to " + cachefile)
+    cachef = open(cachefile, 'w')
+    try:
+        cachef.write(keyword)
+        cachef.write('\n')
+    except:
+        logging.debug("Failed to write to cache file " + cachefile)
+
+    cachef.close()
+    return keyword
+
+@echo
+def get_ipset_keyword():
+    cachefile = "/var/cache/cloud/ipset.keyword"
+    keyword = 'match-set'
+
+    if not os.path.exists(cachefile):
+        logging.debug("Failed to find ipset keyword cachefile %s" % cachefile)
+        keyword = cache_ipset_keyword()
+    else:
+        lines = (line.rstrip() for line in open(cachefile))
+        for line in lines:
+            keyword = line
+            break
+
+    return keyword
+
+@echo
+def network_rules(session, args):
+    try:
+        vm_name = args.get('vmName')
+        vm_ip = args.get('vmIP')
+        vm_id = args.get('vmID')
+        vm_mac = args.get('vmMAC')
+        signature = args.pop('signature')
+        seqno = args.pop('seqno')
+        sec_ips = args.get("secIps")
+        deflated = 'false'
+
+        try:
+            util.pread2(['/bin/bash', '-c', 'iptables -n -L FORWARD | grep BRIDGE-FIREWALL'])
+        except:
+            can_bridge_firewall(session, args)
+
+        if 'deflated' in args:
+            deflated = args.pop('deflated')
+
+        try:
+            vm = session.xenapi.VM.get_by_name_label(vm_name)
+            if len(vm) != 1:
+                logging.debug("### Could not get record for vm ## " + vm_name)
+                return 'false'
+            vm_rec = session.xenapi.VM.get_record(vm[0])
+            domid = vm_rec.get('domid')
+        except:
+            logging.debug("### Failed to get domid for vm ## " + vm_name)
+            return 'false'
+        if domid == '-1':
+            logging.debug("### Failed to get domid for vm (-1): " + vm_name)
+            return 'false'
+
+        vif = "vif" + domid + ".0"
+        tap = "tap" + domid + ".0"
+        vifs = [vif]
+        try:
+            util.pread2(['ifconfig', tap])
+            vifs.append(tap)
+        except:
+            pass
+
+        reason = 'seqno_change_or_sig_change'
+        [reprogramDefault, reprogramChain, rewriteLog] = \
+            check_rule_log_for_vm(vm_name, vm_id, vm_ip, domid, signature, seqno)
+
+        if not reprogramDefault and not reprogramChain:
+            logging.debug("No changes detected between current state and received state")
+            reason = 'seqno_same_sig_same'
+            if rewriteLog:
+                reason = 'seqno_increased_sig_same'
+                write_rule_log_for_vm(vm_name, vm_id, vm_ip, domid, signature, seqno, vm_mac)
+            logging.debug("Programming network rules for vm %s seqno=%s signature=%s guestIp=%s," \
+                          " do nothing, reason=%s" % (vm_name, seqno, signature, vm_ip, reason))
+            return 'true'
+
+        if not reprogramChain:
+            logging.debug("### Not programming any ingress rules since no changes detected")
+            return 'true'
+
+        if reprogramDefault:
+            logging.debug("Change detected in vmId or vmIp or domId, resetting default rules")
+            default_network_rules(session, args)
+            reason = 'domid_change'
+
+        rules = args.pop('rules')
+        logging.debug("Network rules compressed in base64 [%s]." % rules)
+
+        if deflated.lower() == 'true':
+            rules = inflate_rules(rules)
+        keyword = '--' + get_ipset_keyword()
+
+        # Split spaces into lines. Example of rule [I:tcp;22;22;0.0.0.0/22,NEXT I:tcp;8001;8010;192.168.100.0/24,NEXT E:tcp;22;22;0.0.0.0/0,NEXT ]
+        # After split:
+        #   I:tcp;22;22;0.0.0.0/22,NEXT
+        #   I:tcp;8001;8010;192.168.100.0/24,NEXT
+        #   E:tcp;22;22;0.0.0.0/0,NEXT
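+        # Worked through for the first example rule "I:tcp;22;22;0.0.0.0/22,NEXT",
+        # the parsing below produces (restating the example above, nothing new):
+        #   tokens = ['I:tcp', '22', '22', '0.0.0.0/22,NEXT']
+        #   token_type = 'I', protocol = 'tcp', start = '22', end = '22'
+        #   cidrs = ['0.0.0.0/22']   (the trailing 'NEXT' placeholder is dropped)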
+        lines = rules.split(' ')
+
+        logging.info("Network rules to be processed [%s]." % rules)
+
+        logging.info("Programming network rules for vm %s seqno=%s numrules=%s signature=%s guestIp=%s," \
+                     " update iptables, reason=%s" % (vm_name, seqno, len(lines), signature, vm_ip, reason))
+
+        # Flush iptables rules to clear ipset references before re-applying iptables rules
+        for chain in [chain_name(vm_name), egress_chain_name(vm_name)]:
+            try:
+                util.pread2(['iptables', '-F', chain])
+            except:
+                logging.debug("Ignoring failure to delete chain " + chain)
+                util.pread2(['iptables', '-N', chain])
+
+        cmds = []
+        egressrules = 0
+        for line in lines:
+            logging.debug("Processing rule [%s]." % line)
+
+            # Example of rule: [I:tcp;12;34;1.2.3.4/24,NEXT] -> tokens: ['I:tcp', '12', '34', '1.2.3.4/24,NEXT'].
+            tokens = line.split(';')
+            logging.debug("Tokens %s." % tokens)
+
+            tokens_size = len(tokens)
+
+            expected_tokens_size = 4
+            if tokens_size != expected_tokens_size:
+                logging.warning("Network rule tokens size [%s] is different from the expected size [%s], ignoring rule %s" % (str(tokens_size), str(expected_tokens_size), tokens))
+                continue
+
+            token_type, protocol = tokens[0].split(':')
+            start = tokens[1]
+            end = tokens[2]
+            cidrs = tokens[3].split(",")
+            # Remove placeholder 'NEXT' from the cidrs array
+            cidrs.pop()
+            allow_any = False
+
+            if token_type == 'E':
+                vmchain = egress_chain_name(vm_name)
+                # ipset chain name
+                ipset_chain = egress_chain_name_ipset(vm_name)
+                action = "RETURN"
+                direction = "dst"
+                egressrules = egressrules + 1
+                logging.debug("Ipset chain [%s], VM chain [%s], VM name [%s], Action [%s], Direction [%s], egress rules [%s]" % (ipset_chain, vmchain, vm_name, action, direction, egressrules))
+            else:
+                # ipset chain name
+                ipset_chain = ingress_chain_name_ipset(vm_name)
+                vmchain = chain_name(vm_name)
+                action = "ACCEPT"
+                direction = "src"
+                logging.debug("Ipset chain [%s], VM [%s], Action [%s], Direction [%s], egress rules [%s]" % (ipset_chain, vm_name, action, direction, egressrules))
+            if '0.0.0.0/0' in cidrs:
+                i = cidrs.index('0.0.0.0/0')
+                del cidrs[i]
+                allow_any = True
+
+            port_range = start + ":" + end
+            logging.debug("port range [%s]" % port_range)
+
+            if cidrs:
+                # create separate ipset name
+                ipsetname = ipset_chain + "" + protocol[0:1] + "" + start + "_" + end
+                if start == "-1":
+                    ipsetname = ipset_chain + "_" + protocol[0:1] + "_any"
+
+                if ipset(ipsetname, protocol, start, end, cidrs) == False:
+                    logging.debug(" failed to create ipset for rule " + str(tokens))
+
+                if protocol == 'all':
+                    iptables = ['iptables', '-I', vmchain, '-m', 'state', '--state', 'NEW', '-m', 'set', keyword, ipsetname, direction, '-j', action]
+                elif protocol != 'icmp':
+                    iptables = ['iptables', '-I', vmchain, '-p', protocol, '-m', protocol, '--dport', port_range, '-m', 'state', '--state', 'NEW', '-m', 'set', keyword, ipsetname, direction, '-j', action]
+                else:
+                    port_range = start + "/" + end
+                    if start == "-1":
+                        port_range = "any"
+                    iptables = ['iptables', '-I', vmchain, '-p', 'icmp', '--icmp-type', port_range, '-m', 'set', keyword, ipsetname, direction, '-j', action]
+                cmds.append(iptables)
+                logging.debug(iptables)
+
+            if allow_any and protocol != 'all':
+                if protocol != 'icmp':
+                    iptables = ['iptables', '-I', vmchain, '-p', protocol, '-m', protocol, '--dport', port_range, '-m', 'state', '--state', 'NEW', '-j', action]
+                else:
+                    port_range = start + "/" + end
+                    if start == "-1":
+                        port_range = "any"
+                    iptables = ['iptables', '-I', vmchain, '-p', 'icmp', '--icmp-type', port_range, '-j', action]
+                cmds.append(iptables)
+                logging.debug(iptables)
+
+        for cmd in cmds:
+            logging.debug("Executing command: [%s]." % str(cmd))
+            util.pread2(cmd)
+
+        vmchain = chain_name(vm_name)
+        egress_vmchain = egress_chain_name(vm_name)
+
+        if egressrules == 0:
+            util.pread2(['iptables', '-A', egress_vmchain, '-j', 'RETURN'])
+        else:
+            util.pread2(['iptables', '-A', egress_vmchain, '-j', 'DROP'])
+
+        util.pread2(['iptables', '-A', vmchain, '-j', 'DROP'])
+
+        if write_rule_log_for_vm(vm_name, vm_id, vm_ip, domid, signature, seqno, vm_mac) == False:
+            return 'false'
+
+        return 'true'
+    except:
+        logging.exception("Failed to program network rules!")
+
+if __name__ == "__main__":
+    XenAPIPlugin.dispatch({"pingtest": pingtest, "setup_iscsi": setup_iscsi,
+                           "preparemigration": preparemigration,
+                           "setIptables": setIptables, "pingdomr": pingdomr, "pingxenserver": pingxenserver,
+                           "createFile": createFile, "deleteFile": deleteFile,
+                           "network_rules": network_rules,
+                           "can_bridge_firewall": can_bridge_firewall, "default_network_rules": default_network_rules,
+                           "destroy_network_rules_for_vm": destroy_network_rules_for_vm,
+                           "default_network_rules_systemvm": default_network_rules_systemvm,
+                           "network_rules_vmSecondaryIp": network_rules_vmSecondaryIp,
+                           "get_rule_logs_for_vms": get_rule_logs_for_vms,
+                           "add_to_VCPUs_params_live": add_to_VCPUs_params_live,
+                           "setLinkLocalIP": setLinkLocalIP,
+                           "cleanup_rules": cleanup_rules,
+                           "createFileInDomr": createFileInDomr,
+                           "kill_copy_process": kill_copy_process,
+                           "secureCopyToHost": secureCopyToHost,
+                           "runPatchScriptInDomr": runPatchScriptInDomr})
diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/vmopsSnapshot b/scripts/vm/hypervisor/xenserver/xenserver84/vmopsSnapshot
new file mode 100644
index 00000000000..7f731254abe
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/xenserver84/vmopsSnapshot
@@ -0,0 +1,622 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
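+# For context: the functions in this plugin are invoked from the management
+# server through the XenAPI host API. A minimal client-side sketch (host,
+# credentials and argument values below are hypothetical, not part of this
+# plugin):
+#
+#   import XenAPI
+#   session = XenAPI.Session('https://xenserver-host')
+#   session.xenapi.login_with_password('root', 'password')
+#   host = session.xenapi.host.get_all()[0]
+#   size = session.xenapi.host.call_plugin(host, 'vmopsSnapshot', 'getSnapshotSize',
+#       {'primaryStorageSRUuid': 'sr-uuid', 'snapshotUuid': 'snap-uuid',
+#        'isISCSI': 'false'})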
+ +# Version @VERSION@ +# +# A plugin for executing script needed by vmops cloud + +import os, sys, time +import XenAPIPlugin +if os.path.exists("/opt/xensource/sm"): + sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"]) +if os.path.exists("/usr/lib/xcp/sm"): + sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"]) + +import SR, VDI, SRCommand, util, lvutil +from util import CommandException +import vhdutil +import shutil +import lvhdutil +import errno +import subprocess +import xs_errors +import cleanup +import stat +import random +import cloudstack_pluginlib as lib +import logging + +lib.setup_logging("/var/log/cloud/cloud.log") + +VHDUTIL = "vhd-util" +VHD_PREFIX = 'VHD-' +CLOUD_DIR = '/var/run/cloud_mount' + +def echo(fn): + def wrapped(*v, **k): + name = fn.__name__ + logging.debug("#### CLOUD enter %s ####" % name ) + res = fn(*v, **k) + logging.debug("#### CLOUD exit %s ####" % name ) + return res + return wrapped + + +@echo +def create_secondary_storage_folder(session, args): + local_mount_path = None + + logging.debug("create_secondary_storage_folder, args: " + str(args)) + + try: + try: + # Mount the remote resource folder locally + remote_mount_path = args["remoteMountPath"] + local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid()) + nfsVersion = args["nfsVersion"] + mount(remote_mount_path, local_mount_path, nfsVersion) + + # Create the new folder + new_folder = local_mount_path + "/" + args["newFolder"] + if not os.path.isdir(new_folder): + current_umask = os.umask(0) + os.makedirs(new_folder) + os.umask(current_umask) + except OSError as e: + errMsg = "create_secondary_storage_folder failed: errno: " + str(e.errno) + ", strerr: " + e.strerror + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + except: + errMsg = "create_secondary_storage_folder failed." + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + finally: + if local_mount_path != None: + # Unmount the local folder + umount(local_mount_path) + # Remove the local folder + os.system("rmdir " + local_mount_path) + + return "1" + +@echo +def delete_secondary_storage_folder(session, args): + local_mount_path = None + + logging.debug("delete_secondary_storage_folder, args: " + str(args)) + + try: + try: + # Mount the remote resource folder locally + remote_mount_path = args["remoteMountPath"] + local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid()) + nfsVersion = args["nfsVersion"] + mount(remote_mount_path, local_mount_path, nfsVersion) + + # Delete the specified folder + folder = local_mount_path + "/" + args["folder"] + if os.path.isdir(folder): + os.system("rm -f " + folder + "/*") + os.system("rmdir " + folder) + except OSError as e: + errMsg = "delete_secondary_storage_folder failed: errno: " + str(e.errno) + ", strerr: " + e.strerror + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + except: + errMsg = "delete_secondary_storage_folder failed." 
+ logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + finally: + if local_mount_path != None: + # Unmount the local folder + umount(local_mount_path) + # Remove the local folder + os.system("rmdir " + local_mount_path) + + return "1" + +@echo +def post_create_private_template(session, args): + local_mount_path = None + try: + try: + # get local template folder + templatePath = args["templatePath"] + local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid()) + nfsVersion = args["nfsVersion"] + mount(templatePath, local_mount_path, nfsVersion) + # Retrieve args + filename = args["templateFilename"] + name = args["templateName"] + description = args["templateDescription"] + checksum = args["checksum"] + file_size = args["size"] + virtual_size = args["virtualSize"] + template_id = args["templateId"] + + # Create the template.properties file + template_properties_install_path = local_mount_path + "/template.properties" + f = open(template_properties_install_path, "w") + f.write("filename=" + filename + "\n") + f.write("vhd=true\n") + f.write("id=" + template_id + "\n") + f.write("vhd.filename=" + filename + "\n") + f.write("public=false\n") + f.write("uniquename=" + name + "\n") + f.write("vhd.virtualsize=" + virtual_size + "\n") + f.write("virtualsize=" + virtual_size + "\n") + f.write("checksum=" + checksum + "\n") + f.write("hvm=true\n") + f.write("description=" + description + "\n") + f.write("vhd.size=" + str(file_size) + "\n") + f.write("size=" + str(file_size) + "\n") + f.close() + logging.debug("Created template.properties file") + + # Set permissions + permissions = stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH + os.chmod(template_properties_install_path, permissions) + logging.debug("Set permissions on template and template.properties") + + except: + errMsg = "post_create_private_template failed." + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + + finally: + if local_mount_path != None: + # Unmount the local folder + umount(local_mount_path) + # Remove the local folder + os.system("rmdir " + local_mount_path) + return "1" + +def isfile(path, isISCSI): + errMsg = '' + exists = True + if isISCSI: + exists = checkVolumeAvailability(path) + else: + exists = os.path.isfile(path) + + if not exists: + errMsg = "File " + path + " does not exist." 
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return errMsg
+
+def copyfile(fromFile, toFile, isISCSI):
+    logging.debug("Starting to copy " + fromFile + " to " + toFile)
+    errMsg = ''
+    if isISCSI:
+        bs = "4M"
+    else:
+        bs = "128k"
+
+    try:
+        cmd = ['dd', 'if=' + fromFile, 'iflag=direct', 'of=' + toFile, 'oflag=direct', 'bs=' + bs]
+        txt = util.pread2(cmd)
+    except:
+        try:
+            os.system("rm -f " + toFile)
+        except:
+            txt = ''
+        txt = ''
+        errMsg = "Error while copying " + fromFile + " to " + toFile + " in secondary storage"
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    logging.debug("Successfully copied " + fromFile + " to " + toFile)
+    return errMsg
+
+def chdir(path):
+    try:
+        os.chdir(path)
+    except OSError as e:
+        errMsg = "Unable to chdir to " + path + " because of OSError with errno: " + str(e.errno) + " and strerr: " + e.strerror
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    logging.debug("Chdired to " + path)
+    return
+
+def scanParent(path):
+    # Do a scan for the parent for ISCSI volumes
+    # Note that the parent need not be visible on the XenServer
+    parentUUID = ''
+    try:
+        lvName = os.path.basename(path)
+        dirname = os.path.dirname(path)
+        vgName = os.path.basename(dirname)
+        vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
+        parentUUID = vhdInfo.parentUuid
+    except:
+        errMsg = "Could not get vhd parent of " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return parentUUID
+
+def getParent(path, isISCSI):
+    parentUUID = ''
+    try:
+        if isISCSI:
+            parentUUID = vhdutil.getParent(path, lvhdutil.extractUuid)
+        else:
+            parentUUID = vhdutil.getParent(path, cleanup.FileVDI.extractUuid)
+    except:
+        errMsg = "Could not get vhd parent of " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return parentUUID
+
+def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
+    snapshotVHD = getVHD(snapshotUuid, isISCSI)
+    snapshotPath = os.path.join(primarySRPath, snapshotVHD)
+
+    baseCopyUuid = ''
+    if isISCSI:
+        checkVolumeAvailability(snapshotPath)
+        baseCopyUuid = scanParent(snapshotPath)
+    else:
+        baseCopyUuid = getParent(snapshotPath, isISCSI)
+
+    logging.debug("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
+    return baseCopyUuid
+
+def setParent(parent, child):
+    try:
+        cmd = [VHDUTIL, "modify", "-p", parent, "-n", child]
+        txt = util.pread2(cmd)
+    except:
+        errMsg = "Unexpected error while trying to set parent of " + child + " to " + parent
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    logging.debug("Successfully set parent of " + child + " to " + parent)
+    return
+
+def rename(originalVHD, newVHD):
+    try:
+        os.rename(originalVHD, newVHD)
+    except OSError as e:
+        errMsg = "OSError while renaming " + originalVHD + " to " + newVHD + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return
+
+def makedirs(path):
+    if not os.path.isdir(path):
+        try:
+            os.makedirs(path)
+        except OSError as e:
+            umount(path)
+            if os.path.isdir(path):
+                return
+            errMsg = "OSError while creating " + path + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+    return
+
+def mount(remoteDir, localDir, nfsVersion=None):
+    makedirs(localDir)
+    options = "soft,tcp,timeo=133,retrans=1"
+    if nfsVersion:
+        options += ",vers=" + nfsVersion
+    try:
+        cmd = ['mount', '-o', options, remoteDir, localDir]
+        txt = util.pread2(cmd)
+    except:
+        txt
= '' + errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + logging.debug("Successfully mounted " + remoteDir + " to " + localDir) + + return + +def umount(localDir): + try: + cmd = ['umount', localDir] + util.pread2(cmd) + except CommandException: + errMsg = "CommandException raised while trying to umount " + localDir + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + + logging.debug("Successfully unmounted " + localDir) + return + +def mountSnapshotsDir(secondaryStorageMountPath, localMountPointPath, path): + # The aim is to mount secondaryStorageMountPath on + # And create / dir on it, if it doesn't exist already. + # Assuming that secondaryStorageMountPath exists remotely + + # Just mount secondaryStorageMountPath//SecondaryStorageHost/ everytime + # Never unmount. + # path is like "snapshots/account/volumeId", we mount secondary_storage:/snapshots + relativeDir = path.split("/")[0] + restDir = "/".join(path.split("/")[1:]) + snapshotsDir = os.path.join(secondaryStorageMountPath, relativeDir) + + makedirs(localMountPointPath) + # if something is not mounted already on localMountPointPath, + # mount secondaryStorageMountPath on localMountPath + if os.path.ismount(localMountPointPath): + # There is more than one secondary storage per zone. + # And we are mounting each sec storage under a zone-specific directory + # So two secondary storage snapshot dirs will never get mounted on the same point on the same XenServer. + logging.debug("The remote snapshots directory has already been mounted on " + localMountPointPath) + else: + mount(snapshotsDir, localMountPointPath) + + # Create accountId/instanceId dir on localMountPointPath, if it doesn't exist + backupsDir = os.path.join(localMountPointPath, restDir) + makedirs(backupsDir) + return backupsDir + +def unmountAll(path): + try: + for dir in os.listdir(path): + if dir.isdigit(): + logging.debug("Unmounting Sub-Directory: " + dir) + localMountPointPath = os.path.join(path, dir) + umount(localMountPointPath) + except: + logging.debug("Ignoring the error while trying to unmount the snapshots dir") + +@echo +def unmountSnapshotsDir(session, args): + dcId = args['dcId'] + localMountPointPath = os.path.join(CLOUD_DIR, dcId) + localMountPointPath = os.path.join(localMountPointPath, "snapshots") + unmountAll(localMountPointPath) + try: + umount(localMountPointPath) + except: + logging.debug("Ignoring the error while trying to unmount the snapshots dir.") + + return "1" + +def getPrimarySRPath(primaryStorageSRUuid, isISCSI): + if isISCSI: + primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid + return os.path.join(lvhdutil.VG_LOCATION, primarySRDir) + else: + return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid) + +def getBackupVHD(UUID): + return UUID + '.' + SR.DEFAULT_TAP + +def getVHD(UUID, isISCSI): + if isISCSI: + return VHD_PREFIX + UUID + else: + return UUID + '.' 
+ SR.DEFAULT_TAP
+
+def getIsTrueString(stringValue):
+    booleanValue = False
+    if (stringValue and stringValue == 'true'):
+        booleanValue = True
+    return booleanValue
+
+def makeUnavailable(uuid, primarySRPath, isISCSI):
+    if not isISCSI:
+        return
+    VHD = getVHD(uuid, isISCSI)
+    path = os.path.join(primarySRPath, VHD)
+    manageAvailability(path, '-an')
+    return
+
+def manageAvailability(path, value):
+    if path.__contains__("/var/run/sr-mount"):
+        return
+    logging.debug("Setting availability of " + path + " to " + value)
+    try:
+        cmd = ['/usr/sbin/lvchange', value, path]
+        util.pread2(cmd)
+    except: #CommandException, (rc, cmdListStr, stderr):
+        #errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr
+        errMsg = "Unexpected exception thrown by lvchange"
+        logging.debug(errMsg)
+        if value == "-ay":
+            # Raise an error only if we are trying to make it available.
+            # Just warn if we are trying to make it unavailable after the
+            # snapshot operation is done.
+            raise xs_errors.XenError(errMsg)
+    return
+
+def checkVolumeAvailability(path):
+    try:
+        if not isVolumeAvailable(path):
+            # The VHD file is not available on XenServer. The volume is probably
+            # inactive or detached.
+            # Do lvchange -ay to make it available on XenServer
+            manageAvailability(path, '-ay')
+    except:
+        errMsg = "Could not determine status of ISCSI path: " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    success = False
+    i = 0
+    while i < 6:
+        i = i + 1
+        # Check if the vhd is actually visible by checking for the link
+        # set isISCSI to true
+        success = isVolumeAvailable(path)
+        if success:
+            logging.debug("Made vhd: " + path + " available and confirmed that it is visible")
+            break
+
+        # Sleep for 10 seconds before checking again.
+        time.sleep(10)
+
+    # If not visible within 1 min fail
+    if not success:
+        logging.debug("Could not make vhd: " + path + " available despite waiting for 1 minute. Does it exist?")
+
+    return success
+
+def isVolumeAvailable(path):
+    # Check if iscsi volume is available on this XenServer.
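+    # For reference, the bash test spawned below ("[ -L <path> ]") only checks
+    # whether the LV's device symlink exists; an equivalent pure-Python probe
+    # would be (sketch only, not used by this plugin):
+    #   status = "1" if os.path.islink(path) else "0"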
+    status = "0"
+    try:
+        p = subprocess.Popen(["/bin/bash", "-c", "if [ -L " + path + " ]; then echo 1; else echo 0;fi"], stdout=subprocess.PIPE)
+        status = p.communicate()[0].decode().strip("\n")
+    except:
+        errMsg = "Could not determine status of ISCSI path: " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    return (status == "1")
+
+def getVhdParent(session, args):
+    logging.debug("getParent with " + str(args))
+    primaryStorageSRUuid = args['primaryStorageSRUuid']
+    snapshotUuid = args['snapshotUuid']
+    isISCSI = getIsTrueString(args['isISCSI'])
+
+    primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
+    logging.debug("primarySRPath: " + primarySRPath)
+
+    baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
+
+    return baseCopyUuid
+
+def getSnapshotSize(session, args):
+    primaryStorageSRUuid = args['primaryStorageSRUuid']
+    snapshotUuid = args['snapshotUuid']
+    isISCSI = getIsTrueString(args['isISCSI'])
+
+    primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
+    logging.debug("primarySRPath: " + primarySRPath)
+
+    snapshotVHD = getVHD(snapshotUuid, isISCSI)
+    snapshotPath = os.path.join(primarySRPath, snapshotVHD)
+    physicalSize = vhdutil.getSizePhys(snapshotPath)
+    return str(physicalSize)
+
+def backupSnapshot(session, args):
+    logging.debug("Called backupSnapshot with " + str(args))
+    primaryStorageSRUuid = args['primaryStorageSRUuid']
+    secondaryStorageMountPath = args['secondaryStorageMountPath']
+    snapshotUuid = args['snapshotUuid']
+    prevBackupUuid = args['prevBackupUuid']
+    backupUuid = args['backupUuid']
+    isISCSI = getIsTrueString(args['isISCSI'])
+    path = args['path']
+    localMountPoint = args['localMountPoint']
+    primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
+    logging.debug("primarySRPath: " + primarySRPath)
+
+    baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
+    baseCopyVHD = getVHD(baseCopyUuid, isISCSI)
+    baseCopyPath = os.path.join(primarySRPath, baseCopyVHD)
+    logging.debug("Base copy path: " + baseCopyPath)
+
+    # Mount secondary storage mount path on XenServer along the path
+    # /var/run/sr-mount/<dcId>/snapshots/ and create the <accountId>/<volumeId>
+    # dir on it.
+    backupsDir = mountSnapshotsDir(secondaryStorageMountPath, localMountPoint, path)
+    logging.debug("Backups dir " + backupsDir)
+    prevBackupUuid = prevBackupUuid.split("/")[-1]
+    # Check existence of snapshot on primary storage
+    isfile(baseCopyPath, isISCSI)
+    physicalSize = vhdutil.getSizePhys(baseCopyPath)
+    if prevBackupUuid:
+        # Check existence of prevBackupFile
+        prevBackupVHD = getBackupVHD(prevBackupUuid)
+        prevBackupFile = os.path.join(backupsDir, prevBackupVHD)
+        isfile(prevBackupFile, False)
+
+    # copy baseCopyPath to backupsDir with new uuid
+    backupVHD = getBackupVHD(backupUuid)
+    backupFile = os.path.join(backupsDir, backupVHD)
+    logging.debug("Back up " + baseCopyUuid + " to Secondary Storage as " + backupUuid)
+    copyfile(baseCopyPath, backupFile, isISCSI)
+    vhdutil.setHidden(backupFile, False)
+
+    # Because the primary storage is always scanned, the parent of this base copy is always the first base copy.
+    # We don't want that, we want a chain of VHDs each of which is a delta from the previous.
+ # So set the parent of the current baseCopyVHD to prevBackupVHD + if prevBackupUuid: + # If there was a previous snapshot + setParent(prevBackupFile, backupFile) + + txt = "1#" + backupUuid + "#" + str(physicalSize) + return txt + +@echo +def deleteSnapshotBackup(session, args): + logging.debug("Calling deleteSnapshotBackup with " + str(args)) + secondaryStorageMountPath = args['secondaryStorageMountPath'] + backupUUID = args['backupUUID'] + path = args['path'] + localMountPoint = args['localMountPoint'] + + backupsDir = mountSnapshotsDir(secondaryStorageMountPath, localMountPoint, path) + # chdir to the backupsDir for convenience + chdir(backupsDir) + + backupVHD = getBackupVHD(backupUUID) + logging.debug("checking existence of " + backupVHD) + + # The backupVHD is on secondary which is NFS and not ISCSI. + if not os.path.isfile(backupVHD): + logging.debug("backupVHD " + backupVHD + "does not exist. Not trying to delete it") + return "1" + logging.debug("backupVHD " + backupVHD + " exists.") + + # Just delete the backupVHD + try: + os.remove(backupVHD) + except OSError as e: + errMsg = "OSError while removing " + backupVHD + " with errno: " + str(e.errno) + " and strerr: " + e.strerror + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + + return "1" + +@echo +def revert_memory_snapshot(session, args): + logging.debug("Calling revert_memory_snapshot with " + str(args)) + vmName = args['vmName'] + snapshotUUID = args['snapshotUUID'] + oldVmUuid = args['oldVmUuid'] + snapshotMemory = args['snapshotMemory'] + hostUUID = args['hostUUID'] + try: + cmd = '''xe vbd-list vm-uuid=%s | grep 'vdi-uuid' | grep -v 'not in database' | sed -e 's/vdi-uuid ( RO)://g' ''' % oldVmUuid + vdiUuids = os.popen(cmd).read().split() + cmd2 = '''xe vm-param-get param-name=power-state uuid=''' + oldVmUuid + if os.popen(cmd2).read().split()[0] != 'halted': + os.system("xe vm-shutdown force=true vm=" + vmName) + os.system("xe vm-destroy uuid=" + oldVmUuid) + os.system("xe snapshot-revert snapshot-uuid=" + snapshotUUID) + if snapshotMemory == 'true': + os.system("xe vm-resume vm=" + vmName + " on=" + hostUUID) + for vdiUuid in vdiUuids: + os.system("xe vdi-destroy uuid=" + vdiUuid) + except OSError as e: + errMsg = "OSError while reverting vm " + vmName + " to snapshot " + snapshotUUID + " with errno: " + str(e.errno) + " and strerr: " + e.strerror + logging.debug(errMsg) + raise xs_errors.XenError(errMsg) + return "0" + +if __name__ == "__main__": + XenAPIPlugin.dispatch({"getVhdParent":getVhdParent, "create_secondary_storage_folder":create_secondary_storage_folder, "delete_secondary_storage_folder":delete_secondary_storage_folder, "post_create_private_template":post_create_private_template, "backupSnapshot": backupSnapshot, "deleteSnapshotBackup": deleteSnapshotBackup, "unmountSnapshotsDir": unmountSnapshotsDir, "revert_memory_snapshot":revert_memory_snapshot, "getSnapshotSize":getSnapshotSize}) diff --git a/scripts/vm/hypervisor/xenserver/xenserver84/vmopspremium b/scripts/vm/hypervisor/xenserver/xenserver84/vmopspremium new file mode 100755 index 00000000000..3c46cda186b --- /dev/null +++ b/scripts/vm/hypervisor/xenserver/xenserver84/vmopspremium @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Version @VERSION@
+#
+# A plugin for executing scripts needed by vmops cloud
+
+import os, sys, time
+import XenAPIPlugin
+if os.path.exists("/opt/xensource/sm"):
+    sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"])
+if os.path.exists("/usr/lib/xcp/sm"):
+    sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
+import util
+import socket
+import cloudstack_pluginlib as lib
+import logging
+
+lib.setup_logging("/var/log/cloud/cloud.log")
+
+def echo(fn):
+    def wrapped(*v, **k):
+        name = fn.__name__
+        logging.debug("#### CLOUD enter %s ####" % name)
+        res = fn(*v, **k)
+        logging.debug("#### CLOUD exit %s ####" % name)
+        return res
+    return wrapped
+
+@echo
+def forceShutdownVM(session, args):
+    domId = args['domId']
+    try:
+        cmd = ["/opt/xensource/debug/xenops", "destroy_domain", "-domid", domId]
+        txt = util.pread2(cmd)
+    except:
+        txt = '10#failed'
+    return txt
+
+@echo
+def create_privatetemplate_from_snapshot(session, args):
+    templatePath = args['templatePath']
+    snapshotPath = args['snapshotPath']
+    tmpltLocalDir = args['tmpltLocalDir']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/create_privatetemplate_from_snapshot.sh", snapshotPath, templatePath, tmpltLocalDir]
+        txt = util.pread2(cmd)
+    except:
+        txt = '10#failed'
+    return txt
+
+@echo
+def upgrade_snapshot(session, args):
+    templatePath = args['templatePath']
+    snapshotPath = args['snapshotPath']
+    try:
+        # "upgrate_snapshot.sh" is the script's actual (historically misspelled)
+        # filename in the CloudStack tree, so it is deliberately left as-is here
+        cmd = ["bash", "/opt/cloud/bin/upgrate_snapshot.sh", snapshotPath, templatePath]
+        txt = util.pread2(cmd)
+    except:
+        txt = '10#failed'
+    return txt
+
+@echo
+def copy_vhd_to_secondarystorage(session, args):
+    mountpoint = args['mountpoint']
+    vdiuuid = args['vdiuuid']
+    sruuid = args['sruuid']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/copy_vhd_to_secondarystorage.sh", mountpoint, vdiuuid, sruuid]
+        txt = util.pread2(cmd)
+    except:
+        txt = '10#failed'
+    return txt
+
+@echo
+def copy_vhd_from_secondarystorage(session, args):
+    mountpoint = args['mountpoint']
+    sruuid = args['sruuid']
+    namelabel = args['namelabel']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/copy_vhd_from_secondarystorage.sh", mountpoint, sruuid, namelabel]
+        txt = util.pread2(cmd)
+    except:
+        txt = '10#failed'
+    return txt
+
+@echo
+def remove_corrupt_vdi(session, args):
+    vdifile = args['vdifile']
+    try:
+        cmd = ['rm', '-f', vdifile]
+        txt = util.pread2(cmd)
+    except:
+        txt = '10#failed'
+    return txt
+
+@echo
+def setup_heartbeat_sr(session, args):
+    host = args['host']
+    sr = args['sr']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/setup_heartbeat_sr.sh", host, sr]
+        txt = util.pread2(cmd)
+    except:
+        txt = ''
+    return txt
+
+@echo
+def setup_heartbeat_file(session, args):
+    host = args['host']
+    sr = args['sr']
+    add = args['add']
+    try:
+        cmd = ["bash", "/opt/cloud/bin/setup_heartbeat_file.sh", host, sr, add]
+        txt = util.pread2(cmd)
+    except:
+        txt = ''
+    return txt
+
+@echo
+def heartbeat(session, args):
+    host = args['host']
+    timeout = args['timeout']
+    interval = args['interval']
+    try:
+        cmd = ["/bin/bash", "/opt/cloud/bin/launch_hb.sh", host, timeout, interval]
+        txt = util.pread2(cmd)
+    except:
+        txt = 'fail'
+    return txt
+
+@echo
+def asmonitor(session, args):
+    try:
+        perfmod = __import__("perfmon")
+        result = perfmod.get_vm_group_perfmon(args)
+        return result
+    except:
+        return 'fail'
+
+if __name__ == "__main__":
+    XenAPIPlugin.dispatch({"forceShutdownVM": forceShutdownVM, "upgrade_snapshot": upgrade_snapshot, "create_privatetemplate_from_snapshot": create_privatetemplate_from_snapshot, "copy_vhd_to_secondarystorage": copy_vhd_to_secondarystorage, "copy_vhd_from_secondarystorage": copy_vhd_from_secondarystorage, "setup_heartbeat_sr": setup_heartbeat_sr, "setup_heartbeat_file": setup_heartbeat_file, "heartbeat": heartbeat, "asmonitor": asmonitor, "remove_corrupt_vdi": remove_corrupt_vdi})
diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/UploadManagerImpl.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/UploadManagerImpl.java
index a14667886c7..828f61f89dc 100644
--- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/UploadManagerImpl.java
+++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/template/UploadManagerImpl.java
@@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.template;
 import java.io.File;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.file.Path;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.Map;
@@ -300,7 +301,18 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager {
             logger.error(errorString);
             return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
         }
-
+        File parentFolder = file.getParentFile();
+        if (parentFolder != null && parentFolder.exists()) {
+            Path folderPath = parentFolder.toPath();
+            Script script = new Script(true, "chmod", 1440 * 1000, logger);
+            script.add("755", folderPath.toString());
+            result = script.execute();
+            if (result != null) {
+                String errMsg = "Unable to set permissions for " + folderPath + " due to " + result;
+                logger.error(errMsg);
+                throw new CloudRuntimeException(errMsg);
+            }
+        }
         return new CreateEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS);
     }
diff --git a/test/integration/smoke/test_deploy_vm_extra_config_data.py b/test/integration/smoke/test_deploy_vm_extra_config_data.py
index 318febb7a1e..98e60f9d287 100644
--- a/test/integration/smoke/test_deploy_vm_extra_config_data.py
+++ b/test/integration/smoke/test_deploy_vm_extra_config_data.py
@@ -513,7 +513,7 @@ class TestAddConfigtoDeployVM(cloudstackTestCase):
             raise self.skipTest("Skipping test case for non-xenserver hypervisor")
         """
         Following commands are used to convert a VM from HVM to PV and set using vm-param-set
-        HVM-boot-policy=
+        HVM-boot-policy=""
         PV-bootloader=pygrub
         PV-args=hvc0
         """
@@ -524,7 +524,7 @@ class TestAddConfigtoDeployVM(cloudstackTestCase):
         add_config_response = self.add_global_config(name, value)
 
         if add_config_response.name:
-            extraconfig = 'HVM-boot-policy%3D%0APV-bootloader%3Dpygrub%0APV-args%3Dhvc0'
+            extraconfig = 'HVM-boot-policy%3D%22%22%0APV-bootloader%3Dpygrub%0APV-args%3Dhvc0'
             try:
                 response = self.deploy_vm(hypervisor, extraconfig)
                 host_id = response.hostid
diff --git a/test/integration/smoke/test_guest_os.py b/test/integration/smoke/test_guest_os.py
index bb09b2b86a1..0a7649ef139 100644
--- a/test/integration/smoke/test_guest_os.py
+++ b/test/integration/smoke/test_guest_os.py
@@ -183,7 +183,10 @@ class TestGuestOS(cloudstackTestCase):
             raise unittest.SkipTest("OS name check with hypervisor is supported only on XenServer and VMware")
 
         if self.hypervisor.hypervisor.lower() == "xenserver":
-            testosname="Debian Jessie 8.0"
+            if tuple(map(int, self.hypervisor.hypervisorversion.split("."))) >= (8, 3, 0):
+                testosname = "Debian Bookworm 12"
+            else:
+                testosname = "Debian Jessie 8.0"
         else:
             testosname="debian4_64Guest"
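
A note on the version gate in the test_guest_os.py hunk above: building a tuple of
ints gives numeric, per-component ordering, which a plain string comparison gets
wrong once any component reaches two digits. The sketch below is a minimal,
standalone illustration of the same idiom (made-up version strings; it assumes
purely numeric dotted versions, since int() raises ValueError on suffixes such as
"8.3.0-beta"):

    def version_tuple(v):
        # "8.10.0" -> (8, 10, 0); tuples compare element by element
        return tuple(map(int, v.split(".")))

    assert version_tuple("8.10.0") >= version_tuple("8.3.0")  # numeric: correct
    assert not ("8.10.0" >= "8.3.0")  # lexicographic comparison mis-orders these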