Merge branch '4.20'

Daan Hoogland 2025-05-13 13:34:23 +02:00
commit 64828f66e8
34 changed files with 7015 additions and 297 deletions

View File

@ -27,6 +27,7 @@ import java.io.InputStream;
import java.io.RandomAccessFile;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
@ -80,6 +81,18 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
    private ResourceType resourceType = ResourceType.TEMPLATE;
    private final HttpMethodRetryHandler myretryhandler;
    private boolean followRedirects = false;
private boolean isChunkedTransfer;
protected static final List<String> CUSTOM_HEADERS_FOR_CHUNKED_TRANSFER_SIZE = Arrays.asList(
"x-goog-stored-content-length",
"x-goog-meta-size",
"x-amz-meta-size",
"x-amz-meta-content-length",
"x-object-meta-size",
"x-original-content-length",
"x-oss-meta-content-length",
"x-file-size");
private static final long MIN_FORMAT_VERIFICATION_SIZE = 1024 * 1024;
    public HttpTemplateDownloader(StorageLayer storageLayer, String downloadUrl, String toDir, DownloadCompleteCallback callback, long maxTemplateSizeInBytes,
            String user, String password, Proxy proxy, ResourceType resourceType) {
@ -205,13 +218,11 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
                RandomAccessFile out = new RandomAccessFile(file, "rw");
        ) {
            out.seek(localFileSize);
-           logger.info("Starting download from " + downloadUrl + " to " + toFile + " remoteSize=" + toHumanReadableSize(remoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes));
-           if (copyBytes(file, in, out)) return 0;
+           logger.info("Starting download from {} to {} remoteSize={} , max size={}", downloadUrl, toFile,
+                   toHumanReadableSize(remoteSize), toHumanReadableSize(maxTemplateSizeInBytes));
+           boolean eof = copyBytes(file, in, out);
            Date finish = new Date();
-           checkDowloadCompletion();
+           checkDownloadCompletion(eof);
            downloadTime += finish.getTime() - start.getTime();
        } finally { /* in.close() and out.close() */ }
        return totalBytes;
@ -237,28 +248,32 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
    }

    private boolean copyBytes(File file, InputStream in, RandomAccessFile out) throws IOException {
-       int bytes;
-       byte[] block = new byte[CHUNK_SIZE];
+       byte[] buffer = new byte[CHUNK_SIZE];
        long offset = 0;
-       boolean done = false;
        VerifyFormat verifyFormat = new VerifyFormat(file);
        status = Status.IN_PROGRESS;
-       while (!done && status != Status.ABORTED && offset <= remoteSize) {
-           if ((bytes = in.read(block, 0, CHUNK_SIZE)) > -1) {
-               offset = writeBlock(bytes, out, block, offset);
-               if (!ResourceType.SNAPSHOT.equals(resourceType) &&
-                       !verifyFormat.isVerifiedFormat() &&
-                       (offset >= 1048576 || offset >= remoteSize)) { //let's check format after we get 1MB or full file
-                   verifyFormat.invoke();
-               }
-           } else {
-               done = true;
+       while (status != Status.ABORTED) {
+           int bytesRead = in.read(buffer, 0, CHUNK_SIZE);
+           if (bytesRead == -1) {
+               logger.debug("Reached EOF on input stream");
+               break;
+           }
+           offset = writeBlock(bytesRead, out, buffer, offset);
+           if (!ResourceType.SNAPSHOT.equals(resourceType)
+                   && !verifyFormat.isVerifiedFormat()
+                   && (offset >= MIN_FORMAT_VERIFICATION_SIZE || offset >= remoteSize)) {
+               verifyFormat.invoke();
+           }
+           if (offset >= remoteSize) {
+               logger.debug("Reached expected remote size limit: {} bytes", remoteSize);
+               break;
            }
        }
        out.getFD().sync();
-       return false;
+       return !Status.ABORTED.equals(status);
    }

    private long writeBlock(int bytes, RandomAccessFile out, byte[] block, long offset) throws IOException {
        out.write(block, 0, bytes);
        offset += bytes;
@ -267,11 +282,13 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
        return offset;
    }

-   private void checkDowloadCompletion() {
+   private void checkDownloadCompletion(boolean eof) {
        String downloaded = "(incomplete download)";
-       if (totalBytes >= remoteSize) {
+       if (eof && ((totalBytes >= remoteSize) || (isChunkedTransfer && remoteSize == maxTemplateSizeInBytes))) {
            status = Status.DOWNLOAD_FINISHED;
-           downloaded = "(download complete remote=" + toHumanReadableSize(remoteSize) + " bytes)";
+           downloaded = "(download complete remote=" +
+                   (remoteSize == maxTemplateSizeInBytes ? toHumanReadableSize(remoteSize) : "unknown") +
+                   " bytes)";
        }
        errorString = "Downloaded " + toHumanReadableSize(totalBytes) + " bytes " + downloaded;
    }
@ -293,18 +310,42 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
            }
        }
protected long getRemoteSizeForChunkedTransfer() {
for (String headerKey : CUSTOM_HEADERS_FOR_CHUNKED_TRANSFER_SIZE) {
Header header = request.getResponseHeader(headerKey);
if (header == null) {
continue;
}
try {
return Long.parseLong(header.getValue());
} catch (NumberFormatException ignored) {}
}
Header contentRangeHeader = request.getResponseHeader("Content-Range");
if (contentRangeHeader != null) {
String contentRange = contentRangeHeader.getValue();
if (contentRange != null && contentRange.contains("/")) {
String totalSize = contentRange.substring(contentRange.indexOf('/') + 1).trim();
return Long.parseLong(totalSize);
}
}
return 0;
}
    private boolean tryAndGetRemoteSize() {
        Header contentLengthHeader = request.getResponseHeader("content-length");
-       boolean chunked = false;
+       isChunkedTransfer = false;
        long reportedRemoteSize = 0;
        if (contentLengthHeader == null) {
            Header chunkedHeader = request.getResponseHeader("Transfer-Encoding");
-           if (chunkedHeader == null || !"chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
+           if (chunkedHeader != null && "chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
+               isChunkedTransfer = true;
+               reportedRemoteSize = getRemoteSizeForChunkedTransfer();
+               logger.debug("{} is using chunked transfer encoding, possible remote size: {}", downloadUrl,
+                       reportedRemoteSize);
+           } else {
                status = Status.UNRECOVERABLE_ERROR;
                errorString = " Failed to receive length of download ";
                return false;
-           } else if ("chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
-               chunked = true;
            }
        } else {
            reportedRemoteSize = Long.parseLong(contentLengthHeader.getValue());
@ -316,9 +357,11 @@ public class HttpTemplateDownloader extends ManagedContextRunnable implements Te
                return false;
            }
        }
        if (remoteSize == 0) {
            remoteSize = reportedRemoteSize;
+           if (remoteSize != 0) {
+               logger.debug("Remote size for {} found to be {}", downloadUrl, toHumanReadableSize(remoteSize));
+           }
        }
        return true;
    }
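For illustration, a minimal standalone sketch of the Content-Range fallback used by getRemoteSizeForChunkedTransfer() above: the total object size is taken from the part after the '/' in the header value. This is not part of the commit, and the sample header value is made up.

    // Illustrative sketch only; mirrors the parsing logic shown in the hunk above.
    // A response header value like "bytes 0-1048575/7340032" carries the total
    // object size after the '/', which the downloader treats as remoteSize.
    public class ContentRangeSizeExample {
        static long totalSizeFrom(String contentRangeValue) {
            if (contentRangeValue != null && contentRangeValue.contains("/")) {
                return Long.parseLong(contentRangeValue.substring(contentRangeValue.indexOf('/') + 1).trim());
            }
            return 0;
        }

        public static void main(String[] args) {
            System.out.println(totalSizeFrom("bytes 0-1048575/7340032")); // prints 7340032
        }
    }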

View File

@ -54,6 +54,59 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.storage_pool', 'used_iops', 'bigint
-- Add reason column for op_ha_work
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.op_ha_work', 'reason', 'varchar(32) DEFAULT NULL COMMENT "Reason for the HA work"');
-- Support for XCP-ng 8.3.0 and XenServer 8.4 by adding hypervisor capabilities
-- https://docs.xenserver.com/en-us/xenserver/8/system-requirements/configuration-limits.html
-- https://docs.xenserver.com/en-us/citrix-hypervisor/system-requirements/configuration-limits.html
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) VALUES (UUID(), 'XenServer', '8.3.0', 1000, 254, 64, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported) VALUES (UUID(), 'XenServer', '8.4.0', 1000, 240, 64, 1);
-- Add missing and new Guest OS mappings
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'XenServer', '8.2.1', 'Debian Buster 10');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 15 (64-bit)', 'XenServer', '8.2.1', 'SUSE Linux Enterprise 15 (64-bit)');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2022 (64-bit)', 'XenServer', '8.2.1', 'Windows Server 2022 (64-bit)');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows 11 (64-bit)', 'XenServer', '8.2.1', 'Windows 11');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 20.04 LTS', 'XenServer', '8.2.1', 'Ubuntu Focal Fossa 20.04');
-- Copy XS 8.2.1 hypervisor guest OS mappings to XS 8.3 and 8.3 mappings to 8.4
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.3.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.2.1';
-- Add new and missing guest os mappings for XS 8.3
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Rocky Linux 9', 'XenServer', '8.3.0', 'Rocky Linux 9');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'Rocky Linux 8', 'XenServer', '8.3.0', 'Rocky Linux 8');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 9', 'XenServer', '8.3.0', 'AlmaLinux 9');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'AlmaLinux 8', 'XenServer', '8.3.0', 'AlmaLinux 8');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 12 (64-bit)', 'XenServer', '8.3.0', 'Debian Bookworm 12');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 9', 'XenServer', '8.3.0', 'Oracle Linux 9');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (3, 'Oracle Linux 8', 'XenServer', '8.3.0', 'Oracle Linux 8');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Red Hat Enterprise Linux 8.0', 'XenServer', '8.3.0', 'Red Hat Enterprise Linux 8');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Red Hat Enterprise Linux 9.0', 'XenServer', '8.3.0', 'Red Hat Enterprise Linux 9');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 22.04 LTS', 'XenServer', '8.3.0', 'Ubuntu Jammy Jellyfish 22.04');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 12 SP5 (64-bit)', 'XenServer', '8.3.0', 'SUSE Linux Enterprise Server 12 SP5 (64-bit');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'NeoKylin Linux Server 7', 'XenServer', '8.3.0', 'NeoKylin Linux Server 7');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 9', 'XenServer', '8.3.0', 'CentOS Stream 9');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Scientific Linux 7', 'XenServer', '8.3.0', 'Scientific Linux 7');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (7, 'Generic Linux UEFI', 'XenServer', '8.3.0', 'Generic Linux UEFI');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (7, 'Generic Linux BIOS', 'XenServer', '8.3.0', 'Generic Linux BIOS');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Gooroom Platform 2.0', 'XenServer', '8.3.0', 'Gooroom Platform 2.0');
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.4.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.3.0';
-- Add new guest os mappings for XS 8.4 and KVM
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2025', 'XenServer', '8.4.0', 'Windows Server 2025');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 24.04 LTS', 'XenServer', '8.4.0', 'Ubuntu Noble Numbat 24.04');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 10 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 10 (64-bit)');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 11 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 11 (64-bit)');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 12 (64-bit)', 'KVM', 'default', 'Debian GNU/Linux 12 (64-bit)');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows 11 (64-bit)', 'KVM', 'default', 'Windows 11');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (6, 'Windows Server 2025', 'KVM', 'default', 'Windows Server 2025');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 24.04 LTS', 'KVM', 'default', 'Ubuntu 24.04 LTS');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 10 (preview)', 'XenServer', '8.4.0', 'CentOS Stream 10 (preview)');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (1, 'CentOS Stream 9', 'XenServer', '8.4.0', 'CentOS Stream 9');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'Scientific Linux 7', 'XenServer', '8.4.0', 'Scientific Linux 7');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (4, 'NeoKylin Linux Server 7', 'XenServer', '8.4.0', 'NeoKylin Linux Server 7');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (5, 'SUSE Linux Enterprise Server 12 SP5 (64-bit)', 'XenServer', '8.4.0', 'SUSE Linux Enterprise Server 12 SP5 (64-bit');
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Gooroom Platform 2.0', 'XenServer', '8.4.0', 'Gooroom Platform 2.0');
-- Grant access to 2FA APIs for the "Read-Only User - Default" role
CALL `cloud`.`IDEMPOTENT_UPDATE_API_PERMISSION`('Read-Only User - Default', 'setupUserTwoFactorAuthentication', 'ALLOW');

View File

@ -126,7 +126,26 @@ public class ConfigDriveBuilder {
        File openStackFolder = new File(tempDirName + ConfigDrive.openStackConfigDriveName);
-       writeVendorEmptyJsonFile(openStackFolder);
        /*
Try to find VM password in the vmData.
If it is found, then write it into vendor-data.json
*/
String vmPassword = "";
for (String[] item : vmData) {
String dataType = item[CONFIGDATA_DIR];
String fileName = item[CONFIGDATA_FILE];
String content = item[CONFIGDATA_CONTENT];
if (PASSWORD_FILE.equals(fileName)) {
vmPassword = content;
break;
}
}
if (vmPassword.equals("")) {
writeVendorDataJsonFile(openStackFolder);
} else {
writeVendorDataJsonFile(openStackFolder, vmPassword);
}
        writeNetworkData(nics, supportedServices, openStackFolder);
        for (NicProfile nic: nics) {
            if (supportedServices.get(nic.getId()).contains(Network.Service.UserData)) {
@ -253,7 +272,7 @@ public class ConfigDriveBuilder {
     *
     * If the folder does not exist, and we cannot create it, we throw a {@link CloudRuntimeException}.
     */
-   static void writeVendorEmptyJsonFile(File openStackFolder) {
+   static void writeVendorDataJsonFile(File openStackFolder) {
        if (openStackFolder.exists() || openStackFolder.mkdirs()) {
            writeFile(openStackFolder, "vendor_data.json", "{}");
        } else {
@ -261,6 +280,26 @@ public class ConfigDriveBuilder {
        }
    }
/**
* Writes vendor data containing Cloudstack-generated password into vendor-data.json
*
* If the folder does not exist, and we cannot create it, we throw a {@link CloudRuntimeException}.
*/
static void writeVendorDataJsonFile(File openStackFolder, String password) {
if (openStackFolder.exists() || openStackFolder.mkdirs()) {
writeFile(
openStackFolder,
"vendor_data.json",
String.format(
"{\"cloud-init\": \"#cloud-config\\npassword: %s\\nchpasswd:\\n expire: False\"}",
password
)
);
} else {
throw new CloudRuntimeException("Failed to create folder " + openStackFolder);
}
}
    /**
     * Creates the {@link JsonObject} with VM's metadata. The vmData is a list of arrays; we expect this list to have the following entries:
     * <ul>
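For illustration, the vendor_data.json content produced by the password-aware writeVendorDataJsonFile() above, for a hypothetical generated password. This snippet is not part of the commit; the value "Xy7example" is made up (in the real code the password comes from the PASSWORD_FILE entry in vmData).

    // Illustrative sketch only; "Xy7example" is a made-up password value.
    public class VendorDataExample {
        public static void main(String[] args) {
            String password = "Xy7example";
            String vendorData = String.format(
                    "{\"cloud-init\": \"#cloud-config\\npassword: %s\\nchpasswd:\\n expire: False\"}",
                    password);
            System.out.println(vendorData);
            // prints: {"cloud-init": "#cloud-config\npassword: Xy7example\nchpasswd:\n expire: False"}
        }
    }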

View File

@ -134,7 +134,7 @@ public class ConfigDriveBuilderTest {
    @Test(expected = CloudRuntimeException.class)
    public void buildConfigDriveTestIoException() {
        try (MockedStatic<ConfigDriveBuilder> configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) {
-           configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(nullable(File.class))).thenThrow(CloudRuntimeException.class);
+           configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorDataJsonFile(nullable(File.class))).thenThrow(CloudRuntimeException.class);
            Mockito.when(ConfigDriveBuilder.buildConfigDrive(null, new ArrayList<>(), "teste", "C:", null, supportedServices)).thenCallRealMethod();
            ConfigDriveBuilder.buildConfigDrive(null, new ArrayList<>(), "teste", "C:", null, supportedServices);
        }
@ -144,7 +144,7 @@ public class ConfigDriveBuilderTest {
    public void buildConfigDriveTest() {
        try (MockedStatic<ConfigDriveBuilder> configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) {
-           configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(Mockito.any(File.class))).then(invocationOnMock -> null);
+           configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorDataJsonFile(Mockito.any(File.class))).then(invocationOnMock -> null);
            configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVmMetadata(Mockito.anyList(), Mockito.anyString(), Mockito.any(File.class), anyMap())).then(invocationOnMock -> null);
@ -163,7 +163,7 @@ public class ConfigDriveBuilderTest {
        Assert.assertEquals("mockIsoDataBase64", returnedIsoData);
        configDriveBuilderMocked.verify(() -> {
-           ConfigDriveBuilder.writeVendorEmptyJsonFile(Mockito.any(File.class));
+           ConfigDriveBuilder.writeVendorDataJsonFile(Mockito.any(File.class));
            ConfigDriveBuilder.writeVmMetadata(Mockito.anyList(), Mockito.anyString(), Mockito.any(File.class), anyMap());
            ConfigDriveBuilder.linkUserData(Mockito.anyString());
            ConfigDriveBuilder.generateAndRetrieveIsoAsBase64Iso(Mockito.anyString(), Mockito.anyString(), Mockito.anyString());
@ -172,23 +172,23 @@ public class ConfigDriveBuilderTest {
    }

    @Test(expected = CloudRuntimeException.class)
-   public void writeVendorEmptyJsonFileTestCannotCreateOpenStackFolder() {
+   public void writeVendorDataJsonFileTestCannotCreateOpenStackFolder() {
        File folderFileMock = Mockito.mock(File.class);
        Mockito.doReturn(false).when(folderFileMock).mkdirs();
-       ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock);
+       ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock);
    }

    @Test(expected = CloudRuntimeException.class)
-   public void writeVendorEmptyJsonFileTest() {
+   public void writeVendorDataJsonFileTest() {
        File folderFileMock = Mockito.mock(File.class);
        Mockito.doReturn(false).when(folderFileMock).mkdirs();
-       ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock);
+       ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock);
    }

    @Test
-   public void writeVendorEmptyJsonFileTestCreatingFolder() {
+   public void writeVendorDataJsonFileTestCreatingFolder() {
        try (MockedStatic<ConfigDriveBuilder> configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) {
            File folderFileMock = Mockito.mock(File.class);
@ -196,9 +196,9 @@ public class ConfigDriveBuilderTest {
            Mockito.doReturn(true).when(folderFileMock).mkdirs();
            //force execution of real method
-           configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock)).thenCallRealMethod();
+           configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock)).thenCallRealMethod();
-           ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock);
+           ConfigDriveBuilder.writeVendorDataJsonFile(folderFileMock);
            Mockito.verify(folderFileMock).exists();
            Mockito.verify(folderFileMock).mkdirs();

View File

@ -29,7 +29,7 @@
    </parent>
    <dependencies>
        <dependency>
-           <groupId>net.java.dev.vcc.thirdparty</groupId>
+           <groupId>com.citrix.hypervisor</groupId>
            <artifactId>xen-api</artifactId>
            <version>${cs.xapi.version}</version>
        </dependency>

View File

@ -44,7 +44,7 @@
            <scope>compile</scope>
        </dependency>
        <dependency>
-           <groupId>net.java.dev.vcc.thirdparty</groupId>
+           <groupId>com.citrix.hypervisor</groupId>
            <artifactId>xen-api</artifactId>
            <version>${cs.xapi.version}</version>
        </dependency>

View File

@ -30,6 +30,8 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.EntityExistsException;
import com.cloud.hypervisor.xenserver.resource.XcpServer83Resource;
import com.cloud.hypervisor.xenserver.resource.Xenserver84Resource;
import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
@ -265,7 +267,6 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
            } catch (Exception e) {
                logger.debug("Caught exception during logout", e);
            }
-           conn.dispose();
            conn = null;
        }
@ -435,6 +436,10 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L
            }
        } else if (prodBrand.equals("XCP_Kronos")) {
            return new XcpOssResource();
        } else if (prodBrand.equals("XenServer") && prodVersion.equals("8.4.0")) {
            return new Xenserver84Resource();
        } else if (prodBrand.equals("XCP-ng") && (prodVersion.equals("8.3.0"))) {
            return new XcpServer83Resource();
        } else if (prodBrand.equals("XenServer") || prodBrand.equals("XCP-ng") || prodBrand.equals("Citrix Hypervisor")) {
            final String[] items = prodVersion.split("\\.");
            if ((Integer.parseInt(items[0]) > 6) ||

View File

@ -1699,6 +1699,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
            nwr.nameLabel = newName;
            nwr.tags = new HashSet<>();
            nwr.tags.add(generateTimeStamp());
            nwr.managed = true;
            vlanNetwork = Network.create(conn, nwr);
            vlanNic = getNetworkByName(conn, newName);
            if (vlanNic == null) { // Still vlanNic is null means we could not
@ -2004,6 +2005,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
            // started
            otherConfig.put("assume_network_is_shared", "true");
            rec.otherConfig = otherConfig;
            rec.managed = true;
            nw = Network.create(conn, rec);
            logger.debug("### XenServer network for tunnels created:" + nwName);
        } else {
@ -4835,6 +4837,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
configs.put("netmask", NetUtils.getLinkLocalNetMask()); configs.put("netmask", NetUtils.getLinkLocalNetMask());
configs.put("vswitch-disable-in-band", "true"); configs.put("vswitch-disable-in-band", "true");
rec.otherConfig = configs; rec.otherConfig = configs;
rec.managed = true;
linkLocal = Network.create(conn, rec); linkLocal = Network.create(conn, rec);
} else { } else {
linkLocal = networks.iterator().next(); linkLocal = networks.iterator().next();
@ -5023,6 +5026,7 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S
        if (networks.isEmpty()) {
            rec.nameDescription = "vswitch network for " + nwName;
            rec.nameLabel = nwName;
            rec.managed = true;
            vswitchNw = Network.create(conn, rec);
        } else {
            vswitchNw = networks.iterator().next();

View File

@ -0,0 +1,25 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.xenserver.resource;
public class XcpServer83Resource extends XenServer650Resource {
@Override
protected String getPatchFilePath() {
return "scripts/vm/hypervisor/xenserver/xcpserver83/patch";
}
}

View File

@ -21,6 +21,7 @@ import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.xensource.xenapi.APIVersion;
import com.xensource.xenapi.Connection;
import com.xensource.xenapi.ConnectionNew;
import com.xensource.xenapi.Host;
import com.xensource.xenapi.Pool;
import com.xensource.xenapi.Session;
@ -150,12 +151,12 @@ public class XenServerConnectionPool {
    }

    public Connection getConnect(String ip, String username, Queue<String> password) {
-       Connection conn = new Connection(getURL(ip), 10, _connWait);
+       Connection conn = new ConnectionNew(getURL(ip), 10, _connWait);
        try {
            loginWithPassword(conn, username, password, APIVersion.latest().toString());
        } catch (Types.HostIsSlave e) {
            String maddress = e.masterIPAddress;
-           conn = new Connection(getURL(maddress), 10, _connWait);
+           conn = new ConnectionNew(getURL(maddress), 10, _connWait);
            try {
                loginWithPassword(conn, username, password, APIVersion.latest().toString());
            } catch (Exception e1) {
@ -221,7 +222,7 @@ public class XenServerConnectionPool {
        if ( mConn == null ) {
            try {
-               Connection conn = new Connection(getURL(ipAddress), 5, _connWait);
+               Connection conn = new ConnectionNew(getURL(ipAddress), 5, _connWait);
                Session sess = loginWithPassword(conn, username, password, APIVersion.latest().toString());
                Host host = sess.getThisHost(conn);
                Boolean hostenabled = host.getEnabled(conn);
@ -231,7 +232,6 @@ public class XenServerConnectionPool {
                } catch (Exception e) {
                    LOGGER.debug("Caught exception during logout", e);
                }
-               conn.dispose();
            }
            if (!hostenabled) {
                String msg = "Unable to create master connection, due to master Host " + ipAddress + " is not enabled";
@ -412,7 +412,7 @@ public class XenServerConnectionPool {
        return s_instance;
    }

-   public class XenServerConnection extends Connection {
+   public class XenServerConnection extends ConnectionNew {
        long _interval;
        int _retries;
        String _ip;

View File

@ -0,0 +1,24 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.xenserver.resource;
public class Xenserver84Resource extends XenServer650Resource {
@Override
protected String getPatchFilePath() {
return "scripts/vm/hypervisor/xenserver/xenserver84/patch";
}
}

View File

@ -96,7 +96,15 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
                citrixResourceBase.createVGPU(conn, command, vm, gpuDevice);
            }
-           if (vmSpec.getType() != VirtualMachine.Type.User) {
+           Host.Record record = host.getRecord(conn);
+           String xenBrand = record.softwareVersion.get("product_brand");
+           String xenVersion = record.softwareVersion.get("product_version");
+           boolean requiresGuestTools = true;
+           if (xenBrand.equals("XenServer") && isVersionGreaterThanOrEqual(xenVersion, "8.2.0")) {
+               requiresGuestTools = false;
+           }
+           if (vmSpec.getType() != VirtualMachine.Type.User && requiresGuestTools) {
                citrixResourceBase.createPatchVbd(conn, vmName, vm);
            }
@ -263,4 +271,19 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
            }
        }
    }
public static boolean isVersionGreaterThanOrEqual(String v1, String v2) {
String[] parts1 = v1.split("\\.");
String[] parts2 = v2.split("\\.");
int length = Math.max(parts1.length, parts2.length);
for (int i = 0; i < length; i++) {
int num1 = i < parts1.length ? Integer.parseInt(parts1[i]) : 0;
int num2 = i < parts2.length ? Integer.parseInt(parts2[i]) : 0;
if (num1 > num2) return true;
if (num1 < num2) return false;
}
return true; // versions are equal
}
}
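For illustration, how the isVersionGreaterThanOrEqual() helper above behaves for a few sample version strings, and how that feeds the requiresGuestTools decision. This snippet is not part of the commit; the version values are sample inputs only.

    // Illustrative sketch only; uses the public static helper defined in the class above.
    public class VersionCheckExample {
        public static void main(String[] args) {
            // true: 8.2.1 >= 8.2.0, so the guest-tools patch VBD is skipped for system VMs
            System.out.println(CitrixStartCommandWrapper.isVersionGreaterThanOrEqual("8.2.1", "8.2.0"));
            // true: equal versions compare as greater-or-equal
            System.out.println(CitrixStartCommandWrapper.isVersionGreaterThanOrEqual("8.2.0", "8.2.0"));
            // false: 7.1 < 8.2.0, so requiresGuestTools stays true and createPatchVbd() is still called
            System.out.println(CitrixStartCommandWrapper.isVersionGreaterThanOrEqual("7.1", "8.2.0"));
        }
    }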

View File

@ -0,0 +1,335 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.xensource.xenapi;
import org.apache.ws.commons.util.NamespaceContextImpl;
import org.apache.xmlrpc.XmlRpcException;
import org.apache.xmlrpc.client.XmlRpcClient;
import org.apache.xmlrpc.client.XmlRpcClientConfigImpl;
import org.apache.xmlrpc.client.XmlRpcHttpClientConfig;
import org.apache.xmlrpc.common.TypeFactory;
import org.apache.xmlrpc.common.TypeFactoryImpl;
import org.apache.xmlrpc.common.XmlRpcStreamConfig;
import org.apache.xmlrpc.parser.MapParser;
import org.apache.xmlrpc.parser.RecursiveTypeParserImpl;
import org.apache.xmlrpc.parser.TypeParser;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import javax.xml.namespace.QName;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.TimeZone;
public class ConnectionNew extends Connection {
/**
* The version of the bindings that this class belongs to.
*
* @deprecated This field is not used any more.
*/
private APIVersion apiVersion;
/**
* Updated when Session.login_with_password() is called.
*/
@Override
public APIVersion getAPIVersion()
{
return apiVersion;
}
/**
* The opaque reference to the session used by this connection
*/
private String sessionReference;
/**
* As seen by the xmlrpc library. From our point of view it's a server.
*/
private final XmlRpcClient client;
/**
* Creates a connection to a particular server using a given url. This object can then be passed
* in to any other API calls.
*
* Note this constructor does NOT call Session.loginWithPassword; the programmer is responsible for calling it,
* passing the Connection as a parameter. No attempt to connect to the server is made until login is called.
*
* When this constructor is used, a call to dispose() will do nothing. The programmer is responsible for manually
* logging out the Session.
*
* @param url The URL of the server to connect to
* @param replyTimeout The reply timeout for xml-rpc calls in seconds
* @param connTimeout The connection timeout for xml-rpc calls in seconds
*/
public ConnectionNew(URL url, int replyTimeout, int connTimeout)
{
super(url, replyTimeout, connTimeout);
this.client = getClientFromURL(url, replyTimeout, connTimeout);
}
private XmlRpcClientConfigImpl config = new XmlRpcClientConfigImpl();
@Override
public XmlRpcClientConfigImpl getConfig()
{
return config;
}
static class CustomMapParser extends RecursiveTypeParserImpl {
private int level = 0;
private StringBuffer nameBuffer = new StringBuffer();
private Object nameObject;
private Map map;
private boolean inName;
private boolean inValue;
private boolean doneValue;
public CustomMapParser(XmlRpcStreamConfig pConfig, NamespaceContextImpl pContext, TypeFactory pFactory) {
super(pConfig, pContext, pFactory);
}
protected void addResult(Object pResult) throws SAXException {
if (this.inName) {
this.nameObject = pResult;
} else {
if (this.nameObject == null) {
throw new SAXParseException("Invalid state: Expected name", this.getDocumentLocator());
}
this.map.put(this.nameObject, pResult);
}
}
public void startDocument() throws SAXException {
super.startDocument();
this.level = 0;
this.map = new HashMap();
this.inValue = this.inName = false;
}
public void characters(char[] pChars, int pOffset, int pLength) throws SAXException {
if (this.inName && !this.inValue) {
this.nameBuffer.append(pChars, pOffset, pLength);
} else {
super.characters(pChars, pOffset, pLength);
}
}
public void ignorableWhitespace(char[] pChars, int pOffset, int pLength) throws SAXException {
if (this.inName) {
this.characters(pChars, pOffset, pLength);
} else {
super.ignorableWhitespace(pChars, pOffset, pLength);
}
}
public void startElement(String pURI, String pLocalName, String pQName, Attributes pAttrs) throws SAXException {
switch (this.level++) {
case 0:
if (!"".equals(pURI) || !"struct".equals(pLocalName)) {
throw new SAXParseException("Expected struct, got " + new QName(pURI, pLocalName), this.getDocumentLocator());
}
break;
case 1:
if (!"".equals(pURI) || !"member".equals(pLocalName)) {
throw new SAXParseException("Expected member, got " + new QName(pURI, pLocalName), this.getDocumentLocator());
}
this.doneValue = this.inName = this.inValue = false;
this.nameObject = null;
this.nameBuffer.setLength(0);
break;
case 2:
if (this.doneValue) {
throw new SAXParseException("Expected /member, got " + new QName(pURI, pLocalName), this.getDocumentLocator());
}
if ("".equals(pURI) && "name".equals(pLocalName)) {
if (this.nameObject != null) {
throw new SAXParseException("Expected value, got " + new QName(pURI, pLocalName), this.getDocumentLocator());
}
this.inName = true;
} else if ("".equals(pURI) && "value".equals(pLocalName)) {
if (this.nameObject == null) {
throw new SAXParseException("Expected name, got " + new QName(pURI, pLocalName), this.getDocumentLocator());
}
this.inValue = true;
this.startValueTag();
}
break;
case 3:
if (this.inName && "".equals(pURI) && "value".equals(pLocalName)) {
if (!this.cfg.isEnabledForExtensions()) {
throw new SAXParseException("Expected /name, got " + new QName(pURI, pLocalName), this.getDocumentLocator());
}
this.inValue = true;
this.startValueTag();
} else {
super.startElement(pURI, pLocalName, pQName, pAttrs);
}
break;
default:
super.startElement(pURI, pLocalName, pQName, pAttrs);
}
}
public void endElement(String pURI, String pLocalName, String pQName) throws SAXException {
switch (--this.level) {
case 0:
this.setResult(this.map);
case 1:
break;
case 2:
if (this.inName) {
this.inName = false;
if (this.nameObject == null) {
this.nameObject = this.nameBuffer.toString();
} else {
for(int i = 0; i < this.nameBuffer.length(); ++i) {
if (!Character.isWhitespace(this.nameBuffer.charAt(i))) {
throw new SAXParseException("Unexpected non-whitespace character in member name", this.getDocumentLocator());
}
}
}
} else if (this.inValue) {
this.endValueTag();
this.doneValue = true;
}
break;
case 3:
if (this.inName && this.inValue && "".equals(pURI) && "value".equals(pLocalName)) {
this.endValueTag();
} else {
super.endElement(pURI, pLocalName, pQName);
}
break;
default:
super.endElement(pURI, pLocalName, pQName);
}
}
}
private XmlRpcClient getClientFromURL(URL url, int replyWait, int connWait)
{
config.setTimeZone(TimeZone.getTimeZone("UTC"));
config.setServerURL(url);
config.setReplyTimeout(replyWait * 1000);
config.setConnectionTimeout(connWait * 1000);
XmlRpcClient client = new XmlRpcClient();
client.setConfig(config);
client.setTypeFactory(new TypeFactoryImpl(client) {
@Override
public TypeParser getParser(XmlRpcStreamConfig pConfig, NamespaceContextImpl pContext, String pURI, String pLocalName) {
TypeParser parser = super.getParser(pConfig, pContext, pURI, pLocalName);
if (parser instanceof MapParser) {
return new CustomMapParser(pConfig, pContext, this);
}
return parser;
}
});
return client;
}
@Override
public String getSessionReference()
{
return this.sessionReference;
}
@Override
protected Map dispatch(String methodCall, Object[] methodParams) throws XmlRpcException, Types.XenAPIException
{
Map response = (Map) client.execute(methodCall, methodParams);
if (methodCall.equals("session.login_with_password") &&
response.get("Status").equals("Success"))
{
Session session = Types.toSession(response.get("Value"));
sessionReference = session.ref;
setAPIVersion(session);
}
else if (methodCall.equals("session.slave_local_login_with_password") &&
response.get("Status").equals("Success"))
{
sessionReference = Types.toSession(response.get("Value")).ref;
apiVersion = APIVersion.latest();
}
else if (methodCall.equals("session.logout"))
{
// Work around a bug in XenServer 5.0 and below.
// session.login_with_password should have rejected us with
// HOST_IS_SLAVE, but instead we don't find out until later.
// We don't want to leak the session, so we need to log out
// this session from the master instead.
if (response.get("Status").equals("Failure"))
{
Object[] error = (Object[]) response.get("ErrorDescription");
if (error.length == 2 && error[0].equals("HOST_IS_SLAVE"))
{
try
{
XmlRpcHttpClientConfig clientConfig = (XmlRpcHttpClientConfig)client.getClientConfig();
URL client_url = clientConfig.getServerURL();
URL masterUrl = new URL(client_url.getProtocol(), (String)error[1], client_url.getPort(), client_url.getFile());
Connection tmp_conn = new Connection(masterUrl, sessionReference, clientConfig.getReplyTimeout(), clientConfig.getConnectionTimeout());
Session.logout(tmp_conn);
}
catch (Exception ex)
{
// Ignore
}
}
}
this.sessionReference = null;
}
return Types.checkResponse(response);
}
private void setAPIVersion(Session session) throws Types.XenAPIException, XmlRpcException
{
try
{
long major = session.getThisHost(this).getAPIVersionMajor(this);
long minor = session.getThisHost(this).getAPIVersionMinor(this);
apiVersion = APIVersion.fromMajorMinor(major, minor);
}
catch (Types.BadServerResponse exn)
{
apiVersion = APIVersion.UNKNOWN;
}
}
}

View File

@ -5,6 +5,11 @@ All notable changes to Linstor CloudStack plugin will be documented in this file
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2025-05-07]

### Added

- Implemented storage/volume stats

## [2025-03-13]

### Fixed

View File

@ -28,11 +28,11 @@ import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest;
import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted;
import com.linbit.linstor.api.model.ResourceDefinitionCreate;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroup;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceMakeAvailable;
import com.linbit.linstor.api.model.ResourceWithVolumes;
import com.linbit.linstor.api.model.Snapshot;
import com.linbit.linstor.api.model.SnapshotRestore;
import com.linbit.linstor.api.model.VolumeDefinition;
@ -134,6 +134,9 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
    @Inject
    private HostDao _hostDao;

    private long volumeStatsLastUpdate = 0L;
    private final Map<String, Pair<Long, Long>> volumeStats = new HashMap<>();

    public LinstorPrimaryDataStoreDriverImpl()
    {
    }
@ -403,9 +406,9 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
        }
    }

-   private String getRscGrp(StoragePoolVO storagePoolVO) {
-       return storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
-           storagePoolVO.getUserInfo() : "DfltRscGrp";
+   private String getRscGrp(StoragePool storagePool) {
+       return storagePool.getUserInfo() != null && !storagePool.getUserInfo().isEmpty() ?
+           storagePool.getUserInfo() : "DfltRscGrp";
    }

    /**
@ -618,7 +621,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
     */
    private void updateRscGrpIfNecessary(DevelopersApi api, String rscName, String tgtRscGrp) throws ApiException {
        List<ResourceDefinition> rscDfns = api.resourceDefinitionList(
-           Collections.singletonList(rscName), null, null, null);
+           Collections.singletonList(rscName), false, null, null, null);
        if (rscDfns != null && !rscDfns.isEmpty()) {
            ResourceDefinition rscDfn = rscDfns.get(0);
@ -648,7 +651,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
    private void deleteTemplateForProps(
        DevelopersApi api, String rscName) throws ApiException {
        List<ResourceDefinition> rdList = api.resourceDefinitionList(
-           Collections.singletonList(rscName), null, null, null);
+           Collections.singletonList(rscName), false, null, null, null);
        if (CollectionUtils.isNotEmpty(rdList)) {
            ResourceDefinitionModify rdm = new ResourceDefinitionModify();
@ -1506,22 +1509,77 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
    @Override
    public boolean canProvideStorageStats() {
-       return false;
+       return true;
    }

    @Override
    public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
-       return null;
+       logger.debug(String.format("Requesting storage stats: %s", storagePool));
+       return LinstorUtil.getStorageStats(storagePool.getHostAddress(), getRscGrp(storagePool));
    }

    @Override
    public boolean canProvideVolumeStats() {
-       return false;
+       return LinstorConfigurationManager.VolumeStatsCacheTime.value() > 0;
    }
/**
* Updates the cache map containing current allocated size data.
* @param api Linstor Developers api object
*/
private void fillVolumeStatsCache(DevelopersApi api) {
try {
logger.trace("Start volume stats cache update");
List<ResourceWithVolumes> resources = api.viewResources(
Collections.emptyList(),
Collections.emptyList(),
Collections.emptyList(),
null,
null,
null);
List<ResourceDefinition> rscDfns = api.resourceDefinitionList(
Collections.emptyList(), true, null, null, null);
HashMap<String, Long> resSizeMap = new HashMap<>();
for (ResourceDefinition rscDfn : rscDfns) {
if (CollectionUtils.isNotEmpty(rscDfn.getVolumeDefinitions())) {
resSizeMap.put(rscDfn.getName(), rscDfn.getVolumeDefinitions().get(0).getSizeKib() * 1024);
}
}
HashMap<String, Long> allocSizeMap = new HashMap<>();
for (ResourceWithVolumes rsc : resources) {
if (!LinstorUtil.isRscDiskless(rsc) && !rsc.getVolumes().isEmpty()) {
long allocatedBytes = allocSizeMap.getOrDefault(rsc.getName(), 0L);
allocSizeMap.put(rsc.getName(), Math.max(allocatedBytes, rsc.getVolumes().get(0).getAllocatedSizeKib() * 1024));
}
}
volumeStats.clear();
for (Map.Entry<String, Long> entry : allocSizeMap.entrySet()) {
Long reserved = resSizeMap.getOrDefault(entry.getKey(), 0L);
Pair<Long, Long> volStat = new Pair<>(entry.getValue(), reserved);
volumeStats.put(entry.getKey(), volStat);
}
volumeStatsLastUpdate = System.currentTimeMillis();
logger.trace("Done volume stats cache update: {}", volumeStats.size());
} catch (ApiException e) {
logger.error("Unable to fetch Linstor resources: {}", e.getBestMessage());
}
} }
    @Override
    public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
-       return null;
+       final DevelopersApi api = LinstorUtil.getLinstorAPI(storagePool.getHostAddress());
synchronized (volumeStats) {
long invalidateCacheTime = volumeStatsLastUpdate +
LinstorConfigurationManager.VolumeStatsCacheTime.value() * 1000;
if (invalidateCacheTime < System.currentTimeMillis()) {
fillVolumeStatsCache(api);
}
return volumeStats.get(LinstorUtil.RSC_PREFIX + volumeId);
}
} }
    @Override

View File

@ -24,7 +24,14 @@ public class LinstorConfigurationManager implements Configurable
    public static final ConfigKey<Boolean> BackupSnapshots = new ConfigKey<>(Boolean.class, "lin.backup.snapshots", "Advanced", "true",
        "Backup Linstor primary storage snapshots to secondary storage (deleting ps snapshot)", true, ConfigKey.Scope.Global, null);

-   public static final ConfigKey<?>[] CONFIG_KEYS = new ConfigKey<?>[] { BackupSnapshots };
+   public static final ConfigKey<Integer> VolumeStatsCacheTime = new ConfigKey<>("Advanced", Integer.class,
+       "lin.volumes.stats.cachetime", "300",
+       "Cache time of volume stats for Linstor volumes. 0 to disable volume stats",
+       false);
+
+   public static final ConfigKey<?>[] CONFIG_KEYS = new ConfigKey<?>[] {
+       BackupSnapshots, VolumeStatsCacheTime
+   };

    @Override
    public String getConfigComponentName()

View File

@ -196,6 +196,30 @@ public class LinstorUtil {
        }
    }
public static Pair<Long, Long> getStorageStats(String linstorUrl, String rscGroupName) {
DevelopersApi linstorApi = getLinstorAPI(linstorUrl);
try {
List<StoragePool> storagePools = LinstorUtil.getRscGroupStoragePools(linstorApi, rscGroupName);
long capacity = storagePools.stream()
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
.mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0L)
.sum() * 1024; // linstor uses kiB
long used = storagePools.stream()
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
.mapToLong(sp -> sp.getTotalCapacity() != null && sp.getFreeCapacity() != null ?
sp.getTotalCapacity() - sp.getFreeCapacity() : 0L)
.sum() * 1024; // linstor uses Kib
LOGGER.debug(
String.format("Linstor(%s;%s): storageStats -> %d/%d", linstorUrl, rscGroupName, capacity, used));
return new Pair<>(capacity, used);
} catch (ApiException apiEx) {
LOGGER.error(apiEx.getMessage());
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
}
}
    /**
     * Check if any resource of the given name is InUse on any host.
     *
@ -304,7 +328,7 @@ public class LinstorUtil {
    public static List<ResourceDefinition> getRDListStartingWith(DevelopersApi api, String startWith)
        throws ApiException
    {
-       List<ResourceDefinition> rscDfns = api.resourceDefinitionList(null, null, null, null);
+       List<ResourceDefinition> rscDfns = api.resourceDefinitionList(null, false, null, null, null);
        return rscDfns.stream()
            .filter(rscDfn -> rscDfn.getName().toLowerCase().startsWith(startWith.toLowerCase()))
@ -387,7 +411,7 @@ public class LinstorUtil {
     */
    public static ResourceDefinition findResourceDefinition(DevelopersApi api, String rscName, String rscGrpName)
        throws ApiException {
-       List<ResourceDefinition> rscDfns = api.resourceDefinitionList(null, null, null, null);
+       List<ResourceDefinition> rscDfns = api.resourceDefinitionList(null, false, null, null, null);
        List<ResourceDefinition> rdsStartingWith = rscDfns.stream()
            .filter(rscDfn -> rscDfn.getName().toLowerCase().startsWith(rscName.toLowerCase()))
@ -403,4 +427,8 @@ public class LinstorUtil {
        return rd.orElseGet(() -> rdsStartingWith.get(0));
    }

    public static boolean isRscDiskless(ResourceWithVolumes rsc) {
        return rsc.getFlags() != null && rsc.getFlags().contains(ApiConsts.FLAG_DISKLESS);
    }
}
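For illustration, the unit conversion behind getStorageStats() above: LINSTOR reports storage pool capacities in KiB, so totals are multiplied by 1024 before being returned as bytes. This snippet is not part of the commit and the pool sizes are made up.

    // Illustrative sketch only; made-up pool sizes to show the KiB-to-bytes conversion.
    public class LinstorStatsExample {
        public static void main(String[] args) {
            long totalKiB = 104857600L;                   // 100 GiB reported by LINSTOR, in KiB
            long freeKiB = 41943040L;                     // 40 GiB free, in KiB
            long capacityBytes = totalKiB * 1024;         // 107374182400 bytes
            long usedBytes = (totalKiB - freeKiB) * 1024; // 64424509440 bytes
            System.out.println(capacityBytes + " / " + usedBytes);
        }
    }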

View File

@ -175,7 +175,7 @@
        <cs.nitro.version>10.1</cs.nitro.version>
        <cs.opensaml.version>2.6.6</cs.opensaml.version>
        <cs.rados-java.version>0.6.0</cs.rados-java.version>
-       <cs.java-linstor.version>0.6.0</cs.java-linstor.version>
+       <cs.java-linstor.version>0.6.1</cs.java-linstor.version>
        <cs.reflections.version>0.10.2</cs.reflections.version>
        <cs.servicemix.version>3.4.4_1</cs.servicemix.version>
        <cs.servlet.version>4.0.1</cs.servlet.version>
@ -184,7 +184,7 @@
        <cs.trilead.version>build-217-jenkins-27</cs.trilead.version>
        <cs.vmware.api.version>8.0</cs.vmware.api.version>
        <cs.winrm4j.version>0.5.0</cs.winrm4j.version>
-       <cs.xapi.version>6.2.0-3.1</cs.xapi.version>
+       <cs.xapi.version>8.1.0</cs.xapi.version>
        <cs.xmlrpc.version>3.1.3</cs.xmlrpc.version>
        <cs.xstream.version>1.4.20</cs.xstream.version>
        <org.springframework.version>5.3.26</org.springframework.version>

View File

@@ -0,0 +1,278 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# FileSR: local-file storage repository
import SR, VDI, SRCommand, FileSR, util
import errno
import os, re, sys, stat
import time
import xml.dom.minidom
import xs_errors
import nfs
import vhdutil
from lock import Lock
import cleanup
CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_CACHING", \
"VDI_CREATE","VDI_DELETE","VDI_ATTACH","VDI_DETACH", \
"VDI_UPDATE", "VDI_CLONE","VDI_SNAPSHOT","VDI_RESIZE", \
"VDI_RESIZE_ONLINE", "VDI_RESET_ON_BOOT", "ATOMIC_PAUSE"]
CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \
[ 'serverpath', 'path on remote server (required)' ] ]
DRIVER_INFO = {
'name': 'NFS VHD',
'description': 'SR plugin which stores disks as VHD files on a remote NFS filesystem',
'vendor': 'The Apache Software Foundation',
'copyright': 'Copyright (c) 2012 The Apache Software Foundation',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
# The mountpoint for the directory when performing an sr_probe. All probes
PROBE_MOUNTPOINT = "probe"
NFSPORT = 2049
DEFAULT_TRANSPORT = "tcp"
class NFSSR(FileSR.FileSR):
"""NFS file-based storage repository"""
def handles(type):
return type == 'nfs'
handles = staticmethod(handles)
def load(self, sr_uuid):
self.ops_exclusive = FileSR.OPS_EXCLUSIVE
self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
self.sr_vditype = SR.DEFAULT_TAP
if 'server' not in self.dconf:
raise xs_errors.XenError('ConfigServerMissing')
self.remoteserver = self.dconf['server']
self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
# Test for the optional 'useUDP' dconf attribute to select the NFS transport
self.transport = DEFAULT_TRANSPORT
if 'useUDP' in self.dconf and self.dconf['useUDP'] == 'true':
self.transport = "udp"
def validate_remotepath(self, scan):
if 'serverpath' not in self.dconf:
if scan:
try:
self.scan_exports(self.dconf['server'])
except:
pass
raise xs_errors.XenError('ConfigServerPathMissing')
if not self._isvalidpathstring(self.dconf['serverpath']):
raise xs_errors.XenError('ConfigServerPathBad', \
opterr='serverpath is %s' % self.dconf['serverpath'])
def check_server(self):
try:
nfs.check_server_tcp(self.remoteserver, self.transport)
except nfs.NfsException as exc:
raise xs_errors.XenError('NFSVersion',
opterr=exc.errstr)
def mount(self, mountpoint, remotepath):
try:
nfs.soft_mount(mountpoint, self.remoteserver, remotepath, self.transport)
except nfs.NfsException as exc:
raise xs_errors.XenError('NFSMount', opterr=exc.errstr)
def attach(self, sr_uuid):
self.validate_remotepath(False)
#self.remotepath = os.path.join(self.dconf['serverpath'], sr_uuid)
self.remotepath = self.dconf['serverpath']
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.mount_remotepath(sr_uuid)
def mount_remotepath(self, sr_uuid):
if not self._checkmount():
self.check_server()
self.mount(self.path, self.remotepath)
return super(NFSSR, self).attach(sr_uuid)
def probe(self):
# Verify NFS target and port
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.validate_remotepath(True)
self.check_server()
temppath = os.path.join(SR.MOUNT_BASE, PROBE_MOUNTPOINT)
self.mount(temppath, self.dconf['serverpath'])
try:
return nfs.scan_srlist(temppath)
finally:
try:
nfs.unmount(temppath, True)
except:
pass
def detach(self, sr_uuid):
"""Detach the SR: Unmounts and removes the mountpoint"""
if not self._checkmount():
return
util.SMlog("Aborting GC/coalesce")
cleanup.abort(self.uuid)
# Change directory to avoid unmount conflicts
os.chdir(SR.MOUNT_BASE)
try:
nfs.unmount(self.path, True)
except nfs.NfsException as exc:
raise xs_errors.XenError('NFSUnMount', opterr=exc.errstr)
return super(NFSSR, self).detach(sr_uuid)
def create(self, sr_uuid, size):
util._testHost(self.dconf['server'], NFSPORT, 'NFSTarget')
self.validate_remotepath(True)
if self._checkmount():
raise xs_errors.XenError('NFSAttached')
# Set the target path temporarily to the base dir
# so that we can create the target SR directory
self.remotepath = self.dconf['serverpath']
try:
self.mount_remotepath(sr_uuid)
except Exception as exn:
try:
os.rmdir(self.path)
except:
pass
raise exn
#newpath = os.path.join(self.path, sr_uuid)
#if util.ioretry(lambda: util.pathexists(newpath)):
# if len(util.ioretry(lambda: util.listdir(newpath))) != 0:
# self.detach(sr_uuid)
# raise xs_errors.XenError('SRExists')
#else:
# try:
# util.ioretry(lambda: util.makedirs(newpath))
# except util.CommandException, inst:
# if inst.code != errno.EEXIST:
# self.detach(sr_uuid)
# raise xs_errors.XenError('NFSCreate',
# opterr='remote directory creation error is %d'
# % inst.code)
self.detach(sr_uuid)
def delete(self, sr_uuid):
# try to remove/delete non VDI contents first
super(NFSSR, self).delete(sr_uuid)
try:
if self._checkmount():
self.detach(sr_uuid)
# Set the target path temporarily to the base dir
# so that we can remove the target SR directory
self.remotepath = self.dconf['serverpath']
self.mount_remotepath(sr_uuid)
newpath = os.path.join(self.path, sr_uuid)
if util.ioretry(lambda: util.pathexists(newpath)):
util.ioretry(lambda: os.rmdir(newpath))
self.detach(sr_uuid)
except util.CommandException as inst:
self.detach(sr_uuid)
if inst.code != errno.ENOENT:
raise xs_errors.XenError('NFSDelete')
def vdi(self, uuid, loadLocked = False):
if not loadLocked:
return NFSFileVDI(self, uuid)
return NFSFileVDI(self, uuid)
def _checkmount(self):
return util.ioretry(lambda: util.pathexists(self.path)) \
and util.ioretry(lambda: util.ismount(self.path))
def scan_exports(self, target):
util.SMlog("scanning2 (target=%s)" % target)
dom = nfs.scan_exports(target)
print(dom.toprettyxml(), file=sys.stderr)
def _isvalidpathstring(self, path):
if not path.startswith("/"):
return False
l = self._splitstring(path)
for char in l:
if char.isalpha():
continue
elif char.isdigit():
continue
elif char in ['/','-','_','.',':']:
continue
else:
return False
return True
class NFSFileVDI(FileSR.FileVDI):
def attach(self, sr_uuid, vdi_uuid):
try:
vdi_ref = self.sr.srcmd.params['vdi_ref']
self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
"vdi-type")
self.session.xenapi.VDI.remove_from_xenstore_data(vdi_ref, \
"storage-type")
self.session.xenapi.VDI.add_to_xenstore_data(vdi_ref, \
"storage-type", "nfs")
except:
util.logException("NFSSR:attach")
pass
return super(NFSFileVDI, self).attach(sr_uuid, vdi_uuid)
def get_mtime(self, path):
st = util.ioretry_stat(path)
return st[stat.ST_MTIME]
def clone(self, sr_uuid, vdi_uuid):
timestamp_before = int(self.get_mtime(self.sr.path))
ret = super(NFSFileVDI, self).clone(sr_uuid, vdi_uuid)
timestamp_after = int(self.get_mtime(self.sr.path))
if timestamp_after == timestamp_before:
util.SMlog("SR dir timestamp didn't change, updating")
timestamp_after += 1
os.utime(self.sr.path, (timestamp_after, timestamp_after))
return ret
if __name__ == '__main__':
SRCommand.run(NFSSR, DRIVER_INFO)
else:
SR.registerSR(NFSSR)

View File

@@ -0,0 +1,65 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This file specifies the files that need
# to be transferred over to the XenServer.
# The format of this file is as follows:
# [Name of file]=[source path],[file permission],[destination path]
# [destination path] is required.
# If [file permission] is missing, 755 is assumed.
# If [source path] is missing, it looks in the same
# directory as the patch file.
# If [source path] starts with '/', then it is absolute path.
# If [source path] starts with '~', then it is path relative to management server home directory.
# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file.
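# For illustration only (hypothetical entry, not part of this manifest): a script named
# "myplugin" located one directory above this patch file, installed with mode 0755 into the
# XenServer plugins directory, would be declared as:
#   myplugin=..,0755,/etc/xapi.d/plugins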
NFSSR.py=/opt/xensource/sm
vmops=../xenserver84/,0755,/etc/xapi.d/plugins
ovstunnel=..,0755,/etc/xapi.d/plugins
vmopsSnapshot=../xenserver84/,0755,/etc/xapi.d/plugins
agent.zip=../../../../../vms,0644,/opt/xensource/packages/resources/
cloud-scripts.tgz=../../../../../vms,0644,/opt/xensource/packages/resources/
patch-sysvms.sh=../../../../../vms,0644,/opt/xensource/packages/resources/
id_rsa.cloud=../../../systemvm,0600,/root/.ssh
network_info.sh=..,0755,/opt/cloud/bin
setupxenserver.sh=..,0755,/opt/cloud/bin
make_migratable.sh=..,0755,/opt/cloud/bin
setup_iscsi.sh=..,0755,/opt/cloud/bin
pingtest.sh=../../..,0755,/opt/cloud/bin
router_proxy.sh=../../../../network/domr/,0755,/opt/cloud/bin
cloud-setup-bonding.sh=..,0755,/opt/cloud/bin
copy_vhd_to_secondarystorage.sh=..,0755,/opt/cloud/bin
copy_vhd_from_secondarystorage.sh=..,0755,/opt/cloud/bin
setup_heartbeat_sr.sh=..,0755,/opt/cloud/bin
setup_heartbeat_file.sh=..,0755,/opt/cloud/bin
check_heartbeat.sh=..,0755,/opt/cloud/bin
xenheartbeat.sh=..,0755,/opt/cloud/bin
launch_hb.sh=..,0755,/opt/cloud/bin
vhd-util=..,0755,/opt/cloud/bin
vmopspremium=../xenserver84/,0755,/etc/xapi.d/plugins
create_privatetemplate_from_snapshot.sh=..,0755,/opt/cloud/bin
upgrade_snapshot.sh=..,0755,/opt/cloud/bin
cloud-clean-vlan.sh=..,0755,/opt/cloud/bin
cloud-prepare-upgrade.sh=..,0755,/opt/cloud/bin
add_to_vcpus_params_live.sh=..,0755,/opt/cloud/bin
cloud-plugin-storage=../xenserver84/,0755,/etc/xapi.d/plugins
###add cloudstack plugin script for XCP
cloudstack_plugins.conf=..,0644,/etc/xensource
cloudstack_pluginlib.py=../xenserver84/,0755,/etc/xapi.d/plugins
cloudlog=..,0644,/etc/logrotate.d
update_host_passwd.sh=../..,0755,/opt/cloud/bin

View File

@@ -0,0 +1,303 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Version @VERSION@
#
# A plugin for executing script needed by vmops cloud
import os, sys, time
import XenAPIPlugin
if os.path.exists("/opt/xensource/sm"):
sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"])
if os.path.exists("/usr/lib/xcp/sm"):
sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
import SR, VDI, SRCommand, util, lvutil
from util import CommandException
import vhdutil
import shutil
import lvhdutil
import errno
import subprocess
import xs_errors
import cleanup
import stat
import random
import cloudstack_pluginlib as lib
import logging
lib.setup_logging("/var/log/cloud/cloud.log")
VHDUTIL = "vhd-util"
VHD_PREFIX = 'VHD-'
CLOUD_DIR = '/var/run/cloud_mount'
def echo(fn):
def wrapped(*v, **k):
name = fn.__name__
logging.debug("#### CLOUD enter %s ####" % name )
res = fn(*v, **k)
logging.debug("#### CLOUD exit %s ####" % name )
return res
return wrapped
def getPrimarySRPath(primaryStorageSRUuid, isISCSI):
if isISCSI:
primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid
return os.path.join(lvhdutil.VG_LOCATION, primarySRDir)
else:
return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid)
def getBackupVHD(UUID):
return UUID + '.' + SR.DEFAULT_TAP
def getVHD(UUID, isISCSI):
if isISCSI:
return VHD_PREFIX + UUID
else:
return UUID + '.' + SR.DEFAULT_TAP
def getIsTrueString(stringValue):
booleanValue = False
if (stringValue and stringValue == 'true'):
booleanValue = True
return booleanValue
def makeUnavailable(uuid, primarySRPath, isISCSI):
if not isISCSI:
return
VHD = getVHD(uuid, isISCSI)
path = os.path.join(primarySRPath, VHD)
manageAvailability(path, '-an')
return
def manageAvailability(path, value):
if path.__contains__("/var/run/sr-mount"):
return
logging.debug("Setting availability of " + path + " to " + value)
try:
cmd = ['/usr/sbin/lvchange', value, path]
util.pread2(cmd)
except: #CommandException, (rc, cmdListStr, stderr):
#errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr
errMsg = "Unexpected exception thrown by lvchange"
logging.debug(errMsg)
if value == "-ay":
# Raise an error only if we are trying to make it available.
# Just warn if we are trying to make it unavailable after the
# snapshot operation is done.
raise xs_errors.XenError(errMsg)
return
def checkVolumeAvailability(path):
try:
if not isVolumeAvailable(path):
# The VHD file is not available on XenServer. The volume is probably
# inactive or detached.
# Do lvchange -ay to make it available on XenServer
manageAvailability(path, '-ay')
except:
errMsg = "Could not determine status of ISCSI path: " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
success = False
i = 0
while i < 6:
i = i + 1
# Check if the vhd is actually visible by checking for the link
# set isISCSI to true
success = isVolumeAvailable(path)
if success:
logging.debug("Made vhd: " + path + " available and confirmed that it is visible")
break
# Sleep for 10 seconds before checking again.
time.sleep(10)
# If not visible within 1 min fail
if not success:
logging.debug("Could not make vhd: " + path + " available despite waiting for 1 minute. Does it exist?")
return success
def isVolumeAvailable(path):
# Check if iscsi volume is available on this XenServer.
status = "0"
try:
p = subprocess.Popen(["/bin/bash", "-c", "if [ -L " + path + " ]; then echo 1; else echo 0;fi"], stdout=subprocess.PIPE, universal_newlines=True)
status = p.communicate()[0].strip("\n")
except:
errMsg = "Could not determine status of ISCSI path: " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return (status == "1")
def scanParent(path):
# Do a scan for the parent for ISCSI volumes
# Note that the parent need not be visible on the XenServer
parentUUID = ''
try:
lvName = os.path.basename(path)
dirname = os.path.dirname(path)
vgName = os.path.basename(dirname)
vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
parentUUID = vhdInfo.parentUuid
except:
errMsg = "Could not get vhd parent of " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return parentUUID
def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
snapshotVHD = getVHD(snapshotUuid, isISCSI)
snapshotPath = os.path.join(primarySRPath, snapshotVHD)
baseCopyUuid = ''
if isISCSI:
checkVolumeAvailability(snapshotPath)
baseCopyUuid = scanParent(snapshotPath)
else:
baseCopyUuid = getParent(snapshotPath, isISCSI)
logging.debug("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
return baseCopyUuid
def getParent(path, isISCSI):
parentUUID = ''
try :
if isISCSI:
parentUUID = vhdutil.getParent(path, lvhdutil.extractUuid)
else:
parentUUID = vhdutil.getParent(path, cleanup.FileVDI.extractUuid)
except:
errMsg = "Could not get vhd parent of " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return parentUUID
def getVhdParent(session, args):
logging.debug("getParent with " + str(args))
try:
primaryStorageSRUuid = args['primaryStorageSRUuid']
snapshotUuid = args['snapshotUuid']
isISCSI = getIsTrueString(args['isISCSI'])
primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
logging.debug("primarySRPath: " + primarySRPath)
baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
return baseCopyUuid
except:
logging.debug('getVhdParent', exc_info=True)
raise xs_errors.XenError("Failed to getVhdParent")
def makedirs(path):
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as e:
umount(path)
if os.path.isdir(path):
return
errMsg = "OSError while creating " + path + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return
def umount(localDir):
try:
cmd = ['umount', localDir]
util.pread2(cmd)
except CommandException:
errMsg = "CommandException raised while trying to umount " + localDir
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Successfully unmounted " + localDir)
return
@echo
def mountNfsSecondaryStorage(session, args):
remoteDir = args['remoteDir']
localDir = args['localDir']
nfsVersion = args['nfsVersion']
logging.debug("mountNfsSecondaryStorage with params: " + str(args))
mounted = False
f = open("/proc/mounts", 'r')
for line in f:
tokens = line.split(" ")
if len(tokens) > 2 and tokens[0] == remoteDir and tokens[1] == localDir:
mounted = True
if mounted:
return "true"
makedirs(localDir)
options = "soft,tcp,timeo=133,retrans=1"
if nfsVersion:
options += ",vers=" + nfsVersion
try:
cmd = ['mount', '-o', options, remoteDir, localDir]
txt = util.pread2(cmd)
except:
txt = ''
errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Successfully mounted " + remoteDir + " to " + localDir)
return "true"
@echo
def umountNfsSecondaryStorage(session, args):
localDir = args['localDir']
try:
cmd = ['umount', localDir]
util.pread2(cmd)
except CommandException:
errMsg = "CommandException raised while trying to umount " + localDir
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
try:
os.system("rmdir " + localDir)
except:
pass
logging.debug("Successfully unmounted " + localDir)
return "true"
@echo
def makeDirectory(session, args):
path = args['path']
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as e:
if os.path.isdir(path):
return "true"
errMsg = "OSError while creating " + path + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return "true"
if __name__ == "__main__":
XenAPIPlugin.dispatch({"getVhdParent":getVhdParent, "mountNfsSecondaryStorage":mountNfsSecondaryStorage,
"umountNfsSecondaryStorage":umountNfsSecondaryStorage,
"makeDirectory":makeDirectory})

View File

@@ -0,0 +1,894 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Common function for Cloudstack's XenAPI plugins
import configparser
import logging
import os
import subprocess
try:
import simplejson as json
except ImportError:
import json
import copy
from time import localtime, asctime
DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
DEFAULT_LOG_FILE = "/var/log/cloudstack_plugins.log"
PLUGIN_CONFIG_PATH = "/etc/xensource/cloudstack_plugins.conf"
OVSDB_PID_PATH = "/var/run/openvswitch/ovsdb-server.pid"
OVSDB_DAEMON_PATH = "ovsdb-server"
OVS_PID_PATH = "/var/run/openvswitch/ovs-vswitchd.pid"
OVS_DAEMON_PATH = "ovs-vswitchd"
VSCTL_PATH = "/usr/bin/ovs-vsctl"
OFCTL_PATH = "/usr/bin/ovs-ofctl"
XE_PATH = "/opt/xensource/bin/xe"
# OpenFlow tables set up in a pipeline-processing fashion for the bridge created for a VPC that is enabled for
# distributed routing.
# L2 path (intra-tier traffic) CLASSIFIER-> L2 lookup -> L2 flooding tables
# L3 path (inter-tier traffic) CLASSIFIER-> EGRESS ACL -> L3 lookup -> INGRESS ACL-> L2 lookup -> L2 flooding tables
# Classifier table has the rules to separate broadcast/multi-cast traffic, inter-tier traffic, intra-tier traffic
CLASSIFIER_TABLE=0
# Lookup table to determine the output port (vif/tunnel port) based on the MAC address
L2_LOOKUP_TABLE=1
# flooding table has the rules to flood on ports (both VIF, tunnel ports) except on the port on which packet arrived
L2_FLOOD_TABLE=2
# table has flow rules derived from egress ACL's
EGRESS_ACL_TABLE=3
# Lookup table to determine the output port (vif/tunnel port) based on the IP address
L3_LOOKUP_TABLE=4
# table has flow rules derived from ingress ACL's
INGRESS_ACL_TABLE=5
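# Illustrative sketch only (not flow rules installed by this module): an inter-tier (L3 path) packet
# entering on a VIF is steered through the pipeline with OpenFlow resubmit actions roughly as follows:
#   table=0 (CLASSIFIER_TABLE)  -> resubmit(,3)  # to EGRESS_ACL_TABLE
#   table=3 (EGRESS_ACL_TABLE)  -> resubmit(,4)  # to L3_LOOKUP_TABLE
#   table=4 (L3_LOOKUP_TABLE)   -> resubmit(,5)  # to INGRESS_ACL_TABLE
#   table=5 (INGRESS_ACL_TABLE) -> resubmit(,1)  # to L2_LOOKUP_TABLE
#   table=1 (L2_LOOKUP_TABLE)   -> output:<vif ofport>, or resubmit(,2) to L2_FLOOD_TABLE for unknown MACs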
class PluginError(Exception):
"""Base Exception class for all plugin errors."""
def __init__(self, *args):
Exception.__init__(self, *args)
def setup_logging(log_file=None):
debug = False
verbose = False
log_file_2 = None  # default when the config file is absent or has no 'file' option (avoids a NameError below)
log_format = DEFAULT_LOG_FORMAT
log_date_format = DEFAULT_LOG_DATE_FORMAT
# try to read plugin configuration file
if os.path.exists(PLUGIN_CONFIG_PATH):
config = configparser.ConfigParser()
config.read(PLUGIN_CONFIG_PATH)
try:
options = config.options('LOGGING')
if 'debug' in options:
debug = config.getboolean('LOGGING', 'debug')
if 'verbose' in options:
verbose = config.getboolean('LOGGING', 'verbose')
if 'format' in options:
log_format = config.get('LOGGING', 'format')
if 'date_format' in options:
log_date_format = config.get('LOGGING', 'date_format')
if 'file' in options:
log_file_2 = config.get('LOGGING', 'file')
except ValueError:
# configuration file contained invalid attributes
# ignore them
pass
except configparser.NoSectionError:
# Missing 'Logging' section in configuration file
pass
root_logger = logging.root
if debug:
root_logger.setLevel(logging.DEBUG)
elif verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
formatter = logging.Formatter(log_format, log_date_format)
log_filename = log_file or log_file_2 or DEFAULT_LOG_FILE
logfile_handler = logging.FileHandler(log_filename)
logfile_handler.setFormatter(formatter)
root_logger.addHandler(logfile_handler)
def do_cmd(cmd):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, a PluginError is raised with that information.
Otherwise, the output from stdout is returned.
"""
pipe = subprocess.PIPE
logging.debug("Executing:%s", cmd)
proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True, universal_newlines=True)  # text mode so stdout/stderr are str under Python 3
ret_code = proc.wait()
err = proc.stderr.read()
if ret_code:
logging.debug("The command exited with the error code: " +
"%s (stderr output:%s)" % (ret_code, err))
raise PluginError(err)
output = proc.stdout.read()
if output.endswith('\n'):
output = output[:-1]
return output
def _is_process_run(pidFile, name):
try:
fpid = open(pidFile, "r")
pid = fpid.readline()
fpid.close()
except IOError as e:
return -1
pid = pid[:-1]
ps = os.popen("ps -ae")
for l in ps:
if pid in l and name in l:
ps.close()
return 0
ps.close()
return -2
def _is_tool_exist(name):
if os.path.exists(name):
return 0
return -1
def check_switch():
global result
ret = _is_process_run(OVSDB_PID_PATH, OVSDB_DAEMON_PATH)
if ret < 0:
if ret == -1:
return "NO_DB_PID_FILE"
if ret == -2:
return "DB_NOT_RUN"
ret = _is_process_run(OVS_PID_PATH, OVS_DAEMON_PATH)
if ret < 0:
if ret == -1:
return "NO_SWITCH_PID_FILE"
if ret == -2:
return "SWITCH_NOT_RUN"
if _is_tool_exist(VSCTL_PATH) < 0:
return "NO_VSCTL"
if _is_tool_exist(OFCTL_PATH) < 0:
return "NO_OFCTL"
return "SUCCESS"
def _build_flow_expr(**kwargs):
is_delete_expr = kwargs.get('delete', False)
flow = ""
if not is_delete_expr:
flow = "hard_timeout=%s,idle_timeout=%s,priority=%s" \
% (kwargs.get('hard_timeout', '0'),
kwargs.get('idle_timeout', '0'),
kwargs.get('priority', '1'))
in_port = 'in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or ''
dl_type = 'dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or ''
dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
table = 'table' in kwargs and ",table=%s" % kwargs['table'] or ''
cookie = 'cookie' in kwargs and ",cookie=%s" % kwargs['cookie'] or ''
proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
flow = (flow + cookie+ in_port + dl_type + dl_src + dl_dst +
(ip or proto) + nw_src + nw_dst + table)
return flow
def add_flow(bridge, **kwargs):
"""
Builds a flow expression for **kwargs and adds the flow entry
to an Open vSwitch instance
"""
flow = _build_flow_expr(**kwargs)
actions = 'actions' in kwargs and ",actions=%s" % kwargs['actions'] or ''
flow = flow + actions
addflow = [OFCTL_PATH, "add-flow", bridge, flow]
do_cmd(addflow)
def del_flows(bridge, **kwargs):
"""
Removes flows according to criteria passed as keyword.
"""
flow = _build_flow_expr(delete=True, **kwargs)
# out_port condition does not exist for all flow commands
out_port = ("out_port" in kwargs and
",out_port=%s" % kwargs['out_port'] or '')
flow = flow + out_port
delFlow = [OFCTL_PATH, 'del-flows', bridge, flow]
do_cmd(delFlow)
def del_all_flows(bridge):
delFlow = [OFCTL_PATH, "del-flows", bridge]
do_cmd(delFlow)
normalFlow = "priority=0 idle_timeout=0 hard_timeout=0 actions=normal"
add_flow(bridge, normalFlow)
def del_port(bridge, port):
delPort = [VSCTL_PATH, "del-port", bridge, port]
do_cmd(delPort)
def get_network_id_for_vif(vif_name):
domain_id, device_id = vif_name[3:len(vif_name)].split(".")
hostname = do_cmd(["/bin/bash", "-c", "hostname"])
this_host_uuid = do_cmd([XE_PATH, "host-list", "hostname=%s" % hostname, "--minimal"])
dom_uuid = do_cmd([XE_PATH, "vm-list", "dom-id=%s" % domain_id, "resident-on=%s" %this_host_uuid, "--minimal"])
vif_uuid = do_cmd([XE_PATH, "vif-list", "vm-uuid=%s" % dom_uuid, "device=%s" % device_id, "--minimal"])
vnet = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=other-config",
"param-key=cloudstack-network-id"])
return vnet
def get_network_id_for_tunnel_port(tunnelif_name):
vnet = do_cmd([VSCTL_PATH, "get", "interface", tunnelif_name, "options:cloudstack-network-id"])
return vnet
def clear_flooding_rules_for_port(bridge, ofport):
del_flows(bridge, in_port=ofport, table=L2_FLOOD_TABLE)
def clear_flooding_rules_for_all_ports(bridge):
del_flows(bridge, cookie=111, table=L2_FLOOD_TABLE)
def add_flooding_rules_for_port(bridge, in_ofport, out_ofports):
action = "".join("output:%s," %ofport for ofport in out_ofports)[:-1]
add_flow(bridge, cookie=111, priority=1100, in_port=in_ofport, table=L2_FLOOD_TABLE, actions=action)
def get_ofport_for_vif(vif_name):
return do_cmd([VSCTL_PATH, "get", "interface", vif_name, "ofport"])
def get_macaddress_of_vif(vif_name):
domain_id, device_id = vif_name[3:len(vif_name)].split(".")
dom_uuid = do_cmd([XE_PATH, "vm-list", "dom-id=%s" % domain_id, "--minimal"])
vif_uuid = do_cmd([XE_PATH, "vif-list", "vm-uuid=%s" % dom_uuid, "device=%s" % device_id, "--minimal"])
mac = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=MAC"])
return mac
def get_vif_name_from_macaddress(macaddress):
vif_uuid = do_cmd([XE_PATH, "vif-list", "MAC=%s" % macaddress, "--minimal"])
vif_device_id = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=device"])
vm_uuid = do_cmd([XE_PATH, "vif-param-get", "uuid=%s" % vif_uuid, "param-name=vm-uuid"])
vm_domain_id = do_cmd([XE_PATH, "vm-param-get", "uuid=%s" % vm_uuid, "param-name=dom-id"])
return "vif"+vm_domain_id+"."+vif_device_id
def add_mac_lookup_table_entry(bridge, mac_address, out_of_port):
action = "output=%s" %out_of_port
add_flow(bridge, priority=1100, dl_dst=mac_address, table=L2_LOOKUP_TABLE, actions=action)
def delete_mac_lookup_table_entry(bridge, mac_address):
del_flows(bridge, dl_dst=mac_address, table=L2_LOOKUP_TABLE)
def add_ip_lookup_table_entry(bridge, ip, dst_tier_gateway_mac, dst_vm_mac):
action_str = "mod_dl_src:%s" % dst_tier_gateway_mac + ",mod_dl_dst:%s" % dst_vm_mac + ",resubmit(,%s)"%INGRESS_ACL_TABLE
action_str = "table=%s"%L3_LOOKUP_TABLE + ", ip, nw_dst=%s" % ip + ", actions=%s" %action_str
addflow = [OFCTL_PATH, "add-flow", bridge, action_str]
do_cmd(addflow)
def get_vpc_vms_on_host(vpc, host_id):
all_vms = vpc.vms
vms_on_host = []
for vm in all_vms:
if str(vm.hostid) == (host_id):
vms_on_host.append(vm)
return vms_on_host
def get_network_details(vpc, network_uuid):
tiers = vpc.tiers
for tier in tiers:
if str(tier.networkuuid) == (network_uuid):
return tier
return None
class jsonLoader(object):
def __init__(self, obj):
for k in obj:
v = obj[k]
if isinstance(v, dict):
setattr(self, k, jsonLoader(v))
elif isinstance(v, (list, tuple)):
if len(v) > 0 and isinstance(v[0], dict):
setattr(self, k, [jsonLoader(elem) for elem in v])
else:
setattr(self, k, v)
else:
setattr(self, k, v)
def __getattr__(self, val):
if val in self.__dict__:
return self.__dict__[val]
else:
return None
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v)
in self.__dict__.items()))
def __str__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v)
in self.__dict__.items()))
def get_acl(vpcconfig, required_acl_id):
acls = vpcconfig.acls
for acl in acls:
if acl.id == required_acl_id:
return acl
return None
def check_tunnel_exists(bridge, tunnel_name):
try:
res = do_cmd([VSCTL_PATH, "port-to-br", tunnel_name])
return res == bridge
except:
return False
def create_tunnel(bridge, remote_ip, gre_key, src_host, dst_host, network_uuid):
logging.debug("Creating tunnel from host %s" %src_host + " to host %s" %dst_host + " with GRE key %s" %gre_key)
res = check_switch()
if res != "SUCCESS":
logging.debug("Openvswitch running: NO")
return "FAILURE:%s" % res
# We need to keep the name below 14 characters
# src and target are enough - consider a fixed length hash
name = "t%s-%s-%s" % (gre_key, src_host, dst_host)
# Verify the xapi bridge to be created
# NOTE: Timeout should not be necessary anymore
wait = [VSCTL_PATH, "--timeout=30", "wait-until", "bridge",
bridge, "--", "get", "bridge", bridge, "name"]
res = do_cmd(wait)
if bridge not in res:
logging.debug("WARNING:Can't find bridge %s for creating " +
"tunnel!" % bridge)
return "FAILURE:NO_BRIDGE"
logging.debug("bridge %s for creating tunnel - VERIFIED" % bridge)
tunnel_setup = False
drop_flow_setup = False
try:
# Create a port and configure the tunnel interface for it
add_tunnel = [VSCTL_PATH, "add-port", bridge,
name, "--", "set", "interface",
name, "type=gre", "options:key=%s" % gre_key,
"options:remote_ip=%s" % remote_ip]
do_cmd(add_tunnel)
tunnel_setup = True
# verify port
verify_port = [VSCTL_PATH, "get", "port", name, "interfaces"]
res = do_cmd(verify_port)
# Expecting python-style list as output
iface_list = []
if len(res) > 2:
iface_list = res.strip()[1:-1].split(',')
if len(iface_list) != 1:
logging.debug("WARNING: Unexpected output while verifying " +
"port %s on bridge %s" % (name, bridge))
return "FAILURE:VERIFY_PORT_FAILED"
# verify interface
iface_uuid = iface_list[0]
verify_interface_key = [VSCTL_PATH, "get", "interface",
iface_uuid, "options:key"]
verify_interface_ip = [VSCTL_PATH, "get", "interface",
iface_uuid, "options:remote_ip"]
key_validation = do_cmd(verify_interface_key)
ip_validation = do_cmd(verify_interface_ip)
if gre_key not in key_validation or remote_ip not in ip_validation:
logging.debug("WARNING: Unexpected output while verifying " +
"interface %s on bridge %s" % (name, bridge))
return "FAILURE:VERIFY_INTERFACE_FAILED"
logging.debug("Tunnel interface validated:%s" % verify_interface_ip)
cmd_tun_ofport = [VSCTL_PATH, "get", "interface",
iface_uuid, "ofport"]
tun_ofport = do_cmd(cmd_tun_ofport)
# Ensure no trailing LF
if tun_ofport.endswith('\n'):
tun_ofport = tun_ofport[:-1]
# find the xs network for this bridge and verify it is used for an ovs tunnel network
xs_nw_uuid = do_cmd([XE_PATH, "network-list",
"bridge=%s" % bridge, "--minimal"])
ovs_tunnel_network = is_regular_tunnel_network(xs_nw_uuid)
ovs_vpc_distributed_vr_network = is_vpc_network_with_distributed_routing(xs_nw_uuid)
if ovs_tunnel_network == 'True':
# add flow entries for dropping broadcasts coming in from the gre tunnel
add_flow(bridge, priority=1000, in_port=tun_ofport,
dl_dst='ff:ff:ff:ff:ff:ff', actions='drop')
add_flow(bridge, priority=1000, in_port=tun_ofport,
nw_dst='224.0.0.0/24', actions='drop')
drop_flow_setup = True
logging.debug("Broadcast drop rules added")
if ovs_vpc_distributed_vr_network == 'True':
# add flow rules for dropping broadcast coming in from tunnel ports
add_flow(bridge, priority=1000, in_port=tun_ofport, table=0,
dl_dst='ff:ff:ff:ff:ff:ff', actions='drop')
add_flow(bridge, priority=1000, in_port=tun_ofport, table=0,
nw_dst='224.0.0.0/24', actions='drop')
# add flow rule to send the traffic from tunnel ports to L2 switching table only
add_flow(bridge, priority=1100, in_port=tun_ofport, table=0, actions='resubmit(,1)')
# mark tunnel interface with network id for which this tunnel was created
do_cmd([VSCTL_PATH, "set", "interface", name, "options:cloudstack-network-id=%s" % network_uuid])
update_flooding_rules_on_port_plug_unplug(bridge, name, 'online', network_uuid)
logging.debug("Successfully created tunnel from host %s" %src_host + " to host %s" %dst_host +
" with GRE key %s" %gre_key)
return "SUCCESS:%s" % name
except:
logging.debug("An unexpected error occurred. Rolling back")
if tunnel_setup:
logging.debug("Deleting GRE interface")
# Destroy GRE port and interface
del_port(bridge, name)
if drop_flow_setup:
# Delete flows
logging.debug("Deleting flow entries from GRE interface")
del_flows(bridge, in_port=tun_ofport)
# This will not cancel the original exception
raise
# Configures the bridge created for a VPC that is enabled for distributed routing. Management server sends VPC
# physical topology details (which VM of which tier runs on which host, etc.). Based on the VPC physical topology,
# the L2 lookup table and L3 lookup table are updated by this function.
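# A minimal sketch of the json_config payload this function expects, inferred from the fields read
# below; the values are illustrative only, not a real management-server message:
#   {"vpc": {"cidr": "10.0.0.0/16",
#            "vms":   [{"hostid": 1, "nics": [{"macaddress": "02:00:00:00:00:01",
#                                              "ipaddress": "10.0.1.10", "networkuuid": "net-1"}]}],
#            "hosts": [{"hostid": 2, "ipaddress": "192.168.1.2"}],
#            "tiers": [{"networkuuid": "net-1", "cidr": "10.0.1.0/24",
#                       "gatewaymac": "02:00:00:00:00:fe", "grekey": 1001}]}}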
def configure_vpc_bridge_for_network_topology(bridge, this_host_id, json_config, sequence_no):
vpconfig = jsonLoader(json.loads(json_config)).vpc
if vpconfig is None:
logging.debug("WARNING:Can't find VPC topology information in the json configuration file")
return "FAILURE:IMPROPER_JSON_CONFG_FILE"
try:
if not os.path.exists('/var/run/cloud'):
os.makedirs('/var/run/cloud')
# create a temporary file to store OpenFlow rules corresponding to L2 and L3 lookup table updates
ofspec_filename = "/var/run/cloud/" + bridge + sequence_no + ".ofspec"
ofspec = open(ofspec_filename, 'w+')
# get the list of VM's in all the tiers of VPC running in this host from the JSON config
this_host_vms = get_vpc_vms_on_host(vpconfig, this_host_id)
for vm in this_host_vms:
for nic in vm.nics:
mac_addr = nic.macaddress
ip = nic.ipaddress
vif_name = get_vif_name_from_macaddress(mac_addr)
of_port = get_ofport_for_vif(vif_name)
network = get_network_details(vpconfig, nic.networkuuid)
# Add OF rule in L2 look up table, if packet's destination mac matches MAC of the VM's nic
# then send packet on the found OFPORT
ofspec.write("table=%s" %L2_LOOKUP_TABLE + " priority=1100 dl_dst=%s " %mac_addr +
" actions=output:%s" %of_port + "\n")
# Add OF rule in L3 look up table: if packet's destination IP matches VM's IP then modify the packet
# to set DST MAC = VM's MAC, SRC MAC= destination tier gateway MAC and send to egress table. This step
# emulates steps VPC virtual router would have done on the current host itself.
action_str = " mod_dl_src:%s"%network.gatewaymac + ",mod_dl_dst:%s" % mac_addr \
+ ",resubmit(,%s)"%INGRESS_ACL_TABLE
action_str = "table=%s"%L3_LOOKUP_TABLE + " ip nw_dst=%s"%ip + " actions=%s" %action_str
ofspec.write(action_str + "\n")
# Add OF rule to send intra-tier traffic from this nic of the VM to L2 lookup path (L2 switching)
action_str = "table=%s" %CLASSIFIER_TABLE + " priority=1200 in_port=%s " %of_port + \
" ip nw_dst=%s " %network.cidr + " actions=resubmit(,%s)" %L2_LOOKUP_TABLE
ofspec.write(action_str + "\n")
# Add OF rule to send inter-tier traffic from this nic of the VM to egress ACL table(L3 lookup path)
action_str = "table=%s "%CLASSIFIER_TABLE + " priority=1100 in_port=%s " %of_port + \
" ip dl_dst=%s " %network.gatewaymac + " nw_dst=%s " %vpconfig.cidr + \
" actions=resubmit(,%s)" %EGRESS_ACL_TABLE
ofspec.write(action_str + "\n")
# get the list of hosts on which VPC spans from the JSON config
vpc_spanning_hosts = vpconfig.hosts
for host in vpc_spanning_hosts:
if str(this_host_id) == str(host.hostid):
continue
other_host_vms = get_vpc_vms_on_host(vpconfig, str(host.hostid))
for vm in other_host_vms:
for nic in vm.nics:
mac_addr = nic.macaddress
ip = nic.ipaddress
network = get_network_details(vpconfig, nic.networkuuid)
gre_key = network.grekey
# generate tunnel name as per the tunnel naming convention
tunnel_name = "t%s-%s-%s" % (gre_key, this_host_id, host.hostid)
# check if tunnel exists already, if not create a tunnel from this host to remote host
if not check_tunnel_exists(bridge, tunnel_name):
create_tunnel(bridge, str(host.ipaddress), str(gre_key), this_host_id,
host.hostid, network.networkuuid)
of_port = get_ofport_for_vif(tunnel_name)
# Add flow rule in L2 look up table, if packet's destination mac matches MAC of the VM's nic
# on the remote host then send packet on the found OFPORT corresponding to the tunnel
ofspec.write("table=%s" %L2_LOOKUP_TABLE + " priority=1100 dl_dst=%s " %mac_addr +
" actions=output:%s" %of_port + "\n")
# Add flow rule in L3 look up table. if packet's destination IP matches VM's IP then modify the
# packet to set DST MAC = VM's MAC, SRC MAC=tier gateway MAC and send to ingress table. This step
# emulates steps VPC virtual router would have done on the current host itself.
action_str = "mod_dl_src:%s"%network.gatewaymac + ",mod_dl_dst:%s" % mac_addr + \
",resubmit(,%s)"%INGRESS_ACL_TABLE
action_str = "table=%s"%L3_LOOKUP_TABLE + " ip nw_dst=%s"%ip + " actions=%s" %action_str
ofspec.write(action_str + "\n")
# add a default rule in L2_LOOKUP_TABLE to send unknown mac address to L2 flooding table
ofspec.write("table=%s "%L2_LOOKUP_TABLE + " priority=0 " + " actions=resubmit(,%s)"%L2_FLOOD_TABLE + "\n")
# add a default rule in L3 lookup table to forward (unknown destination IP) packets to L2 lookup table. This
# is fallback option to send the packet to VPC VR, when routing can not be performed at the host
ofspec.write("table=%s "%L3_LOOKUP_TABLE + " priority=0 " + " actions=resubmit(,%s)"%L2_LOOKUP_TABLE + "\n")
# First flush current L2_LOOKUP_TABLE & L3_LOOKUP_TABLE before re-applying L2 & L3 lookup entries
del_flows(bridge, table=L2_LOOKUP_TABLE)
del_flows(bridge, table=L3_LOOKUP_TABLE)
ofspec.seek(0)
logging.debug("Adding below flows rules in L2 & L3 lookup tables:\n" + ofspec.read())
ofspec.close()
# update bridge with the flow-rules for L2 lookup and L3 lookup in the file in one attempt
do_cmd([OFCTL_PATH, 'add-flows', bridge, ofspec_filename])
# now that we updated the bridge with flow rules close and delete the file.
os.remove(ofspec_filename)
return "SUCCESS: successfully configured bridge as per the VPC topology update with sequence no: %s"%sequence_no
except Exception as e:
error_message = "An unexpected error occurred while configuring bridge " + bridge + \
" as per latest VPC topology update with sequence no: %s" %sequence_no
logging.debug(error_message + " due to " + str(e))
if os.path.isfile(ofspec_filename):
os.remove(ofspec_filename)
raise PluginError(error_message)
# Configures the bridge created for a VPC that is enabled for distributed firewall. Management server sends VPC routing
# policy details (the network ACLs applied on the tiers, etc.). Based on the VPC routing policies, the ingress ACL
# table and egress ACL table are updated by this function.
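# A minimal sketch of the json_config payload this function expects, inferred from the fields read
# below; the values are illustrative only: each tier references an ACL by id, and each ACL item
# carries number, action, direction, protocol, source CIDRs and an optional source port range:
#   {"vpc": {"tiers": [{"cidr": "10.0.1.0/24", "aclid": "acl-1"}],
#            "acls":  [{"id": "acl-1",
#                       "aclitems": [{"number": 1, "action": "allow", "direction": "ingress",
#                                     "protocol": "tcp", "sourcecidrs": ["0.0.0.0/0"],
#                                     "sourceportstart": 22, "sourceportend": 22}]}]}}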
def configure_vpc_bridge_for_routing_policies(bridge, json_config, sequence_no):
vpconfig = jsonLoader(json.loads(json_config)).vpc
if vpconfig is None:
logging.debug("WARNING: Can't find VPC routing policies info in json config file")
return "FAILURE:IMPROPER_JSON_CONFG_FILE"
try:
if not os.path.exists('/var/run/cloud'):
os.makedirs('/var/run/cloud')
# create a temporary file to store OpenFlow rules corresponding to ingress and egress ACL table updates
ofspec_filename = "/var/run/cloud/" + bridge + sequence_no + ".ofspec"
ofspec = open(ofspec_filename, 'w+')
tiers = vpconfig.tiers
for tier in tiers:
tier_cidr = tier.cidr
acl = get_acl(vpconfig, tier.aclid)
acl_items = acl.aclitems
for acl_item in acl_items:
number = acl_item.number
action = acl_item.action
direction = acl_item.direction
source_port_start = acl_item.sourceportstart
source_port_end = acl_item.sourceportend
protocol = acl_item.protocol
if protocol == "all":
protocol = "*"
elif protocol == "tcp":
protocol = "6"
elif protocol == "udp":
protocol == "17"
elif protocol == "icmp":
protocol == "1"
source_cidrs = acl_item.sourcecidrs
acl_priority = 1000 + number
if direction == "ingress":
matching_table = INGRESS_ACL_TABLE
resubmit_table = L2_LOOKUP_TABLE
elif direction == "egress":
matching_table = EGRESS_ACL_TABLE
resubmit_table = L3_LOOKUP_TABLE
for source_cidr in source_cidrs:
if source_port_start is None and source_port_end is None:
if source_cidr.startswith('0.0.0.0'):
if action == "deny":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_dst=%s " %tier_cidr + " nw_proto=%s " %protocol +
" actions=drop" + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_src=%s " %tier_cidr + " nw_proto=%s " %protocol +
" actions=drop" + "\n")
if action == "allow":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_dst=%s " %tier_cidr + " nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_src=%s " %tier_cidr + " nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
else:
if action == "deny":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_src=%s " %source_cidr + " nw_dst=%s " %tier_cidr +
" nw_proto=%s " %protocol + " actions=drop" + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_src=%s " %tier_cidr + " nw_dst=%s " %source_cidr +
" nw_proto=%s " %protocol + " actions=drop" + "\n")
if action == "allow":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_src=%s "%source_cidr + " nw_dst=%s " %tier_cidr +
" nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" nw_src=%s "%tier_cidr + " nw_dst=%s " %source_cidr +
" nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
continue
# add flow rule to do action (allow/deny) for flows where source IP of the packet is in
# source_cidr and destination ip is in tier_cidr
port = int(source_port_start)
while (port <= int(source_port_end)):
if source_cidr.startswith('0.0.0.0'):
if action == "deny":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_dst=%s " %tier_cidr +
" nw_proto=%s " %protocol + " actions=drop" + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_src=%s " %tier_cidr +
" nw_proto=%s " %protocol + " actions=drop" + "\n")
if action == "allow":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_dst=%s " %tier_cidr +
" nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_src=%s " %tier_cidr +
" nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
else:
if action == "deny":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_src=%s " %source_cidr +
" nw_dst=%s " %tier_cidr +
" nw_proto=%s " %protocol + " actions=drop" + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_src=%s " %tier_cidr +
" nw_dst=%s " %source_cidr +
" nw_proto=%s " %protocol + " actions=drop" + "\n")
if action == "allow":
if direction == "ingress":
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_src=%s "%source_cidr +
" nw_dst=%s " %tier_cidr +
" nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
else:
ofspec.write("table=%s "%matching_table + " priority=%s " %acl_priority + " ip " +
" tp_dst=%s " %port + " nw_src=%s "%tier_cidr +
" nw_dst=%s " %source_cidr +
" nw_proto=%s " %protocol +
" actions=resubmit(,%s)"%resubmit_table + "\n")
port = port + 1
# add a default rule in egress table to allow packets (so forward packet to L3 lookup table)
ofspec.write("table=%s " %EGRESS_ACL_TABLE + " priority=0 actions=resubmit(,%s)" %L3_LOOKUP_TABLE + "\n")
# add a default rule in ingress table to drop packets
ofspec.write("table=%s " %INGRESS_ACL_TABLE + " priority=0 actions=drop" + "\n")
# First flush current ingress and egress ACL's before re-applying the ACL's
del_flows(bridge, table=EGRESS_ACL_TABLE)
del_flows(bridge, table=INGRESS_ACL_TABLE)
ofspec.seek(0)
logging.debug("Adding below flows rules Ingress & Egress ACL tables:\n" + ofspec.read())
ofspec.close()
# update bridge with the flow-rules for ingress and egress ACL's added in the file in one attempt
do_cmd([OFCTL_PATH, 'add-flows', bridge, ofspec_filename])
# now that we updated the bridge with flow rules delete the file.
os.remove(ofspec_filename)
return "SUCCESS: successfully configured bridge as per the latest routing policies update with " \
"sequence no: %s"%sequence_no
except Exception as e:
error_message = "An unexpected error occurred while configuring bridge " + bridge + \
" as per latest VPC's routing policy update with sequence number %s." %sequence_no
logging.debug(error_message + " due to " + str(e))
if os.path.isfile(ofspec_filename):
os.remove(ofspec_filename)
raise PluginError(error_message)
# configures bridge L2 flooding rules stored in table=2. Single bridge is used for all the tiers of VPC. So controlled
# flooding is required to restrict broadcasts to only the ports (vifs and tunnel interfaces) in the tier. Also,
# packets arriving from the tunnel ports should not be flooded on the other tunnel ports.
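# Illustrative example of the resulting per-tier flood rules (hypothetical ofport numbers): for a tier
# with VIF ofports 5 and 6 and tunnel ofport 7, a packet arriving on the tunnel port is flooded only to
# the VIFs, while a packet from a VIF goes to every other port of that tier:
#   table=2 priority=1100 in_port=7 actions=output:5,output:6
#   table=2 priority=1100 in_port=5 actions=output:6,output:7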
def update_flooding_rules_on_port_plug_unplug(bridge, interface, command, if_network_id):
class tier_ports:
tier_vif_ofports = []
tier_tunnelif_ofports = []
tier_all_ofports = []
logging.debug("Updating the flooding rules on bridge " + bridge + " as interface %s" %interface +
" is %s"%command + " now.")
try:
if not os.path.exists('/var/run/cloud'):
os.makedirs('/var/run/cloud')
# create a temporary file to store OpenFlow rules corresponding L2 flooding table
ofspec_filename = "/var/run/cloud/" + bridge + "-" +interface + "-" + command + ".ofspec"
ofspec = open(ofspec_filename, 'w+')
all_tiers = dict()
vsctl_output = do_cmd([VSCTL_PATH, 'list-ports', bridge])
ports = vsctl_output.split('\n')
for port in ports:
if_ofport = do_cmd([VSCTL_PATH, 'get', 'Interface', port, 'ofport'])
if port.startswith('vif'):
network_id = get_network_id_for_vif(port)
if network_id not in all_tiers.keys():
all_tiers[network_id] = tier_ports()
tier_ports_info = all_tiers[network_id]
tier_ports_info.tier_vif_ofports.append(if_ofport)
tier_ports_info.tier_all_ofports.append(if_ofport)
all_tiers[network_id] = tier_ports_info
if port.startswith('t'):
network_id = get_network_id_for_tunnel_port(port)[1:-1]
if network_id not in all_tiers.keys():
all_tiers[network_id] = tier_ports()
tier_ports_info = all_tiers[network_id]
tier_ports_info.tier_tunnelif_ofports.append(if_ofport)
tier_ports_info.tier_all_ofports.append(if_ofport)
all_tiers[network_id] = tier_ports_info
for network_id, tier_ports_info in all_tiers.items():
if len(tier_ports_info.tier_all_ofports) == 1 :
continue
# for a packet arrived from tunnel port, flood only on to VIF ports connected to bridge for this tier
for port in tier_ports_info.tier_tunnelif_ofports:
action = "".join("output:%s," %ofport for ofport in tier_ports_info.tier_vif_ofports)[:-1]
ofspec.write("table=%s " %L2_FLOOD_TABLE + " priority=1100 in_port=%s " %port +
"actions=%s " %action + "\n")
# for a packet arrived from VIF port send on all VIF and tunnel ports corresponding to the tier excluding
# the port on which packet arrived
for port in tier_ports_info.tier_vif_ofports:
tier_all_ofports_copy = copy.copy(tier_ports_info.tier_all_ofports)
tier_all_ofports_copy.remove(port)
action = "".join("output:%s," %ofport for ofport in tier_all_ofports_copy)[:-1]
ofspec.write("table=%s " %L2_FLOOD_TABLE + " priority=1100 in_port=%s " %port +
"actions=%s " %action + "\n")
# add a default rule in L2 flood table to drop packet
ofspec.write("table=%s " %L2_FLOOD_TABLE + " priority=0 actions=drop")
# First flush current L2 flooding table before re-populating the tables
del_flows(bridge, table=L2_FLOOD_TABLE)
ofspec.seek(0)
logging.debug("Adding below flows rules L2 flooding table: \n" + ofspec.read())
ofspec.close()
# update bridge with the flow-rules for broadcast rules added in the file in one attempt
do_cmd([OFCTL_PATH, 'add-flows', bridge, ofspec_filename])
# now that we updated the bridge with flow rules delete the file.
os.remove(ofspec_filename)
logging.debug("successfully configured bridge %s as per the latest flooding rules " %bridge)
except Exception as e:
if os.path.isfile(ofspec_filename):
os.remove(ofspec_filename)
error_message = "An unexpected error occurred while updating the flooding rules for the bridge " + \
bridge + " when interface " + " %s" %interface + " is %s" %command
logging.debug(error_message + " due to " + str(e))
raise PluginError(error_message)
def is_regular_tunnel_network(xs_nw_uuid):
cmd = [XE_PATH,"network-param-get", "uuid=%s" % xs_nw_uuid, "param-name=other-config",
"param-key=is-ovs-tun-network", "--minimal"]
logging.debug("Executing:%s", cmd)
pipe = subprocess.PIPE
proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True, universal_newlines=True)
ret_code = proc.wait()
if ret_code:
return False
output = proc.stdout.read()
if output.endswith('\n'):
output = output[:-1]
return output
def is_vpc_network_with_distributed_routing(xs_nw_uuid):
cmd = [XE_PATH,"network-param-get", "uuid=%s" % xs_nw_uuid, "param-name=other-config",
"param-key=is-ovs-vpc-distributed-vr-network", "--minimal"]
logging.debug("Executing:%s", cmd)
pipe = subprocess.PIPE
proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
stderr=pipe, close_fds=True, universal_newlines=True)
ret_code = proc.wait()
if ret_code:
return False
output = proc.stdout.read()
if output.endswith('\n'):
output = output[:-1]
return output

View File

@@ -0,0 +1,145 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# A simple script for enabling and disabling per-vif and tunnel interface rules for explicitly
# allowing broadcast/multicast traffic from the tunnel ports and on the port where the VIF is attached
import copy
import os
import sys
import logging
import cloudstack_pluginlib as pluginlib
pluginlib.setup_logging("/var/log/cloud/ovstunnel.log")
def clear_flows(bridge, this_vif_ofport, vif_ofports):
action = "".join("output:%s," %ofport
for ofport in vif_ofports)[:-1]
# Remove flow entries originating from given ofport
pluginlib.del_flows(bridge, in_port=this_vif_ofport)
# The following will remove the port being deleted from the actions
pluginlib.add_flow(bridge, priority=1100,
dl_dst='ff:ff:ff:ff:ff:ff', actions=action)
pluginlib.add_flow(bridge, priority=1100,
nw_dst='224.0.0.0/24', actions=action)
def apply_flows(bridge, this_vif_ofport, vif_ofports):
action = "".join("output:%s," %ofport
for ofport in vif_ofports)[:-1]
# Ensure {b|m}casts sent from VIF ports are always allowed
pluginlib.add_flow(bridge, priority=1200,
in_port=this_vif_ofport,
dl_dst='ff:ff:ff:ff:ff:ff',
actions='NORMAL')
pluginlib.add_flow(bridge, priority=1200,
in_port=this_vif_ofport,
nw_dst='224.0.0.0/24',
actions='NORMAL')
# Ensure {b|m}casts are always propagated to VIF ports
pluginlib.add_flow(bridge, priority=1100,
dl_dst='ff:ff:ff:ff:ff:ff', actions=action)
pluginlib.add_flow(bridge, priority=1100,
nw_dst='224.0.0.0/24', actions=action)
def clear_rules(vif):
try:
delcmd = "/sbin/ebtables -t nat -L PREROUTING | grep " + vif
delcmds = pluginlib.do_cmd(['/bin/bash', '-c', delcmd]).split('\n')
for cmd in delcmds:
try:
cmd = '/sbin/ebtables -t nat -D PREROUTING ' + cmd
pluginlib.do_cmd(['/bin/bash', '-c', cmd])
except:
pass
except:
pass
def main(command, vif_raw):
if command not in ('online', 'offline'):
return
vif_name, dom_id, vif_index = vif_raw.split('-')
# validate vif and dom-id
this_vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
# Make sure the networking stack is not linux bridge!
net_stack = pluginlib.do_cmd(['cat', '/etc/xensource/network.conf'])
if net_stack.lower() == "bridge":
if command == 'offline':
clear_rules(this_vif)
# Nothing to do here!
return
bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'iface-to-br', this_vif])
# find the xs network for this bridge and verify it is used for an ovs tunnel network
xs_nw_uuid = pluginlib.do_cmd([pluginlib.XE_PATH, "network-list",
"bridge=%s" % bridge, "--minimal"])
ovs_tunnel_network = pluginlib.is_regular_tunnel_network(xs_nw_uuid)
# handle the case where the network is a regular tunnel network
if ovs_tunnel_network == 'True':
vlan = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'br-to-vlan', bridge])
if vlan != '0':
# We need the REAL bridge name
bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
'br-to-parent', bridge])
vsctl_output = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
'list-ports', bridge])
vifs = vsctl_output.split('\n')
vif_ofports = []
vif_other_ofports = []
for vif in vifs:
vif_ofport = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'get',
'Interface', vif, 'ofport'])
if this_vif == vif:
this_vif_ofport = vif_ofport
if vif.startswith('vif'):
vif_ofports.append(vif_ofport)
if command == 'offline':
vif_other_ofports = copy.copy(vif_ofports)
vif_other_ofports.remove(this_vif_ofport)
clear_flows(bridge, this_vif_ofport, vif_other_ofports)
if command == 'online':
apply_flows(bridge, this_vif_ofport, vif_ofports)
# handle case where bridge is setup for VPC which is enabled for distributed routing
ovs_vpc_distributed_vr_network = pluginlib.is_vpc_network_with_distributed_routing(xs_nw_uuid)
if ovs_vpc_distributed_vr_network == 'True':
vlan = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'br-to-vlan', bridge])
if vlan != '0':
# We need the REAL bridge name
bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
'br-to-parent', bridge])
vif_network_id = pluginlib.get_network_id_for_vif(this_vif)
pluginlib.update_flooding_rules_on_port_plug_unplug(bridge, this_vif, command, vif_network_id)
return
if __name__ == "__main__":
if len(sys.argv) != 3:
print("usage: {} [online|offline] vif-domid-idx".format(os.path.basename(sys.argv[0])))
sys.exit(1)
else:
command, vif_raw = sys.argv[1:3]
main(command, vif_raw)
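# Illustration only (the ofport numbers are hypothetical, and this assumes
# cloudstack_pluginlib.add_flow maps directly onto ovs-ofctl add-flow): for a VIF
# with ofport 5 on a bridge whose vif ports are 5, 2 and 3, apply_flows() above
# would install flows roughly equivalent to:
#   priority=1200,in_port=5,dl_dst=ff:ff:ff:ff:ff:ff actions=NORMAL
#   priority=1200,in_port=5,nw_dst=224.0.0.0/24      actions=NORMAL
#   priority=1100,dl_dst=ff:ff:ff:ff:ff:ff           actions=output:5,output:2,output:3
#   priority=1100,nw_dst=224.0.0.0/24                actions=output:5,output:2,output:3
# so broadcast/multicast from the VIF is switched normally, while flooded traffic is
# explicitly replicated to every VIF port; clear_flows() re-adds the priority-1100
# rules without the port of a VIF that has gone offline.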

View File

@@ -0,0 +1,70 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This file specifies the files that need
# to be transferred over to the XenServer.
# The format of this file is as follows:
# [Name of file]=[source path],[file permission],[destination path]
# [destination path] is required.
# If [file permission] is missing, 755 is assumed.
# If [source path] is missing, it looks in the same
# directory as the patch file.
# If [source path] starts with '/', then it is absolute path.
# If [source path] starts with '~', then it is path relative to management server home directory.
# If [source path] does not start with '/' or '~', then it is relative path to the location of the patch file.
vmops=,0755,/etc/xapi.d/plugins
vmopspremium=,0755,/etc/xapi.d/plugins
vmopsSnapshot=,0755,/etc/xapi.d/plugins
xen-ovs-vif-flows.rules=..,0644,/etc/udev/rules.d
ovs-vif-flows.py=,0755,/etc/xapi.d/plugins
cloudstack_plugins.conf=..,0644,/etc/xensource
cloudstack_pluginlib.py=,0755,/etc/xapi.d/plugins
ovstunnel=..,0755,/etc/xapi.d/plugins
cloud-plugin-storage=,0755,/etc/xapi.d/plugins
agent.zip=../../../../../vms,0644,/opt/xensource/packages/resources/
cloud-scripts.tgz=../../../../../vms,0644,/opt/xensource/packages/resources/
patch-sysvms.sh=../../../../../vms,0644,/opt/xensource/packages/resources/
id_rsa.cloud=../../../systemvm,0600,/root/.ssh
network_info.sh=..,0755,/opt/cloud/bin
setupxenserver.sh=..,0755,/opt/cloud/bin
make_migratable.sh=..,0755,/opt/cloud/bin
setup_iscsi.sh=..,0755,/opt/cloud/bin
pingtest.sh=../../..,0755,/opt/cloud/bin
router_proxy.sh=../../../../network/domr/,0755,/opt/cloud/bin
cloud-setup-bonding.sh=..,0755,/opt/cloud/bin
kill_copy_process.sh=..,0755,/opt/cloud/bin
setup_heartbeat_sr.sh=..,0755,/opt/cloud/bin
setup_heartbeat_file.sh=..,0755,/opt/cloud/bin
check_heartbeat.sh=..,0755,/opt/cloud/bin
xenheartbeat.sh=..,0755,/opt/cloud/bin
launch_hb.sh=..,0755,/opt/cloud/bin
upgrade_snapshot.sh=..,0755,/opt/cloud/bin
cloud-clean-vlan.sh=..,0755,/opt/cloud/bin
cloud-prepare-upgrade.sh=..,0755,/opt/cloud/bin
swift=,0755,/opt/cloud/bin
swiftxenserver=..,0755,/etc/xapi.d/plugins
s3xenserver=..,0755,/etc/xapi.d/plugins
add_to_vcpus_params_live.sh=..,0755,/opt/cloud/bin
ovs-pvlan=..,0755,/etc/xapi.d/plugins
ovs-pvlan-dhcp-host.sh=../../../network,0755,/opt/cloud/bin
ovs-pvlan-vm.sh=../../../network,0755,/opt/cloud/bin
ovs-pvlan-cleanup.sh=../../../network,0755,/opt/cloud/bin
ovs-get-dhcp-iface.sh=..,0755,/opt/cloud/bin
ovs-get-bridge.sh=..,0755,/opt/cloud/bin
cloudlog=..,0644,/etc/logrotate.d
update_host_passwd.sh=../..,0755,/opt/cloud/bin
logrotate=..,0755,/etc/cron.hourly
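The entries above follow the "[name]=[source path],[file permission],[destination path]" format described in the file header. As a rough, hedged sketch of how such an entry could be interpreted (the function name parse_patch_line and the handling of '~' via expanduser are illustrative assumptions, not the actual deployment code):

import os

def parse_patch_line(line, patch_dir):
    # Skip comments and blank lines.
    line = line.strip()
    if not line or line.startswith('#'):
        return None
    name, spec = line.split('=', 1)
    parts = spec.split(',')
    if len(parts) == 3:
        src, perm, dest = parts
    else:
        # Permission omitted: 755 is assumed per the header above.
        src, dest = parts
        perm = '0755'
    if not src:
        # Empty source path: look in the same directory as the patch file.
        src = patch_dir
    elif src.startswith('~'):
        # Relative to the management server home directory (approximated here).
        src = os.path.expanduser(src)
    elif not src.startswith('/'):
        # Relative to the location of the patch file.
        src = os.path.join(patch_dir, src)
    return name, os.path.join(src, name), perm or '0755', dest

# Example: parse_patch_line("vmops=,0755,/etc/xapi.d/plugins", "/opt/patch")
# returns ("vmops", "/opt/patch/vmops", "0755", "/etc/xapi.d/plugins")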

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,622 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Version @VERSION@
#
# A plugin for executing scripts needed by the vmops cloud
import os, sys, time
import XenAPIPlugin
if os.path.exists("/opt/xensource/sm"):
sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"])
if os.path.exists("/usr/lib/xcp/sm"):
sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
import SR, VDI, SRCommand, util, lvutil
from util import CommandException
import vhdutil
import shutil
import lvhdutil
import errno
import subprocess
import xs_errors
import cleanup
import stat
import random
import cloudstack_pluginlib as lib
import logging
lib.setup_logging("/var/log/cloud/cloud.log")
VHDUTIL = "vhd-util"
VHD_PREFIX = 'VHD-'
CLOUD_DIR = '/var/run/cloud_mount'
def echo(fn):
def wrapped(*v, **k):
name = fn.__name__
logging.debug("#### CLOUD enter %s ####" % name )
res = fn(*v, **k)
logging.debug("#### CLOUD exit %s ####" % name )
return res
return wrapped
@echo
def create_secondary_storage_folder(session, args):
local_mount_path = None
logging.debug("create_secondary_storage_folder, args: " + str(args))
try:
try:
# Mount the remote resource folder locally
remote_mount_path = args["remoteMountPath"]
local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
nfsVersion = args["nfsVersion"]
mount(remote_mount_path, local_mount_path, nfsVersion)
# Create the new folder
new_folder = local_mount_path + "/" + args["newFolder"]
if not os.path.isdir(new_folder):
current_umask = os.umask(0)
os.makedirs(new_folder)
os.umask(current_umask)
except OSError as e:
errMsg = "create_secondary_storage_folder failed: errno: " + str(e.errno) + ", strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
except:
errMsg = "create_secondary_storage_folder failed."
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
finally:
if local_mount_path != None:
# Unmount the local folder
umount(local_mount_path)
# Remove the local folder
os.system("rmdir " + local_mount_path)
return "1"
@echo
def delete_secondary_storage_folder(session, args):
local_mount_path = None
logging.debug("delete_secondary_storage_folder, args: " + str(args))
try:
try:
# Mount the remote resource folder locally
remote_mount_path = args["remoteMountPath"]
local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
nfsVersion = args["nfsVersion"]
mount(remote_mount_path, local_mount_path, nfsVersion)
# Delete the specified folder
folder = local_mount_path + "/" + args["folder"]
if os.path.isdir(folder):
os.system("rm -f " + folder + "/*")
os.system("rmdir " + folder)
except OSError as e:
errMsg = "delete_secondary_storage_folder failed: errno: " + str(e.errno) + ", strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
except:
errMsg = "delete_secondary_storage_folder failed."
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
finally:
if local_mount_path != None:
# Unmount the local folder
umount(local_mount_path)
# Remove the local folder
os.system("rmdir " + local_mount_path)
return "1"
@echo
def post_create_private_template(session, args):
local_mount_path = None
try:
try:
# get local template folder
templatePath = args["templatePath"]
local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
nfsVersion = args["nfsVersion"]
mount(templatePath, local_mount_path, nfsVersion)
# Retrieve args
filename = args["templateFilename"]
name = args["templateName"]
description = args["templateDescription"]
checksum = args["checksum"]
file_size = args["size"]
virtual_size = args["virtualSize"]
template_id = args["templateId"]
# Create the template.properties file
template_properties_install_path = local_mount_path + "/template.properties"
f = open(template_properties_install_path, "w")
f.write("filename=" + filename + "\n")
f.write("vhd=true\n")
f.write("id=" + template_id + "\n")
f.write("vhd.filename=" + filename + "\n")
f.write("public=false\n")
f.write("uniquename=" + name + "\n")
f.write("vhd.virtualsize=" + virtual_size + "\n")
f.write("virtualsize=" + virtual_size + "\n")
f.write("checksum=" + checksum + "\n")
f.write("hvm=true\n")
f.write("description=" + description + "\n")
f.write("vhd.size=" + str(file_size) + "\n")
f.write("size=" + str(file_size) + "\n")
f.close()
logging.debug("Created template.properties file")
# Set permissions
permissions = stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH
os.chmod(template_properties_install_path, permissions)
logging.debug("Set permissions on template and template.properties")
except:
errMsg = "post_create_private_template failed."
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
finally:
if local_mount_path != None:
# Unmount the local folder
umount(local_mount_path)
# Remove the local folder
os.system("rmdir " + local_mount_path)
return "1"
def isfile(path, isISCSI):
errMsg = ''
exists = True
if isISCSI:
exists = checkVolumeAvailability(path)
else:
exists = os.path.isfile(path)
if not exists:
errMsg = "File " + path + " does not exist."
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return errMsg
def copyfile(fromFile, toFile, isISCSI):
logging.debug("Starting to copy " + fromFile + " to " + toFile)
errMsg = ''
if isISCSI:
bs = "4M"
else:
bs = "128k"
try:
cmd = ['dd', 'if=' + fromFile, 'iflag=direct', 'of=' + toFile, 'oflag=direct', 'bs=' + bs]
txt = util.pread2(cmd)
except:
try:
os.system("rm -f " + toFile)
except:
txt = ''
txt = ''
errMsg = "Error while copying " + fromFile + " to " + toFile + " in secondary storage"
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Successfully copied " + fromFile + " to " + toFile)
return errMsg
def chdir(path):
try:
os.chdir(path)
except OSError as e:
errMsg = "Unable to chdir to " + path + " because of OSError with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Chdired to " + path)
return
def scanParent(path):
# Do a scan for the parent for ISCSI volumes
# Note that the parent need not be visible on the XenServer
parentUUID = ''
try:
lvName = os.path.basename(path)
dirname = os.path.dirname(path)
vgName = os.path.basename(dirname)
vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
parentUUID = vhdInfo.parentUuid
except:
errMsg = "Could not get vhd parent of " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return parentUUID
def getParent(path, isISCSI):
parentUUID = ''
try :
if isISCSI:
parentUUID = vhdutil.getParent(path, lvhdutil.extractUuid)
else:
parentUUID = vhdutil.getParent(path, cleanup.FileVDI.extractUuid)
except:
errMsg = "Could not get vhd parent of " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return parentUUID
def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
snapshotVHD = getVHD(snapshotUuid, isISCSI)
snapshotPath = os.path.join(primarySRPath, snapshotVHD)
baseCopyUuid = ''
if isISCSI:
checkVolumeAvailability(snapshotPath)
baseCopyUuid = scanParent(snapshotPath)
else:
baseCopyUuid = getParent(snapshotPath, isISCSI)
logging.debug("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
return baseCopyUuid
def setParent(parent, child):
try:
cmd = [VHDUTIL, "modify", "-p", parent, "-n", child]
txt = util.pread2(cmd)
except:
errMsg = "Unexpected error while trying to set parent of " + child + " to " + parent
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Successfully set parent of " + child + " to " + parent)
return
def rename(originalVHD, newVHD):
try:
os.rename(originalVHD, newVHD)
except OSError as e:
errMsg = "OSError while renaming " + origiinalVHD + " to " + newVHD + "with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return
def makedirs(path):
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as e:
umount(path)
if os.path.isdir(path):
return
errMsg = "OSError while creating " + path + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return
def mount(remoteDir, localDir, nfsVersion=None):
makedirs(localDir)
options = "soft,tcp,timeo=133,retrans=1"
if nfsVersion:
options += ",vers=" + nfsVersion
try:
cmd = ['mount', '-o', options, remoteDir, localDir]
txt = util.pread2(cmd)
except:
txt = ''
errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Successfully mounted " + remoteDir + " to " + localDir)
return
def umount(localDir):
try:
cmd = ['umount', localDir]
util.pread2(cmd)
except CommandException:
errMsg = "CommandException raised while trying to umount " + localDir
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
logging.debug("Successfully unmounted " + localDir)
return
def mountSnapshotsDir(secondaryStorageMountPath, localMountPointPath, path):
# The aim is to mount secondaryStorageMountPath on localMountPointPath
# and create the <accountId>/<instanceId> dir on it, if it doesn't exist already.
# Assuming that secondaryStorageMountPath exists remotely,
# just mount secondaryStorageMountPath/<relativeDir>/SecondaryStorageHost/ every time.
# Never unmount.
# path is like "snapshots/account/volumeId", we mount secondary_storage:/snapshots
relativeDir = path.split("/")[0]
restDir = "/".join(path.split("/")[1:])
snapshotsDir = os.path.join(secondaryStorageMountPath, relativeDir)
makedirs(localMountPointPath)
# if something is not mounted already on localMountPointPath,
# mount snapshotsDir on localMountPointPath
if os.path.ismount(localMountPointPath):
# There is more than one secondary storage per zone.
# And we are mounting each sec storage under a zone-specific directory
# So two secondary storage snapshot dirs will never get mounted on the same point on the same XenServer.
logging.debug("The remote snapshots directory has already been mounted on " + localMountPointPath)
else:
mount(snapshotsDir, localMountPointPath)
# Create accountId/instanceId dir on localMountPointPath, if it doesn't exist
backupsDir = os.path.join(localMountPointPath, restDir)
makedirs(backupsDir)
return backupsDir
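# Worked example (hypothetical values, for clarity only): with
# secondaryStorageMountPath="10.1.1.2:/export/secondary",
# localMountPointPath="/var/run/cloud_mount/1/snapshots" and path="snapshots/2/10",
# relativeDir is "snapshots" and restDir is "2/10"; the function mounts
# 10.1.1.2:/export/secondary/snapshots on /var/run/cloud_mount/1/snapshots
# (unless something is already mounted there) and returns
# /var/run/cloud_mount/1/snapshots/2/10, creating that directory if needed.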
def unmountAll(path):
try:
for dir in os.listdir(path):
if dir.isdigit():
logging.debug("Unmounting Sub-Directory: " + dir)
localMountPointPath = os.path.join(path, dir)
umount(localMountPointPath)
except:
logging.debug("Ignoring the error while trying to unmount the snapshots dir")
@echo
def unmountSnapshotsDir(session, args):
dcId = args['dcId']
localMountPointPath = os.path.join(CLOUD_DIR, dcId)
localMountPointPath = os.path.join(localMountPointPath, "snapshots")
unmountAll(localMountPointPath)
try:
umount(localMountPointPath)
except:
logging.debug("Ignoring the error while trying to unmount the snapshots dir.")
return "1"
def getPrimarySRPath(primaryStorageSRUuid, isISCSI):
if isISCSI:
primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid
return os.path.join(lvhdutil.VG_LOCATION, primarySRDir)
else:
return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid)
def getBackupVHD(UUID):
return UUID + '.' + SR.DEFAULT_TAP
def getVHD(UUID, isISCSI):
if isISCSI:
return VHD_PREFIX + UUID
else:
return UUID + '.' + SR.DEFAULT_TAP
def getIsTrueString(stringValue):
booleanValue = False
if (stringValue and stringValue == 'true'):
booleanValue = True
return booleanValue
def makeUnavailable(uuid, primarySRPath, isISCSI):
if not isISCSI:
return
VHD = getVHD(uuid, isISCSI)
path = os.path.join(primarySRPath, VHD)
manageAvailability(path, '-an')
return
def manageAvailability(path, value):
if path.__contains__("/var/run/sr-mount"):
return
logging.debug("Setting availability of " + path + " to " + value)
try:
cmd = ['/usr/sbin/lvchange', value, path]
util.pread2(cmd)
except: #CommandException, (rc, cmdListStr, stderr):
#errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr
errMsg = "Unexpected exception thrown by lvchange"
logging.debug(errMsg)
if value == "-ay":
# Raise an error only if we are trying to make it available.
# Just warn if we are trying to make it unavailable after the
# snapshot operation is done.
raise xs_errors.XenError(errMsg)
return
def checkVolumeAvailability(path):
try:
if not isVolumeAvailable(path):
# The VHD file is not available on XenServer. The volume is probably
# inactive or detached.
# Do lvchange -ay to make it available on XenServer
manageAvailability(path, '-ay')
except:
errMsg = "Could not determine status of ISCSI path: " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
success = False
i = 0
while i < 6:
i = i + 1
# Check if the vhd is actually visible by checking for the link
# set isISCSI to true
success = isVolumeAvailable(path)
if success:
logging.debug("Made vhd: " + path + " available and confirmed that it is visible")
break
# Sleep for 10 seconds before checking again.
time.sleep(10)
# If not visible within 1 min fail
if not success:
logging.debug("Could not make vhd: " + path + " available despite waiting for 1 minute. Does it exist?")
return success
def isVolumeAvailable(path):
# Check if iscsi volume is available on this XenServer.
status = "0"
try:
p = subprocess.Popen(["/bin/bash", "-c", "if [ -L " + path + " ]; then echo 1; else echo 0;fi"], stdout=subprocess.PIPE)
status = p.communicate()[0].decode().strip("\n")
except:
errMsg = "Could not determine status of ISCSI path: " + path
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return (status == "1")
def getVhdParent(session, args):
logging.debug("getParent with " + str(args))
primaryStorageSRUuid = args['primaryStorageSRUuid']
snapshotUuid = args['snapshotUuid']
isISCSI = getIsTrueString(args['isISCSI'])
primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
logging.debug("primarySRPath: " + primarySRPath)
baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
return baseCopyUuid
def getSnapshotSize(session, args):
primaryStorageSRUuid = args['primaryStorageSRUuid']
snapshotUuid = args['snapshotUuid']
isISCSI = getIsTrueString(args['isISCSI'])
primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
logging.debug("primarySRPath: " + primarySRPath)
snapshotVHD = getVHD(snapshotUuid, isISCSI)
snapshotPath = os.path.join(primarySRPath, snapshotVHD)
physicalSize = vhdutil.getSizePhys(snapshotPath)
return str(physicalSize)
def backupSnapshot(session, args):
logging.debug("Called backupSnapshot with " + str(args))
primaryStorageSRUuid = args['primaryStorageSRUuid']
secondaryStorageMountPath = args['secondaryStorageMountPath']
snapshotUuid = args['snapshotUuid']
prevBackupUuid = args['prevBackupUuid']
backupUuid = args['backupUuid']
isISCSI = getIsTrueString(args['isISCSI'])
path = args['path']
localMountPoint = args['localMountPoint']
primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
logging.debug("primarySRPath: " + primarySRPath)
baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
baseCopyVHD = getVHD(baseCopyUuid, isISCSI)
baseCopyPath = os.path.join(primarySRPath, baseCopyVHD)
logging.debug("Base copy path: " + baseCopyPath)
# Mount secondary storage mount path on XenServer along the path
# /var/run/sr-mount/<dcId>/snapshots/ and create <accountId>/<volumeId> dir
# on it.
backupsDir = mountSnapshotsDir(secondaryStorageMountPath, localMountPoint, path)
logging.debug("Backups dir " + backupsDir)
prevBackupUuid = prevBackupUuid.split("/")[-1]
# Check existence of snapshot on primary storage
isfile(baseCopyPath, isISCSI)
physicalSize = vhdutil.getSizePhys(baseCopyPath)
if prevBackupUuid:
# Check existence of prevBackupFile
prevBackupVHD = getBackupVHD(prevBackupUuid)
prevBackupFile = os.path.join(backupsDir, prevBackupVHD)
isfile(prevBackupFile, False)
# copy baseCopyPath to backupsDir with new uuid
backupVHD = getBackupVHD(backupUuid)
backupFile = os.path.join(backupsDir, backupVHD)
logging.debug("Back up " + baseCopyUuid + " to Secondary Storage as " + backupUuid)
copyfile(baseCopyPath, backupFile, isISCSI)
vhdutil.setHidden(backupFile, False)
# Because the primary storage is always scanned, the parent of this base copy is always the first base copy.
# We don't want that, we want a chain of VHDs each of which is a delta from the previous.
# So set the parent of the current baseCopyVHD to prevBackupVHD
if prevBackupUuid:
# If there was a previous snapshot
setParent(prevBackupFile, backupFile)
txt = "1#" + backupUuid + "#" + str(physicalSize)
return txt
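# Illustration of the re-parenting above (hypothetical UUIDs): if snapshot backups
# b1, b2 and b3 are taken in that order, secondary storage ends up with the chain
# b3.vhd -> b2.vhd -> b1.vhd (child -> parent), i.e. each backup's VHD parent is
# the previous backup rather than the first full copy, giving the chain of deltas
# described in the comment above.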
@echo
def deleteSnapshotBackup(session, args):
logging.debug("Calling deleteSnapshotBackup with " + str(args))
secondaryStorageMountPath = args['secondaryStorageMountPath']
backupUUID = args['backupUUID']
path = args['path']
localMountPoint = args['localMountPoint']
backupsDir = mountSnapshotsDir(secondaryStorageMountPath, localMountPoint, path)
# chdir to the backupsDir for convenience
chdir(backupsDir)
backupVHD = getBackupVHD(backupUUID)
logging.debug("checking existence of " + backupVHD)
# The backupVHD is on secondary which is NFS and not ISCSI.
if not os.path.isfile(backupVHD):
logging.debug("backupVHD " + backupVHD + "does not exist. Not trying to delete it")
return "1"
logging.debug("backupVHD " + backupVHD + " exists.")
# Just delete the backupVHD
try:
os.remove(backupVHD)
except OSError as e:
errMsg = "OSError while removing " + backupVHD + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return "1"
@echo
def revert_memory_snapshot(session, args):
logging.debug("Calling revert_memory_snapshot with " + str(args))
vmName = args['vmName']
snapshotUUID = args['snapshotUUID']
oldVmUuid = args['oldVmUuid']
snapshotMemory = args['snapshotMemory']
hostUUID = args['hostUUID']
try:
cmd = '''xe vbd-list vm-uuid=%s | grep 'vdi-uuid' | grep -v 'not in database' | sed -e 's/vdi-uuid ( RO)://g' ''' % oldVmUuid
vdiUuids = os.popen(cmd).read().split()
cmd2 = '''xe vm-param-get param-name=power-state uuid=''' + oldVmUuid
if os.popen(cmd2).read().split()[0] != 'halted':
os.system("xe vm-shutdown force=true vm=" + vmName)
os.system("xe vm-destroy uuid=" + oldVmUuid)
os.system("xe snapshot-revert snapshot-uuid=" + snapshotUUID)
if snapshotMemory == 'true':
os.system("xe vm-resume vm=" + vmName + " on=" + hostUUID)
for vdiUuid in vdiUuids:
os.system("xe vdi-destroy uuid=" + vdiUuid)
except OSError as e:
errMsg = "OSError while reverting vm " + vmName + " to snapshot " + snapshotUUID + " with errno: " + str(e.errno) + " and strerr: " + e.strerror
logging.debug(errMsg)
raise xs_errors.XenError(errMsg)
return "0"
if __name__ == "__main__":
XenAPIPlugin.dispatch({"getVhdParent":getVhdParent, "create_secondary_storage_folder":create_secondary_storage_folder, "delete_secondary_storage_folder":delete_secondary_storage_folder, "post_create_private_template":post_create_private_template, "backupSnapshot": backupSnapshot, "deleteSnapshotBackup": deleteSnapshotBackup, "unmountSnapshotsDir": unmountSnapshotsDir, "revert_memory_snapshot":revert_memory_snapshot, "getSnapshotSize":getSnapshotSize})
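# Hedged usage note: the functions registered above are invoked through the XenAPI
# plugin mechanism; from the dom0 CLI that is, for example (UUIDs hypothetical):
#   xe host-call-plugin host-uuid=<host-uuid> plugin=vmopsSnapshot \
#       fn=getVhdParent args:primaryStorageSRUuid=<sr-uuid> \
#       args:snapshotUuid=<snapshot-uuid> args:isISCSI=false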

View File

@@ -0,0 +1,159 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Version @VERSION@
#
# A plugin for executing scripts needed by the vmops cloud
import os, sys, time
import XenAPIPlugin
if os.path.exists("/opt/xensource/sm"):
sys.path.extend(["/opt/xensource/sm/", "/usr/local/sbin/", "/sbin/"])
if os.path.exists("/usr/lib/xcp/sm"):
sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
import util
import socket
import cloudstack_pluginlib as lib
import logging
lib.setup_logging("/var/log/cloud/cloud.log")
def echo(fn):
def wrapped(*v, **k):
name = fn.__name__
logging.debug("#### CLOUD enter %s ####" % name )
res = fn(*v, **k)
logging.debug("#### CLOUD exit %s ####" % name )
return res
return wrapped
@echo
def forceShutdownVM(session, args):
domId = args['domId']
try:
cmd = ["/opt/xensource/debug/xenops", "destroy_domain", "-domid", domId]
txt = util.pread2(cmd)
except:
txt = '10#failed'
return txt
@echo
def create_privatetemplate_from_snapshot(session, args):
templatePath = args['templatePath']
snapshotPath = args['snapshotPath']
tmpltLocalDir = args['tmpltLocalDir']
try:
cmd = ["bash", "/opt/cloud/bin/create_privatetemplate_from_snapshot.sh",snapshotPath, templatePath, tmpltLocalDir]
txt = util.pread2(cmd)
except:
txt = '10#failed'
return txt
@echo
def upgrade_snapshot(session, args):
templatePath = args['templatePath']
snapshotPath = args['snapshotPath']
try:
cmd = ["bash", "/opt/cloud/bin/upgrate_snapshot.sh",snapshotPath, templatePath]
txt = util.pread2(cmd)
except:
txt = '10#failed'
return txt
@echo
def copy_vhd_to_secondarystorage(session, args):
mountpoint = args['mountpoint']
vdiuuid = args['vdiuuid']
sruuid = args['sruuid']
try:
cmd = ["bash", "/opt/cloud/bin/copy_vhd_to_secondarystorage.sh", mountpoint, vdiuuid, sruuid]
txt = util.pread2(cmd)
except:
txt = '10#failed'
return txt
@echo
def copy_vhd_from_secondarystorage(session, args):
mountpoint = args['mountpoint']
sruuid = args['sruuid']
namelabel = args['namelabel']
try:
cmd = ["bash", "/opt/cloud/bin/copy_vhd_from_secondarystorage.sh", mountpoint, sruuid, namelabel]
txt = util.pread2(cmd)
except:
txt = '10#failed'
return txt
@echo
def remove_corrupt_vdi(session, args):
vdifile = args['vdifile']
try:
cmd = ['rm', '-f', vdifile]
txt = util.pread2(cmd)
except:
txt = '10#failed'
return txt
@echo
def setup_heartbeat_sr(session, args):
host = args['host']
sr = args['sr']
try:
cmd = ["bash", "/opt/cloud/bin/setup_heartbeat_sr.sh", host, sr]
txt = util.pread2(cmd)
except:
txt = ''
return txt
@echo
def setup_heartbeat_file(session, args):
host = args['host']
sr = args['sr']
add = args['add']
try:
cmd = ["bash", "/opt/cloud/bin/setup_heartbeat_file.sh", host, sr, add]
txt = util.pread2(cmd)
except:
txt = ''
return txt
@echo
def heartbeat(session, args):
host = args['host']
timeout = args['timeout']
interval = args['interval']
try:
cmd = ["/bin/bash", "/opt/cloud/bin/launch_hb.sh", host, timeout, interval]
txt = util.pread2(cmd)
except:
txt='fail'
return txt
@echo
def asmonitor(session, args):
try:
perfmod = __import__("perfmon")
result = perfmod.get_vm_group_perfmon(args)
return result
except:
return 'fail'
if __name__ == "__main__":
XenAPIPlugin.dispatch({"forceShutdownVM":forceShutdownVM, "upgrade_snapshot":upgrade_snapshot, "create_privatetemplate_from_snapshot":create_privatetemplate_from_snapshot, "copy_vhd_to_secondarystorage":copy_vhd_to_secondarystorage, "copy_vhd_from_secondarystorage":copy_vhd_from_secondarystorage, "setup_heartbeat_sr":setup_heartbeat_sr, "setup_heartbeat_file":setup_heartbeat_file, "heartbeat": heartbeat, "asmonitor": asmonitor, "remove_corrupt_vdi": remove_corrupt_vdi})

View File

@@ -19,6 +19,7 @@ package com.cloud.alert;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
import java.text.DecimalFormat; import java.text.DecimalFormat;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date; import java.util.Date;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
@@ -88,41 +89,55 @@ import com.cloud.utils.Pair;
import com.cloud.utils.component.ManagerBase; import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria;
import org.jetbrains.annotations.Nullable;
public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable { public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable {
protected Logger logger = LogManager.getLogger(AlertManagerImpl.class.getName()); protected Logger logger = LogManager.getLogger(AlertManagerImpl.class.getName());
public static final List<AlertType> ALERTS = Arrays.asList(AlertType.ALERT_TYPE_HOST
, AlertType.ALERT_TYPE_USERVM
, AlertType.ALERT_TYPE_DOMAIN_ROUTER
, AlertType.ALERT_TYPE_CONSOLE_PROXY
, AlertType.ALERT_TYPE_SSVM
, AlertType.ALERT_TYPE_STORAGE_MISC
, AlertType.ALERT_TYPE_MANAGEMENT_NODE
, AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED
, AlertType.ALERT_TYPE_UPLOAD_FAILED
, AlertType.ALERT_TYPE_OOBM_AUTH_ERROR
, AlertType.ALERT_TYPE_HA_ACTION
, AlertType.ALERT_TYPE_CA_CERT);
private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // Thirty seconds expressed in milliseconds. private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // Thirty seconds expressed in milliseconds.
private static final DecimalFormat DfPct = new DecimalFormat("###.##"); private static final DecimalFormat DfPct = new DecimalFormat("###.##");
private static final DecimalFormat DfWhole = new DecimalFormat("########"); private static final DecimalFormat DfWhole = new DecimalFormat("########");
@Inject @Inject
private AlertDao _alertDao; AlertDao _alertDao;
@Inject @Inject
protected StorageManager _storageMgr; protected StorageManager _storageMgr;
@Inject @Inject
protected CapacityManager _capacityMgr; protected CapacityManager _capacityMgr;
@Inject @Inject
private CapacityDao _capacityDao; CapacityDao _capacityDao;
@Inject @Inject
private DataCenterDao _dcDao; DataCenterDao _dcDao;
@Inject @Inject
private HostPodDao _podDao; HostPodDao _podDao;
@Inject @Inject
private ClusterDao _clusterDao; ClusterDao _clusterDao;
@Inject @Inject
private IPAddressDao _publicIPAddressDao; IPAddressDao _publicIPAddressDao;
@Inject @Inject
private DataCenterIpAddressDao _privateIPAddressDao; DataCenterIpAddressDao _privateIPAddressDao;
@Inject @Inject
private PrimaryDataStoreDao _storagePoolDao; PrimaryDataStoreDao _storagePoolDao;
@Inject @Inject
private ConfigurationDao _configDao; ConfigurationDao _configDao;
@Inject @Inject
private ResourceManager _resourceMgr; ResourceManager _resourceMgr;
@Inject @Inject
private ConfigurationManager _configMgr; ConfigurationManager _configMgr;
@Inject @Inject
protected ConfigDepot _configDepot; protected ConfigDepot _configDepot;
@Inject @Inject
@@ -138,7 +153,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
private double _vlanCapacityThreshold = 0.75; private double _vlanCapacityThreshold = 0.75;
private double _directNetworkPublicIpCapacityThreshold = 0.75; private double _directNetworkPublicIpCapacityThreshold = 0.75;
private double _localStorageCapacityThreshold = 0.75; private double _localStorageCapacityThreshold = 0.75;
Map<Short, Double> _capacityTypeThresholdMap = new HashMap<Short, Double>(); Map<Short, Double> _capacityTypeThresholdMap = new HashMap<>();
private final ExecutorService _executor; private final ExecutorService _executor;
@@ -402,18 +417,15 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
private void createOrUpdateVlanCapacity(long dcId, AllocationState capacityState) { private void createOrUpdateVlanCapacity(long dcId, AllocationState capacityState) {
SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria(); SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
capacitySC = _capacityDao.createSearchCriteria();
capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId); capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId);
capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_VLAN); capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_VLAN);
capacities = _capacityDao.search(capacitySC, null); List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
int totalVlans = _dcDao.countZoneVlans(dcId, false); int totalVlans = _dcDao.countZoneVlans(dcId, false);
int allocatedVlans = _dcDao.countZoneVlans(dcId, true); int allocatedVlans = _dcDao.countZoneVlans(dcId, true);
CapacityState vlanCapacityState = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled; CapacityState vlanCapacityState = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled;
if (capacities.size() == 0) { if (capacities.isEmpty()) {
CapacityVO newVlanCapacity = new CapacityVO(null, dcId, null, null, allocatedVlans, totalVlans, Capacity.CAPACITY_TYPE_VLAN); CapacityVO newVlanCapacity = new CapacityVO(null, dcId, null, null, allocatedVlans, totalVlans, Capacity.CAPACITY_TYPE_VLAN);
newVlanCapacity.setCapacityState(vlanCapacityState); newVlanCapacity.setCapacityState(vlanCapacityState);
_capacityDao.persist(newVlanCapacity); _capacityDao.persist(newVlanCapacity);
@@ -430,16 +442,13 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
public void createOrUpdateIpCapacity(Long dcId, Long podId, short capacityType, AllocationState capacityState) { public void createOrUpdateIpCapacity(Long dcId, Long podId, short capacityType, AllocationState capacityState) {
SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria(); SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
capacitySC = _capacityDao.createSearchCriteria();
capacitySC.addAnd("podId", SearchCriteria.Op.EQ, podId); capacitySC.addAnd("podId", SearchCriteria.Op.EQ, podId);
capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId); capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId);
capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType); capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);
int totalIPs; int totalIPs;
int allocatedIPs; int allocatedIPs;
capacities = _capacityDao.search(capacitySC, null); List<CapacityVO> capacities = _capacityDao.search(capacitySC, null);
if (capacityType == Capacity.CAPACITY_TYPE_PRIVATE_IP) { if (capacityType == Capacity.CAPACITY_TYPE_PRIVATE_IP) {
totalIPs = _privateIPAddressDao.countIPs(podId, dcId, false); totalIPs = _privateIPAddressDao.countIPs(podId, dcId, false);
allocatedIPs = _privateIPAddressDao.countIPs(podId, dcId, true); allocatedIPs = _privateIPAddressDao.countIPs(podId, dcId, true);
@@ -452,7 +461,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
} }
CapacityState ipCapacityState = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled; CapacityState ipCapacityState = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled;
if (capacities.size() == 0) { if (capacities.isEmpty()) {
CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, podId, null, allocatedIPs, totalIPs, capacityType); CapacityVO newPublicIPCapacity = new CapacityVO(null, dcId, podId, null, allocatedIPs, totalIPs, capacityType);
newPublicIPCapacity.setCapacityState(ipCapacityState); newPublicIPCapacity.setCapacityState(ipCapacityState);
_capacityDao.persist(newPublicIPCapacity); _capacityDao.persist(newPublicIPCapacity);
@@ -477,7 +486,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
int total = usedTotal.second(); int total = usedTotal.second();
int allocated = usedTotal.first(); int allocated = usedTotal.first();
CapacityState state = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled; CapacityState state = (capacityState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled;
if (capacities.size() == 0) { if (capacities.isEmpty()) {
CapacityVO capacityVO = new CapacityVO(null, dcId, null, null, allocated, total, capacityType); CapacityVO capacityVO = new CapacityVO(null, dcId, null, null, allocated, total, capacityType);
capacityVO.setCapacityState(state); capacityVO.setCapacityState(state);
_capacityDao.persist(capacityVO); _capacityDao.persist(capacityVO);
@@ -524,13 +533,12 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
// Generate Alerts for Zone Level capacities // Generate Alerts for Zone Level capacities
for (DataCenterVO dc : dataCenterList) { for (DataCenterVO dc : dataCenterList) {
for (Short capacityType : dataCenterCapacityTypes) { for (Short capacityType : dataCenterCapacityTypes) {
List<SummedCapacity> capacity = new ArrayList<SummedCapacity>(); List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null);
capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null);
if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) { if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) {
capacity.add(getUsedStats(capacityType, dc.getId(), null, null)); capacity.add(getUsedStats(capacityType, dc.getId(), null, null));
} }
if (capacity == null || capacity.size() == 0) { if (capacity == null || capacity.isEmpty()) {
continue; continue;
} }
double totalCapacity = capacity.get(0).getTotalCapacity(); double totalCapacity = capacity.get(0).getTotalCapacity();
@@ -545,7 +553,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
for (HostPodVO pod : podList) { for (HostPodVO pod : podList) {
for (Short capacityType : podCapacityTypes) { for (Short capacityType : podCapacityTypes) {
List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), pod.getDataCenterId(), pod.getId(), null); List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), pod.getDataCenterId(), pod.getId(), null);
if (capacity == null || capacity.size() == 0) { if (capacity == null || capacity.isEmpty()) {
continue; continue;
} }
double totalCapacity = capacity.get(0).getTotalCapacity(); double totalCapacity = capacity.get(0).getTotalCapacity();
@@ -559,11 +567,10 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
// Generate Alerts for Cluster Level capacities // Generate Alerts for Cluster Level capacities
for (ClusterVO cluster : clusterList) { for (ClusterVO cluster : clusterList) {
for (Short capacityType : clusterCapacityTypes) { for (Short capacityType : clusterCapacityTypes) {
List<SummedCapacity> capacity = new ArrayList<SummedCapacity>(); List<SummedCapacity> capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId());
capacity = _capacityDao.findCapacityBy(capacityType.intValue(), cluster.getDataCenterId(), null, cluster.getId());
// cpu and memory allocated capacity notification threshold can be defined at cluster level, so getting the value if they are defined at cluster level // cpu and memory allocated capacity notification threshold can be defined at cluster level, so getting the value if they are defined at cluster level
double threshold = 0; double threshold;
switch (capacityType) { switch (capacityType) {
case Capacity.CAPACITY_TYPE_STORAGE: case Capacity.CAPACITY_TYPE_STORAGE:
capacity.add(getUsedStats(capacityType, cluster.getDataCenterId(), cluster.getPodId(), cluster.getId())); capacity.add(getUsedStats(capacityType, cluster.getDataCenterId(), cluster.getPodId(), cluster.getId()));
@@ -581,7 +588,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
default: default:
threshold = _capacityTypeThresholdMap.get(capacityType); threshold = _capacityTypeThresholdMap.get(capacityType);
} }
if (capacity == null || capacity.size() == 0) { if (capacity == null || capacity.isEmpty()) {
continue; continue;
} }
@@ -697,7 +704,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
private List<Short> getCapacityTypesAtZoneLevel() { private List<Short> getCapacityTypesAtZoneLevel() {
List<Short> dataCenterCapacityTypes = new ArrayList<Short>(); List<Short> dataCenterCapacityTypes = new ArrayList<>();
dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP);
dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP);
dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE);
@@ -709,7 +716,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
private List<Short> getCapacityTypesAtPodLevel() { private List<Short> getCapacityTypesAtPodLevel() {
List<Short> podCapacityTypes = new ArrayList<Short>(); List<Short> podCapacityTypes = new ArrayList<>();
podCapacityTypes.add(Capacity.CAPACITY_TYPE_PRIVATE_IP); podCapacityTypes.add(Capacity.CAPACITY_TYPE_PRIVATE_IP);
return podCapacityTypes; return podCapacityTypes;
@@ -717,7 +724,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
private List<Short> getCapacityTypesAtClusterLevel() { private List<Short> getCapacityTypesAtClusterLevel() {
List<Short> clusterCapacityTypes = new ArrayList<Short>(); List<Short> clusterCapacityTypes = new ArrayList<>();
clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_CPU); clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_CPU);
clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_MEMORY); clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_MEMORY);
clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE); clusterCapacityTypes.add(Capacity.CAPACITY_TYPE_STORAGE);
@@ -748,19 +755,11 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
public void sendAlert(AlertType alertType, DataCenter dataCenter, Pod pod, Cluster cluster, String subject, String content) public void sendAlert(AlertType alertType, DataCenter dataCenter, Pod pod, Cluster cluster, String subject, String content)
throws MessagingException, UnsupportedEncodingException { throws MessagingException, UnsupportedEncodingException {
logger.warn(String.format("alertType=[%s] dataCenter=[%s] pod=[%s] cluster=[%s] message=[%s].", alertType, dataCenter, pod, cluster, subject));
AlertVO alert = null;
Long clusterId = cluster == null ? null : cluster.getId(); Long clusterId = cluster == null ? null : cluster.getId();
Long podId = pod == null ? null : pod.getId(); Long podId = pod == null ? null : pod.getId();
long dcId = dataCenter == null ? 0L : dataCenter.getId(); long dcId = dataCenter == null ? 0L : dataCenter.getId();
if ((alertType != AlertManager.AlertType.ALERT_TYPE_HOST) && (alertType != AlertManager.AlertType.ALERT_TYPE_USERVM) logger.warn(String.format("alertType=[%s] dataCenterId=[%s] podId=[%s] clusterId=[%s] message=[%s].", alertType, dcId, podId, clusterId, subject));
&& (alertType != AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER) && (alertType != AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY) AlertVO alert = getAlertForTrivialAlertType(alertType, dcId, podId, clusterId);
&& (alertType != AlertManager.AlertType.ALERT_TYPE_SSVM) && (alertType != AlertManager.AlertType.ALERT_TYPE_STORAGE_MISC)
&& (alertType != AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE) && (alertType != AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED)
&& (alertType != AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED) && (alertType != AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR)
&& (alertType != AlertManager.AlertType.ALERT_TYPE_HA_ACTION) && (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT)) {
alert = _alertDao.getLastAlert(alertType.getType(), dcId, podId, clusterId);
}
if (alert == null) { if (alert == null) {
AlertVO newAlert = new AlertVO(); AlertVO newAlert = new AlertVO();
@@ -802,6 +801,15 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
} }
@Nullable
private AlertVO getAlertForTrivialAlertType(AlertType alertType, long dataCenterId, Long podId, Long clusterId) {
AlertVO alert = null;
if (!ALERTS.contains(alertType)) {
alert = _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId);
}
return alert;
}
protected void sendMessage(SMTPMailProperties mailProps) { protected void sendMessage(SMTPMailProperties mailProps) {
_executor.execute(new Runnable() { _executor.execute(new Runnable() {
@Override @Override

View File

@@ -2921,6 +2921,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
Account callerAccount = _accountMgr.getActiveAccountById(user.getAccountId()); Account callerAccount = _accountMgr.getActiveAccountById(user.getAccountId());
_accountMgr.checkAccess(callerAccount, AccessType.OperateEntry, true, network); _accountMgr.checkAccess(callerAccount, AccessType.OperateEntry, true, network);
if (!network.isRedundant() && makeRedundant) { if (!network.isRedundant() && makeRedundant) {
NetworkOffering networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId());
Map<Network.Capability, String> sourceNatCapabilities = getNetworkOfferingServiceCapabilities(networkOffering, Service.SourceNat);
String isRedundantRouterSupported = sourceNatCapabilities.get(Capability.RedundantRouter);
if (!Boolean.parseBoolean(isRedundantRouterSupported)) {
throw new InvalidParameterValueException(String.format("Redundant router is not supported by the network offering %s", networkOffering));
}
network.setRedundant(true); network.setRedundant(true);
if (!_networksDao.update(network.getId(), network)) { if (!_networksDao.update(network.getId(), network)) {
throw new CloudRuntimeException("Failed to update network into a redundant one, please try again"); throw new CloudRuntimeException("Failed to update network into a redundant one, please try again");

View File

@@ -304,7 +304,18 @@ public class UploadManagerImpl extends ManagerBase implements UploadManager {
logger.error(errorString); logger.error(errorString);
return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE); return new CreateEntityDownloadURLAnswer(errorString, CreateEntityDownloadURLAnswer.RESULT_FAILURE);
} }
File parentFolder = file.getParentFile();
if (parentFolder != null && parentFolder.exists()) {
Path folderPath = parentFolder.toPath();
Script script = new Script(true, "chmod", 1440 * 1000, logger);
script.add("755", folderPath.toString());
result = script.execute();
if (result != null) {
String errMsg = "Unable to set permissions for " + folderPath + " due to " + result;
logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
return new CreateEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS); return new CreateEntityDownloadURLAnswer("", CreateEntityDownloadURLAnswer.RESULT_SUCCESS);
} }

View File

@@ -513,7 +513,7 @@ class TestAddConfigtoDeployVM(cloudstackTestCase):
raise self.skipTest("Skipping test case for non-xenserver hypervisor") raise self.skipTest("Skipping test case for non-xenserver hypervisor")
""" """
Following commands are used to convert a VM from HVM to PV and set using vm-param-set Following commands are used to convert a VM from HVM to PV and set using vm-param-set
HVM-boot-policy= HVM-boot-policy=""
PV-bootloader=pygrub PV-bootloader=pygrub
PV-args=hvc0 PV-args=hvc0
""" """
@@ -524,7 +524,7 @@ class TestAddConfigtoDeployVM(cloudstackTestCase):
add_config_response = self.add_global_config(name, value) add_config_response = self.add_global_config(name, value)
if add_config_response.name: if add_config_response.name:
extraconfig = 'HVM-boot-policy%3D%0APV-bootloader%3Dpygrub%0APV-args%3Dhvc0' extraconfig = 'HVM-boot-policy%3D%22%22%0APV-bootloader%3Dpygrub%0APV-args%3Dhvc0'
try: try:
response = self.deploy_vm(hypervisor, extraconfig) response = self.deploy_vm(hypervisor, extraconfig)
host_id = response.hostid host_id = response.hostid

View File

@@ -183,7 +183,10 @@ class TestGuestOS(cloudstackTestCase):
raise unittest.SkipTest("OS name check with hypervisor is supported only on XenServer and VMware") raise unittest.SkipTest("OS name check with hypervisor is supported only on XenServer and VMware")
if self.hypervisor.hypervisor.lower() == "xenserver": if self.hypervisor.hypervisor.lower() == "xenserver":
testosname="Debian Jessie 8.0" if tuple(map(int, self.hypervisor.hypervisorversion.split("."))) >= (8, 3, 0):
testosname = "Debian Bookworm 12"
else:
testosname = "Debian Jessie 8.0"
else: else:
testosname="debian4_64Guest" testosname="debian4_64Guest"