Merge release branch 4.7 to 4.8

* 4.7:
  Fix Sync of template.properties in Swift
  Configure rVPC for router.redundant.vrrp.interval advert_int setting
  Have rVPCs use the router.redundant.vrrp.interval setting
  Resolve conflict as forceencap is already in master
  Split the cidr lists so we won't hit the iptables-restore limits
  Check the existence of 'forceencap' parameter before use
  Do not load previous firewall rules as we replace everything anyway
  Wait for dnsmasq to finish restart
  Remove duplicate spaces, and thus duplicate rules.
  Restore iptables at once using iptables-restore instead of calling iptables numerous times
  Add iptables conversion script.
This commit is contained in:
Will Stevens 2016-05-18 15:54:32 -04:00
commit 8f330b0b92
16 changed files with 801 additions and 302 deletions

View File

@ -156,6 +156,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
// template.properties
// there
}
ts.setInstallPath(installPath);
ts.setState(ObjectInDataStoreStateMachine.State.Allocated);
ts = templateDataStoreDao.persist(ts);

View File

@ -1598,6 +1598,9 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
if (isRedundant) {
buf.append(" redundant_router=1");
final int advertInt = NumbersUtil.parseInt(_configDao.getValue(Config.RedundantRouterVrrpInterval.key()), 1);
buf.append(" advert_int=").append(advertInt);
final Long vpcId = router.getVpcId();
final List<DomainRouterVO> routers;
if (vpcId != null) {

View File

@ -0,0 +1,178 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.test;
import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableMap;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.springframework.util.Assert;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Strings.isNullOrEmpty;
import static java.lang.String.format;
import static org.apache.log4j.Level.ALL;
import static org.apache.log4j.Level.DEBUG;
import static org.apache.log4j.Level.ERROR;
import static org.apache.log4j.Level.FATAL;
import static org.apache.log4j.Level.INFO;
import static org.apache.log4j.Level.OFF;
/**
 * Tracks one or more patterns to determine whether or not they have been
 * logged. It uses a streaming approach to determine whether or not a message
 * has occurred to prevent unnecessary memory consumption. Instances of this
 * class are created using the {@link TestAppenderBuilder}.
 *
 * To use this class, register one or more expected patterns by level as part
 * of the test setup and retain a reference to the appender instance. After the
 * expected logging events have occurred in the test case, call
 * {@link TestAppender#assertMessagesLogged()} which will fail the test if any of the
 * expected patterns were not logged.
 */
public final class TestAppender extends AppenderSkeleton {
    private final static String APPENDER_NAME = "test_appender";
    // Immutable level -> expected-pattern mapping. The PatternResult values are
    // mutable match latches flipped as messages stream through append().
    private final ImmutableMap<Level, Set<PatternResult>> expectedPatternResults;

    private TestAppender(final Map<Level, Set<PatternResult>> expectedPatterns) {
        super();
        // Name the appender so safeAddAppender(Logger, TestAppender) can remove a
        // previously attached instance via Logger#removeAppender(String). Without
        // this the appender's name is null and removal by APPENDER_NAME is a no-op,
        // allowing duplicate test appenders to accumulate on the logger.
        setName(APPENDER_NAME);
        expectedPatternResults = ImmutableMap.copyOf(expectedPatterns);
    }

    /**
     * Marks every registered pattern for the event's level that matches the
     * rendered message as found.
     *
     * @param loggingEvent the event to inspect; must be non-null and carry a
     *                     level registered in the builder's level map
     */
    protected void append(LoggingEvent loggingEvent) {
        checkArgument(loggingEvent != null, "append requires a non-null loggingEvent");
        final Level level = loggingEvent.getLevel();
        checkState(expectedPatternResults.containsKey(level), "level " + level + " not supported by append");
        for (final PatternResult patternResult : expectedPatternResults.get(level)) {
            if (patternResult.getPattern().matcher(loggingEvent.getRenderedMessage()).matches()) {
                patternResult.markFound();
            }
        }
    }

    public void close() {
        // Do nothing ...
    }

    public boolean requiresLayout() {
        return false;
    }

    /**
     * Fails the calling test (via Spring's {@code Assert}) if any expected
     * pattern was never matched by a logged message.
     */
    public void assertMessagesLogged() {
        final List<String> unloggedPatterns = new ArrayList<>();
        for (final Map.Entry<Level, Set<PatternResult>> expectedPatternResult : expectedPatternResults.entrySet()) {
            for (final PatternResult patternResults : expectedPatternResult.getValue()) {
                if (!patternResults.isFound()) {
                    unloggedPatterns.add(format("%1$s was not logged for level %2$s",
                            patternResults.getPattern().toString(), expectedPatternResult.getKey()));
                }
            }
        }
        if (!unloggedPatterns.isEmpty()) {
            // Raise an assert listing every pattern that was never matched
            Assert.isTrue(false, Joiner.on(",").join(unloggedPatterns));
        }
    }

    /**
     * A compiled pattern plus a latch recording whether a matching message has
     * been seen.
     */
    private static final class PatternResult {
        private final Pattern pattern;
        // volatile so that a markFound() performed on a logging thread is visible
        // to the test thread calling isFound(). The flag only ever transitions
        // false -> true, so no further synchronization is required.
        private volatile boolean foundFlag = false;

        private PatternResult(Pattern pattern) {
            super();
            this.pattern = pattern;
        }

        public Pattern getPattern() {
            return pattern;
        }

        public void markFound() {
            foundFlag = true;
        }

        public boolean isFound() {
            return foundFlag;
        }

        // NOTE(review): equals/hashCode include the mutable foundFlag, and the
        // instances live in a HashSet. The sets are only iterated (never probed)
        // after insertion, so this is harmless today, but keep it in mind if
        // contains()/remove() are ever used on these sets.
        @Override
        public boolean equals(Object thatObject) {
            if (this == thatObject) {
                return true;
            }
            if (thatObject == null || getClass() != thatObject.getClass()) {
                return false;
            }
            PatternResult thatPatternResult = (PatternResult) thatObject;
            return foundFlag == thatPatternResult.foundFlag &&
                    Objects.equal(pattern, thatPatternResult.pattern);
        }

        @Override
        public int hashCode() {
            return Objects.hashCode(pattern, foundFlag);
        }

        @Override
        public String toString() {
            return format("Pattern Result [ pattern: %1$s, markFound: %2$s ]", pattern.toString(), foundFlag);
        }
    }

    /**
     * Builder used to register the expected patterns per level and construct a
     * {@link TestAppender}.
     */
    public static final class TestAppenderBuilder {
        private final Map<Level, Set<PatternResult>> expectedPatterns;

        public TestAppenderBuilder() {
            super();
            expectedPatterns = new HashMap<>();
            expectedPatterns.put(ALL, new HashSet<PatternResult>());
            expectedPatterns.put(DEBUG, new HashSet<PatternResult>());
            expectedPatterns.put(ERROR, new HashSet<PatternResult>());
            expectedPatterns.put(FATAL, new HashSet<PatternResult>());
            expectedPatterns.put(INFO, new HashSet<PatternResult>());
            expectedPatterns.put(OFF, new HashSet<PatternResult>());
            // TRACE and WARN were missing from this map. Because append() does a
            // checkState on the event's level, any WARN/TRACE message logged while
            // the appender was attached would previously abort the test with an
            // IllegalStateException rather than being ignored.
            expectedPatterns.put(Level.TRACE, new HashSet<PatternResult>());
            expectedPatterns.put(Level.WARN, new HashSet<PatternResult>());
        }

        public TestAppenderBuilder addExpectedPattern(final Level level, final String pattern) {
            checkArgument(level != null, "addExpectedPattern requires a non-null level");
            checkArgument(!isNullOrEmpty(pattern), "addExpectedPattern requires a non-blank pattern");
            checkState(expectedPatterns.containsKey(level), "level " + level + " is not supported by " + getClass().getName());
            expectedPatterns.get(level).add(new PatternResult(Pattern.compile(pattern)));
            return this;
        }

        public TestAppender build() {
            return new TestAppender(expectedPatterns);
        }
    }

    /**
     * Attaches a {@link TestAppender} to a {@link Logger} and ensures that it is the only
     * test appender attached to the logger.
     *
     * @param logger The logger which will be monitored by the test
     * @param testAppender The test appender to attach to {@code logger}
     */
    public static void safeAddAppender(Logger logger, TestAppender testAppender) {
        logger.removeAppender(APPENDER_NAME);
        logger.addAppender(testAppender);
    }
}

View File

@ -26,9 +26,6 @@
<version>4.8.1-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<properties>
<skipTests>true</skipTests>
</properties>
<dependencies>
<dependency>
<groupId>log4j</groupId>

View File

@ -16,91 +16,7 @@
// under the License.
package org.apache.cloudstack.storage.resource;
import static com.cloud.utils.storage.S3.S3Utils.putFile;
import static com.cloud.utils.StringUtils.join;
import static java.lang.String.format;
import static java.util.Arrays.asList;
import static org.apache.commons.lang.StringUtils.substringAfterLast;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.naming.ConfigurationException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.storage.Storage;
import com.cloud.storage.template.TemplateConstants;
import com.cloud.utils.EncryptionUtil;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
import org.apache.cloudstack.storage.template.UploadEntity;
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.log4j.Logger;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.apache.cloudstack.framework.security.keystore.KeystoreManager;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus;
import org.apache.cloudstack.storage.command.UploadStatusCommand;
import org.apache.cloudstack.storage.template.DownloadManager;
import org.apache.cloudstack.storage.template.DownloadManagerImpl;
import org.apache.cloudstack.storage.template.DownloadManagerImpl.ZfsPathParser;
import org.apache.cloudstack.storage.template.UploadManager;
import org.apache.cloudstack.storage.template.UploadManagerImpl;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.CheckHealthAnswer;
import com.cloud.agent.api.CheckHealthCommand;
@ -135,11 +51,13 @@ import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.S3TO;
import com.cloud.agent.api.to.SwiftTO;
import com.cloud.exception.InternalErrorException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.host.Host.Type;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ServerResourceBase;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.StorageLayer;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
@ -149,24 +67,102 @@ import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.QCOW2Processor;
import com.cloud.storage.template.RawImageProcessor;
import com.cloud.storage.template.TARProcessor;
import com.cloud.storage.template.TemplateConstants;
import com.cloud.storage.template.TemplateLocation;
import com.cloud.storage.template.TemplateProp;
import com.cloud.storage.template.VhdProcessor;
import com.cloud.storage.template.VmdkProcessor;
import com.cloud.utils.EncryptionUtil;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.utils.SwiftUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.vm.SecondaryStorageVm;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import org.apache.cloudstack.framework.security.keystore.KeystoreManager;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.command.DownloadProgressCommand;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
import org.apache.cloudstack.storage.command.UploadStatusAnswer;
import org.apache.cloudstack.storage.command.UploadStatusAnswer.UploadStatus;
import org.apache.cloudstack.storage.command.UploadStatusCommand;
import org.apache.cloudstack.storage.template.DownloadManager;
import org.apache.cloudstack.storage.template.DownloadManagerImpl;
import org.apache.cloudstack.storage.template.DownloadManagerImpl.ZfsPathParser;
import org.apache.cloudstack.storage.template.UploadEntity;
import org.apache.cloudstack.storage.template.UploadManager;
import org.apache.cloudstack.storage.template.UploadManagerImpl;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.NameValuePair;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.log4j.Logger;
import org.joda.time.DateTime;
import org.joda.time.format.ISODateTimeFormat;
import javax.naming.ConfigurationException;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import static com.cloud.utils.StringUtils.join;
import static com.cloud.utils.storage.S3.S3Utils.putFile;
import static java.lang.String.format;
import static java.util.Arrays.asList;
import static org.apache.commons.lang.StringUtils.substringAfterLast;
public class NfsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
private static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResource.class);
public static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResource.class);
private static final String TEMPLATE_ROOT_DIR = "template/tmpl";
private static final String VOLUME_ROOT_DIR = "volumes";
@ -499,10 +495,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
String destFileFullPath = destFile.getAbsolutePath() + File.separator + fileName;
s_logger.debug("copy snapshot " + srcFile.getAbsolutePath() + " to template " + destFileFullPath);
Script.runSimpleBashScript("cp " + srcFile.getAbsolutePath() + " " + destFileFullPath);
String metaFileName = destFile.getAbsolutePath() + File.separator + "template.properties";
String metaFileName = destFile.getAbsolutePath() + File.separator + _tmpltpp;
File metaFile = new File(metaFileName);
try {
_storage.create(destFile.getAbsolutePath(), "template.properties");
_storage.create(destFile.getAbsolutePath(), _tmpltpp);
try ( // generate template.properties file
FileWriter writer = new FileWriter(metaFile);
BufferedWriter bufferWriter = new BufferedWriter(writer);
@ -597,32 +593,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
return answer;
}
s_logger.debug("starting copy template to swift");
DataTO newTemplate = answer.getNewData();
File templateFile = getFile(newTemplate.getPath(), ((NfsTO)srcDataStore).getUrl());
SwiftTO swift = (SwiftTO)destDataStore;
String containterName = SwiftUtil.getContainerName(destData.getObjectType().toString(), destData.getId());
String swiftPath = SwiftUtil.putObject(swift, templateFile, containterName, templateFile.getName());
//upload template.properties
File properties = new File(templateFile.getParent() + File.separator + _tmpltpp);
if (properties.exists()) {
SwiftUtil.putObject(swift, properties, containterName, _tmpltpp);
}
TemplateObjectTO newTemplate = (TemplateObjectTO)answer.getNewData();
newTemplate.setDataStore(srcDataStore);
CopyCommand newCpyCmd = new CopyCommand(newTemplate, destData, cmd.getWait(), cmd.executeInSequence());
Answer result = copyFromNfsToSwift(newCpyCmd);
//clean up template data on staging area
try {
DeleteCommand deleteCommand = new DeleteCommand(newTemplate);
execute(deleteCommand);
} catch (Exception e) {
s_logger.debug("Failed to clean up staging area:", e);
}
cleanupStagingNfs(newTemplate);
return result;
TemplateObjectTO template = new TemplateObjectTO();
template.setPath(swiftPath);
template.setSize(templateFile.length());
template.setPhysicalSize(template.getSize());
SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData;
template.setFormat(snapshot.getVolume().getFormat());
return new CopyCmdAnswer(template);
} else if (destDataStore instanceof S3TO) {
//create template on the same data store
CopyCmdAnswer answer =
@ -635,18 +613,27 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
newTemplate.setDataStore(srcDataStore);
CopyCommand newCpyCmd = new CopyCommand(newTemplate, destData, cmd.getWait(), cmd.executeInSequence());
Answer result = copyFromNfsToS3(newCpyCmd);
//clean up template data on staging area
try {
DeleteCommand deleteCommand = new DeleteCommand(newTemplate);
execute(deleteCommand);
} catch (Exception e) {
s_logger.debug("Failed to clean up staging area:", e);
}
cleanupStagingNfs(newTemplate);
return result;
}
}
s_logger.debug("Failed to create templat from snapshot");
return new CopyCmdAnswer("Unsupported prototcol");
s_logger.debug("Failed to create template from snapshot");
return new CopyCmdAnswer("Unsupported protocol");
}
/**
 * Cleans up template data left on the staging (secondary storage NFS) area by
 * issuing a {@link DeleteCommand} for the given template.
 *
 * Any failure is logged at debug level and deliberately swallowed: staging
 * cleanup is best-effort and must not fail the copy operation that triggered it.
 *
 * @param newTemplate the template on the secondary storage staging area to clean up
 */
protected void cleanupStagingNfs(TemplateObjectTO newTemplate) {
try {
DeleteCommand deleteCommand = new DeleteCommand(newTemplate);
execute(deleteCommand);
} catch (Exception e) {
s_logger.debug("Failed to clean up staging area:", e);
}
}
protected Answer copyFromNfsToImage(CopyCommand cmd) {
@ -759,22 +746,18 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
String container = "T-" + cmd.getId();
String swiftPath = SwiftUtil.putObject(swiftTO, file, container, null);
long virtualSize = getVirtualSize(file, getTemplateFormat(file.getName()));
long size = file.length();
String uniqueName = cmd.getName();
//put metda file
File uniqDir = _storage.createUniqDir();
String metaFileName = uniqDir.getAbsolutePath() + File.separator + "template.properties";
_storage.create(uniqDir.getAbsolutePath(), "template.properties");
File metaFile = new File(metaFileName);
FileWriter writer = new FileWriter(metaFile);
BufferedWriter bufferWriter = new BufferedWriter(writer);
bufferWriter.write("uniquename=" + cmd.getName());
bufferWriter.write("\n");
bufferWriter.write("filename=" + fileName);
bufferWriter.write("\n");
bufferWriter.write("size=" + file.length());
bufferWriter.close();
writer.close();
String metaFileName = uniqDir.getAbsolutePath() + File.separator + _tmpltpp;
_storage.create(uniqDir.getAbsolutePath(), _tmpltpp);
SwiftUtil.putObject(swiftTO, metaFile, container, "template.properties");
File metaFile = swiftWriteMetadataFile(metaFileName, uniqueName, fileName, size, virtualSize);
SwiftUtil.putObject(swiftTO, metaFile, container, _tmpltpp);
metaFile.delete();
uniqDir.delete();
String md5sum = null;
@ -785,7 +768,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
}
DownloadAnswer answer =
new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, file.length(), file.length(), md5sum);
new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, virtualSize, file.length(), md5sum);
return answer;
} catch (IOException e) {
s_logger.debug("Failed to register template into swift", e);
@ -942,6 +925,118 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
}
}
/**
 * Creates a metadata file at {@code metaFileName} containing the given template
 * attributes, one {@code key=value} pair per line (uniquename, filename, size,
 * virtualsize).
 *
 * @param metaFileName the path of the metadata file to create
 * @param uniqueName   unique name of the template
 * @param filename     file name of the template
 * @param size         physical size of the template, in bytes
 * @param virtualSize  virtual size of the template, in bytes
 * @return the created metadata {@link File}
 * @throws IOException if the file cannot be created or written
 */
protected File swiftWriteMetadataFile(String metaFileName, String uniqueName, String filename, long size, long virtualSize) throws IOException {
    File metaFile = new File(metaFileName);
    // try-with-resources guarantees both writers are closed even when a write
    // throws; the previous version leaked the FileWriter/BufferedWriter on error.
    try (FileWriter writer = new FileWriter(metaFile);
            BufferedWriter bufferWriter = new BufferedWriter(writer)) {
        bufferWriter.write("uniquename=" + uniqueName);
        bufferWriter.write("\n");
        bufferWriter.write("filename=" + filename);
        bufferWriter.write("\n");
        bufferWriter.write("size=" + size);
        bufferWriter.write("\n");
        bufferWriter.write("virtualsize=" + virtualSize);
    }
    return metaFile;
}
/**
 * Creates a template.properties for Swift with its correct unique name and
 * uploads it into the given container, then deletes the local temporary file
 * and directory.
 *
 * @param swift The swift object
 * @param srcFile Source file on the staging NFS; its base name becomes the
 *                template's unique name
 * @param containerName Destination container
 * @return always {@code true} (failures surface as exceptions)
 * @throws IOException if the metadata file cannot be written
 */
protected boolean swiftUploadMetadataFile(SwiftTO swift, File srcFile, String containerName) throws IOException {
String uniqueName = FilenameUtils.getBaseName(srcFile.getName());
File uniqDir = _storage.createUniqDir();
String metaFileName = uniqDir.getAbsolutePath() + File.separator + _tmpltpp;
_storage.create(uniqDir.getAbsolutePath(), _tmpltpp);
long virtualSize = getVirtualSize(srcFile, getTemplateFormat(srcFile.getName()));
File metaFile = swiftWriteMetadataFile(metaFileName,
uniqueName,
srcFile.getName(),
srcFile.length(),
virtualSize);
SwiftUtil.putObject(swift, metaFile, containerName, _tmpltpp);
// best-effort local cleanup of the temporary metadata file and directory
metaFile.delete();
uniqDir.delete();
return true;
}
/**
 * Copies data from NFS and uploads it into a Swift container.
 *
 * The source object is read from the staging NFS store and put into Swift;
 * the returned answer carries a TO whose type mirrors the destination object
 * type (template, volume, or snapshot). For templates a template.properties
 * metadata file is uploaded alongside the object.
 *
 * @param cmd CopyCommand describing source (NFS) and destination (Swift)
 * @return CopyCmdAnswer with the new object TO on success, or an error answer
 *         containing the exception text on failure
 */
protected Answer copyFromNfsToSwift(CopyCommand cmd) {
final DataTO srcData = cmd.getSrcTO();
final DataTO destData = cmd.getDestTO();
// NOTE(review): unconditional casts — assumes the caller only routes
// NFS-source / Swift-destination commands here; confirm at call sites.
DataStoreTO srcDataStore = srcData.getDataStore();
NfsTO srcStore = (NfsTO)srcDataStore;
DataStoreTO destDataStore = destData.getDataStore();
File srcFile = getFile(srcData.getPath(), srcStore.getUrl());
SwiftTO swift = (SwiftTO)destDataStore;
try {
String containerName = SwiftUtil.getContainerName(destData.getObjectType().toString(), destData.getId());
String swiftPath = SwiftUtil.putObject(swift, srcFile, containerName, srcFile.getName());
DataTO retObj = null;
if (destData.getObjectType() == DataObjectType.TEMPLATE) {
// templates also get a template.properties metadata file in the container
swiftUploadMetadataFile(swift, srcFile, containerName);
TemplateObjectTO newTemplate = new TemplateObjectTO();
newTemplate.setPath(swiftPath);
newTemplate.setSize(getVirtualSize(srcFile, getTemplateFormat(srcFile.getName())));
newTemplate.setPhysicalSize(srcFile.length());
newTemplate.setFormat(getTemplateFormat(srcFile.getName()));
retObj = newTemplate;
} else if (destData.getObjectType() == DataObjectType.VOLUME) {
// volumes and snapshots record the container name as their path
VolumeObjectTO newVol = new VolumeObjectTO();
newVol.setPath(containerName);
newVol.setSize(getVirtualSize(srcFile, getTemplateFormat(srcFile.getName())));
retObj = newVol;
} else if (destData.getObjectType() == DataObjectType.SNAPSHOT) {
SnapshotObjectTO newSnapshot = new SnapshotObjectTO();
newSnapshot.setPath(containerName);
retObj = newSnapshot;
}
// retObj stays null for any other object type; the answer then carries no TO
return new CopyCmdAnswer(retObj);
} catch (Exception e) {
s_logger.error("failed to upload " + srcData.getPath(), e);
return new CopyCmdAnswer("failed to upload " + srcData.getPath() + e.toString());
}
}
String swiftDownload(SwiftTO swift, String container, String rfilename, String lFullPath) {
Script command = new Script("/bin/bash", s_logger);
command.add("-c");
@ -1458,13 +1553,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
Map<String, TemplateProp> tmpltInfos = new HashMap<String, TemplateProp>();
for (String container : containers) {
if (container.startsWith("T-")) {
String[] files = SwiftUtil.list(swift, container, "template.properties");
String[] files = SwiftUtil.list(swift, container, _tmpltpp);
if (files.length != 1) {
continue;
}
try {
File tempFile = File.createTempFile("template", ".tmp");
File tmpFile = SwiftUtil.getObject(swift, tempFile, container + File.separator + "template.properties");
File tmpFile = SwiftUtil.getObject(swift, tempFile, container + File.separator + _tmpltpp);
if (tmpFile == null) {
continue;
}
@ -1779,7 +1874,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
} else {
boolean found = false;
for (File f : tmpltFiles) {
if (!found && f.getName().equals("template.properties")) {
if (!found && f.getName().equals(_tmpltpp)) {
found = true;
}

View File

@ -18,29 +18,6 @@
*/
package org.apache.cloudstack.storage.resource;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import javax.naming.ConfigurationException;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.storage.ListTemplateAnswer;
import com.cloud.agent.api.storage.ListTemplateCommand;
@ -51,7 +28,28 @@ import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import javax.naming.ConfigurationException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
@Ignore
public class LocalNfsSecondaryStorageResourceTest extends TestCase {
private static Map<String, Object> testParams;

View File

@ -18,91 +18,67 @@
*/
package org.apache.cloudstack.storage.resource;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.Map;
import java.util.Properties;
import javax.naming.ConfigurationException;
import junit.framework.Assert;
import junit.framework.TestCase;
import com.cloud.test.TestAppender;
import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.StringWriter;
public class NfsSecondaryStorageResourceTest extends TestCase {
private static Map<String, Object> testParams;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
private static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResourceTest.class.getName());
@RunWith(PowerMockRunner.class)
public class NfsSecondaryStorageResourceTest {
NfsSecondaryStorageResource resource;
private NfsSecondaryStorageResource resource;
@Before
@Override
public void setUp() throws ConfigurationException {
s_logger.setLevel(Level.ALL);
public void setUp() {
resource = new NfsSecondaryStorageResource();
resource.setInSystemVM(true);
testParams = PropertiesUtil.toMap(loadProperties());
resource.configureStorageLayerClass(testParams);
Object testLocalRoot = testParams.get("testLocalRoot");
if (testLocalRoot != null) {
resource.setParentPath((String)testLocalRoot);
}
}
@Test
public void testMount() throws Exception {
String sampleUriStr = "cifs://192.168.1.128/CSHV3?user=administrator&password=1pass%40word1&foo=bar";
URI sampleUri = new URI(sampleUriStr);
@PrepareForTest(NfsSecondaryStorageResource.class)
public void testSwiftWriteMetadataFile() throws Exception {
String expected = "uniquename=test\nfilename=testfile\nsize=100\nvirtualsize=1000";
s_logger.info("Check HostIp parsing");
String hostIpStr = resource.getUriHostIp(sampleUri);
Assert.assertEquals("Expected host IP " + sampleUri.getHost() + " and actual host IP " + hostIpStr + " differ.", sampleUri.getHost(), hostIpStr);
StringWriter stringWriter = new StringWriter();
BufferedWriter bufferWriter = new BufferedWriter(stringWriter);
PowerMockito.whenNew(BufferedWriter.class).withArguments(any(FileWriter.class)).thenReturn(bufferWriter);
s_logger.info("Check option parsing");
String expected = "user=administrator,password=1pass@word1,foo=bar,";
String actualOpts = resource.parseCifsMountOptions(sampleUri);
Assert.assertEquals("Options should be " + expected + " and not " + actualOpts, expected, actualOpts);
resource.swiftWriteMetadataFile("testfile", "test", "testfile", 100, 1000);
// attempt a configured mount
final Map<String, Object> params = PropertiesUtil.toMap(loadProperties());
String sampleMount = (String)params.get("testCifsMount");
if (!sampleMount.isEmpty()) {
s_logger.info("functional test, mount " + sampleMount);
URI realMntUri = new URI(sampleMount);
String mntSubDir = resource.mountUri(realMntUri);
s_logger.info("functional test, umount " + mntSubDir);
resource.umount(resource.getMountingRoot() + mntSubDir, realMntUri);
} else {
s_logger.info("no entry for testCifsMount in " + "./conf/agent.properties - skip functional test");
}
Assert.assertEquals(expected, stringWriter.toString());
}
/**
 * Loads the agent.properties configuration file.
 *
 * The file is located via {@code PropertiesUtil.findConfigFile} — presumably
 * searched on the classpath/config dirs; confirm against PropertiesUtil.
 *
 * @return the loaded {@link Properties}
 * @throws ConfigurationException if agent.properties cannot be located
 */
public static Properties loadProperties() throws ConfigurationException {
Properties properties = new Properties();
final File file = PropertiesUtil.findConfigFile("agent.properties");
if (file == null) {
throw new ConfigurationException("Unable to find agent.properties.");
}
s_logger.info("agent.properties found at " + file.getAbsolutePath());
// try-with-resources guarantees the stream is closed on every exit path.
try(FileInputStream fs = new FileInputStream(file);) {
properties.load(fs);
} catch (final FileNotFoundException ex) {
// FileNotFound is caught separately from IOException for a clearer message;
// both are rethrown unchecked with the original cause preserved.
throw new CloudRuntimeException("Cannot find the file: " + file.getAbsolutePath(), ex);
} catch (final IOException ex) {
throw new CloudRuntimeException("IOException in reading " + file.getAbsolutePath(), ex);
}
return properties;
}
// Verifies that cleanupStagingNfs() swallows a RuntimeException thrown by the
// DeleteCommand execution and logs a DEBUG message instead of propagating it
// (staging-area cleanup is best-effort).
@Test
public void testCleanupStagingNfs() throws Exception{
NfsSecondaryStorageResource spyResource = spy(resource);
RuntimeException exception = new RuntimeException();
// Force the delete path to fail.
doThrow(exception).when(spyResource).execute(any(DeleteCommand.class));
TemplateObjectTO mockTemplate = Mockito.mock(TemplateObjectTO.class);
// Capture log output and expect the failure to surface only as a DEBUG entry.
TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder();
appenderBuilder.addExpectedPattern(Level.DEBUG, "Failed to clean up staging area:");
TestAppender testLogAppender = appenderBuilder.build();
TestAppender.safeAddAppender(NfsSecondaryStorageResource.s_logger, testLogAppender);
spyResource.cleanupStagingNfs(mockTemplate);
// Fails the test if the expected DEBUG pattern was never logged.
testLogAppender.assertMessagesLogged();
}
}

View File

@ -17,27 +17,16 @@
# specific language governing permissions and limitations
# under the License.
import sys
import os
import base64
from merge import DataBag
from pprint import pprint
import subprocess
import logging
import re
import time
import shutil
import os.path
import os
from fcntl import flock, LOCK_EX, LOCK_UN
from cs.CsDatabag import CsDataBag, CsCmdLine
import cs.CsHelper
from cs.CsDatabag import CsDataBag
from cs.CsNetfilter import CsNetfilters
from cs.CsDhcp import CsDhcp
from cs.CsRedundant import *
from cs.CsFile import CsFile
from cs.CsApp import CsApache, CsDnsmasq
from cs.CsMonitor import CsMonitor
from cs.CsLoadBalancer import CsLoadBalancer
from cs.CsConfig import CsConfig
@ -208,7 +197,23 @@ class CsAcl(CsDataBag):
def process(self, direction, rule_list, base):
count = base
for i in rule_list:
rule_list_splitted = []
for rule in rule_list:
if ',' in rule['cidr']:
cidrs = rule['cidr'].split(',')
for cidr in cidrs:
new_rule = {
'cidr': cidr,
'last_port': rule['last_port'],
'type': rule['type'],
'first_port': rule['first_port'],
'allowed': rule['allowed']
}
rule_list_splitted.append(new_rule)
else:
rule_list_splitted.append(rule)
for i in rule_list_splitted:
r = self.AclRule(direction, self, i, self.config, count)
r.create()
count += 1
@ -261,7 +266,7 @@ class CsAcl(CsDataBag):
rstr = "%s -m icmp --icmp-type %s" % (rstr, self.icmp_type)
rstr = "%s %s -j %s" % (rstr, self.dport, self.action)
rstr = rstr.replace(" ", " ").lstrip()
self.fw.append([self.table, self.count, rstr])
self.fw.append([self.table, "", rstr])
def process(self):
for item in self.dbag:
@ -475,7 +480,7 @@ class CsSite2SiteVpn(CsDataBag):
self.fw.append(["", "front", "-A INPUT -i %s -p udp -m udp --dport 500 -s %s -d %s -j ACCEPT" % (dev, obj['peer_gateway_ip'], obj['local_public_ip'])])
self.fw.append(["", "front", "-A INPUT -i %s -p udp -m udp --dport 4500 -s %s -d %s -j ACCEPT" % (dev, obj['peer_gateway_ip'], obj['local_public_ip'])])
self.fw.append(["", "front", "-A INPUT -i %s -p esp -s %s -d %s -j ACCEPT" % (dev, obj['peer_gateway_ip'], obj['local_public_ip'])])
self.fw.append(["nat", "front", "-A POSTROUTING -t nat -o %s -m mark --mark 0x525 -j ACCEPT" % dev])
self.fw.append(["nat", "front", "-A POSTROUTING -o %s -m mark --mark 0x525 -j ACCEPT" % dev])
for net in obj['peer_guest_cidr_list'].lstrip().rstrip().split(','):
self.fw.append(["mangle", "front",
"-A FORWARD -s %s -d %s -j MARK --set-xmark 0x525/0xffffffff" % (obj['local_guest_cidr'], net)])
@ -791,7 +796,7 @@ class CsForwardingRules(CsDataBag):
rule['internal_ip'],
internal_fwports
)
fw4 = "-j SNAT --to-source %s -A POSTROUTING -s %s -d %s/32 -o %s -p %s -m %s --dport %s" % \
fw4 = "-A POSTROUTING -j SNAT --to-source %s -s %s -d %s/32 -o %s -p %s -m %s --dport %s" % \
(
self.getGuestIp(),
self.getNetworkByIp(rule['internal_ip']),
@ -986,7 +991,7 @@ def main(argv):
lb.process()
logging.debug("Configuring iptables rules")
nf = CsNetfilters()
nf = CsNetfilters(False)
nf.compare(config.get_fw())
logging.debug("Configuring iptables rules done ...saving rules")

View File

@ -15,9 +15,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from CsDatabag import CsDataBag, CsCmdLine
from CsDatabag import CsDataBag
from CsApp import CsApache, CsDnsmasq, CsPasswdSvc
import CsHelper
import logging
from netaddr import IPAddress, IPNetwork
import CsHelper
@ -198,7 +197,7 @@ class CsInterface:
return self.get_attr("add")
def to_str(self):
pprint(self.address)
print(self.address)
class CsDevice:
@ -371,8 +370,6 @@ class CsIP:
self.fw.append(["mangle", "front",
"-A FIREWALL_%s " % self.address['public_ip'] +
"-m state --state RELATED,ESTABLISHED -j ACCEPT"])
self.fw.append(["mangle", "",
"-A FIREWALL_%s DROP" % self.address['public_ip']])
self.fw.append(["mangle", "",
"-A VPN_%s -m state --state RELATED,ESTABLISHED -j ACCEPT" % self.address['public_ip']])
self.fw.append(["mangle", "",
@ -390,8 +387,7 @@ class CsIP:
self.fw.append(["filter", "", "-A INPUT -d 224.0.0.18/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -d 225.0.0.50/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" %
self.dev])
self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" % self.dev])
self.fw.append(["filter", "", "-A INPUT -p icmp -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -i lo -j ACCEPT"])
@ -434,6 +430,13 @@ class CsIP:
self.fw.append(["mangle", "front", "-A PREROUTING " +
"-m state --state RELATED,ESTABLISHED " +
"-j CONNMARK --restore-mark --nfmask 0xffffffff --ctmask 0xffffffff"])
self.fw.append(["", "front", "-A FORWARD -j NETWORK_STATS"])
self.fw.append(["", "front", "-A INPUT -j NETWORK_STATS"])
self.fw.append(["", "front", "-A OUTPUT -j NETWORK_STATS"])
self.fw.append(["filter", "", "-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT"])
if self.get_type() in ["guest"]:
self.fw.append(["filter", "", "-A FORWARD -d %s -o %s -j ACL_INBOUND_%s" %
(self.address['network'], self.dev, self.dev)])
@ -472,10 +475,6 @@ class CsIP:
])
if self.get_type() in ["public"]:
self.fw.append(["", "front",
"-A FORWARD -o %s -d %s -j ACL_INBOUND_%s" % (
self.dev, self.address['network'], self.dev)
])
self.fw.append(
["mangle", "", "-A FORWARD -j VPN_STATS_%s" % self.dev])
self.fw.append(
@ -483,11 +482,7 @@ class CsIP:
self.fw.append(
["mangle", "", "-A VPN_STATS_%s -i %s -m mark --mark 0x524/0xffffffff" % (self.dev, self.dev)])
self.fw.append(
["", "front", "-A FORWARD -j NETWORK_STATS_%s" % self.dev])
self.fw.append(["", "front", "-A FORWARD -j NETWORK_STATS"])
self.fw.append(["", "front", "-A INPUT -j NETWORK_STATS"])
self.fw.append(["", "front", "-A OUTPUT -j NETWORK_STATS"])
["", "front", "-A FORWARD -j NETWORK_STATS_eth1"])
self.fw.append(["", "", "-A NETWORK_STATS -i eth0 -o eth2 -p tcp"])
self.fw.append(["", "", "-A NETWORK_STATS -i eth2 -o eth0 -p tcp"])
@ -496,9 +491,11 @@ class CsIP:
self.fw.append(["filter", "", "-A INPUT -d 224.0.0.18/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -d 225.0.0.50/32 -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT" % self.dev])
self.fw.append(["filter", "", "-A INPUT -i lo -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -p icmp -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -i eth0 -p tcp -m tcp --dport 3922 -m state --state NEW,ESTABLISHED -j ACCEPT"])
self.fw.append(["filter", "", "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"])
self.fw.append(["filter", "", "-P INPUT DROP"])
self.fw.append(["filter", "", "-P FORWARD DROP"])

View File

@ -154,3 +154,7 @@ class CsCmdLine(CsDataBag):
return self.idata()['useextdns']
return False
def get_advert_int(self):
    """Return the configured VRRP advertisement interval from the cmdline
    data bag, falling back to the default of 1 when not set.
    """
    if 'advert_int' not in self.idata():
        return 1
    return self.idata()['advert_int']

View File

@ -54,7 +54,7 @@ class CsDhcp(CsDataBag):
self.cloud.commit()
# We restart DNSMASQ every time the configure.py is called in order to avoid lease problems.
CsHelper.service("dnsmasq", "restart")
CsHelper.execute2("service dnsmasq restart")
def configure_server(self):
# self.conf.addeq("dhcp-hostsfile=%s" % DHCP_HOSTS)

View File

@ -113,6 +113,7 @@ class CsFile:
self.new_config[sind:eind] = content
def greplace(self, search, replace):
    """Globally substitute every occurrence of `search` with `replace`
    in all lines of the pending configuration (self.new_config).
    """
    # Pass format args lazily so the string is only built when DEBUG
    # logging is actually enabled (the old code formatted eagerly with %).
    logging.debug("Searching for %s and replacing with %s", search, replace)
    self.new_config = [line.replace(search, replace) for line in self.new_config]
def search(self, search, replace):

View File

@ -71,14 +71,16 @@ class CsLoadBalancer(CsDataBag):
port = path[1]
firewall.append(["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
for rules in remove_rules:
path = rules.split(':')
ip = path[0]
port = path[1]
firewall.append(["filter", "", "-D INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
for rules in stat_rules:
path = rules.split(':')
ip = path[0]
port = path[1]
firewall.append(["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
for rules in remove_rules:
path = rules.split(':')
ip = path[0]
port = path[1]
if ["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)] in firewall:
firewall.remove(["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])

View File

@ -15,10 +15,12 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import CsHelper
from pprint import pprint
from CsDatabag import CsDataBag, CsCmdLine
from CsDatabag import CsCmdLine
import logging
from cs_iptables_save import Tables
class CsChain(object):
@ -81,6 +83,7 @@ class CsNetfilters(object):
def __init__(self, load=True):
self.rules = []
self.iptablerules = []
self.table = CsTable()
self.chain = CsChain()
if load:
@ -91,7 +94,10 @@ class CsNetfilters(object):
if i.startswith('*'): # Table
self.table.add(i[1:])
if i.startswith(':'): # Chain
self.chain.add(self.table.last(), i[1:].split(' ')[0])
string = i[1:].split(' ')[0]
cmd = "iptables -t %s -N %s" % (self.table.last(), string)
self.iptablerules.append(cmd)
self.chain.add(self.table.last(), string)
if i.startswith('-A'): # Rule
self.chain.add_rule(i.split()[1])
rule = CsNetfilter()
@ -125,10 +131,7 @@ class CsNetfilters(object):
def get_unseen(self):
del_list = [x for x in self.rules if x.unseen()]
for r in del_list:
cmd = "iptables -t %s %s" % (r.get_table(), r.to_str(True))
logging.debug("unseen cmd: %s ", cmd)
CsHelper.execute(cmd)
# print "Delete rule %s from table %s" % (r.to_str(True), r.get_table())
self.delete(r)
logging.info("Delete rule %s from table %s", r.to_str(True), r.get_table())
def compare(self, list):
@ -137,12 +140,16 @@ class CsNetfilters(object):
# Ensure all inbound/outbound chains have a default drop rule
if c.startswith("ACL_INBOUND") or c.startswith("ACL_OUTBOUND"):
list.append(["filter", "", "-A %s -j DROP" % c])
# PASS 1: Ensure all chains are present
# PASS 1: Ensure all chains are present and cleanup unused rules.
for fw in list:
new_rule = CsNetfilter()
new_rule.parse(fw[2])
new_rule.set_table(fw[0])
self.add_chain(new_rule)
self.has_rule(new_rule)
self.del_standard()
self.get_unseen()
# PASS 2: Create rules
for fw in list:
new_rule = CsNetfilter()
@ -151,28 +158,33 @@ class CsNetfilters(object):
if isinstance(fw[1], int):
new_rule.set_count(fw[1])
logging.debug("Checking if the rule already exists: rule=%s table=%s chain=%s", new_rule.get_rule(), new_rule.get_table(), new_rule.get_chain())
if self.has_rule(new_rule):
logging.debug("Exists: rule=%s table=%s", fw[2], new_rule.get_table())
else:
# print "Add rule %s in table %s" % ( fw[2], new_rule.get_table())
logging.info("Add: rule=%s table=%s", fw[2], new_rule.get_table())
# front means insert instead of append
cpy = fw[2]
if fw[1] == "front":
cpy = cpy.replace('-A', '-I')
if isinstance(fw[1], int):
cpy = cpy.replace("-A %s" % new_rule.get_chain(), '-I %s %s' % (new_rule.get_chain(), fw[1]))
logging.info("Add: rule=%s table=%s", fw[2], new_rule.get_table())
# front means insert instead of append
cpy = fw[2]
if fw[1] == "front":
cpy = cpy.replace('-A', '-I')
if isinstance(fw[1], int):
cpy = cpy.replace("-A %s" % new_rule.get_chain(), '-I %s %s' % (new_rule.get_chain(), fw[1]))
CsHelper.execute("iptables -t %s %s" % (new_rule.get_table(), cpy))
self.del_standard()
self.get_unseen()
self.iptablerules.append("iptables -t %s %s" % (new_rule.get_table(), cpy))
self.apply_rules()
def add_chain(self, rule):
""" Add the given chain if it is not already present """
if not self.has_chain(rule.get_table(), rule.get_chain()):
CsHelper.execute("iptables -t %s -N %s" % (rule.get_table(), rule.get_chain()))
self.chain.add(rule.get_table(), rule.get_chain())
def apply_rules(self):
    """Deduplicate the queued iptables commands and commit them in one
    shot: Tables writes /tmp/rules.save, then iptables-restore loads it.
    """
    s = []
    for r in self.iptablerules:
        # BUGFIX: str.replace returns a new string; the old code discarded
        # the result, so runs of spaces were never collapsed and the
        # dedup check below could miss otherwise-identical rules.
        r = r.replace('  ', ' ')  # Remove duplicate spaces
        if r not in s:
            s.append(r)
    chains = Tables(s)
    chains.table_printout()
    # COMMIT all rules.
    result = CsHelper.execute("iptables-restore < /tmp/rules.save")
    if result:
        logging.info("iptables-restore result: %s", result)
    else:
        logging.info("iptables-restore result: success!")
def del_standard(self):
""" Del rules that are there but should not be deleted

View File

@ -138,6 +138,9 @@ class CsRedundant(object):
" router_id ", " router_id %s" % self.cl.get_name())
keepalived_conf.search(
" interface ", " interface %s" % guest.get_device())
keepalived_conf.search(
" advert_int ", " advert_int %s" % self.cl.get_advert_int())
keepalived_conf.greplace("[RROUTER_BIN_PATH]", self.CS_ROUTER_DIR)
keepalived_conf.section("authentication {", "}", [
" auth_type AH \n", " auth_pass %s\n" % self.cl.get_router_password()])

View File

@ -0,0 +1,227 @@
#!/usr/bin/python
#
# -*- coding: utf-8 -*-
#
"""
iptables_converter.py:
convert iptables commands within a script
into a corresponding iptables-save script
default filename to read is rules, to read some other
file, append: -s filename
output is written to stdout for maximum flexibility
Author: Johannes Hubertz <johannes@hubertz.de>
Date: 2015-03-17
version: 0.9.8
License: GNU General Public License version 3 or later
Have Fun!
"""
from __future__ import print_function
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
import re
import sys
import logging
class ConverterError():
    """Fatal conversion error: show the given reason and terminate.

    NOTE(review): this is deliberately not an Exception subclass. Because
    __init__ calls sys.exit(1), a `raise ConverterError(msg)` statement
    never actually raises this object -- the process exits with status 1
    while the instance is being constructed.
    """
    def __init__(self, message):
        """Print message to stdout (keeps output identical on 2.7 and 3.x)."""
        print (message)
        sys.exit(1)
class Chains(UserDict):
    """Rule store for one iptables table class (filter/nat/mangle/raw).

    Behaves as a dict mapping chain name -> list of rule strings in
    '-A ...' form; `poli` maps chain name -> policy ('ACCEPT'/'DROP'/
    'REJECT', or '-' for user chains).  Individual iptables commands are
    fed in through put_into_fgr() and accumulated in iptables-save layout.
    """

    def __init__(self, name, tables):
        """Create the store for table `name` with builtin chains `tables`."""
        UserDict.__init__(self)
        self.name = name
        self.tables = tables
        self.predef = tables
        self.reset()

    def put_into_fgr(self, content):
        """Interpret one iptables command string and record its effect.

        Handles -t (table selection), -F (flush), -P (policy), -X (delete
        user chain), -N (new chain), -I (insert) and -A (append).

        Raises ValueError on an unknown table name, illegal policy,
        deletion of a predefined chain, or an unrecognized action.
        """
        self.length += 1
        liste = content.split()
        action = liste[0]
        if "-t" in action:
            liste.pop(0)  # remove 1st: -t
            fname = liste.pop(0)
            legals = ["filter", "nat", "raw", "mangle"]
            if fname not in legals:
                raise ValueError("Valid is one of %s, got: %s" % (legals, fname))
            action = liste[0]
            # Rebuild content without the '-t <table>' prefix (note: this
            # leaves a trailing blank, matching historical output).
            content = ""
            for elem in liste:
                content = content + elem + " "
        if "-F" in action:
            # Flush: policies and builtin chains are re-initialised.
            # NOTE(review): chains created via -N keep their rules across a
            # flush because reset() only touches self.tables -- confirm
            # whether that is intended.
            self.reset()
            return
        if "-P" in action:
            liste.pop(0)
            cha = liste.pop(0)
            new = liste.pop(0)
            if new not in ["ACCEPT", "DROP", "REJECT"]:
                # FIX: "% s" format typo corrected to "%s".
                raise ValueError("Illegal policy: %s" % new)
            self.poli[cha] = new
            return
        if "-X" in action:
            predef = ['INPUT', 'FORWARD', 'OUTPUT',
                      'PREROUTING', 'POSTROUTING']
            rem_chain_name = liste.pop(1)
            if rem_chain_name in predef:
                raise ValueError("Cannot remove predefined chain")
            if rem_chain_name in self.data:
                self.data[rem_chain_name] = []   # empty list
                self.poli[rem_chain_name] = "-"  # empty policy, no need
                self.data.pop(rem_chain_name)
            return
        if "-N" in action:
            new_chain_name = liste.pop(1)
            if new_chain_name in self.data.keys():
                # Lazy logging args instead of eager % formatting.
                logging.debug("Chain %s already exists", new_chain_name)
                return
            self.data[new_chain_name] = []   # empty list
            self.poli[new_chain_name] = "-"  # user chains carry no policy
            return
        if "-I" in action:
            chain_name = liste[1]
            if chain_name not in self.data.keys():
                self.data[chain_name] = []
                self.poli[chain_name] = "-"
            kette = self.data[chain_name]
            # Inserts are stored in '-A' form at the front so the saved
            # file replays into the same final order.
            kette.insert(0, content.replace("-I", "-A"))
            self.data[chain_name] = kette
            return
        if "-A" in action:
            chain_name = liste[1]
            if chain_name not in self.data.keys():
                self.data[chain_name] = []
                self.poli[chain_name] = "-"
            kette = self.data[chain_name]
            kette.append(content)
            self.data[chain_name] = kette
            return
        # BUGFIX: the message used to be built as a tuple
        # ("Unknown filter command in input:", content); format it into a
        # single string instead.
        raise ValueError("Unknown filter command in input: %s" % content)

    def reset(self):
        """Re-initialise: builtin chains empty with ACCEPT policy, length 0."""
        self.poli = {}  # empty dict
        self.length = 0
        self.policy = "-"  # NOTE(review): appears unused here -- confirm callers
        for tabular in self.tables:
            self.data[tabular] = []
            self.poli[tabular] = "ACCEPT"
class Tables(UserDict):
"""
some chaingroups in tables are predef: filter, nat, mangle, raw
"""
def __init__(self, rules):
    """Build the four predefined table groups; when `rules` is not None it
    is treated as an iterable of iptables command lines and loaded via
    reset() -> read_file().
    """
    UserDict.__init__(self)
    self.reset(rules)
def reset(self, rules):
    """Recreate the four predefined table groups (filter, mangle, nat,
    raw) with their builtin chains, then load `rules` when given.
    """
    builtin_chains = {
        "filter": ["INPUT", "FORWARD", "OUTPUT"],
        "mangle": ["PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"],
        # kernel 2.6.32 has no INPUT in NAT!
        "nat": ["PREROUTING", "OUTPUT", "POSTROUTING"],
        "raw": ["PREROUTING", "OUTPUT"],
    }
    for table_name, chain_names in builtin_chains.items():
        self.data[table_name] = Chains(table_name, chain_names)
    if rules is not None:
        self.read_file(rules)
def table_printout(self):
    """Write all tables to /tmp/rules.save in iptables-save format, in
    the fixed order raw, nat, mangle, filter: '*table', one ':chain
    policy [0:0]' line per chain, the rules, then 'COMMIT'.
    """
    with open("/tmp/rules.save", 'w') as out:
        for table_name in ["raw", "nat", "mangle", "filter"]:
            table = self.data[table_name]
            # length starts at 0 and never goes negative, so this guard is
            # effectively always true; kept for fidelity with the original.
            if table.length > -1:
                out.write("*%s\n" % table.name)
                for chain_name in table.keys():
                    out.write(":%s %s [0:0]\n" % (chain_name, table.poli[chain_name]))
                for rule_list in table.values():
                    for rule in rule_list:
                        out.write(rule + "\n")
                out.write("COMMIT\n")
def put_into_tables(self, line):
    """Dispatch one 'iptables ...' command line to the Chains object of
    the table it addresses (-t nat/mangle/raw; default is filter).

    Shell redirections ('>' tokens) are stripped before dispatch.
    """
    liste = line.split()
    liste.pop(0)  # we always know, it's iptables
    rest = ""
    for elem in liste:  # remove redirects and the like
        if ">" not in elem:
            rest = rest + elem + " "  # string again with single blanks
    # FIX: dropped the dead `action = liste.pop(0)` -- the variable was
    # never read and the pop needlessly mutated the token list.
    fam = "filter"
    if "-t nat" in line:  # nat filter group
        fam = "nat"
    elif "-t mangle" in line:  # mangle filter group
        fam = "mangle"
    elif "-t raw" in line:  # raw filter group
        fam = "raw"
    fam_dict = self.data[fam]  # select the group dictionary
    fam_dict.put_into_fgr(rest)  # do action there
def read_file(self, rules):
    """Parse an iterable of text lines (e.g. an open rules file) and feed
    every line that invokes iptables into put_into_tables().

    Aborts via ConverterError (which prints and exits the process) when a
    line contains shell variables or function syntax, since only plain
    iptables invocations can be converted.
    """
    self.linecounter = 0
    self.tblctr = 0
    # NOTE(review): 'zeile' and 'muster' are German for 'line' and 'pattern'.
    for zeile in rules:
        line = str(zeile.strip())
        self.linecounter += 1
        # Comment lines are skipped entirely.
        if line.startswith('#'):
            continue
        # Reject lines containing shell variables ('$') or function syntax
        # ('(' / ')') -- they cannot be statically converted.
        # NOTE(review): '\$' etc. are non-raw escape strings; they work as
        # regexes here but raise SyntaxWarning on newer Pythons --
        # consider raw strings (r'\$').
        for element in ['\$', '\(', '\)', ]:
            if re.search(element, line):
                m1 = "Line %d:\n%s\nplain files only, " % \
                    (self.linecounter, line)
                if element in ['\(', '\)', ]:
                    m2 = "unable to convert shell functions, abort"
                else:
                    m2 = "unable to resolve shell variables, abort"
                msg = m1 + m2
                raise ConverterError(msg)
        # Only lines that actually invoke iptables are converted.
        for muster in ["^/sbin/iptables ", "^iptables "]:
            if re.search(muster, line):
                self.tblctr += 1
                self.put_into_tables(line)