Mirror of https://github.com/apache/cloudstack.git

Merge branch '4.18' into 4.19

commit 050ee44137
@@ -29,6 +29,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
+import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;

 import com.cloud.exception.DiscoveryException;
@@ -110,6 +111,8 @@ public interface StorageService {
      */
     ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException;

+    ImageStore updateImageStore(UpdateImageStoreCmd cmd);
+
     ImageStore updateImageStoreStatus(Long id, Boolean readonly);

     void updateStorageCapabilities(Long poolId, boolean failOnChecks);
@@ -41,10 +41,17 @@ public class UpdateImageStoreCmd extends BaseCmd {
     @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID")
     private Long id;

-    @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " +
-            "hence not considering them during storage migration")
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.")
+    private String name;
+
+    @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false,
+            description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration")
     private Boolean readonly;

+    @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false,
+            description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.")
+    private Long capacityBytes;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -53,17 +60,25 @@ public class UpdateImageStoreCmd extends BaseCmd {
         return id;
     }

+    public String getName() {
+        return name;
+    }
+
     public Boolean getReadonly() {
         return readonly;
     }

+    public Long getCapacityBytes() {
+        return capacityBytes;
+    }
+
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////

     @Override
     public void execute() {
-        ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly());
+        ImageStore result = _storageService.updateImageStore(this);
         ImageStoreResponse storeResponse = null;
         if (result != null) {
             storeResponse = _responseGenerator.createImageStoreResponse(result);
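With the command reworked as above, a single updateImageStore API call can rename a store, toggle its read-only flag, and override its reported capacity; only the id parameter is still mandatory. As a rough illustration (the UUID and values are placeholders, not taken from this commit; the parameter names follow the command class and the UI change further down), such a request could look like:

    command=updateImageStore&id=<image-store-uuid>&name=secondary-nfs-renamed&readonly=false&capacitybytes=1099511627776

Any parameter left out is simply not changed, because the service implementation further down in this diff only applies non-null values.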
@@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName;

 @EntityReference(value = ImageStore.class)
 public class ImageStoreResponse extends BaseResponseWithAnnotations {
-    @SerializedName("id")
+    @SerializedName(ApiConstants.ID)
     @Param(description = "the ID of the image store")
     private String id;

-    @SerializedName("zoneid")
+    @SerializedName(ApiConstants.ZONE_ID)
     @Param(description = "the Zone ID of the image store")
     private String zoneId;

@@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations {
     @Param(description = "the Zone name of the image store")
     private String zoneName;

-    @SerializedName("name")
+    @SerializedName(ApiConstants.NAME)
     @Param(description = "the name of the image store")
     private String name;

-    @SerializedName("url")
+    @SerializedName(ApiConstants.URL)
     @Param(description = "the url of the image store")
     private String url;

-    @SerializedName("protocol")
+    @SerializedName(ApiConstants.PROTOCOL)
     @Param(description = "the protocol of the image store")
     private String protocol;

@@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations {
     @Param(description = "the provider name of the image store")
     private String providerName;

-    @SerializedName("scope")
+    @SerializedName(ApiConstants.SCOPE)
     @Param(description = "the scope of the image store")
     private ScopeType scope;

-    @SerializedName("readonly")
+    @SerializedName(ApiConstants.READ_ONLY)
     @Param(description = "defines if store is read-only")
     private Boolean readonly;

@@ -361,6 +361,8 @@ public interface StorageManager extends StorageService {

     Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering);

+    ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes);
+
     void cleanupDownloadUrls();

     void setDiskProfileThrottling(DiskProfile dskCh, ServiceOffering offering, DiskOffering diskOffering);
@@ -3126,7 +3126,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
     @Override
     public ListResponse<ImageStoreResponse> searchForImageStores(ListImageStoresCmd cmd) {
         Pair<List<ImageStoreJoinVO>, Integer> result = searchForImageStoresInternal(cmd);
-        ListResponse<ImageStoreResponse> response = new ListResponse<ImageStoreResponse>();
+        ListResponse<ImageStoreResponse> response = new ListResponse<>();

         List<ImageStoreResponse> poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()]));
         response.setResponses(poolResponses, result.second());
@@ -453,7 +453,10 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
             ServiceOfferingDetailsVO offeringDetails = null;
             if (host == null) {
                 s_logger.debug("The last host of this VM cannot be found");
-            } else if (avoids.shouldAvoid(host)) {
+            } else {
+                _hostDao.loadHostTags(host);
+                _hostDao.loadDetails(host);
+                if (avoids.shouldAvoid(host)) {
                 s_logger.debug("The last host of this VM is in avoid set");
             } else if (plan.getClusterId() != null && host.getClusterId() != null
                     && !plan.getClusterId().equals(host.getClusterId())) {
@@ -549,6 +552,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
                             host.getResourceState());
                 }
             }
+            }
             s_logger.debug("Cannot choose the last host to deploy this VM ");
         }

@@ -1672,7 +1672,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
         }

         List<DataStore> stores = _dataStoreMgr.listImageStores();
-        ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<Long, StorageStats>();
+        ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<>();
         for (DataStore store : stores) {
             if (store.getUri() == null) {
                 continue;
@@ -1692,7 +1692,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
                     LOGGER.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes()));
                 }
             }
-            _storageStats = storageStats;
+            updateStorageStats(storageStats);
             ConcurrentHashMap<Long, StorageStats> storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();

             List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
@@ -1742,6 +1742,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
                 LOGGER.error("Error trying to retrieve storage stats", t);
             }
         }
+
+        private void updateStorageStats(ConcurrentHashMap<Long, StorageStats> storageStats) {
+            for (Long storeId : storageStats.keySet()) {
+                if (_storageStats.containsKey(storeId)
+                        && (_storageStats.get(storeId).getCapacityBytes() == 0l
+                        || _storageStats.get(storeId).getCapacityBytes() != storageStats.get(storeId).getCapacityBytes())) {
+                    // get add to DB rigorously
+                    _storageManager.updateImageStoreStatus(storeId, null, null, storageStats.get(storeId).getCapacityBytes());
+                }
+            }
+            // if in _storageStats and not in storageStats it gets discarded
+            _storageStats = storageStats;
+        }
     }

     class AutoScaleMonitor extends ManagedContextRunnable {
@@ -62,6 +62,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
+import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd;
 import org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd;
@@ -138,7 +139,6 @@ import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.time.DateUtils;
 import org.apache.commons.lang3.EnumUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;

@@ -235,6 +235,7 @@ import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.UriUtils;
 import com.cloud.utils.StringUtils;
 import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
@@ -3279,20 +3280,38 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return discoverImageStore(name, url, providerName, null, details);
     }

+    @Override
+    public ImageStore updateImageStore(UpdateImageStoreCmd cmd) {
+        return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes());
+    }
+
     @Override
     @ActionEvent(eventType = EventTypes.EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE,
             eventDescription = "image store access updated")
-    public ImageStore updateImageStoreStatus(Long id, Boolean readonly) {
+    public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) {
         // Input validation
         ImageStoreVO imageStoreVO = _imageStoreDao.findById(id);
         if (imageStoreVO == null) {
             throw new IllegalArgumentException("Unable to find image store with ID: " + id);
         }
+        if (com.cloud.utils.StringUtils.isNotBlank(name)) {
+            imageStoreVO.setName(name);
+        }
+        if (capacityBytes != null) {
+            imageStoreVO.setTotalSize(capacityBytes);
+        }
         if (readonly != null) {
             imageStoreVO.setReadonly(readonly);
         }
         _imageStoreDao.update(id, imageStoreVO);
         return imageStoreVO;
     }
+
+    @Override
+    public ImageStore updateImageStoreStatus(Long id, Boolean readonly) {
+        return updateImageStoreStatus(id, null, readonly, null);
+    }

     /**
      * @param poolId - Storage pool id for pool to update.
      * @param failOnChecks - If true, throw an error if pool type and state checks fail.
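The new four-argument overload above treats null (and a blank name) as "leave unchanged", so callers can update one attribute without re-sending the others, and the old two-argument signature keeps working as a thin delegate. A minimal sketch of that contract, assuming a wired-in StorageManager reference and an existing store with ID 42L (the variable names and the ID are illustrative, imports omitted; this is not code from the commit):

    // Rename only: readonly and capacityBytes are null, so the flag and size are untouched.
    ImageStore renamed = storageManager.updateImageStoreStatus(42L, "secondary-nfs-renamed", null, null);

    // Mark read-only only: the null name fails the isNotBlank() check and is ignored.
    ImageStore frozen = storageManager.updateImageStoreStatus(42L, null, true, null);

    // The legacy two-argument form now just delegates with nulls for name and capacity.
    ImageStore legacy = storageManager.updateImageStoreStatus(42L, false);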
@@ -531,8 +531,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar

     /**
      * Get the default network for the secondary storage VM, based on the zone it is in. Delegates to
-     * either {@link #getDefaultNetworkForZone(DataCenter)} or {@link #getDefaultNetworkForAdvancedSGZone(DataCenter)},
-     * depending on the zone network type and whether or not security groups are enabled in the zone.
+     * either {@link #getDefaultNetworkForAdvancedZone(DataCenter)} or {@link #getDefaultNetworkForBasicZone(DataCenter)},
+     * depending on the zone network type and whether security groups are enabled in the zone.
      * @param dc - The zone (DataCenter) of the secondary storage VM.
      * @return The default network for use with the secondary storage VM.
      */
@@ -0,0 +1,66 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package org.apache.cloudstack.storage.resource;
+
+import com.cloud.utils.script.Script;
+import org.apache.log4j.Logger;
+
+public class IpTablesHelper {
+    public static final Logger LOGGER = Logger.getLogger(IpTablesHelper.class);
+
+    public static final String OUTPUT_CHAIN = "OUTPUT";
+    public static final String INPUT_CHAIN = "INPUT";
+    public static final String INSERT = " -I ";
+    public static final String APPEND = " -A ";
+
+    public static boolean needsAdding(String chain, String rule) {
+        Script command = new Script("/bin/bash", LOGGER);
+        command.add("-c");
+        command.add("iptables -C " + chain + " " + rule);
+
+        String commandOutput = command.execute();
+        boolean needsAdding = (commandOutput != null && commandOutput.contains("iptables: Bad rule (does a matching rule exist in that chain?)."));
+        LOGGER.debug(String.format("Rule [%s], %s need adding to [%s] : %s",
+                rule,
+                needsAdding ? "does indeed" : "doesn't",
+                chain,
+                commandOutput
+        ));
+        return needsAdding;
+    }
+
+    public static String addConditionally(String chain, boolean insert, String rule, String errMsg) {
+        LOGGER.info(String.format("Adding rule [%s] to [%s] if required.", rule, chain));
+        if (needsAdding(chain, rule)) {
+            Script command = new Script("/bin/bash", LOGGER);
+            command.add("-c");
+            command.add("iptables" + (insert ? INSERT : APPEND) + chain + " " + rule);
+            String result = command.execute();
+            LOGGER.debug(String.format("Executed [%s] with result [%s]", command, result));
+            if (result != null) {
+                LOGGER.warn(String.format("%s , err = %s", errMsg, result));
+                return errMsg + result;
+            }
+        } else {
+            LOGGER.warn("Rule already defined in SVM: " + rule);
+        }
+        return null;
+    }
+}
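The helper above centralizes the pattern that the following hunks switch to: probe for an existing rule with iptables -C and only insert (-I) or append (-A) it when the probe reports it missing, so a restarted service no longer piles up duplicate rules. A small usage sketch, not taken from the commit (the interface, port, and messages are made-up examples):

    // Insert an ACCEPT rule at the top of INPUT only if an identical rule is not already present.
    String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 8250 -j ACCEPT";
    String error = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error opening port 8250");
    if (error != null) {
        // addConditionally returns the error message plus the iptables output on failure, null otherwise.
        IpTablesHelper.LOGGER.warn(error);
    }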
@@ -2329,15 +2329,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         if (!_inSystemVM) {
             return null;
         }
-        Script command = new Script("/bin/bash", s_logger);
         String intf = "eth1";
-        command.add("-c");
-        command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT");
+        String rule = String.format("-o %s -d %s -p tcp -m state --state NEW -m tcp -j ACCEPT", intf, destCidr);
+        String errMsg = String.format("Error in allowing outgoing to %s", destCidr);

-        String result = command.execute();
+        s_logger.info(String.format("Adding rule if required: " + rule));
+        String result = IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN, true, rule, errMsg);
         if (result != null) {
-            s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result);
-            return "Error in allowing outgoing to " + destCidr + ", err=" + result;
+            return result;
         }

         addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr);
@@ -2874,13 +2873,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         if (result != null) {
             s_logger.warn("Error in starting sshd service err=" + result);
         }
-        command = new Script("/bin/bash", s_logger);
-        command.add("-c");
-        command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT");
-        result = command.execute();
-        if (result != null) {
-            s_logger.warn("Error in opening up ssh port err=" + result);
-        }
+        String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT";
+        IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error in opening up ssh port");
     }

     private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {
@@ -48,6 +48,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand;
 import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
 import org.apache.cloudstack.storage.command.DownloadProgressCommand;
 import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType;
+import org.apache.cloudstack.storage.resource.IpTablesHelper;
 import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource;
 import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
 import org.apache.cloudstack.utils.security.ChecksumValue;
@@ -1225,17 +1226,14 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
     }

     private void blockOutgoingOnPrivate() {
-        Script command = new Script("/bin/bash", LOGGER);
-        String intf = "eth1";
-        command.add("-c");
-        command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf +
-            " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j REJECT;");
-
-        String result = command.execute();
-        if (result != null) {
-            LOGGER.warn("Error in blocking outgoing to port 80/443 err=" + result);
-            return;
-        }
+        IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
+                , false
+                , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT;"
+                , "Error in blocking outgoing to port 80");
+        IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
+                , false
+                , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT;"
+                , "Error in blocking outgoing to port 443");
     }

     @Override
@@ -1261,17 +1259,19 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
         if (result != null) {
             LOGGER.warn("Error in stopping httpd service err=" + result);
         }
-        String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT);
-        String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF;

-        command = new Script("/bin/bash", LOGGER);
-        command.add("-c");
-        command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j ACCEPT;" + "iptables -I INPUT -i " + intf +
-            " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;");
-
-        result = command.execute();
+        result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
+                , true
+                , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport " + TemplateConstants.DEFAULT_TMPLT_COPY_PORT + " -j ACCEPT"
+                , "Error in opening up apache2 port " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE);
+        if (result != null) {
+            return;
+        }
+        result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
+                , true
+                , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT;"
+                , "Error in opening up apache2 port 443");
         if (result != null) {
             LOGGER.warn("Error in opening up apache2 port err=" + result);
             return;
         }

@@ -221,7 +221,7 @@ def save_iptables(command, iptables_file):

 def execute2(command, wait=True):
     """ Execute command """
-    logging.info("Executing: %s" % command)
+    logging.info("Executing2: %s" % command)
     p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
     if wait:
         p.wait()
@@ -97,21 +97,10 @@ export default {
       },
       {
         api: 'updateImageStore',
-        icon: 'stop-outlined',
-        label: 'label.action.image.store.read.only',
-        message: 'message.action.secondary.storage.read.only',
+        icon: 'edit-outlined',
+        label: 'label.edit',
         dataView: true,
-        defaultArgs: { readonly: true },
-        show: (record) => { return record.readonly === false }
-      },
-      {
-        api: 'updateImageStore',
-        icon: 'check-circle-outlined',
-        label: 'label.action.image.store.read.write',
-        message: 'message.action.secondary.storage.read.write',
-        dataView: true,
-        defaultArgs: { readonly: false },
-        show: (record) => { return record.readonly === true }
+        args: ['name', 'readonly', 'capacitybytes']
       },
       {
         api: 'deleteImageStore',
@@ -705,7 +705,6 @@ export default {
     },
     getOkProps () {
       if (this.selectedRowKeys.length > 0 && this.currentAction?.groupAction) {
         return { props: { type: 'default' } }
       } else {
         return { props: { type: 'primary' } }
       }
@@ -371,12 +371,19 @@ public class Script implements Callable<String> {
                     //process completed successfully
                     if (_process.exitValue() == 0 || _process.exitValue() == exitValue) {
                         _logger.debug("Execution is successful.");
+                        String result;
+                        String method;
                         if (interpreter != null) {
-                            return interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
+                            _logger.debug("interpreting the result...");
+                            method = "result interpretation of execution: ";
+                            result= interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
                         } else {
                             // null return exitValue apparently
-                            return String.valueOf(_process.exitValue());
+                            method = "return code of execution: ";
+                            result = String.valueOf(_process.exitValue());
                         }
+                        _logger.debug(method + result);
+                        return result;
                     } else { //process failed
                         break;
                     }