mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)

commit 050ee44137
Merge branch '4.18' into 4.19
@@ -29,6 +29,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
+import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
 
 import com.cloud.exception.DiscoveryException;
@@ -110,6 +111,8 @@ public interface StorageService {
      */
    ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException;
 
+    ImageStore updateImageStore(UpdateImageStoreCmd cmd);
+
    ImageStore updateImageStoreStatus(Long id, Boolean readonly);
 
    void updateStorageCapabilities(Long poolId, boolean failOnChecks);
@@ -41,10 +41,17 @@ public class UpdateImageStoreCmd extends BaseCmd {
     @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID")
     private Long id;
 
-    @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " +
-            "hence not considering them during storage migration")
+    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.")
+    private String name;
+
+    @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false,
+            description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration")
     private Boolean readonly;
 
+    @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false,
+            description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.")
+    private Long capacityBytes;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -53,17 +60,25 @@ public class UpdateImageStoreCmd extends BaseCmd {
         return id;
     }
 
+    public String getName() {
+        return name;
+    }
+
     public Boolean getReadonly() {
         return readonly;
     }
 
+    public Long getCapacityBytes() {
+        return capacityBytes;
+    }
+
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////
 
     @Override
     public void execute() {
-        ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly());
+        ImageStore result = _storageService.updateImageStore(this);
         ImageStoreResponse storeResponse = null;
         if (result != null) {
             storeResponse = _responseGenerator.createImageStoreResponse(result);
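Note: execute() now hands the whole command object to the service layer instead of unpacking id and readonly itself. A hedged sketch of what the service can read from the command (names taken from the hunks above; every field except id may be null when the caller omits the argument):

    Long id = cmd.getId();                        // required Image Store UUID argument
    String name = cmd.getName();                  // optional new name
    Boolean readonly = cmd.getReadonly();         // optional read-only flag
    Long capacityBytes = cmd.getCapacityBytes();  // optional capacity cap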
@@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName;
 
 @EntityReference(value = ImageStore.class)
 public class ImageStoreResponse extends BaseResponseWithAnnotations {
-    @SerializedName("id")
+    @SerializedName(ApiConstants.ID)
     @Param(description = "the ID of the image store")
     private String id;
 
-    @SerializedName("zoneid")
+    @SerializedName(ApiConstants.ZONE_ID)
     @Param(description = "the Zone ID of the image store")
     private String zoneId;
 
@@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations {
     @Param(description = "the Zone name of the image store")
     private String zoneName;
 
-    @SerializedName("name")
+    @SerializedName(ApiConstants.NAME)
     @Param(description = "the name of the image store")
     private String name;
 
-    @SerializedName("url")
+    @SerializedName(ApiConstants.URL)
     @Param(description = "the url of the image store")
     private String url;
 
-    @SerializedName("protocol")
+    @SerializedName(ApiConstants.PROTOCOL)
     @Param(description = "the protocol of the image store")
     private String protocol;
 
@@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations {
     @Param(description = "the provider name of the image store")
     private String providerName;
 
-    @SerializedName("scope")
+    @SerializedName(ApiConstants.SCOPE)
     @Param(description = "the scope of the image store")
     private ScopeType scope;
 
-    @SerializedName("readonly")
+    @SerializedName(ApiConstants.READ_ONLY)
     @Param(description = "defines if store is read-only")
     private Boolean readonly;
 
@@ -361,6 +361,8 @@ public interface StorageManager extends StorageService {
 
    Long getDiskIopsWriteRate(ServiceOffering offering, DiskOffering diskOffering);
 
+    ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes);
+
    void cleanupDownloadUrls();
 
    void setDiskProfileThrottling(DiskProfile dskCh, ServiceOffering offering, DiskOffering diskOffering);
@@ -3126,7 +3126,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
     @Override
     public ListResponse<ImageStoreResponse> searchForImageStores(ListImageStoresCmd cmd) {
         Pair<List<ImageStoreJoinVO>, Integer> result = searchForImageStoresInternal(cmd);
-        ListResponse<ImageStoreResponse> response = new ListResponse<ImageStoreResponse>();
+        ListResponse<ImageStoreResponse> response = new ListResponse<>();
 
         List<ImageStoreResponse> poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()]));
         response.setResponses(poolResponses, result.second());
@@ -453,100 +453,104 @@ StateListener<State, VirtualMachine.Event, VirtualMachine>, Configurable {
         ServiceOfferingDetailsVO offeringDetails = null;
         if (host == null) {
             s_logger.debug("The last host of this VM cannot be found");
-        } else if (avoids.shouldAvoid(host)) {
-            s_logger.debug("The last host of this VM is in avoid set");
-        } else if (plan.getClusterId() != null && host.getClusterId() != null
-                && !plan.getClusterId().equals(host.getClusterId())) {
-            s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: "
-                    + plan.getClusterId());
-        } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
-            s_logger.debug("The last Host, hostId: " + host.getId() +
-                    " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
-        } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
-            ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
-            if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){
-                s_logger.debug("The last host of this VM does not have required GPU devices available");
-            }
-        } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) {
-            s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host));
         } else {
-            if (host.getStatus() == Status.Up) {
-                if (checkVmProfileAndHost(vmProfile, host)) {
-                    long cluster_id = host.getClusterId();
-                    ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
-                            "cpuOvercommitRatio");
-                    ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
-                            "memoryOvercommitRatio");
-                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
-                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+            _hostDao.loadHostTags(host);
+            _hostDao.loadDetails(host);
+            if (avoids.shouldAvoid(host)) {
+                s_logger.debug("The last host of this VM is in avoid set");
+            } else if (plan.getClusterId() != null && host.getClusterId() != null
+                    && !plan.getClusterId().equals(host.getClusterId())) {
+                s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: "
+                        + plan.getClusterId());
+            } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
+                s_logger.debug("The last Host, hostId: " + host.getId() +
+                        " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
+            } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
+                ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
+                if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){
+                    s_logger.debug("The last host of this VM does not have required GPU devices available");
+                }
+            } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) {
+                s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host));
+            } else {
+                if (host.getStatus() == Status.Up) {
+                    if (checkVmProfileAndHost(vmProfile, host)) {
+                        long cluster_id = host.getClusterId();
+                        ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
+                                "cpuOvercommitRatio");
+                        ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
+                                "memoryOvercommitRatio");
+                        Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                        Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
 
-                    boolean hostHasCpuCapability, hostHasCapacity = false;
-                    hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
+                        boolean hostHasCpuCapability, hostHasCapacity = false;
+                        hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
 
-                    if (hostHasCpuCapability) {
-                        // first check from reserved capacity
-                        hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true);
+                        if (hostHasCpuCapability) {
+                            // first check from reserved capacity
+                            hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true);
 
-                        // if not reserved, check the free capacity
-                        if (!hostHasCapacity)
-                            hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true);
-                    }
+                            // if not reserved, check the free capacity
+                            if (!hostHasCapacity)
+                                hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true);
+                        }
 
-                    boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile);
-                    if (hostHasCapacity
-                            && hostHasCpuCapability) {
-                        s_logger.debug("The last host of this VM is UP and has enough capacity");
-                        s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
-                                + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
+                        boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile);
+                        if (hostHasCapacity
+                                && hostHasCpuCapability) {
+                            s_logger.debug("The last host of this VM is UP and has enough capacity");
+                            s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
+                                    + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
 
-                        Pod pod = _podDao.findById(host.getPodId());
-                        Cluster cluster = _clusterDao.findById(host.getClusterId());
-                        if (vm.getHypervisorType() == HypervisorType.BareMetal) {
-                            DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>(), displayStorage);
-                            s_logger.debug("Returning Deployment Destination: " + dest);
-                            return dest;
-                        }
+                            Pod pod = _podDao.findById(host.getPodId());
+                            Cluster cluster = _clusterDao.findById(host.getClusterId());
+                            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
+                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>(), displayStorage);
+                                s_logger.debug("Returning Deployment Destination: " + dest);
+                                return dest;
+                            }
 
-                        // search for storage under the zone, pod, cluster
-                        // of
-                        // the last host.
-                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
-                                host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
-                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
-                                vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
-                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                        List<Volume> readyAndReusedVolumes = result.second();
+                            // search for storage under the zone, pod, cluster
+                            // of
+                            // the last host.
+                            DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
+                                    host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
+                            Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
+                                    vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
+                            Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
+                            List<Volume> readyAndReusedVolumes = result.second();
 
-                        // choose the potential pool for this VM for this
-                        // host
-                        if (!suitableVolumeStoragePools.isEmpty()) {
-                            List<Host> suitableHosts = new ArrayList<Host>();
-                            suitableHosts.add(host);
-                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
-                                    suitableHosts, suitableVolumeStoragePools, avoids,
-                                    getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm);
-                            if (potentialResources != null) {
-                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                                // remove the reused vol<->pool from
-                                // destination, since we don't have to
-                                // prepare
-                                // this volume.
-                                for (Volume vol : readyAndReusedVolumes) {
-                                    storageVolMap.remove(vol);
-                                }
-                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host,
-                                        storageVolMap, displayStorage);
-                                s_logger.debug("Returning Deployment Destination: " + dest);
-                                return dest;
-                            }
-                        }
-                    } else {
-                        s_logger.debug("The last host of this VM does not have enough capacity");
-                    }
-                }
-            } else {
-                s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +
-                        host.getResourceState());
-            }
-        }
+                            // choose the potential pool for this VM for this
+                            // host
+                            if (!suitableVolumeStoragePools.isEmpty()) {
+                                List<Host> suitableHosts = new ArrayList<Host>();
+                                suitableHosts.add(host);
+                                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
+                                        suitableHosts, suitableVolumeStoragePools, avoids,
+                                        getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm);
+                                if (potentialResources != null) {
+                                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
+                                    // remove the reused vol<->pool from
+                                    // destination, since we don't have to
+                                    // prepare
+                                    // this volume.
+                                    for (Volume vol : readyAndReusedVolumes) {
+                                        storageVolMap.remove(vol);
+                                    }
+                                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host,
+                                            storageVolMap, displayStorage);
+                                    s_logger.debug("Returning Deployment Destination: " + dest);
+                                    return dest;
+                                }
+                            }
+                        } else {
+                            s_logger.debug("The last host of this VM does not have enough capacity");
+                        }
+                    }
+                } else {
+                    s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +
+                            host.getResourceState());
+                }
+            }
+        }
         s_logger.debug("Cannot choose the last host to deploy this VM ");
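Note: the planner change above is mostly re-indentation; the substantive difference is that host tags and details are loaded once, up front, before any of the checks run. A condensed sketch of the new control flow (method and field names from the hunk):

    if (host == null) {
        // no last host to reuse
    } else {
        _hostDao.loadHostTags(host);  // populate tags before checkVmProfileAndHost(...)
        _hostDao.loadDetails(host);   // populate details, e.g. Host.HOST_VOLUME_ENCRYPTION
        // then, as before: avoid set, cluster match, guest limit, GPU and
        // encryption checks, and finally the Status.Up capacity path
    }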
@@ -1672,7 +1672,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
             }
 
             List<DataStore> stores = _dataStoreMgr.listImageStores();
-            ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<Long, StorageStats>();
+            ConcurrentHashMap<Long, StorageStats> storageStats = new ConcurrentHashMap<>();
             for (DataStore store : stores) {
                 if (store.getUri() == null) {
                     continue;
@@ -1692,7 +1692,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
                         LOGGER.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes()));
                     }
                 }
-                _storageStats = storageStats;
+                updateStorageStats(storageStats);
                 ConcurrentHashMap<Long, StorageStats> storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();
 
                 List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
@@ -1742,6 +1742,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
                 LOGGER.error("Error trying to retrieve storage stats", t);
             }
         }
+
+        private void updateStorageStats(ConcurrentHashMap<Long, StorageStats> storageStats) {
+            for (Long storeId : storageStats.keySet()) {
+                if (_storageStats.containsKey(storeId)
+                        && (_storageStats.get(storeId).getCapacityBytes() == 0l
+                        || _storageStats.get(storeId).getCapacityBytes() != storageStats.get(storeId).getCapacityBytes())) {
+                    // get add to DB rigorously
+                    _storageManager.updateImageStoreStatus(storeId, null, null, storageStats.get(storeId).getCapacityBytes());
+                }
+            }
+            // if in _storageStats and not in storageStats it gets discarded
+            _storageStats = storageStats;
+        }
     }
 
     class AutoScaleMonitor extends ManagedContextRunnable {
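Note: updateStorageStats persists a refreshed capacity through the new four-argument updateImageStoreStatus, passing null for name and readonly so only the capacity column is written; swapping in the fresh map at the end discards stores that no longer report stats. A hedged restatement of the persist condition (variable names hypothetical):

    // persist when the store was already tracked and its capacity was either
    // unknown (0) or differs from the freshly collected value
    boolean persist = previous != null
            && (previous.getCapacityBytes() == 0L
            || previous.getCapacityBytes() != current.getCapacityBytes());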
@@ -62,6 +62,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd;
+import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd;
 import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd;
 import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd;
 import org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd;
@@ -138,7 +139,6 @@ import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.time.DateUtils;
 import org.apache.commons.lang3.EnumUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
@@ -235,6 +235,7 @@ import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.UriUtils;
+import com.cloud.utils.StringUtils;
 import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
@@ -3279,20 +3280,38 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
         return discoverImageStore(name, url, providerName, null, details);
     }
 
+    @Override
+    public ImageStore updateImageStore(UpdateImageStoreCmd cmd) {
+        return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes());
+    }
+
     @Override
     @ActionEvent(eventType = EventTypes.EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE,
             eventDescription = "image store access updated")
-    public ImageStore updateImageStoreStatus(Long id, Boolean readonly) {
+    public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) {
         // Input validation
         ImageStoreVO imageStoreVO = _imageStoreDao.findById(id);
         if (imageStoreVO == null) {
             throw new IllegalArgumentException("Unable to find image store with ID: " + id);
         }
-        imageStoreVO.setReadonly(readonly);
+        if (com.cloud.utils.StringUtils.isNotBlank(name)) {
+            imageStoreVO.setName(name);
+        }
+        if (capacityBytes != null) {
+            imageStoreVO.setTotalSize(capacityBytes);
+        }
+        if (readonly != null) {
+            imageStoreVO.setReadonly(readonly);
+        }
         _imageStoreDao.update(id, imageStoreVO);
         return imageStoreVO;
     }
 
+    @Override
+    public ImageStore updateImageStoreStatus(Long id, Boolean readonly) {
+        return updateImageStoreStatus(id, null, readonly, null);
+    }
+
     /**
      * @param poolId - Storage pool id for pool to update.
      * @param failOnChecks - If true, throw an error if pool type and state checks fail.
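Note: the four-argument overload treats null (or a blank name) as "leave unchanged", so single-field updates are possible. A hedged caller sketch (the id and size are made up; storageManager is assumed to be the injected StorageManager):

    // update only the capacity figure; name and readonly stay as persisted
    storageManager.updateImageStoreStatus(42L, null, null, 1099511627776L);
    // the legacy two-argument form now simply delegates with nulls
    storageManager.updateImageStoreStatus(42L, Boolean.TRUE);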
@@ -531,8 +531,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
 
     /**
      * Get the default network for the secondary storage VM, based on the zone it is in. Delegates to
-     * either {@link #getDefaultNetworkForZone(DataCenter)} or {@link #getDefaultNetworkForAdvancedSGZone(DataCenter)},
-     * depending on the zone network type and whether or not security groups are enabled in the zone.
+     * either {@link #getDefaultNetworkForAdvancedZone(DataCenter)} or {@link #getDefaultNetworkForBasicZone(DataCenter)},
+     * depending on the zone network type and whether security groups are enabled in the zone.
      * @param dc - The zone (DataCenter) of the secondary storage VM.
      * @return The default network for use with the secondary storage VM.
      */
@@ -0,0 +1,66 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package org.apache.cloudstack.storage.resource;
+
+import com.cloud.utils.script.Script;
+import org.apache.log4j.Logger;
+
+public class IpTablesHelper {
+    public static final Logger LOGGER = Logger.getLogger(IpTablesHelper.class);
+
+    public static final String OUTPUT_CHAIN = "OUTPUT";
+    public static final String INPUT_CHAIN = "INPUT";
+    public static final String INSERT = " -I ";
+    public static final String APPEND = " -A ";
+
+    public static boolean needsAdding(String chain, String rule) {
+        Script command = new Script("/bin/bash", LOGGER);
+        command.add("-c");
+        command.add("iptables -C " + chain + " " + rule);
+
+        String commandOutput = command.execute();
+        boolean needsAdding = (commandOutput != null && commandOutput.contains("iptables: Bad rule (does a matching rule exist in that chain?)."));
+        LOGGER.debug(String.format("Rule [%s], %s need adding to [%s] : %s",
+                rule,
+                needsAdding ? "does indeed" : "doesn't",
+                chain,
+                commandOutput
+        ));
+        return needsAdding;
+    }
+
+    public static String addConditionally(String chain, boolean insert, String rule, String errMsg) {
+        LOGGER.info(String.format("Adding rule [%s] to [%s] if required.", rule, chain));
+        if (needsAdding(chain, rule)) {
+            Script command = new Script("/bin/bash", LOGGER);
+            command.add("-c");
+            command.add("iptables" + (insert ? INSERT : APPEND) + chain + " " + rule);
+            String result = command.execute();
+            LOGGER.debug(String.format("Executed [%s] with result [%s]", command, result));
+            if (result != null) {
+                LOGGER.warn(String.format("%s , err = %s", errMsg, result));
+                return errMsg + result;
+            }
+        } else {
+            LOGGER.warn("Rule already defined in SVM: " + rule);
+        }
+        return null;
+    }
+}
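Note: a hedged usage sketch of the new helper (the rule string below is illustrative, not from the patch); addConditionally returns null both on success and when `iptables -C` reports the rule already present, and an error message otherwise:

    String rule = "-i eth2 -p tcp -m state --state NEW -m tcp --dport 8250 -j ACCEPT";
    // true = insert at the top of the chain (-I); false would append (-A)
    String err = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error opening port 8250");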
@@ -2329,15 +2329,14 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         if (!_inSystemVM) {
             return null;
         }
-        Script command = new Script("/bin/bash", s_logger);
         String intf = "eth1";
-        command.add("-c");
-        command.add("iptables -I OUTPUT -o " + intf + " -d " + destCidr + " -p tcp -m state --state NEW -m tcp -j ACCEPT");
+        String rule = String.format("-o %s -d %s -p tcp -m state --state NEW -m tcp -j ACCEPT", intf, destCidr);
+        String errMsg = String.format("Error in allowing outgoing to %s", destCidr);
 
-        String result = command.execute();
+        s_logger.info(String.format("Adding rule if required: " + rule));
+        String result = IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN, true, rule, errMsg);
         if (result != null) {
-            s_logger.warn("Error in allowing outgoing to " + destCidr + ", err=" + result);
-            return "Error in allowing outgoing to " + destCidr + ", err=" + result;
+            return result;
         }
 
         addRouteToInternalIpOrCidr(_localgw, _eth1ip, _eth1mask, destCidr);
@@ -2874,13 +2873,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
         if (result != null) {
             s_logger.warn("Error in starting sshd service err=" + result);
         }
-        command = new Script("/bin/bash", s_logger);
-        command.add("-c");
-        command.add("iptables -I INPUT -i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT");
-        result = command.execute();
-        if (result != null) {
-            s_logger.warn("Error in opening up ssh port err=" + result);
-        }
+        String rule = "-i eth1 -p tcp -m state --state NEW -m tcp --dport 3922 -j ACCEPT";
+        IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN, true, rule, "Error in opening up ssh port");
     }
 
     private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) {
@@ -48,6 +48,7 @@ import org.apache.cloudstack.storage.command.DownloadCommand;
 import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType;
 import org.apache.cloudstack.storage.command.DownloadProgressCommand;
 import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType;
+import org.apache.cloudstack.storage.resource.IpTablesHelper;
 import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource;
 import org.apache.cloudstack.storage.resource.SecondaryStorageResource;
 import org.apache.cloudstack.utils.security.ChecksumValue;
@@ -1225,17 +1226,14 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
     }
 
     private void blockOutgoingOnPrivate() {
-        Script command = new Script("/bin/bash", LOGGER);
-        String intf = "eth1";
-        command.add("-c");
-        command.add("iptables -A OUTPUT -o " + intf + " -p tcp -m state --state NEW -m tcp --dport " + "80" + " -j REJECT;" + "iptables -A OUTPUT -o " + intf +
-                " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j REJECT;");
-
-        String result = command.execute();
-        if (result != null) {
-            LOGGER.warn("Error in blocking outgoing to port 80/443 err=" + result);
-            return;
-        }
+        IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
+                , false
+                , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 80 -j REJECT;"
+                , "Error in blocking outgoing to port 80");
+        IpTablesHelper.addConditionally(IpTablesHelper.OUTPUT_CHAIN
+                , false
+                , "-o " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE + " -p tcp -m state --state NEW -m tcp --dport 443 -j REJECT;"
+                , "Error in blocking outgoing to port 443");
     }
 
     @Override
@@ -1261,17 +1259,19 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager
         if (result != null) {
             LOGGER.warn("Error in stopping httpd service err=" + result);
         }
-        String port = Integer.toString(TemplateConstants.DEFAULT_TMPLT_COPY_PORT);
-        String intf = TemplateConstants.DEFAULT_TMPLT_COPY_INTF;
 
-        command = new Script("/bin/bash", LOGGER);
-        command.add("-c");
-        command.add("iptables -I INPUT -i " + intf + " -p tcp -m state --state NEW -m tcp --dport " + port + " -j ACCEPT;" + "iptables -I INPUT -i " + intf +
-                " -p tcp -m state --state NEW -m tcp --dport " + "443" + " -j ACCEPT;");
-
-        result = command.execute();
+        result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
+                , true
+                , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport " + TemplateConstants.DEFAULT_TMPLT_COPY_PORT + " -j ACCEPT"
+                , "Error in opening up apache2 port " + TemplateConstants.TMPLT_COPY_INTF_PRIVATE);
         if (result != null) {
-            LOGGER.warn("Error in opening up apache2 port err=" + result);
             return;
         }
+        result = IpTablesHelper.addConditionally(IpTablesHelper.INPUT_CHAIN
+                , true
+                , "-i " + TemplateConstants.DEFAULT_TMPLT_COPY_INTF + " -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT;"
+                , "Error in opening up apache2 port 443");
+        if (result != null) {
+            return;
+        }
 
@@ -221,7 +221,7 @@ def save_iptables(command, iptables_file):
 
 def execute2(command, wait=True):
     """ Execute command """
-    logging.info("Executing: %s" % command)
+    logging.info("Executing2: %s" % command)
     p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
     if wait:
         p.wait()
@@ -97,21 +97,10 @@ export default {
       },
       {
         api: 'updateImageStore',
-        icon: 'stop-outlined',
-        label: 'label.action.image.store.read.only',
-        message: 'message.action.secondary.storage.read.only',
+        icon: 'edit-outlined',
+        label: 'label.edit',
         dataView: true,
-        defaultArgs: { readonly: true },
-        show: (record) => { return record.readonly === false }
-      },
-      {
-        api: 'updateImageStore',
-        icon: 'check-circle-outlined',
-        label: 'label.action.image.store.read.write',
-        message: 'message.action.secondary.storage.read.write',
-        dataView: true,
-        defaultArgs: { readonly: false },
-        show: (record) => { return record.readonly === true }
+        args: ['name', 'readonly', 'capacitybytes']
       },
       {
         api: 'deleteImageStore',
@@ -705,7 +705,6 @@ export default {
     },
     getOkProps () {
       if (this.selectedRowKeys.length > 0 && this.currentAction?.groupAction) {
-        return { props: { type: 'default' } }
       } else {
         return { props: { type: 'primary' } }
       }
@@ -371,12 +371,19 @@ public class Script implements Callable<String> {
                 //process completed successfully
                 if (_process.exitValue() == 0 || _process.exitValue() == exitValue) {
                     _logger.debug("Execution is successful.");
+                    String result;
+                    String method;
                     if (interpreter != null) {
-                        return interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
+                        _logger.debug("interpreting the result...");
+                        method = "result interpretation of execution: ";
+                        result = interpreter.drain() ? task.getResult() : interpreter.interpret(ir);
                     } else {
                         // null return exitValue apparently
-                        return String.valueOf(_process.exitValue());
+                        method = "return code of execution: ";
+                        result = String.valueOf(_process.exitValue());
                     }
+                    _logger.debug(method + result);
+                    return result;
                 } else { //process failed
                     break;
                 }
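Note: the refactor funnels both branches through a single exit so the chosen value can be logged before returning; the returned result itself is unchanged. A hedged sketch of the resulting debug output (prefixes come straight from the hunk, the values are made up):

    // with an interpreter:    "interpreting the result..." then "result interpretation of execution: <interpreted value>"
    // without an interpreter: "return code of execution: 0"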