apache/cloudstack (https://github.com/apache/cloudstack.git), commit 60c9c9adb6

Merge remote-tracking branch 'origin/4.19'

Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>
@@ -141,6 +141,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
     @Param(description = "the storage pool capabilities")
     private Map<String, String> caps;
 
+    @SerializedName(ApiConstants.MANAGED)
+    @Param(description = "whether this pool is managed or not")
+    private Boolean managed;
+
     public Map<String, String> getCaps() {
         return caps;
     }
@@ -383,4 +387,12 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
     public void setTagARule(Boolean tagARule) {
         isTagARule = tagARule;
     }
+
+    public Boolean getManaged() {
+        return managed;
+    }
+
+    public void setManaged(Boolean managed) {
+        this.managed = managed;
+    }
 }
@@ -533,11 +533,6 @@ public class TemplateServiceImpl implements TemplateService {
                 logger.info("Skip downloading template " + tmplt.getUniqueName() + " since no url is specified.");
                 continue;
             }
-            // if this is private template, skip sync to a new image store
-            if (isSkipTemplateStoreDownload(tmplt, zoneId)) {
-                logger.info("Skip sync downloading private template " + tmplt.getUniqueName() + " to a new image store");
-                continue;
-            }
 
             // if this is a region store, and there is already an DOWNLOADED entry there without install_path information, which
             // means that this is a duplicate entry from migration of previous NFS to staging.
plugins/storage/volume/linstor/CHANGELOG.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+# Changelog
+
+All notable changes to Linstor CloudStack plugin will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [2024-08-27]
+
+### Changed
+
+- Allow two primaries(+protocol c) is now set on resource-connection level instead of rd
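The changelog entry is terse, so here is a short sketch (not part of the commit) of the behaviour it describes, distilled from the LinstorStorageAdaptor hunk later in this diff. It assumes a connected DevelopersApi client, the resource name, and the two node names involved in a live migration; the import paths for DevelopersApi and ApiException are the usual LINSTOR Java client packages and are an assumption, since they are not shown in the diff itself.

```java
import com.linbit.linstor.api.ApiException;   // assumed client package, not shown in this diff
import com.linbit.linstor.api.DevelopersApi;  // assumed client package, not shown in this diff
import com.linbit.linstor.api.model.Properties;
import com.linbit.linstor.api.model.ResourceConnectionModify;

class AllowTwoPrimariesSketch {
    /**
     * Before: "allow-two-primaries" was overridden on the whole resource definition (rd).
     * After: it is overridden, together with protocol C, only on the resource connection
     * between the node currently using the resource and the local (migration target) node.
     */
    static void allowTwoPrimariesForMigration(DevelopersApi api, String rscName,
                                              String inUseNode, String localNodeName) throws ApiException {
        ResourceConnectionModify rcm = new ResourceConnectionModify();
        Properties props = new Properties();
        props.put("DrbdOptions/Net/allow-two-primaries", "yes");
        props.put("DrbdOptions/Net/protocol", "C");
        rcm.setOverrideProps(props);
        // old code instead called api.resourceDefinitionModify(rscName, rdm) with a ResourceDefinitionModify
        api.resourceConnectionModify(rscName, inUseNode, localNodeName, rcm);
    }
}
```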
@@ -16,6 +16,7 @@
 // under the License.
 package com.cloud.hypervisor.kvm.storage;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -26,6 +27,7 @@ import javax.annotation.Nonnull;
 
 import com.cloud.storage.Storage;
 import com.cloud.utils.exception.CloudRuntimeException;
 
+import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
 import org.apache.cloudstack.utils.qemu.QemuImg;
 import org.apache.cloudstack.utils.qemu.QemuImgException;
@@ -44,8 +46,8 @@ import com.linbit.linstor.api.model.ApiCallRcList;
 import com.linbit.linstor.api.model.Properties;
 import com.linbit.linstor.api.model.ProviderKind;
 import com.linbit.linstor.api.model.Resource;
+import com.linbit.linstor.api.model.ResourceConnectionModify;
 import com.linbit.linstor.api.model.ResourceDefinition;
-import com.linbit.linstor.api.model.ResourceDefinitionModify;
 import com.linbit.linstor.api.model.ResourceGroupSpawn;
 import com.linbit.linstor.api.model.ResourceMakeAvailable;
 import com.linbit.linstor.api.model.ResourceWithVolumes;
@@ -242,15 +244,19 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
      * @throws ApiException if any problem connecting to the Linstor controller
      */
     private void allow2PrimariesIfInUse(DevelopersApi api, String rscName) throws ApiException {
-        if (LinstorUtil.isResourceInUse(api, rscName)) {
+        String inUseNode = LinstorUtil.isResourceInUse(api, rscName);
+        if (inUseNode != null && !inUseNode.equalsIgnoreCase(localNodeName)) {
             // allow 2 primaries for live migration, should be removed by disconnect on the other end
-            ResourceDefinitionModify rdm = new ResourceDefinitionModify();
+            ResourceConnectionModify rcm = new ResourceConnectionModify();
             Properties props = new Properties();
             props.put("DrbdOptions/Net/allow-two-primaries", "yes");
-            rdm.setOverrideProps(props);
-            ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm);
+            props.put("DrbdOptions/Net/protocol", "C");
+            rcm.setOverrideProps(props);
+            ApiCallRcList answers = api.resourceConnectionModify(rscName, inUseNode, localNodeName, rcm);
             if (answers.hasError()) {
-                logger.error("Unable to set 'allow-two-primaries' on {} ", rscName);
+                logger.error(String.format(
+                    "Unable to set protocol C and 'allow-two-primaries' on %s/%s/%s",
+                    inUseNode, localNodeName, rscName));
                 // do not fail here as adding allow-two-primaries property is only a problem while live migrating
             }
         }
@@ -290,6 +296,23 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
         return true;
     }
 
+    private void removeTwoPrimariesRcProps(DevelopersApi api, String inUseNode, String rscName) throws ApiException {
+        ResourceConnectionModify rcm = new ResourceConnectionModify();
+        List<String> deleteProps = new ArrayList<>();
+        deleteProps.add("DrbdOptions/Net/allow-two-primaries");
+        deleteProps.add("DrbdOptions/Net/protocol");
+        rcm.deleteProps(deleteProps);
+        ApiCallRcList answers = api.resourceConnectionModify(rscName, localNodeName, inUseNode, rcm);
+        if (answers.hasError()) {
+            logger.error(
+                String.format("Failed to remove 'protocol' and 'allow-two-primaries' on %s/%s/%s: %s",
+                    localNodeName,
+                    inUseNode,
+                    rscName, LinstorUtil.getBestErrorMessage(answers)));
+            // do not fail here as removing allow-two-primaries property isn't fatal
+        }
+    }
+
     private boolean tryDisconnectLinstor(String volumePath, KVMStoragePool pool)
     {
         if (volumePath == null) {
@@ -319,9 +342,18 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
 
 
         if (optRsc.isPresent()) {
-            try {
             Resource rsc = optRsc.get();
+            try {
+                String inUseNode = LinstorUtil.isResourceInUse(api, rsc.getName());
+                if (inUseNode != null && !inUseNode.equalsIgnoreCase(localNodeName)) {
+                    removeTwoPrimariesRcProps(api, inUseNode, rsc.getName());
+                }
+            } catch (ApiException apiEx) {
+                logger.error(apiEx.getBestMessage());
+                // do not fail here as removing allow-two-primaries property or deleting diskless isn't fatal
+            }
+
             try {
                 // if diskless resource remove it, in the worst case it will be transformed to a tiebreaker
                 if (rsc.getFlags() != null &&
                     rsc.getFlags().contains(ApiConsts.FLAG_DRBD_DISKLESS) &&
@@ -329,17 +361,6 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
                     ApiCallRcList delAnswers = api.resourceDelete(rsc.getName(), localNodeName);
                     logLinstorAnswers(delAnswers);
                 }
-
-                // remove allow-two-primaries
-                ResourceDefinitionModify rdm = new ResourceDefinitionModify();
-                rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
-                ApiCallRcList answers = api.resourceDefinitionModify(rsc.getName(), rdm);
-                if (answers.hasError()) {
-                    logger.error(
-                        String.format("Failed to remove 'allow-two-primaries' on %s: %s",
-                            rsc.getName(), LinstorUtil.getBestErrorMessage(answers)));
-                    // do not fail here as removing allow-two-primaries property isn't fatal
-                }
             } catch (ApiException apiEx) {
                 logger.error(apiEx.getBestMessage());
                 // do not fail here as removing allow-two-primaries property or deleting diskless isn't fatal
@@ -192,17 +192,20 @@ public class LinstorUtil {
      *
      * @param api developer api object to use
      * @param rscName resource name to check in use state.
-     * @return True if a resource found that is in use(primary) state, else false.
+     * @return NodeName where the resource is inUse, if not in use `null`
      * @throws ApiException forwards api errors
      */
-    public static boolean isResourceInUse(DevelopersApi api, String rscName) throws ApiException {
+    public static String isResourceInUse(DevelopersApi api, String rscName) throws ApiException {
         List<Resource> rscs = api.resourceList(rscName, null, null);
         if (rscs != null) {
             return rscs.stream()
-                .anyMatch(rsc -> rsc.getState() != null && Boolean.TRUE.equals(rsc.getState().isInUse()));
+                .filter(rsc -> rsc.getState() != null && Boolean.TRUE.equals(rsc.getState().isInUse()))
+                .map(Resource::getNodeName)
+                .findFirst()
+                .orElse(null);
         }
         LOGGER.error("isResourceInUse: null returned from resourceList");
-        return false;
+        return null;
     }
 
     /**
@@ -172,6 +172,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
         poolResponse.setTags(pool.getTag());
         poolResponse.setIsTagARule(pool.getIsTagARule());
         poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
+        poolResponse.setManaged(storagePool.isManaged());
 
         // set async job
         if (pool.getJobId() != null) {
@@ -204,6 +205,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
 
     @Override
     public StoragePoolResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO pool) {
+        StoragePool storagePool = storagePoolDao.findById(pool.getId());
         StoragePoolResponse poolResponse = new StoragePoolResponse();
         poolResponse.setId(pool.getUuid());
         poolResponse.setName(pool.getName());
@@ -230,6 +232,17 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
         poolResponse.setCapacityIops(pool.getCapacityIops());
+
+        if (storagePool != null) {
+            poolResponse.setManaged(storagePool.isManaged());
+            if (storagePool.isManaged()) {
+                DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
+                PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver) store.getDriver();
+                long usedIops = driver.getUsedIops(storagePool);
+                poolResponse.setAllocatedIops(usedIops);
+            }
+        }
+
         poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
 
         // TODO: StatsCollector does not persist data
@@ -307,6 +307,8 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 import com.googlecode.ipv6.IPv6Network;
 
+import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites;
+
 public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable {
     public static final String PERACCOUNT = "peraccount";
     public static final String PERZONE = "perzone";
@@ -1320,6 +1322,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
             }
         }
 
+        if (type.equals(String.class)) {
+            if (name.equalsIgnoreCase(SecStorageAllowedInternalDownloadSites.key()) && StringUtils.isNotEmpty(value)) {
+                final String[] cidrs = value.split(",");
+                for (final String cidr : cidrs) {
+                    if (!NetUtils.isValidIp4(cidr) && !NetUtils.isValidIp6(cidr) && !NetUtils.getCleanIp4Cidr(cidr).equals(cidr)) {
+                        logger.error(String.format("Invalid CIDR %s value specified for the config %s", cidr, name));
+                        throw new InvalidParameterValueException(String.format("Invalid CIDR %s value specified for the config %s", cidr, name));
+                    }
+                }
+            }
+        }
+
         if (configuration == null ) {
             //range validation has to be done per case basis, for now
             //return in case of Configkey parameters
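To make the new check concrete: a value for this setting is accepted when every comma-separated entry is either a plain IPv4 address, a plain IPv6 address, or an IPv4 CIDR that is already on its network boundary (so getCleanIp4Cidr returns it unchanged). The snippet below is only an illustration with made-up values, assuming NetUtils lives in its usual com.cloud.utils.net package; the 10.0.151.5/20 to 10.0.144.0/20 normalisation matches the unit test added in this same commit.

```java
import com.cloud.utils.net.NetUtils;  // assumed package for the NetUtils used above

public class AllowedSitesValueSketch {
    public static void main(String[] args) {
        // Accepted entries: a plain IPv4 address, and a CIDR already on its network boundary.
        System.out.println(NetUtils.isValidIp4("192.168.10.5"));        // true
        System.out.println(NetUtils.getCleanIp4Cidr("10.0.144.0/20"));  // "10.0.144.0/20", equals the input
        // Rejected entry: a CIDR with host bits set, because its clean form differs from the input.
        System.out.println(NetUtils.getCleanIp4Cidr("10.0.151.5/20"));  // "10.0.144.0/20", differs, so invalid
    }
}
```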
@@ -4296,7 +4296,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             }
         } else if (storeForNewStoreScope.getScopeType() == ScopeType.HOST
                 && (storeForExistingStoreScope.getScopeType() == ScopeType.CLUSTER || storeForExistingStoreScope.getScopeType() == ScopeType.ZONE)) {
-            Long hostId = _vmInstanceDao.findById(existingVolume.getInstanceId()).getHostId();
+            VMInstanceVO vm = _vmInstanceDao.findById(existingVolume.getInstanceId());
+            Long hostId = vm.getHostId();
+            if (hostId == null) {
+                hostId = vm.getLastHostId();
+            }
             if (storeForNewStoreScope.getScopeId().equals(hostId)) {
                 return false;
             }
@@ -104,6 +104,10 @@ public class SnapshotHelper {
             return;
         }
 
+        if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) {
+            logger.debug(String.format("Expunge template for Snapshot [%s] is called for primary storage role. Not expunging it, " +
+                    "but we will still expunge the database reference of the snapshot for image storage role if any", snapInfo.getId()));
+        } else {
             logger.debug(String.format("Expunging snapshot [%s] due to it is a temporary backup to create a volume from snapshot. It is occurring because the global setting [%s]"
                     + " has the value [%s].", snapInfo.getId(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot));
@@ -113,6 +117,8 @@ public class SnapshotHelper {
                 logger.warn(String.format("Unable to delete the temporary snapshot [%s] on secondary storage due to [%s]. We still will expunge the database reference, consider"
                         + " manually deleting the file [%s].", snapInfo.getId(), ex.getMessage(), snapInfo.getPath()), ex);
             }
+        }
 
         long storeId = snapInfo.getDataStore().getId();
+        if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) {
             long zoneId = dataStorageManager.getStoreZoneId(storeId, snapInfo.getDataStore().getRole());
@@ -157,6 +157,8 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
 import com.cloud.vm.dao.UserVmDetailsDao;
 import com.cloud.vm.dao.VMInstanceDao;
 
+import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites;
+
 /**
  * Class to manage secondary storages. <br><br>
  * Possible secondary storage VM state transition cases:<br>
@@ -399,6 +401,9 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
         String[] cidrs = _allowedInternalSites.split(",");
         for (String cidr : cidrs) {
             if (NetUtils.isValidIp4Cidr(cidr) || NetUtils.isValidIp4(cidr) || !cidr.startsWith("0.0.0.0")) {
+                if (NetUtils.getCleanIp4Cidr(cidr).equals(cidr)) {
+                    logger.warn(String.format("Invalid CIDR %s in %s", cidr, SecStorageAllowedInternalDownloadSites.key()));
+                }
                 allowedCidrs.add(cidr);
             }
         }
@@ -153,7 +153,6 @@ class CsFile:
         logging.debug("Searching for %s string " % search)
 
         for index, line in enumerate(self.new_config):
-            print(' line = ' + line)
            if line.lstrip().startswith(ignoreLinesStartWith):
                continue
            if search in line:
@@ -22,8 +22,6 @@ import copy
 def merge(dbag, data):
     dbagc = copy.deepcopy(dbag)
 
-    print(dbag)
-    print(data)
     if "vpn_users" not in data:
         return dbagc
 
@@ -274,7 +274,7 @@ export default {
         this.$message.error(this.$t('message.error.save.setting'))
         this.$notification.error({
           message: this.$t('label.error'),
-          description: this.$t('message.error.save.setting')
+          description: error?.response?.data?.updateconfigurationresponse?.errortext || this.$t('message.error.save.setting')
         })
       }).finally(() => {
         this.valueLoading = false
@@ -628,6 +628,18 @@ public class NetUtils {
         return long2Ip(firstPart) + "/" + size;
     }
 
+    public static String getCleanIp4Cidr(final String cidr) {
+        if (!isValidIp4Cidr(cidr)) {
+            throw new CloudRuntimeException("Invalid CIDR: " + cidr);
+        }
+        String gateway = cidr.split("/")[0];
+        Long netmaskSize = Long.parseLong(cidr.split("/")[1]);
+        final long ip = ip2Long(gateway);
+        final long startNetMask = ip2Long(getCidrNetmask(netmaskSize));
+        final long start = (ip & startNetMask);
+        return String.format("%s/%s", long2Ip(start), netmaskSize);
+    }
+
     public static String[] getIpRangeFromCidr(final String cidr, final long size) {
         assert size < MAX_CIDR : "You do know this is not for ipv6 right? Keep it smaller than 32 but you have " + size;
         final String[] result = new String[2];
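The new helper normalises a CIDR by AND-ing the address with its netmask; a worked example of that step, using the 10.0.151.5/20 value exercised by the unit test below:

```java
// address   10.0.151.5     -> 00001010.00000000.10010111.00000101
// /20 mask  255.255.240.0  -> 11111111.11111111.11110000.00000000
// AND       10.0.144.0     -> 00001010.00000000.10010000.00000000
// so getCleanIp4Cidr("10.0.151.5/20") returns "10.0.144.0/20".
```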
@@ -296,6 +296,17 @@ public class NetUtilsTest {
         assertTrue(NetUtils.isValidIp4Cidr(cidrThird));;
     }
 
+    @Test
+    public void testGetCleanIp4Cidr() throws Exception {
+        final String cidrFirst = "10.0.144.0/20";
+        final String cidrSecond = "10.0.151.5/20";
+        final String cidrThird = "10.0.144.10/21";
+
+        assertEquals(cidrFirst, NetUtils.getCleanIp4Cidr(cidrFirst));
+        assertEquals("10.0.144.0/20", NetUtils.getCleanIp4Cidr(cidrSecond));
+        assertEquals("10.0.144.0/21", NetUtils.getCleanIp4Cidr(cidrThird));;
+    }
+
     @Test
     public void testIsValidCidrList() throws Exception {
         final String cidrFirst = "10.0.144.0/20,1.2.3.4/32,5.6.7.8/24";