Mirror of https://github.com/apache/cloudstack.git (synced 2025-10-26 08:42:29 +01:00)

Merge branch '4.20' into missing-labels-nsx
This commit is contained in: commit 21191fae34

1  .github/workflows/ui.yml (vendored)
.github/workflows/ui.yml
@@ -56,6 +56,7 @@ jobs:
 npm run test:unit

 - uses: codecov/codecov-action@v4
+if: github.repository == 'apache/cloudstack'
 with:
 working-directory: ui
 files: ./coverage/lcov.info
@@ -823,7 +823,7 @@ public class AgentProperties{
 private T defaultValue;
 private Class<T> typeClass;

-Property(String name, T value) {
+public Property(String name, T value) {
 init(name, value);
 }
@@ -171,6 +171,13 @@ public interface VolumeApiService {
 * </table>
 */
 boolean doesStoragePoolSupportDiskOffering(StoragePool destPool, DiskOffering diskOffering);

+/**
+* Checks if the storage pool supports the required disk offering tags
+* destPool the storage pool to check the disk offering tags
+* diskOfferingTags the tags that should be supported
+* return whether the tags are supported in the storage pool
+*/
 boolean doesStoragePoolSupportDiskOfferingTags(StoragePool destPool, String diskOfferingTags);

 Volume destroyVolume(long volumeId, Account caller, boolean expunge, boolean forceExpunge);
@@ -16,8 +16,6 @@
 // under the License.
 package com.cloud.agent.api;

-import org.apache.cloudstack.vm.UnmanagedInstanceTO;
-
 public class ConvertInstanceAnswer extends Answer {

 private String temporaryConvertUuid;
@@ -25,16 +23,6 @@ public class ConvertInstanceAnswer extends Answer {
 public ConvertInstanceAnswer() {
 super();
 }
-private UnmanagedInstanceTO convertedInstance;
-
-public ConvertInstanceAnswer(Command command, boolean success, String details) {
-super(command, success, details);
-}
-
-public ConvertInstanceAnswer(Command command, UnmanagedInstanceTO convertedInstance) {
-super(command, true, "");
-this.convertedInstance = convertedInstance;
-}

 public ConvertInstanceAnswer(Command command, String temporaryConvertUuid) {
 super(command, true, "");
@@ -44,8 +32,4 @@ public class ConvertInstanceAnswer extends Answer {
 public String getTemporaryConvertUuid() {
 return temporaryConvertUuid;
 }
-
-public UnmanagedInstanceTO getConvertedInstance() {
-return convertedInstance;
-}
 }
@@ -20,13 +20,10 @@ import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.RemoteInstanceTO;
 import com.cloud.hypervisor.Hypervisor;

-import java.util.List;
-
 public class ConvertInstanceCommand extends Command {

 private RemoteInstanceTO sourceInstance;
 private Hypervisor.HypervisorType destinationHypervisorType;
-private List<String> destinationStoragePools;
 private DataStoreTO conversionTemporaryLocation;
 private String templateDirOnConversionLocation;
 private boolean checkConversionSupport;
@@ -36,12 +33,10 @@ public class ConvertInstanceCommand extends Command {
 public ConvertInstanceCommand() {
 }

-public ConvertInstanceCommand(RemoteInstanceTO sourceInstance, Hypervisor.HypervisorType destinationHypervisorType,
-List<String> destinationStoragePools, DataStoreTO conversionTemporaryLocation,
+public ConvertInstanceCommand(RemoteInstanceTO sourceInstance, Hypervisor.HypervisorType destinationHypervisorType, DataStoreTO conversionTemporaryLocation,
 String templateDirOnConversionLocation, boolean checkConversionSupport, boolean exportOvfToConversionLocation) {
 this.sourceInstance = sourceInstance;
 this.destinationHypervisorType = destinationHypervisorType;
-this.destinationStoragePools = destinationStoragePools;
 this.conversionTemporaryLocation = conversionTemporaryLocation;
 this.templateDirOnConversionLocation = templateDirOnConversionLocation;
 this.checkConversionSupport = checkConversionSupport;
@@ -56,10 +51,6 @@ public class ConvertInstanceCommand extends Command {
 return destinationHypervisorType;
 }

-public List<String> getDestinationStoragePools() {
-return destinationStoragePools;
-}
-
 public DataStoreTO getConversionTemporaryLocation() {
 return conversionTemporaryLocation;
 }
@@ -39,9 +39,7 @@ import java.util.Map;

 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLContext;
-import javax.net.ssl.TrustManager;

-import org.apache.cloudstack.utils.security.SSLUtils;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.httpclient.HttpStatus;
 import org.apache.commons.io.IOUtils;
@@ -55,6 +53,7 @@ import org.apache.http.client.methods.HttpUriRequest;
 import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClients;
+import org.apache.http.ssl.SSLContexts;
 import org.apache.http.util.EntityUtils;

 import com.cloud.utils.Pair;
@@ -120,10 +119,10 @@ public class HttpsDirectTemplateDownloader extends DirectTemplateDownloaderImpl
 String password = "changeit";
 defaultKeystore.load(is, password.toCharArray());
 }
-TrustManager[] tm = HttpsMultiTrustManager.getTrustManagersFromKeyStores(customKeystore, defaultKeystore);
-SSLContext sslContext = SSLUtils.getSSLContext();
-sslContext.init(null, tm, null);
-return sslContext;
+return SSLContexts.custom()
+.loadTrustMaterial(customKeystore, null)
+.loadTrustMaterial(defaultKeystore, null)
+.build();
 } catch (KeyStoreException | NoSuchAlgorithmException | CertificateException | IOException | KeyManagementException e) {
 logger.error(String.format("Failure getting SSL context for HTTPS downloader, using default SSL context: %s", e.getMessage()), e);
 try {
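Note: the replacement above swaps the custom HttpsMultiTrustManager (deleted below) for Apache HttpClient's SSLContextBuilder, which can accumulate trust material from several keystores. A minimal, standalone sketch of that builder pattern, with illustrative class and method names that are not part of the patch:

    import java.security.GeneralSecurityException;
    import java.security.KeyStore;
    import javax.net.ssl.SSLContext;
    import org.apache.http.ssl.SSLContexts;

    public final class MultiTrustStoreSslContextSketch {
        // Builds an SSLContext that trusts certificates from both keystores.
        // Passing a null TrustStrategy keeps each keystore's default trust checks.
        public static SSLContext build(KeyStore customKeystore, KeyStore defaultKeystore) throws GeneralSecurityException {
            return SSLContexts.custom()
                    .loadTrustMaterial(customKeystore, null)
                    .loadTrustMaterial(defaultKeystore, null)
                    .build();
        }
    }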
@@ -1,102 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.cloudstack.direct.download;
-
-import java.security.KeyStore;
-import java.security.KeyStoreException;
-import java.security.NoSuchAlgorithmException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.TrustManagerFactory;
-import javax.net.ssl.X509TrustManager;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-public class HttpsMultiTrustManager implements X509TrustManager {
-
-private final List<X509TrustManager> trustManagers;
-
-public HttpsMultiTrustManager(KeyStore... keystores) {
-List<X509TrustManager> trustManagers = new ArrayList<>();
-trustManagers.add(getTrustManager(null));
-for (KeyStore keystore : keystores) {
-trustManagers.add(getTrustManager(keystore));
-}
-this.trustManagers = ImmutableList.copyOf(trustManagers);
-}
-
-public static TrustManager[] getTrustManagersFromKeyStores(KeyStore... keyStore) {
-return new TrustManager[] { new HttpsMultiTrustManager(keyStore) };
-
-}
-
-@Override
-public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
-for (X509TrustManager trustManager : trustManagers) {
-try {
-trustManager.checkClientTrusted(chain, authType);
-return;
-} catch (CertificateException ignored) {}
-}
-throw new CertificateException("None of the TrustManagers trust this certificate chain");
-}
-
-@Override
-public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
-for (X509TrustManager trustManager : trustManagers) {
-try {
-trustManager.checkServerTrusted(chain, authType);
-return;
-} catch (CertificateException ignored) {}
-}
-throw new CertificateException("None of the TrustManagers trust this certificate chain");
-}
-
-@Override
-public X509Certificate[] getAcceptedIssuers() {
-ImmutableList.Builder<X509Certificate> certificates = ImmutableList.builder();
-for (X509TrustManager trustManager : trustManagers) {
-for (X509Certificate cert : trustManager.getAcceptedIssuers()) {
-certificates.add(cert);
-}
-}
-return Iterables.toArray(certificates.build(), X509Certificate.class);
-}
-
-public X509TrustManager getTrustManager(KeyStore keystore) {
-return getTrustManager(TrustManagerFactory.getDefaultAlgorithm(), keystore);
-}
-
-public X509TrustManager getTrustManager(String algorithm, KeyStore keystore) {
-TrustManagerFactory factory;
-try {
-factory = TrustManagerFactory.getInstance(algorithm);
-factory.init(keystore);
-return Iterables.getFirst(Iterables.filter(
-Arrays.asList(factory.getTrustManagers()), X509TrustManager.class), null);
-} catch (NoSuchAlgorithmException | KeyStoreException e) {
-e.printStackTrace();
-}
-return null;
-}
-}
@@ -100,8 +100,6 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr

 DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "name");

-DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "resource_id", "resource_type");
-
 DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "cpu");
 DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "speed");
 DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "ram_size");
@@ -37,4 +37,6 @@ public interface UsageJobDao extends GenericDao<UsageJobVO, Long> {
 UsageJobVO isOwner(String hostname, int pid);

 void updateJobSuccess(Long jobId, long startMillis, long endMillis, long execTime, boolean success);
+
+void removeLastOpenJobsOwned(String hostname, int pid);
 }
@@ -22,6 +22,7 @@ import java.util.Date;
 import java.util.List;


+import org.apache.commons.collections.CollectionUtils;
 import org.springframework.stereotype.Component;

 import com.cloud.usage.UsageJobVO;
@@ -114,7 +115,7 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
 public UsageJobVO isOwner(String hostname, int pid) {
 TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
 try {
-if ((hostname == null) || (pid <= 0)) {
+if (hostname == null || pid <= 0) {
 return null;
 }

@@ -174,7 +175,7 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
 SearchCriteria<UsageJobVO> sc = createSearchCriteria();
 sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0));
 sc.addAnd("jobType", SearchCriteria.Op.EQ, Integer.valueOf(UsageJobVO.JOB_TYPE_SINGLE));
-sc.addAnd("scheduled", SearchCriteria.Op.EQ, Integer.valueOf(0));
+sc.addAnd("scheduled", SearchCriteria.Op.EQ, Integer.valueOf(UsageJobVO.JOB_NOT_SCHEDULED));
 List<UsageJobVO> jobs = search(sc, filter);

 if ((jobs == null) || jobs.isEmpty()) {
@@ -194,4 +195,36 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
 }
 return jobs.get(0).getHeartbeat();
 }

+private List<UsageJobVO> getLastOpenJobsOwned(String hostname, int pid) {
+SearchCriteria<UsageJobVO> sc = createSearchCriteria();
+sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0));
+sc.addAnd("host", SearchCriteria.Op.EQ, hostname);
+if (pid > 0) {
+sc.addAnd("pid", SearchCriteria.Op.EQ, Integer.valueOf(pid));
+}
+return listBy(sc);
+}
+
+@Override
+public void removeLastOpenJobsOwned(String hostname, int pid) {
+if (hostname == null) {
+return;
+}
+
+TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
+try {
+List<UsageJobVO> jobs = getLastOpenJobsOwned(hostname, pid);
+if (CollectionUtils.isNotEmpty(jobs)) {
+logger.info("Found {} opens job, to remove", jobs.size());
+for (UsageJobVO job : jobs) {
+logger.debug("Removing job - id: {}, pid: {}, job type: {}, scheduled: {}, heartbeat: {}",
+job.getId(), job.getPid(), job.getJobType(), job.getScheduled(), job.getHeartbeat());
+remove(job.getId());
+}
+}
+} finally {
+txn.close();
+}
+}
 }
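The new removeLastOpenJobsOwned implementation above targets usage jobs whose endMillis is still 0, i.e. jobs that a previous run of the usage server opened but never closed. A hypothetical caller, not part of this patch, would invoke it once at startup before scheduling new work:

    // Illustrative only: clear any open usage jobs this host/pid left behind so that
    // stale rows do not linger in the usage job table.
    void cleanupStaleUsageJobs(UsageJobDao usageJobDao, String hostname, int pid) {
        usageJobDao.removeLastOpenJobsOwned(hostname, pid);
    }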
@@ -51,6 +51,7 @@ import javax.inject.Inject;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.Date;
 import java.util.List;
 import java.util.Locale;
@@ -162,6 +163,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co

 if (VirtualMachine.State.Stopped.equals(vm.getState())) {
 List<VolumeVO> vmVolumes = volumeDao.findByInstance(vm.getId());
+vmVolumes.sort(Comparator.comparing(Volume::getDeviceId));
 List<String> volumePaths = getVolumePaths(vmVolumes);
 command.setVolumePaths(volumePaths);
 }
@@ -212,7 +214,10 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
 @Override
 public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) {
 List<Backup.VolumeInfo> backedVolumes = backup.getBackedUpVolumes();
-List<VolumeVO> volumes = backedVolumes.stream().map(volume -> volumeDao.findByUuid(volume.getUuid())).collect(Collectors.toList());
+List<VolumeVO> volumes = backedVolumes.stream()
+.map(volume -> volumeDao.findByUuid(volume.getUuid()))
+.sorted((v1, v2) -> Long.compare(v1.getDeviceId(), v2.getDeviceId()))
+.collect(Collectors.toList());

 LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup);
 BackupRepository backupRepository = getBackupRepository(vm, backup);
@@ -246,9 +251,13 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
 if (Objects.isNull(storagePool)) {
 throw new CloudRuntimeException("Unable to find storage pool associated to the volume");
 }
-String volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid());
+String volumePathPrefix;
 if (ScopeType.HOST.equals(storagePool.getScope())) {
 volumePathPrefix = storagePool.getPath();
+} else if (Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType())) {
+volumePathPrefix = storagePool.getPath();
+} else {
+volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid());
 }
 volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath()));
 }
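The two sorts added above appear to serve one goal: the stopped-VM backup path and the restore path should enumerate a VM's volumes in the same order, so ordering both lists by device id keeps the i-th backed-up path and the i-th restored volume referring to the same disk. A small illustrative helper expressing that idea (not part of the patch):

    import java.util.Comparator;
    import java.util.List;
    import java.util.function.ToLongFunction;

    final class DeviceOrderSketch {
        // Sort any volume-like list by its device id so two code paths that walk the
        // list by index agree on which disk each index refers to.
        static <T> void sortByDeviceId(List<T> volumes, ToLongFunction<T> deviceId) {
            volumes.sort(Comparator.comparingLong(deviceId));
        }
    }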
@@ -3368,7 +3368,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
 if (!meetRequirements) {
 return false;
 }
-return isUbuntuHost() || isIoUringSupportedByQemu();
+return isUbuntuOrDebianHost() || isIoUringSupportedByQemu();
 }

 /**
@@ -3381,13 +3381,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
 return diskBus != DiskDef.DiskBus.IDE || getHypervisorQemuVersion() >= HYPERVISOR_QEMU_VERSION_IDE_DISCARD_FIXED;
 }

-public boolean isUbuntuHost() {
+public boolean isUbuntuOrDebianHost() {
 Map<String, String> versionString = getVersionStrings();
 String hostKey = "Host.OS";
 if (MapUtils.isEmpty(versionString) || !versionString.containsKey(hostKey) || versionString.get(hostKey) == null) {
 return false;
 }
-return versionString.get(hostKey).equalsIgnoreCase("ubuntu");
+return versionString.get(hostKey).equalsIgnoreCase("ubuntu")
+|| versionString.get(hostKey).toLowerCase().startsWith("debian");
 }

 private KVMPhysicalDisk getPhysicalDiskFromNfsStore(String dataStoreUrl, DataTO data) {
@@ -5357,14 +5358,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv

 public boolean hostSupportsInstanceConversion() {
 int exitValue = Script.runSimpleBashScriptForExitValue(INSTANCE_CONVERSION_SUPPORTED_CHECK_CMD);
-if (isUbuntuHost() && exitValue == 0) {
+if (isUbuntuOrDebianHost() && exitValue == 0) {
 exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_NBDKIT_PKG_CHECK_CMD);
 }
 return exitValue == 0;
 }

 public boolean hostSupportsWindowsGuestConversion() {
-if (isUbuntuHost()) {
+if (isUbuntuOrDebianHost()) {
 int exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD);
 return exitValue == 0;
 }
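The isUbuntuHost to isUbuntuOrDebianHost rename above widens the Host.OS check so plain Debian hosts take the same dpkg/nbdkit code paths as Ubuntu. The check in isolation, as an illustrative sketch (the example value is hypothetical):

    // Illustrative only: "Ubuntu" matches case-insensitively, and any Host.OS value
    // starting with "debian" (for example "Debian GNU/Linux") now also qualifies.
    static boolean isUbuntuOrDebian(String hostOs) {
        if (hostOs == null) {
            return false;
        }
        return hostOs.equalsIgnoreCase("ubuntu") || hostOs.toLowerCase().startsWith("debian");
    }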
@@ -32,7 +32,7 @@ public class LibvirtCheckConvertInstanceCommandWrapper extends CommandWrapper<Ch
 public Answer execute(CheckConvertInstanceCommand cmd, LibvirtComputingResource serverResource) {
 if (!serverResource.hostSupportsInstanceConversion()) {
 String msg = String.format("Cannot convert the instance from VMware as the virt-v2v binary is not found on host %s. " +
-"Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuHost()? ", nbdkit" : "");
+"Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuOrDebianHost()? ", nbdkit" : "");
 logger.info(msg);
 return new CheckConvertInstanceAnswer(cmd, false, msg);
 }
@@ -18,22 +18,12 @@
 //
 package com.cloud.hypervisor.kvm.resource.wrapper;

-import java.io.BufferedInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
 import java.net.URLEncoder;
 import java.nio.charset.Charset;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
-import java.util.stream.Collectors;

 import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
-import org.apache.cloudstack.vm.UnmanagedInstanceTO;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;

 import com.cloud.agent.api.Answer;
@@ -44,17 +34,12 @@ import com.cloud.agent.api.to.NfsTO;
 import com.cloud.agent.api.to.RemoteInstanceTO;
 import com.cloud.hypervisor.Hypervisor;
 import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
-import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
 import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
-import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
 import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
 import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
 import com.cloud.resource.CommandWrapper;
 import com.cloud.resource.ResourceWrapper;
-import com.cloud.storage.Storage;
 import com.cloud.utils.FileUtil;
-import com.cloud.utils.Pair;
-import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.script.OutputInterpreter;
 import com.cloud.utils.script.Script;

@@ -75,9 +60,9 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert

 if (cmd.getCheckConversionSupport() && !serverResource.hostSupportsInstanceConversion()) {
 String msg = String.format("Cannot convert the instance %s from VMware as the virt-v2v binary is not found. " +
-"Please install virt-v2v%s on the host before attempting the instance conversion.", sourceInstanceName, serverResource.isUbuntuHost()? ", nbdkit" : "");
+"Please install virt-v2v%s on the host before attempting the instance conversion.", sourceInstanceName, serverResource.isUbuntuOrDebianHost()? ", nbdkit" : "");
 logger.info(msg);
-return new ConvertInstanceAnswer(cmd, false, msg);
+return new Answer(cmd, false, msg);
 }

 if (!areSourceAndDestinationHypervisorsSupported(sourceHypervisorType, destinationHypervisorType)) {
@@ -85,7 +70,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 String.format("The destination hypervisor type is %s, KVM was expected, cannot handle it", destinationHypervisorType) :
 String.format("The source hypervisor type %s is not supported for KVM conversion", sourceHypervisorType);
 logger.error(err);
-return new ConvertInstanceAnswer(cmd, false, err);
+return new Answer(cmd, false, err);
 }

 final KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr();
@@ -103,7 +88,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 if (StringUtils.isBlank(exportInstanceOVAUrl)) {
 String err = String.format("Couldn't export OVA for the VM %s, due to empty url", sourceInstanceName);
 logger.error(err);
-return new ConvertInstanceAnswer(cmd, false, err);
+return new Answer(cmd, false, err);
 }

 int noOfThreads = cmd.getThreadsCountToExportOvf();
@@ -117,7 +102,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 if (!ovfExported) {
 String err = String.format("Export OVA for the VM %s failed", sourceInstanceName);
 logger.error(err);
-return new ConvertInstanceAnswer(cmd, false, err);
+return new Answer(cmd, false, err);
 }
 sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName);
 } else {
@@ -140,7 +125,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 "has a different virt-v2v version.",
 ovfTemplateDirOnConversionLocation);
 logger.error(err);
-return new ConvertInstanceAnswer(cmd, false, err);
+return new Answer(cmd, false, err);
 }
 return new ConvertInstanceAnswer(cmd, temporaryConvertUuid);
 } catch (Exception e) {
@@ -148,7 +133,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 sourceInstanceName, sourceHypervisorType, e.getMessage());
 logger.error(error, e);
 cleanupSecondaryStorage = true;
-return new ConvertInstanceAnswer(cmd, false, error);
+return new Answer(cmd, false, error);
 } finally {
 if (ovfExported && StringUtils.isNotBlank(ovfTemplateDirOnConversionLocation)) {
 String sourceOVFDir = String.format("%s/%s", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
@@ -205,55 +190,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 encodedUsername, encodedPassword, vcenter, datacenter, vm);
 }

-protected List<KVMPhysicalDisk> getTemporaryDisksFromParsedXml(KVMStoragePool pool, LibvirtDomainXMLParser xmlParser, String convertedBasePath) {
-List<LibvirtVMDef.DiskDef> disksDefs = xmlParser.getDisks();
-disksDefs = disksDefs.stream().filter(x -> x.getDiskType() == LibvirtVMDef.DiskDef.DiskType.FILE &&
-x.getDeviceType() == LibvirtVMDef.DiskDef.DeviceType.DISK).collect(Collectors.toList());
-if (CollectionUtils.isEmpty(disksDefs)) {
-String err = String.format("Cannot find any disk defined on the converted XML domain %s.xml", convertedBasePath);
-logger.error(err);
-throw new CloudRuntimeException(err);
-}
-sanitizeDisksPath(disksDefs);
-return getPhysicalDisksFromDefPaths(disksDefs, pool);
-}
-
-private List<KVMPhysicalDisk> getPhysicalDisksFromDefPaths(List<LibvirtVMDef.DiskDef> disksDefs, KVMStoragePool pool) {
-List<KVMPhysicalDisk> disks = new ArrayList<>();
-for (LibvirtVMDef.DiskDef diskDef : disksDefs) {
-KVMPhysicalDisk physicalDisk = pool.getPhysicalDisk(diskDef.getDiskPath());
-disks.add(physicalDisk);
-}
-return disks;
-}
-
-protected List<KVMPhysicalDisk> getTemporaryDisksWithPrefixFromTemporaryPool(KVMStoragePool pool, String path, String prefix) {
-String msg = String.format("Could not parse correctly the converted XML domain, checking for disks on %s with prefix %s", path, prefix);
-logger.info(msg);
-pool.refresh();
-List<KVMPhysicalDisk> disksWithPrefix = pool.listPhysicalDisks()
-.stream()
-.filter(x -> x.getName().startsWith(prefix) && !x.getName().endsWith(".xml"))
-.collect(Collectors.toList());
-if (CollectionUtils.isEmpty(disksWithPrefix)) {
-msg = String.format("Could not find any converted disk with prefix %s on temporary location %s", prefix, path);
-logger.error(msg);
-throw new CloudRuntimeException(msg);
-}
-return disksWithPrefix;
-}
-
-private void cleanupDisksAndDomainFromTemporaryLocation(List<KVMPhysicalDisk> disks,
-KVMStoragePool temporaryStoragePool,
-String temporaryConvertUuid) {
-for (KVMPhysicalDisk disk : disks) {
-logger.info(String.format("Cleaning up temporary disk %s after conversion from temporary location", disk.getName()));
-temporaryStoragePool.deletePhysicalDisk(disk.getName(), Storage.ImageFormat.QCOW2);
-}
-logger.info(String.format("Cleaning up temporary domain %s after conversion from temporary location", temporaryConvertUuid));
-FileUtil.deleteFiles(temporaryStoragePool.getLocalPath(), temporaryConvertUuid, ".xml");
-}

 protected void sanitizeDisksPath(List<LibvirtVMDef.DiskDef> disks) {
 for (LibvirtVMDef.DiskDef disk : disks) {
 String[] diskPathParts = disk.getDiskPath().split("/");
@@ -262,114 +198,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 }
 }

-protected List<KVMPhysicalDisk> moveTemporaryDisksToDestination(List<KVMPhysicalDisk> temporaryDisks,
-List<String> destinationStoragePools,
-KVMStoragePoolManager storagePoolMgr) {
-List<KVMPhysicalDisk> targetDisks = new ArrayList<>();
-if (temporaryDisks.size() != destinationStoragePools.size()) {
-String warn = String.format("Discrepancy between the converted instance disks (%s) " +
-"and the expected number of disks (%s)", temporaryDisks.size(), destinationStoragePools.size());
-logger.warn(warn);
-}
-for (int i = 0; i < temporaryDisks.size(); i++) {
-String poolPath = destinationStoragePools.get(i);
-KVMStoragePool destinationPool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, poolPath);
-if (destinationPool == null) {
-String err = String.format("Could not find a storage pool by URI: %s", poolPath);
-logger.error(err);
-continue;
-}
-if (destinationPool.getType() != Storage.StoragePoolType.NetworkFilesystem) {
-String err = String.format("Storage pool by URI: %s is not an NFS storage", poolPath);
-logger.error(err);
-continue;
-}
-KVMPhysicalDisk sourceDisk = temporaryDisks.get(i);
-if (logger.isDebugEnabled()) {
-String msg = String.format("Trying to copy converted instance disk number %s from the temporary location %s" +
-" to destination storage pool %s", i, sourceDisk.getPool().getLocalPath(), destinationPool.getUuid());
-logger.debug(msg);
-}
-
-String destinationName = UUID.randomUUID().toString();
-
-KVMPhysicalDisk destinationDisk = storagePoolMgr.copyPhysicalDisk(sourceDisk, destinationName, destinationPool, 7200 * 1000);
-targetDisks.add(destinationDisk);
-}
-return targetDisks;
-}
-
-private UnmanagedInstanceTO getConvertedUnmanagedInstance(String baseName,
-List<KVMPhysicalDisk> vmDisks,
-LibvirtDomainXMLParser xmlParser) {
-UnmanagedInstanceTO instanceTO = new UnmanagedInstanceTO();
-instanceTO.setName(baseName);
-instanceTO.setDisks(getUnmanagedInstanceDisks(vmDisks, xmlParser));
-instanceTO.setNics(getUnmanagedInstanceNics(xmlParser));
-return instanceTO;
-}
-
-private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(LibvirtDomainXMLParser xmlParser) {
-List<UnmanagedInstanceTO.Nic> nics = new ArrayList<>();
-if (xmlParser != null) {
-List<LibvirtVMDef.InterfaceDef> interfaces = xmlParser.getInterfaces();
-for (LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
-UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
-nic.setMacAddress(interfaceDef.getMacAddress());
-nic.setNicId(interfaceDef.getBrName());
-nic.setAdapterType(interfaceDef.getModel().toString());
-nics.add(nic);
-}
-}
-return nics;
-}
-
-protected List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<KVMPhysicalDisk> vmDisks, LibvirtDomainXMLParser xmlParser) {
-List<UnmanagedInstanceTO.Disk> instanceDisks = new ArrayList<>();
-List<LibvirtVMDef.DiskDef> diskDefs = xmlParser != null ? xmlParser.getDisks() : null;
-for (int i = 0; i< vmDisks.size(); i++) {
-KVMPhysicalDisk physicalDisk = vmDisks.get(i);
-KVMStoragePool storagePool = physicalDisk.getPool();
-UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
-disk.setPosition(i);
-Pair<String, String> storagePoolHostAndPath = getNfsStoragePoolHostAndPath(storagePool);
-disk.setDatastoreHost(storagePoolHostAndPath.first());
-disk.setDatastorePath(storagePoolHostAndPath.second());
-disk.setDatastoreName(storagePool.getUuid());
-disk.setDatastoreType(storagePool.getType().name());
-disk.setCapacity(physicalDisk.getVirtualSize());
-disk.setFileBaseName(physicalDisk.getName());
-if (CollectionUtils.isNotEmpty(diskDefs)) {
-LibvirtVMDef.DiskDef diskDef = diskDefs.get(i);
-disk.setController(diskDef.getBusType() != null ? diskDef.getBusType().toString() : LibvirtVMDef.DiskDef.DiskBus.VIRTIO.toString());
-} else {
-// If the job is finished but we cannot parse the XML, the guest VM can use the virtio driver
-disk.setController(LibvirtVMDef.DiskDef.DiskBus.VIRTIO.toString());
-}
-instanceDisks.add(disk);
-}
-return instanceDisks;
-}
-
-protected Pair<String, String> getNfsStoragePoolHostAndPath(KVMStoragePool storagePool) {
-String sourceHostIp = null;
-String sourcePath = null;
-List<String[]> commands = new ArrayList<>();
-commands.add(new String[]{Script.getExecutableAbsolutePath("mount")});
-commands.add(new String[]{Script.getExecutableAbsolutePath("grep"), storagePool.getLocalPath()});
-String storagePoolMountPoint = Script.executePipedCommands(commands, 0).second();
-logger.debug(String.format("NFS Storage pool: %s - local path: %s, mount point: %s", storagePool.getUuid(), storagePool.getLocalPath(), storagePoolMountPoint));
-if (StringUtils.isNotEmpty(storagePoolMountPoint)) {
-String[] res = storagePoolMountPoint.strip().split(" ");
-res = res[0].split(":");
-if (res.length > 1) {
-sourceHostIp = res[0].strip();
-sourcePath = res[1].strip();
-}
-}
-return new Pair<>(sourceHostIp, sourcePath);
-}

 private boolean exportOVAFromVMOnVcenter(String vmExportUrl,
 String targetOvfDir,
 int noOfThreads,
@@ -412,27 +240,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
 return exitValue == 0;
 }

-protected LibvirtDomainXMLParser parseMigratedVMXmlDomain(String installPath) throws IOException {
-String xmlPath = String.format("%s.xml", installPath);
-if (!new File(xmlPath).exists()) {
-String err = String.format("Conversion failed. Unable to find the converted XML domain, expected %s", xmlPath);
-logger.error(err);
-throw new CloudRuntimeException(err);
-}
-InputStream is = new BufferedInputStream(new FileInputStream(xmlPath));
-String xml = IOUtils.toString(is, Charset.defaultCharset());
-final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
-try {
-parser.parseDomainXML(xml);
-return parser;
-} catch (RuntimeException e) {
-String err = String.format("Error parsing the converted instance XML domain at %s: %s", xmlPath, e.getMessage());
-logger.error(err, e);
-logger.debug(xml);
-return null;
-}
-}

 protected String encodeUsername(String username) {
 return URLEncoder.encode(username, Charset.defaultCharset());
 }
@@ -43,7 +43,7 @@ public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyComman
 public Answer execute(final ReadyCommand command, final LibvirtComputingResource libvirtComputingResource) {
 Map<String, String> hostDetails = new HashMap<String, String>();

-if (hostSupportsUefi(libvirtComputingResource.isUbuntuHost()) && libvirtComputingResource.isUefiPropertiesFileLoaded()) {
+if (hostSupportsUefi(libvirtComputingResource.isUbuntuOrDebianHost()) && libvirtComputingResource.isUefiPropertiesFileLoaded()) {
 hostDetails.put(Host.HOST_UEFI_ENABLE, Boolean.TRUE.toString());
 }

@@ -58,10 +58,10 @@ public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyComman
 return new ReadyAnswer(command, hostDetails);
 }

-private boolean hostSupportsUefi(boolean isUbuntuHost) {
+private boolean hostSupportsUefi(boolean isUbuntuOrDebianHost) {
 int timeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_SCRIPT_TIMEOUT) * 1000; // Get property value & convert to milliseconds
 int result;
-if (isUbuntuHost) {
+if (isUbuntuOrDebianHost) {
 logger.debug("Running command : [dpkg -l ovmf] with timeout : " + timeout + " ms");
 result = Script.executeCommandForExitValue(timeout, Script.getExecutableAbsolutePath("dpkg"), "-l", "ovmf");
 } else {
@@ -62,16 +62,25 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
 String restoreVolumeUuid = command.getRestoreVolumeUUID();

 String newVolumeId = null;
-if (Objects.isNull(vmExists)) {
-String volumePath = volumePaths.get(0);
-int lastIndex = volumePath.lastIndexOf("/");
-newVolumeId = volumePath.substring(lastIndex + 1);
-restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid,
-new Pair<>(vmName, command.getVmState()), mountOptions);
-} else if (Boolean.TRUE.equals(vmExists)) {
-restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions);
-} else {
-restoreVolumesOfDestroyedVMs(volumePaths, vmName, backupPath, backupRepoType, backupRepoAddress, mountOptions);
+try {
+if (Objects.isNull(vmExists)) {
+String volumePath = volumePaths.get(0);
+int lastIndex = volumePath.lastIndexOf("/");
+newVolumeId = volumePath.substring(lastIndex + 1);
+restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid,
+new Pair<>(vmName, command.getVmState()), mountOptions);
+} else if (Boolean.TRUE.equals(vmExists)) {
+restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions);
+} else {
+restoreVolumesOfDestroyedVMs(volumePaths, vmName, backupPath, backupRepoType, backupRepoAddress, mountOptions);
+}
+} catch (CloudRuntimeException e) {
+String errorMessage = "Failed to restore backup for VM: " + vmName + ".";
+if (e.getMessage() != null && !e.getMessage().isEmpty()) {
+errorMessage += " Details: " + e.getMessage();
+}
+logger.error(errorMessage);
+return new BackupAnswer(command, false, errorMessage);
 }

 return new BackupAnswer(command, true, newVolumeId);
@@ -86,10 +95,8 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
 String volumePath = volumePaths.get(idx);
 Pair<String, String> bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null);
 diskType = "datadisk";
-try {
-replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first());
-} catch (IOException e) {
-throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e);
+if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) {
+throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second()));
 }
 }
 } finally {
@@ -108,10 +115,8 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
 String volumePath = volumePaths.get(i);
 Pair<String, String> bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null);
 diskType = "datadisk";
-try {
-replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first());
-} catch (IOException e) {
-throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e);
+if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) {
+throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second()));
 }
 }
 } finally {
@@ -126,15 +131,13 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
 Pair<String, String> bkpPathAndVolUuid;
 try {
 bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, volumeUUID);
-try {
-replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first());
+if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) {
+throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second()));
+}
 if (VirtualMachine.State.Running.equals(vmNameAndState.second())) {
 if (!attachVolumeToVm(vmNameAndState.first(), volumePath)) {
 throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first()));
 }
 }
-} catch (IOException e) {
-throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e);
-}
 } catch (Exception e) {
 throw new CloudRuntimeException("Failed to restore volume", e);
@@ -194,8 +197,9 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
 return new Pair<>(bkpPath, volUuid);
 }

-private void replaceVolumeWithBackup(String volumePath, String backupPath) throws IOException {
-Script.runSimpleBashScript(String.format(RSYNC_COMMAND, backupPath, volumePath));
+private boolean replaceVolumeWithBackup(String volumePath, String backupPath) {
+int exitValue = Script.runSimpleBashScriptForExitValue(String.format(RSYNC_COMMAND, backupPath, volumePath));
+return exitValue == 0;
 }

 private boolean attachVolumeToVm(String vmName, String volumePath) {
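replaceVolumeWithBackup now reports success through the rsync exit code and returns a boolean, and every caller above converts a false return into a CloudRuntimeException, so a failed copy is no longer treated as a successful restore. The same exit-code idiom in plain Java, as an illustrative sketch independent of the CloudStack Script utility:

    import java.io.IOException;

    final class ExitCodeSketch {
        // Run a command and treat any non-zero exit status as failure.
        static boolean runAndCheck(String... command) throws IOException, InterruptedException {
            Process process = new ProcessBuilder(command).inheritIO().start();
            return process.waitFor() == 0;
        }
    }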
@@ -84,7 +84,7 @@ public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends Command
 private void importCertificate(String tempCerFilePath, String keyStoreFile, String certificateName, String privatePassword) {
 logger.debug("Importing certificate from temporary file to keystore");
 String keyToolPath = Script.getExecutableAbsolutePath("keytool");
-int result = Script.executeCommandForExitValue(keyToolPath, "-importcert", "file", tempCerFilePath,
+int result = Script.executeCommandForExitValue(keyToolPath, "-importcert", "-file", tempCerFilePath,
 "-keystore", keyStoreFile, "-alias", sanitizeBashCommandArgument(certificateName), "-storepass",
 privatePassword, "-noprompt");
 if (result != 0) {
|
|||||||
@ -37,6 +37,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
+import org.apache.commons.collections.MapUtils;
import org.apache.commons.io.filefilter.WildcardFileFilter;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
@ -581,14 +582,23 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
}

if (!ScaleIOUtil.isSDCServiceActive()) {
+logger.debug("SDC service is not active on host, starting it");
if (!ScaleIOUtil.startSDCService()) {
return new Ternary<>(false, null, "Couldn't start SDC service on host");
}
-} else if (!ScaleIOUtil.restartSDCService()) {
-return new Ternary<>(false, null, "Couldn't restart SDC service on host");
+} else {
+logger.debug("SDC service is active on host, re-starting it");
+if (!ScaleIOUtil.restartSDCService()) {
+return new Ternary<>(false, null, "Couldn't restart SDC service on host");
+}
}

-return new Ternary<>( true, getSDCDetails(details), "Prepared client successfully");
+Map<String, String> sdcDetails = getSDCDetails(details);
+if (MapUtils.isEmpty(sdcDetails)) {
+return new Ternary<>(false, null, "Couldn't get the SDC details on the host");
+}

+return new Ternary<>( true, sdcDetails, "Prepared client successfully");
}

public Pair<Boolean, String> unprepareStorageClient(Storage.StoragePoolType type, String uuid) {
@ -611,20 +621,40 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {

private Map<String, String> getSDCDetails(Map<String, String> details) {
Map<String, String> sdcDetails = new HashMap<String, String>();
-if (details == null || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) {
+if (MapUtils.isEmpty(details) || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) {
return sdcDetails;
}

String storageSystemId = details.get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
-String sdcId = ScaleIOUtil.getSdcId(storageSystemId);
-if (sdcId != null) {
-sdcDetails.put(ScaleIOGatewayClient.SDC_ID, sdcId);
-} else {
-String sdcGuId = ScaleIOUtil.getSdcGuid();
-if (sdcGuId != null) {
-sdcDetails.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId);
-}
+if (StringUtils.isEmpty(storageSystemId)) {
+return sdcDetails;
}

+int numberOfTries = 5;
+int timeBetweenTries = 1000; // Try more frequently (every sec) and return early when SDC Id or Guid found
+int attempt = 1;
+do {
+logger.debug("Get SDC details, attempt #{}", attempt);
+String sdcId = ScaleIOUtil.getSdcId(storageSystemId);
+if (sdcId != null) {
+sdcDetails.put(ScaleIOGatewayClient.SDC_ID, sdcId);
+return sdcDetails;
+} else {
+String sdcGuId = ScaleIOUtil.getSdcGuid();
+if (sdcGuId != null) {
+sdcDetails.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId);
+return sdcDetails;
+}
+}

+try {
+Thread.sleep(timeBetweenTries);
+} catch (Exception ignore) {
+}
+numberOfTries--;
+attempt++;
+} while (numberOfTries > 0);

return sdcDetails;
}

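The reworked getSDCDetails polls for the SDC identity instead of checking once: up to five attempts, one second apart, returning as soon as either the SDC id or the SDC GUID is reported. A rough standalone sketch of the same retry shape (the lookupSdcId helper and the method name are illustrative, not from this patch):

private String waitForSdcIdentity() throws InterruptedException {
    // up to 5 attempts, 1 second apart, mirroring the loop added above
    for (int attempt = 1; attempt <= 5; attempt++) {
        String id = lookupSdcId(); // stand-in for ScaleIOUtil.getSdcId() / getSdcGuid()
        if (id != null) {
            return id; // return early once the SDC reports in
        }
        Thread.sleep(1000);
    }
    return null; // caller treats this as "SDC details not found"
}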
@ -22,7 +22,6 @@ import java.util.List;
import java.util.UUID;

import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
-import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -40,13 +39,10 @@ import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.RemoteInstanceTO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
-import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
-import com.cloud.storage.Storage;
-import com.cloud.utils.Pair;
import com.cloud.utils.script.Script;

@RunWith(MockitoJUnitRunner.class)
@ -118,72 +114,6 @@ public class LibvirtConvertInstanceCommandWrapperTest {
Assert.assertEquals(relativePath, diskDef.getDiskPath());
}

-@Test
-public void testMoveTemporaryDisksToDestination() {
-KVMPhysicalDisk sourceDisk = Mockito.mock(KVMPhysicalDisk.class);
-List<KVMPhysicalDisk> disks = List.of(sourceDisk);
-String destinationPoolUuid = UUID.randomUUID().toString();
-List<String> destinationPools = List.of(destinationPoolUuid);
-
-KVMPhysicalDisk destDisk = Mockito.mock(KVMPhysicalDisk.class);
-Mockito.when(destDisk.getPath()).thenReturn("xyz");
-Mockito.when(storagePoolManager.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, destinationPoolUuid))
-.thenReturn(destinationPool);
-Mockito.when(destinationPool.getType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
-Mockito.when(storagePoolManager.copyPhysicalDisk(Mockito.eq(sourceDisk), Mockito.anyString(), Mockito.eq(destinationPool), Mockito.anyInt()))
-.thenReturn(destDisk);
-
-List<KVMPhysicalDisk> movedDisks = convertInstanceCommandWrapper.moveTemporaryDisksToDestination(disks, destinationPools, storagePoolManager);
-Assert.assertEquals(1, movedDisks.size());
-Assert.assertEquals("xyz", movedDisks.get(0).getPath());
-}
-
-@Test
-public void testGetUnmanagedInstanceDisks() {
-try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
-String relativePath = UUID.randomUUID().toString();
-LibvirtVMDef.DiskDef diskDef = new LibvirtVMDef.DiskDef();
-LibvirtVMDef.DiskDef.DiskBus bus = LibvirtVMDef.DiskDef.DiskBus.IDE;
-LibvirtVMDef.DiskDef.DiskFmtType type = LibvirtVMDef.DiskDef.DiskFmtType.QCOW2;
-diskDef.defFileBasedDisk(relativePath, relativePath, bus, type);
-
-KVMPhysicalDisk sourceDisk = Mockito.mock(KVMPhysicalDisk.class);
-Mockito.when(sourceDisk.getName()).thenReturn(UUID.randomUUID().toString());
-Mockito.when(sourceDisk.getPool()).thenReturn(destinationPool);
-Mockito.when(destinationPool.getType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
-List<KVMPhysicalDisk> disks = List.of(sourceDisk);
-
-LibvirtDomainXMLParser parser = Mockito.mock(LibvirtDomainXMLParser.class);
-Mockito.when(parser.getDisks()).thenReturn(List.of(diskDef));
-Mockito.doReturn(new Pair<String, String>(null, null)).when(convertInstanceCommandWrapper).getNfsStoragePoolHostAndPath(destinationPool);
-
-Mockito.when(Script.executePipedCommands(Mockito.anyList(), Mockito.anyLong()))
-.thenReturn(new Pair<>(0, null));
-
-List<UnmanagedInstanceTO.Disk> unmanagedInstanceDisks = convertInstanceCommandWrapper.getUnmanagedInstanceDisks(disks, parser);
-Assert.assertEquals(1, unmanagedInstanceDisks.size());
-UnmanagedInstanceTO.Disk disk = unmanagedInstanceDisks.get(0);
-Assert.assertEquals(LibvirtVMDef.DiskDef.DiskBus.IDE.toString(), disk.getController());
-}
-}
-
-@Test
-public void testGetNfsStoragePoolHostAndPath() {
-try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
-String localMountPoint = "/mnt/xyz";
-String host = "192.168.1.2";
-String path = "/secondary";
-String mountOutput = String.format("%s:%s on %s type nfs (...)", host, path, localMountPoint);
-Mockito.when(temporaryPool.getLocalPath()).thenReturn(localMountPoint);
-Mockito.when(Script.executePipedCommands(Mockito.anyList(), Mockito.anyLong()))
-.thenReturn(new Pair<>(0, mountOutput));
-
-Pair<String, String> pair = convertInstanceCommandWrapper.getNfsStoragePoolHostAndPath(temporaryPool);
-Assert.assertEquals(host, pair.first());
-Assert.assertEquals(path, pair.second());
-}
-}
-
private RemoteInstanceTO getRemoteInstanceTO(Hypervisor.HypervisorType hypervisorType) {
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
Mockito.when(remoteInstanceTO.getHypervisorType()).thenReturn(hypervisorType);
@ -116,9 +116,9 @@ public class ScaleIOStorageAdaptorTest {

Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());

-Assert.assertTrue(result.first());
-Assert.assertNotNull(result.second());
-Assert.assertTrue(result.second().isEmpty());
+Assert.assertFalse(result.first());
+Assert.assertNull(result.second());
+Assert.assertEquals("Couldn't get the SDC details on the host", result.third());
}

@Test
@ -159,6 +159,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu

protected String kubernetesClusterNodeNamePrefix;

+private static final int MAX_CLUSTER_PREFIX_LENGTH = 43;

protected KubernetesClusterResourceModifierActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
}
@ -775,19 +777,35 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
}
}

+/**
+* Generates a valid name prefix for Kubernetes cluster nodes.
+*
+* <p>The prefix must comply with Kubernetes naming constraints:
+* <ul>
+* <li>Maximum 63 characters total</li>
+* <li>Only lowercase alphanumeric characters and hyphens</li>
+* <li>Must start with a letter</li>
+* <li>Must end with an alphanumeric character</li>
+* </ul>
+*
+* <p>The generated prefix is limited to 43 characters to accommodate the full node naming pattern:
+* <pre>{'prefix'}-{'control' | 'node'}-{'11-digit-hash'}</pre>
+*
+* @return A valid node name prefix, truncated if necessary
+* @see <a href="https://kubernetes.io/docs/concepts/overview/working-with-objects/names/">Kubernetes "Object Names and IDs" documentation</a>
+*/
protected String getKubernetesClusterNodeNamePrefix() {
-String prefix = kubernetesCluster.getName();
-if (!NetUtils.verifyDomainNameLabel(prefix, true)) {
-prefix = prefix.replaceAll("[^a-zA-Z0-9-]", "");
-if (prefix.length() == 0) {
-prefix = kubernetesCluster.getUuid();
-}
-prefix = "k8s-" + prefix;
+String prefix = kubernetesCluster.getName().toLowerCase();
+if (NetUtils.verifyDomainNameLabel(prefix, true)) {
+return StringUtils.truncate(prefix, MAX_CLUSTER_PREFIX_LENGTH);
}
-if (prefix.length() > 40) {
-prefix = prefix.substring(0, 40);
+prefix = prefix.replaceAll("[^a-z0-9-]", "");
+if (prefix.isEmpty()) {
+prefix = kubernetesCluster.getUuid();
}
-return prefix;
+return StringUtils.truncate("k8s-" + prefix, MAX_CLUSTER_PREFIX_LENGTH);
}

protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, final Long size,
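The 43-character cap follows directly from the naming pattern documented in the new Javadoc: with the longest middle segment ("control") and the 11-digit hash suffix, the full node name just reaches the 63-character Kubernetes label limit. A quick worked check of the arithmetic:

43 (prefix) + 1 ("-") + 7 ("control") + 1 ("-") + 11 (hash) = 63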
@ -0,0 +1,138 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.

+package com.cloud.kubernetes.cluster.actionworkers;

+import com.cloud.kubernetes.cluster.KubernetesCluster;
+import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao;
+import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
+import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;

+@RunWith(MockitoJUnitRunner.class)
+public class KubernetesClusterResourceModifierActionWorkerTest {
+@Mock
+private KubernetesClusterDao kubernetesClusterDaoMock;

+@Mock
+private KubernetesClusterDetailsDao kubernetesClusterDetailsDaoMock;

+@Mock
+private KubernetesClusterVmMapDao kubernetesClusterVmMapDaoMock;

+@Mock
+private KubernetesSupportedVersionDao kubernetesSupportedVersionDaoMock;

+@Mock
+private KubernetesClusterManagerImpl kubernetesClusterManagerMock;

+@Mock
+private KubernetesCluster kubernetesClusterMock;

+private KubernetesClusterResourceModifierActionWorker kubernetesClusterResourceModifierActionWorker;

+@Before
+public void setUp() {
+kubernetesClusterManagerMock.kubernetesClusterDao = kubernetesClusterDaoMock;
+kubernetesClusterManagerMock.kubernetesSupportedVersionDao = kubernetesSupportedVersionDaoMock;
+kubernetesClusterManagerMock.kubernetesClusterDetailsDao = kubernetesClusterDetailsDaoMock;
+kubernetesClusterManagerMock.kubernetesClusterVmMapDao = kubernetesClusterVmMapDaoMock;

+kubernetesClusterResourceModifierActionWorker = new KubernetesClusterResourceModifierActionWorker(kubernetesClusterMock, kubernetesClusterManagerMock);
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestReturnOriginalPrefixWhenNamingAllRequirementsAreMet() {
+String originalPrefix = "k8s-cluster-01";
+String expectedPrefix = "k8s-cluster-01";

+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldOnlyContainLowerCaseCharacters() {
+String originalPrefix = "k8s-CLUSTER-01";
+String expectedPrefix = "k8s-cluster-01";

+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldBeTruncatedWhenRequired() {
+int maxPrefixLength = 43;

+String originalPrefix = "c".repeat(maxPrefixLength + 1);
+String expectedPrefix = "c".repeat(maxPrefixLength);

+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+String normalizedPrefix = kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix();
+Assert.assertEquals(expectedPrefix, normalizedPrefix);
+Assert.assertEquals(maxPrefixLength, normalizedPrefix.length());
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldBeTruncatedWhenRequiredAndWhenOriginalPrefixIsInvalid() {
+int maxPrefixLength = 43;

+String originalPrefix = "1!" + "c".repeat(maxPrefixLength);
+String expectedPrefix = "k8s-1" + "c".repeat(maxPrefixLength - 5);

+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+String normalizedPrefix = kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix();
+Assert.assertEquals(expectedPrefix, normalizedPrefix);
+Assert.assertEquals(maxPrefixLength, normalizedPrefix.length());
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldOnlyIncludeAlphanumericCharactersAndHyphen() {
+String originalPrefix = "Cluster!@#$%^&*()_+?.-01|<>";
+String expectedPrefix = "k8s-cluster-01";

+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldContainClusterUuidWhenAllCharactersAreInvalid() {
+String clusterUuid = "2699b547-cb56-4a59-a2c6-331cfb21d2e4";
+String originalPrefix = "!@#$%^&*()_+?.|<>";
+String expectedPrefix = "k8s-" + clusterUuid;

+Mockito.when(kubernetesClusterMock.getUuid()).thenReturn(clusterUuid);
+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
+}

+@Test
+public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldNotStartWithADigit() {
+String originalPrefix = "1 cluster";
+String expectedPrefix = "k8s-1cluster";

+Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
+Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
+}
+}
@ -140,13 +140,18 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle {
return fsVmConfig;
}

-private String getStorageVmName(String fileShareName) {
+private String getStorageVmPrefix(String fileShareName) {
String prefix = String.format("%s-%s", SharedFSVmNamePrefix, fileShareName);
-String suffix = Long.toHexString(System.currentTimeMillis());

if (!NetUtils.verifyDomainNameLabel(prefix, true)) {
prefix = prefix.replaceAll("[^a-zA-Z0-9-]", "");
}
+return prefix;
+}

+private String getStorageVmName(String fileShareName) {
+String prefix = getStorageVmPrefix(fileShareName);
+String suffix = Long.toHexString(System.currentTimeMillis());

int nameLength = prefix.length() + suffix.length() + SharedFSVmNamePrefix.length();
if (nameLength > 63) {
int prefixLength = prefix.length() - (nameLength - 63);
@ -236,8 +241,18 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle {
Account owner = accountMgr.getActiveAccountById(sharedFS.getAccountId());
UserVm vm = deploySharedFSVM(sharedFS.getDataCenterId(), owner, List.of(networkId), sharedFS.getName(), sharedFS.getServiceOfferingId(), diskOfferingId, sharedFS.getFsType(), size, minIops, maxIops);

-List<VolumeVO> volumes = volumeDao.findByInstanceAndType(vm.getId(), Volume.Type.DATADISK);
-return new Pair<>(volumes.get(0).getId(), vm.getId());
+List<VolumeVO> volumes = volumeDao.findByInstance(vm.getId());
+VolumeVO dataVol = null;
+for (VolumeVO vol : volumes) {
+String volumeName = vol.getName();
+String updatedVolumeName = SharedFSVmNamePrefix + "-" + volumeName;
+vol.setName(updatedVolumeName);
+volumeDao.update(vol.getId(), vol);
+if (vol.getVolumeType() == Volume.Type.DATADISK) {
+dataVol = vol;
+}
+}
+return new Pair<>(dataVol.getId(), vm.getId());
}

@Override
@ -260,9 +260,14 @@ public class StorageVmSharedFSLifeCycleTest {
anyMap(), isNull(), isNull(), isNull(), isNull(),
anyBoolean(), anyString(), isNull())).thenReturn(vm);

-VolumeVO volume = mock(VolumeVO.class);
-when(volume.getId()).thenReturn(s_volumeId);
-when(volumeDao.findByInstanceAndType(s_vmId, Volume.Type.DATADISK)).thenReturn(List.of(volume));
+VolumeVO rootVol = mock(VolumeVO.class);
+when(rootVol.getVolumeType()).thenReturn(Volume.Type.ROOT);
+when(rootVol.getName()).thenReturn("ROOT-1");
+VolumeVO dataVol = mock(VolumeVO.class);
+when(dataVol.getId()).thenReturn(s_volumeId);
+when(dataVol.getName()).thenReturn("DATA-1");
+when(dataVol.getVolumeType()).thenReturn(Volume.Type.DATADISK);
+when(volumeDao.findByInstance(s_vmId)).thenReturn(List.of(rootVol, dataVol));

Pair<Long, Long> result = lifeCycle.deploySharedFS(sharedFS, s_networkId, s_diskOfferingId, s_size, s_minIops, s_maxIops);
Assert.assertEquals(Optional.ofNullable(result.first()), Optional.ofNullable(s_volumeId));
@ -1230,13 +1230,13 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
}

org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = client.getVolume(scaleIOVolumeId);
-long newSizeInGB = newSizeInBytes / (1024 * 1024 * 1024);
-long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);
+double newSizeInGB = newSizeInBytes / (1024.0 * 1024 * 1024);
+long newSizeIn8GBBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);

-if (scaleIOVolume.getSizeInKb() == newSizeIn8gbBoundary << 20) {
+if (scaleIOVolume.getSizeInKb() == newSizeIn8GBBoundary << 20) {
logger.debug("No resize necessary at API");
} else {
-scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8gbBoundary);
+scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8GBBoundary);
if (scaleIOVolume == null) {
throw new CloudRuntimeException("Failed to resize volume: " + volumeInfo.getName());
}
@ -1362,12 +1362,12 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {

@Override
public long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) {
-long newSizeInGB = volumeSize / (1024 * 1024 * 1024);
+double newSizeInGB = volumeSize / (1024.0 * 1024 * 1024);
if (templateSize != null && isEncryptionRequired && needsExpansionForEncryptionHeader(templateSize, volumeSize)) {
-newSizeInGB = (volumeSize + (1<<30)) / (1024 * 1024 * 1024);
+newSizeInGB = (volumeSize + (1<<30)) / (1024.0 * 1024 * 1024);
}
-long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);
-return newSizeIn8gbBoundary * (1024 * 1024 * 1024);
+long newSizeIn8GBBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);
+return newSizeIn8GBBoundary * (1024 * 1024 * 1024);
}

@Override
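The switch to floating-point division is what makes the 8 GB rounding work for sizes below 1 GB: with integer division a 50 MiB (52428800-byte) request collapsed to 0 GB and therefore rounded to 0, while 52428800 / (1024.0 * 1024 * 1024) is roughly 0.049 GB and now rounds up to the 8 GB PowerFlex allocation granularity; a 10 GB volume still rounds up to 16 GB, which is what the updated unit test below asserts. A worked check of the boundary formula:

ceil(0.049 / 8) * 8 = 8        ceil(10 / 8) * 8 = 16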
@ -183,12 +183,13 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {
storagePoolHost.setLocalPath(sdcId);
storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
}

+int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs)
+if (hostSdcConnected(sdcId, dataStore, waitTimeInSecs)) {
+return sdcId;
+}
}

-int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs)
-if (hostSdcConnected(sdcId, dataStore, waitTimeInSecs)) {
-return sdcId;
-}
return null;
} finally {
if (storageSystemIdLock != null) {
@ -246,7 +247,7 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {
}

if (StringUtils.isBlank(sdcId)) {
-logger.warn("Couldn't retrieve PowerFlex storage SDC details from the host: {}, try (re)install SDC and restart agent", host);
+logger.warn("Couldn't retrieve PowerFlex storage SDC details from the host: {}, add MDMs if not or try (re)install SDC & restart agent", host);
return null;
}

@ -381,6 +382,9 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {

private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception {
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
+if (storagePool == null) {
+throw new CloudRuntimeException("Unable to find the storage pool with id " + storagePoolId);
+}
return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao);
}

@ -102,12 +102,12 @@ public class ScaleIOHostListener implements HypervisorHostListener {
if (systemId == null) {
throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool " + storagePool.getName());
}
-Map<String,String> details = new HashMap<>();
+Map<String, String> details = new HashMap<>();
details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);

ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool, storagePool.getPath(), details);
ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, host);
-Map<String,String> poolDetails = answer.getPoolInfo().getDetails();
+Map<String, String> poolDetails = answer.getPoolInfo().getDetails();
if (MapUtils.isEmpty(poolDetails)) {
String msg = String.format("PowerFlex storage SDC details not found on the host: %s, (re)install SDC and restart agent", host);
logger.warn(msg);
@ -124,7 +124,7 @@ public class ScaleIOHostListener implements HypervisorHostListener {
}

if (StringUtils.isBlank(sdcId)) {
-String msg = String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, (re)install SDC and restart agent", host);
+String msg = String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, add MDMs if not or try (re)install SDC & restart agent", host);
logger.warn(msg);
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg);
return null;
@ -17,6 +17,8 @@

package org.apache.cloudstack.storage.datastore.util;

+import com.cloud.agent.properties.AgentProperties;
+import com.cloud.agent.properties.AgentPropertiesFileHandler;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

@ -60,6 +62,14 @@ public class ScaleIOUtil {
private static final String SDC_SERVICE_ENABLE_CMD = "systemctl enable scini";

public static final String CONNECTED_SDC_COUNT_STAT = "ConnectedSDCCount";

+/**
+* Time (in seconds) to wait after SDC service 'scini' start/restart/stop.<br>
+* Data type: Integer.<br>
+* Default value: <code>3</code>
+*/
+public static final AgentProperties.Property<Integer> SDC_SERVICE_ACTION_WAIT = new AgentProperties.Property<>("powerflex.sdc.service.wait", 3);

/**
* Cmd for querying volumes in SDC
* Sample output for cmd: drv_cfg --query_vols:
@ -216,16 +226,41 @@ public class ScaleIOUtil {

public static boolean startSDCService() {
int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_START_CMD);
-return exitValue == 0;
+if (exitValue != 0) {
+return false;
+}
+waitForSdcServiceActionToComplete();
+return true;
}

public static boolean stopSDCService() {
int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_STOP_CMD);
-return exitValue == 0;
+if (exitValue != 0) {
+return false;
+}
+waitForSdcServiceActionToComplete();
+return true;
}

public static boolean restartSDCService() {
int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_RESTART_CMD);
-return exitValue == 0;
+if (exitValue != 0) {
+return false;
+}
+waitForSdcServiceActionToComplete();
+return true;
+}

+private static void waitForSdcServiceActionToComplete() {
+// Wait for the SDC service to settle after start/restart/stop and reaches a stable state
+int waitTimeInSecs = AgentPropertiesFileHandler.getPropertyValue(SDC_SERVICE_ACTION_WAIT);
+if (waitTimeInSecs < 0) {
+waitTimeInSecs = SDC_SERVICE_ACTION_WAIT.getDefaultValue();
+}
+try {
+LOGGER.debug(String.format("Waiting for %d secs after SDC service action, to reach a stable state", waitTimeInSecs));
+Thread.sleep(waitTimeInSecs * 1000L);
+} catch (InterruptedException ignore) {
+}
}
}
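The new SDC_SERVICE_ACTION_WAIT property makes the post-start/stop/restart settle time tunable from the agent side; per the definition above it defaults to 3 seconds. Assuming the usual agent.properties mechanism that AgentPropertiesFileHandler reads on the KVM host, an operator could lengthen the wait with a line such as (illustrative value):

powerflex.sdc.service.wait=10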
@ -555,6 +555,18 @@ public class ScaleIOPrimaryDataStoreDriverTest {

@Test
public void testGetVolumeSizeRequiredOnPool() {
+Assert.assertEquals(8L * (1024 * 1024 * 1024),
+scaleIOPrimaryDataStoreDriver.getVolumeSizeRequiredOnPool(
+52428800,
+null,
+false));

+Assert.assertEquals(8L * (1024 * 1024 * 1024),
+scaleIOPrimaryDataStoreDriver.getVolumeSizeRequiredOnPool(
+52428800,
+52428800L,
+true));

Assert.assertEquals(16L * (1024 * 1024 * 1024),
scaleIOPrimaryDataStoreDriver.getVolumeSizeRequiredOnPool(
10L * (1024 * 1024 * 1024),
@ -139,6 +139,9 @@ public class StorPoolStorageAdaptor implements StorageAdaptor {
}

public static String getVolumeNameFromPath(final String volumeUuid, boolean tildeNeeded) {
+if (volumeUuid == null) {
+return null;
+}
if (volumeUuid.startsWith("/dev/storpool/")) {
return volumeUuid.split("/")[3];
} else if (volumeUuid.startsWith("/dev/storpool-byid/")) {
@ -16,6 +16,7 @@
// under the License.
package org.apache.cloudstack.ldap;

+import java.io.FileInputStream;
import java.io.IOException;
import java.util.Hashtable;

@ -24,6 +25,7 @@ import javax.naming.Context;
import javax.naming.NamingException;
import javax.naming.ldap.InitialLdapContext;
import javax.naming.ldap.LdapContext;
+import java.security.KeyStore;

import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.Logger;
@ -52,14 +54,14 @@ public class LdapContextFactory {
return createInitialDirContext(bindPrincipal, bindPassword, providerUrl, true, domainId);
}

-private LdapContext createInitialDirContext(final String principal, final String password, final boolean isSystemContext, Long domainId) throws NamingException, IOException {
+private LdapContext createInitialDirContext(final String principal, final String password, final boolean isSystemContext, Long domainId) throws NamingException {
return createInitialDirContext(principal, password, null, isSystemContext, domainId);
}

private LdapContext createInitialDirContext(final String principal, final String password, final String providerUrl, final boolean isSystemContext, Long domainId)
-throws NamingException, IOException {
+throws NamingException {
Hashtable<String, String> environment = getEnvironment(principal, password, providerUrl, isSystemContext, domainId);
-logger.debug("initializing ldap with provider url: " + environment.get(Context.PROVIDER_URL));
+logger.debug("initializing ldap with provider url: {}", environment.get(Context.PROVIDER_URL));
return new InitialLdapContext(environment, null);
}

@ -73,8 +75,36 @@ public class LdapContextFactory {
if (sslStatus) {
logger.info("LDAP SSL enabled.");
environment.put(Context.SECURITY_PROTOCOL, "ssl");
-System.setProperty("javax.net.ssl.trustStore", _ldapConfiguration.getTrustStore(domainId));
-System.setProperty("javax.net.ssl.trustStorePassword", _ldapConfiguration.getTrustStorePassword(domainId));
+String trustStore = _ldapConfiguration.getTrustStore(domainId);
+String trustStorePassword = _ldapConfiguration.getTrustStorePassword(domainId);

+if (!validateTrustStore(trustStore, trustStorePassword)) {
+throw new RuntimeException("Invalid truststore or truststore password");
+}

+System.setProperty("javax.net.ssl.trustStore", trustStore);
+System.setProperty("javax.net.ssl.trustStorePassword", trustStorePassword);
+}
+}

+private boolean validateTrustStore(String trustStore, String trustStorePassword) {
+if (trustStore == null) {
+return true;
+}

+if (trustStorePassword == null) {
+return false;
+}

+try {
+KeyStore.getInstance("JKS").load(
+new FileInputStream(trustStore),
+trustStorePassword.toCharArray()
+);
+return true;
+} catch (Exception e) {
+logger.warn("Failed to validate truststore: {}", e.getMessage());
+return false;
}
}

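validateTrustStore fails fast when the configured JKS file cannot be opened with the supplied password, instead of letting the LDAP bind fail later with an opaque SSL error. The same check can be run by hand with the standard JDK keytool (the path below is illustrative, not from this patch):

keytool -list -keystore /etc/cloudstack/ldap-truststore.jks -storepass <truststore-password>

This lists the stored certificates when the file and password are valid, and fails with the same class of error the new code logs when they are not.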
@ -184,6 +184,11 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
} catch (NamingException | IOException e) {
logger.debug("NamingException while doing an LDAP bind", e);
throw new InvalidParameterValueException("Unable to bind to the given LDAP server");
+} catch (RuntimeException e) {
+if (e.getMessage().contains("Invalid truststore")) {
+throw new InvalidParameterValueException("Invalid truststore or truststore password");
+}
+throw e;
} finally {
closeContext(context);
}
2
pom.xml
@ -50,7 +50,7 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<project.systemvm.template.location>https://download.cloudstack.org/systemvm</project.systemvm.template.location>
-<project.systemvm.template.version>4.20.1.0</project.systemvm.template.version>
+<project.systemvm.template.version>4.20.2.0</project.systemvm.template.version>
<sonar.organization>apache</sonar.organization>
<sonar.host.url>https://sonarcloud.io</sonar.host.url>

@ -89,6 +89,10 @@ import com.cloud.utils.Pair;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallbackNoReturn;
+import com.cloud.utils.db.TransactionStatus;

import org.jetbrains.annotations.Nullable;

public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable {
@ -290,8 +294,13 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
Math.min(CapacityManager.CapacityCalculateWorkers.value(), hostIds.size())));
for (Long hostId : hostIds) {
futures.put(hostId, executorService.submit(() -> {
-final HostVO host = hostDao.findById(hostId);
-_capacityMgr.updateCapacityForHost(host);
+Transaction.execute(new TransactionCallbackNoReturn() {
+@Override
+public void doInTransactionWithoutResult(TransactionStatus status) {
+final HostVO host = hostDao.findById(hostId);
+_capacityMgr.updateCapacityForHost(host);
+}
+});
return null;
}));
}
@ -316,13 +325,18 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
Math.min(CapacityManager.CapacityCalculateWorkers.value(), storagePoolIds.size())));
for (Long poolId: storagePoolIds) {
futures.put(poolId, executorService.submit(() -> {
-final StoragePoolVO pool = _storagePoolDao.findById(poolId);
-long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null);
-if (pool.isShared()) {
-_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk);
-} else {
-_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk);
-}
+Transaction.execute(new TransactionCallbackNoReturn() {
+@Override
+public void doInTransactionWithoutResult(TransactionStatus status) {
+final StoragePoolVO pool = _storagePoolDao.findById(poolId);
+long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null);
+if (pool.isShared()) {
+_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk);
+} else {
+_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk);
+}
+}
+});
return null;
}));
}
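Both capacity-recalculation lambdas now wrap their database work in Transaction.execute with a TransactionCallbackNoReturn; since each task is submitted to an ExecutorService, the pooled worker thread otherwise carries no transaction context of its own, and the wrapper presumably scopes one to each task. The pattern in isolation, as a minimal sketch rather than code from this patch:

Transaction.execute(new TransactionCallbackNoReturn() {
    @Override
    public void doInTransactionWithoutResult(TransactionStatus status) {
        // DB reads and writes issued here run inside the transaction scoped to this callback
    }
});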
@ -139,6 +139,7 @@ import com.cloud.vm.dao.VMInstanceDao;
@Component
public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLimitService, Configurable {

+public static final String CHECKING_IF = "Checking if {}";
@Inject
private AccountManager _accountMgr;
@Inject
@ -164,8 +165,6 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
@Inject
private ResourceLimitDao _resourceLimitDao;
@Inject
-private ResourceLimitService resourceLimitService;
-@Inject
private ReservationDao reservationDao;
@Inject
protected SnapshotDao _snapshotDao;
@ -330,7 +329,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
return;
}

-final long numToIncrement = (delta.length == 0) ? 1 : delta[0].longValue();
+final long numToIncrement = (delta.length == 0) ? 1 : delta[0];
removeResourceReservationIfNeededAndIncrementResourceCount(accountId, type, tag, numToIncrement);
}

@ -346,7 +345,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
logger.trace("Not decrementing resource count for system accounts, returning");
return;
}
-long numToDecrement = (delta.length == 0) ? 1 : delta[0].longValue();
+long numToDecrement = (delta.length == 0) ? 1 : delta[0];

if (!updateResourceCountForAccount(accountId, type, tag, false, numToDecrement)) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, "Failed to decrement resource count of type " + type + " for account id=" + accountId,
@ -373,11 +372,11 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim

// Check if limit is configured for account
if (limit != null) {
-max = limit.getMax().longValue();
+max = limit.getMax();
} else {
String resourceTypeName = type.name();
// If the account has an no limit set, then return global default account limits
-Long value = null;
+Long value;
if (account.getType() == Account.Type.PROJECT) {
value = projectResourceLimitMap.get(resourceTypeName);
} else {
@ -418,10 +417,10 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim

// Check if limit is configured for account
if (limit != null) {
-max = limit.longValue();
+max = limit;
} else {
// If the account has an no limit set, then return global default account limits
-Long value = null;
+Long value;
if (account.getType() == Account.Type.PROJECT) {
value = projectResourceLimitMap.get(type.getName());
} else {
@ -453,7 +452,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
ResourceLimitVO limit = _resourceLimitDao.findByOwnerIdAndTypeAndTag(domain.getId(), ResourceOwnerType.Domain, type, tag);

if (limit != null) {
-max = limit.getMax().longValue();
+max = limit.getMax();
} else {
// check domain hierarchy
Long domainId = domain.getParent();
@ -467,12 +466,12 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
}

if (limit != null) {
-max = limit.getMax().longValue();
+max = limit.getMax();
} else {
if (StringUtils.isNotEmpty(tag)) {
return findCorrectResourceLimitForDomain(domain, type, null);
}
-Long value = null;
+Long value;
value = domainResourceLimitMap.get(type.name());
if (value != null) {
if (value < 0) { // return unlimit if value is set to negative
@ -491,7 +490,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim

protected void checkDomainResourceLimit(final Account account, final Project project, final ResourceType type, String tag, long numResources) throws ResourceAllocationException {
// check all domains in the account's domain hierarchy
-Long domainId = null;
+Long domainId;
if (project != null) {
domainId = project.getDomainId();
} else {
@ -530,9 +529,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
convCurrentDomainResourceCount, convCurrentResourceReservation, convNumResources
);

-if (logger.isDebugEnabled()) {
-logger.debug("Checking if" + messageSuffix);
-}
+logger.debug(CHECKING_IF, messageSuffix);

if (domainResourceLimit != Resource.RESOURCE_UNLIMITED && requestedDomainResourceCount > domainResourceLimit) {
String message = "Maximum" + messageSuffix;
@ -571,9 +568,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
convertedAccountResourceLimit, convertedCurrentResourceCount, convertedCurrentResourceReservation, convertedNumResources
);

-if (logger.isDebugEnabled()) {
-logger.debug("Checking if" + messageSuffix);
-}
+logger.debug(CHECKING_IF, messageSuffix);

if (accountResourceLimit != Resource.RESOURCE_UNLIMITED && requestedResourceCount > accountResourceLimit) {
String message = "Maximum" + messageSuffix;
@ -592,14 +587,14 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim

@Override
public long findDefaultResourceLimitForDomain(ResourceType resourceType) {
-Long resourceLimit = null;
+Long resourceLimit;
resourceLimit = domainResourceLimitMap.get(resourceType.getName());
if (resourceLimit != null && (resourceType == ResourceType.primary_storage || resourceType == ResourceType.secondary_storage)) {
if (! Long.valueOf(Resource.RESOURCE_UNLIMITED).equals(resourceLimit)) {
resourceLimit = resourceLimit * ResourceType.bytesToGiB;
}
} else {
-resourceLimit = Long.valueOf(Resource.RESOURCE_UNLIMITED);
+resourceLimit = (long) Resource.RESOURCE_UNLIMITED;
}
return resourceLimit;
}
@ -677,8 +672,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
@Override
public List<ResourceLimitVO> searchForLimits(Long id, Long accountId, Long domainId, ResourceType resourceType, String tag, Long startIndex, Long pageSizeVal) {
Account caller = CallContext.current().getCallingAccount();
-List<ResourceLimitVO> limits = new ArrayList<ResourceLimitVO>();
-boolean isAccount = true;
+List<ResourceLimitVO> limits = new ArrayList<>();
+boolean isAccount;

if (!_accountMgr.isAdmin(caller.getId())) {
accountId = caller.getId();
@ -770,7 +765,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
if (foundLimits.isEmpty()) {
ResourceOwnerType ownerType = ResourceOwnerType.Domain;
Long ownerId = domainId;
-long max = 0;
+long max;
if (isAccount) {
ownerType = ResourceOwnerType.Account;
ownerId = accountId;
@ -788,8 +783,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
// see if any limits are missing from the table, and if yes - get it from the config table and add
ResourceType[] resourceTypes = ResourceCount.ResourceType.values();
if (foundLimits.size() != resourceTypes.length) {
List<String> accountLimitStr = new ArrayList<String>();
|
List<String> accountLimitStr = new ArrayList<>();
|
||||||
List<String> domainLimitStr = new ArrayList<String>();
|
List<String> domainLimitStr = new ArrayList<>();
|
||||||
for (ResourceLimitVO foundLimit : foundLimits) {
|
for (ResourceLimitVO foundLimit : foundLimits) {
|
||||||
if (foundLimit.getAccountId() != null) {
|
if (foundLimit.getAccountId() != null) {
|
||||||
accountLimitStr.add(foundLimit.getType().toString());
|
accountLimitStr.add(foundLimit.getType().toString());
|
||||||
@ -883,8 +878,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
Account caller = CallContext.current().getCallingAccount();
|
Account caller = CallContext.current().getCallingAccount();
|
||||||
|
|
||||||
if (max == null) {
|
if (max == null) {
|
||||||
max = new Long(Resource.RESOURCE_UNLIMITED);
|
max = (long)Resource.RESOURCE_UNLIMITED;
|
||||||
} else if (max.longValue() < Resource.RESOURCE_UNLIMITED) {
|
} else if (max < Resource.RESOURCE_UNLIMITED) {
|
||||||
throw new InvalidParameterValueException("Please specify either '-1' for an infinite limit, or a limit that is at least '0'.");
|
throw new InvalidParameterValueException("Please specify either '-1' for an infinite limit, or a limit that is at least '0'.");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -892,7 +887,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
ResourceType resourceType = null;
|
ResourceType resourceType = null;
|
||||||
if (typeId != null) {
|
if (typeId != null) {
|
||||||
for (ResourceType type : Resource.ResourceType.values()) {
|
for (ResourceType type : Resource.ResourceType.values()) {
|
||||||
if (type.getOrdinal() == typeId.intValue()) {
|
if (type.getOrdinal() == typeId) {
|
||||||
resourceType = type;
|
resourceType = type;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -929,7 +924,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
throw new InvalidParameterValueException("Only " + Resource.RESOURCE_UNLIMITED + " limit is supported for Root Admin accounts");
|
throw new InvalidParameterValueException("Only " + Resource.RESOURCE_UNLIMITED + " limit is supported for Root Admin accounts");
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((caller.getAccountId() == accountId.longValue()) && (_accountMgr.isDomainAdmin(caller.getId()) || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN)) {
|
if ((caller.getAccountId() == accountId) && (_accountMgr.isDomainAdmin(caller.getId()) || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN)) {
|
||||||
// If the admin is trying to update their own account, disallow.
|
// If the admin is trying to update their own account, disallow.
|
||||||
throw new PermissionDeniedException(String.format("Unable to update resource limit for their own account %s, permission denied", account));
|
throw new PermissionDeniedException(String.format("Unable to update resource limit for their own account %s, permission denied", account));
|
||||||
}
|
}
|
||||||
@ -955,12 +950,12 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
|
|
||||||
_accountMgr.checkAccess(caller, domain);
|
_accountMgr.checkAccess(caller, domain);
|
||||||
|
|
||||||
if (Domain.ROOT_DOMAIN == domainId.longValue()) {
|
if (Domain.ROOT_DOMAIN == domainId) {
|
||||||
// no one can add limits on ROOT domain, disallow...
|
// no one can add limits on ROOT domain, disallow...
|
||||||
throw new PermissionDeniedException("Cannot update resource limit for ROOT domain " + domainId + ", permission denied");
|
throw new PermissionDeniedException("Cannot update resource limit for ROOT domain " + domainId + ", permission denied");
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((caller.getDomainId() == domainId.longValue()) && caller.getType() == Account.Type.DOMAIN_ADMIN || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN) {
|
if ((caller.getDomainId() == domainId) && caller.getType() == Account.Type.DOMAIN_ADMIN || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN) {
|
||||||
// if the admin is trying to update their own domain, disallow...
|
// if the admin is trying to update their own domain, disallow...
|
||||||
throw new PermissionDeniedException("Unable to update resource limit for domain " + domainId + ", permission denied");
|
throw new PermissionDeniedException("Unable to update resource limit for domain " + domainId + ", permission denied");
|
||||||
}
|
}
|
||||||
@ -975,7 +970,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
if (parentDomainId != null) {
|
if (parentDomainId != null) {
|
||||||
DomainVO parentDomain = _domainDao.findById(parentDomainId);
|
DomainVO parentDomain = _domainDao.findById(parentDomainId);
|
||||||
long parentMaximum = findCorrectResourceLimitForDomain(parentDomain, resourceType, tag);
|
long parentMaximum = findCorrectResourceLimitForDomain(parentDomain, resourceType, tag);
|
||||||
if ((parentMaximum >= 0) && (max.longValue() > parentMaximum)) {
|
if ((parentMaximum >= 0) && (max > parentMaximum)) {
|
||||||
throw new InvalidParameterValueException(String.format("Domain %s has maximum allowed resource limit %d for %s, please specify a value less than or equal to %d", parentDomain, parentMaximum, resourceType, parentMaximum));
|
throw new InvalidParameterValueException(String.format("Domain %s has maximum allowed resource limit %d for %s, please specify a value less than or equal to %d", parentDomain, parentMaximum, resourceType, parentMaximum));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1064,15 +1059,15 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
@Override
|
@Override
|
||||||
public List<? extends ResourceCount> recalculateResourceCount(Long accountId, Long domainId, Integer typeId, String tag) throws CloudRuntimeException {
|
public List<? extends ResourceCount> recalculateResourceCount(Long accountId, Long domainId, Integer typeId, String tag) throws CloudRuntimeException {
|
||||||
Account callerAccount = CallContext.current().getCallingAccount();
|
Account callerAccount = CallContext.current().getCallingAccount();
|
||||||
long count = 0;
|
long count;
|
||||||
List<ResourceCountVO> counts = new ArrayList<ResourceCountVO>();
|
List<ResourceCountVO> counts = new ArrayList<>();
|
||||||
List<ResourceType> resourceTypes = new ArrayList<ResourceType>();
|
List<ResourceType> resourceTypes = new ArrayList<>();
|
||||||
|
|
||||||
ResourceType resourceType = null;
|
ResourceType resourceType = null;
|
||||||
|
|
||||||
if (typeId != null) {
|
if (typeId != null) {
|
||||||
for (ResourceType type : Resource.ResourceType.values()) {
|
for (ResourceType type : Resource.ResourceType.values()) {
|
||||||
if (type.getOrdinal() == typeId.intValue()) {
|
if (type.getOrdinal() == typeId) {
|
||||||
resourceType = type;
|
resourceType = type;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1091,12 +1086,13 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
throw new InvalidParameterValueException("Please specify a valid domain ID.");
|
throw new InvalidParameterValueException("Please specify a valid domain ID.");
|
||||||
}
|
}
|
||||||
_accountMgr.checkAccess(callerAccount, domain);
|
_accountMgr.checkAccess(callerAccount, domain);
|
||||||
Account account = _entityMgr.findById(Account.class, accountId);
|
if (accountId != null) {
|
||||||
if (account == null) {
|
Account account = _entityMgr.findById(Account.class, accountId);
|
||||||
throw new InvalidParameterValueException("Unable to find account " + accountId);
|
if (account == null) {
|
||||||
|
throw new InvalidParameterValueException("Unable to find account " + accountId);
|
||||||
|
}
|
||||||
|
_accountMgr.verifyCallerPrivilegeForUserOrAccountOperations(account);
|
||||||
}
|
}
|
||||||
_accountMgr.verifyCallerPrivilegeForUserOrAccountOperations(account);
|
|
||||||
|
|
||||||
if (resourceType != null) {
|
if (resourceType != null) {
|
||||||
resourceTypes.add(resourceType);
|
resourceTypes.add(resourceType);
|
||||||
} else {
|
} else {
|
||||||
@ -1145,7 +1141,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
convertedDelta = toHumanReadableSize(delta);
|
convertedDelta = toHumanReadableSize(delta);
|
||||||
}
|
}
|
||||||
String typeStr = StringUtils.isNotEmpty(tag) ? String.format("%s (tag: %s)", type, tag) : type.getName();
|
String typeStr = StringUtils.isNotEmpty(tag) ? String.format("%s (tag: %s)", type, tag) : type.getName();
|
||||||
logger.debug("Updating resource Type = " + typeStr + " count for Account = " + accountId + " Operation = " + (increment ? "increasing" : "decreasing") + " Amount = " + convertedDelta);
|
logger.debug("Updating resource Type = {} count for Account with id = {} Operation = {} Amount = {}", typeStr, accountId, (increment ? "increasing" : "decreasing"), convertedDelta);
|
||||||
}
|
}
|
||||||
Set<Long> rowIdsToUpdate = _resourceCountDao.listAllRowsToUpdate(accountId, ResourceOwnerType.Account, type, tag);
|
Set<Long> rowIdsToUpdate = _resourceCountDao.listAllRowsToUpdate(accountId, ResourceOwnerType.Account, type, tag);
|
||||||
return _resourceCountDao.updateCountByDeltaForIds(new ArrayList<>(rowIdsToUpdate), increment, delta);
|
return _resourceCountDao.updateCountByDeltaForIds(new ArrayList<>(rowIdsToUpdate), increment, delta);
|
||||||
@ -1200,6 +1196,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
newResourceCount += _projectDao.countProjectsForDomain(domainId);
|
newResourceCount += _projectDao.countProjectsForDomain(domainId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO make sure that the resource counts are not null
|
||||||
for (ResourceCountVO resourceCount : resourceCounts) {
|
for (ResourceCountVO resourceCount : resourceCounts) {
|
||||||
if (resourceCount.getResourceOwnerType() == ResourceOwnerType.Domain && resourceCount.getDomainId() == domainId) {
|
if (resourceCount.getResourceOwnerType() == ResourceOwnerType.Domain && resourceCount.getDomainId() == domainId) {
|
||||||
oldResourceCount = resourceCount.getCount();
|
oldResourceCount = resourceCount.getCount();
|
||||||
@ -1209,11 +1206,12 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO domainRC may be null if there are no resource counts for the domain found in the loop above
|
||||||
if (oldResourceCount != newResourceCount) {
|
if (oldResourceCount != newResourceCount) {
|
||||||
domainRC.setCount(newResourceCount);
|
domainRC.setCount(newResourceCount);
|
||||||
_resourceCountDao.update(domainRC.getId(), domainRC);
|
_resourceCountDao.update(domainRC.getId(), domainRC);
|
||||||
logger.warn("Discrepency in the resource count has been detected " + "(original count = " + oldResourceCount + " correct count = " + newResourceCount + ") for Type = " + type
|
logger.warn("Discrepency in the resource count has been detected (original count = {} correct count = {}) for Type = {} for Domain ID = {} is fixed during resource count recalculation.",
|
||||||
+ " for Domain ID = " + domainId + " is fixed during resource count recalculation.");
|
oldResourceCount, newResourceCount, type, domainId);
|
||||||
}
|
}
|
||||||
return newResourceCount;
|
return newResourceCount;
|
||||||
});
|
});
|
||||||
@ -1280,8 +1278,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
// resource count which will not lead to any discrepancy.
|
// resource count which will not lead to any discrepancy.
|
||||||
if (newCount != null && !newCount.equals(oldCount) &&
|
if (newCount != null && !newCount.equals(oldCount) &&
|
||||||
type != Resource.ResourceType.primary_storage && type != Resource.ResourceType.secondary_storage) {
|
type != Resource.ResourceType.primary_storage && type != Resource.ResourceType.secondary_storage) {
|
||||||
logger.warn("Discrepancy in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type +
|
logger.warn("Discrepancy in the resource count (original count={} correct count = {}) for type {} for account ID {} is fixed during resource count recalculation.",
|
||||||
" for account ID " + accountId + " is fixed during resource count recalculation.");
|
oldCount, newCount, type, accountId);
|
||||||
}
|
}
|
||||||
|
|
||||||
return (newCount == null) ? 0 : newCount;
|
return (newCount == null) ? 0 : newCount;
|
||||||
@ -1425,20 +1423,16 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
}
|
}
|
||||||
|
|
||||||
private long calculatePublicIpForAccount(long accountId) {
|
private long calculatePublicIpForAccount(long accountId) {
|
||||||
Long dedicatedCount = 0L;
|
long dedicatedCount = 0L;
|
||||||
Long allocatedCount = 0L;
|
long allocatedCount;
|
||||||
|
|
||||||
List<VlanVO> dedicatedVlans = _vlanDao.listDedicatedVlans(accountId);
|
List<VlanVO> dedicatedVlans = _vlanDao.listDedicatedVlans(accountId);
|
||||||
for (VlanVO dedicatedVlan : dedicatedVlans) {
|
for (VlanVO dedicatedVlan : dedicatedVlans) {
|
||||||
List<IPAddressVO> ips = _ipAddressDao.listByVlanId(dedicatedVlan.getId());
|
List<IPAddressVO> ips = _ipAddressDao.listByVlanId(dedicatedVlan.getId());
|
||||||
dedicatedCount += new Long(ips.size());
|
dedicatedCount += ips.size();
|
||||||
}
|
}
|
||||||
allocatedCount = _ipAddressDao.countAllocatedIPsForAccount(accountId);
|
allocatedCount = _ipAddressDao.countAllocatedIPsForAccount(accountId);
|
||||||
if (dedicatedCount > allocatedCount) {
|
return Math.max(dedicatedCount, allocatedCount);
|
||||||
return dedicatedCount;
|
|
||||||
} else {
|
|
||||||
return allocatedCount;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
protected long calculatePrimaryStorageForAccount(long accountId, String tag) {
|
protected long calculatePrimaryStorageForAccount(long accountId, String tag) {
|
||||||
@ -1526,10 +1520,10 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
|
|
||||||
protected TaggedResourceLimitAndCountResponse getTaggedResourceLimitAndCountResponse(Account account,
|
protected TaggedResourceLimitAndCountResponse getTaggedResourceLimitAndCountResponse(Account account,
|
||||||
Domain domain, ResourceOwnerType ownerType, ResourceType type, String tag) {
|
Domain domain, ResourceOwnerType ownerType, ResourceType type, String tag) {
|
||||||
Long limit = ResourceOwnerType.Account.equals(ownerType) ?
|
long limit = ResourceOwnerType.Account.equals(ownerType) ?
|
||||||
findCorrectResourceLimitForAccount(account, type, tag) :
|
findCorrectResourceLimitForAccount(account, type, tag) :
|
||||||
findCorrectResourceLimitForDomain(domain, type, tag);
|
findCorrectResourceLimitForDomain(domain, type, tag);
|
||||||
Long count = 0L;
|
long count = 0L;
|
||||||
ResourceCountVO countVO = _resourceCountDao.findByOwnerAndTypeAndTag(
|
ResourceCountVO countVO = _resourceCountDao.findByOwnerAndTypeAndTag(
|
||||||
ResourceOwnerType.Account.equals(ownerType) ? account.getId() : domain.getId(), ownerType, type, tag);
|
ResourceOwnerType.Account.equals(ownerType) ? account.getId() : domain.getId(), ownerType, type, tag);
|
||||||
if (countVO != null) {
|
if (countVO != null) {
|
||||||
@ -1778,7 +1772,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
if (currentOfferingTags.isEmpty() && newOfferingTags.isEmpty()) {
|
if (currentOfferingTags.isEmpty() && newOfferingTags.isEmpty()) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
Set<String> sameTags = currentOfferingTags.stream().filter(newOfferingTags::contains).collect(Collectors.toSet());;
|
Set<String> sameTags = currentOfferingTags.stream().filter(newOfferingTags::contains).collect(Collectors.toSet());
|
||||||
Set<String> newTags = newOfferingTags.stream().filter(tag -> !currentOfferingTags.contains(tag)).collect(Collectors.toSet());
|
Set<String> newTags = newOfferingTags.stream().filter(tag -> !currentOfferingTags.contains(tag)).collect(Collectors.toSet());
|
||||||
Set<String> removedTags = currentOfferingTags.stream().filter(tag -> !newOfferingTags.contains(tag)).collect(Collectors.toSet());
|
Set<String> removedTags = currentOfferingTags.stream().filter(tag -> !newOfferingTags.contains(tag)).collect(Collectors.toSet());
|
||||||
return new Ternary<>(sameTags, newTags, removedTags);
|
return new Ternary<>(sameTags, newTags, removedTags);
|
||||||
@ -1849,7 +1843,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim
|
|||||||
if (currentOfferingTags.isEmpty() && newOfferingTags.isEmpty()) {
|
if (currentOfferingTags.isEmpty() && newOfferingTags.isEmpty()) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
Set<String> sameTags = currentOfferingTags.stream().filter(newOfferingTags::contains).collect(Collectors.toSet());;
|
Set<String> sameTags = currentOfferingTags.stream().filter(newOfferingTags::contains).collect(Collectors.toSet());
|
||||||
Set<String> newTags = newOfferingTags.stream().filter(tag -> !currentOfferingTags.contains(tag)).collect(Collectors.toSet());
|
Set<String> newTags = newOfferingTags.stream().filter(tag -> !currentOfferingTags.contains(tag)).collect(Collectors.toSet());
|
||||||
Set<String> removedTags = currentOfferingTags.stream().filter(tag -> !newOfferingTags.contains(tag)).collect(Collectors.toSet());
|
Set<String> removedTags = currentOfferingTags.stream().filter(tag -> !newOfferingTags.contains(tag)).collect(Collectors.toSet());
|
||||||
return new Ternary<>(sameTags, newTags, removedTags);
|
return new Ternary<>(sameTags, newTags, removedTags);
|
||||||
|
|||||||
@ -257,6 +257,7 @@ import com.cloud.utils.db.SearchCriteria;
|
|||||||
import com.cloud.utils.db.SearchCriteria.Op;
|
import com.cloud.utils.db.SearchCriteria.Op;
|
||||||
import com.cloud.utils.db.Transaction;
|
import com.cloud.utils.db.Transaction;
|
||||||
import com.cloud.utils.db.TransactionCallbackNoReturn;
|
import com.cloud.utils.db.TransactionCallbackNoReturn;
|
||||||
|
import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn;
|
||||||
import com.cloud.utils.db.TransactionLegacy;
|
import com.cloud.utils.db.TransactionLegacy;
|
||||||
import com.cloud.utils.db.TransactionStatus;
|
import com.cloud.utils.db.TransactionStatus;
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
@ -1591,22 +1592,27 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||||||
if (exceptionOccurred.get()) {
|
if (exceptionOccurred.get()) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
HostVO host = _hostDao.findById(hostId);
|
Transaction.execute(new TransactionCallbackWithExceptionNoReturn<Exception>() {
|
||||||
try {
|
@Override
|
||||||
connectHostToSharedPool(host, primaryStore.getId());
|
public void doInTransactionWithoutResult(TransactionStatus status) throws Exception {
|
||||||
poolHostIds.add(hostId);
|
HostVO host = _hostDao.findById(hostId);
|
||||||
} catch (Exception e) {
|
try {
|
||||||
if (handleExceptionsPartially && e.getCause() instanceof StorageConflictException) {
|
connectHostToSharedPool(host, primaryStore.getId());
|
||||||
exceptionOccurred.set(true);
|
poolHostIds.add(hostId);
|
||||||
throw e;
|
} catch (Exception e) {
|
||||||
|
if (handleExceptionsPartially && e.getCause() instanceof StorageConflictException) {
|
||||||
|
exceptionOccurred.set(true);
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
logger.warn("Unable to establish a connection between {} and {}", host, primaryStore, e);
|
||||||
|
String reason = getStoragePoolMountFailureReason(e.getMessage());
|
||||||
|
if (handleExceptionsPartially && reason != null) {
|
||||||
|
exceptionOccurred.set(true);
|
||||||
|
throw new CloudRuntimeException(reason);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
logger.warn("Unable to establish a connection between {} and {}", host, primaryStore, e);
|
});
|
||||||
String reason = getStoragePoolMountFailureReason(e.getMessage());
|
|
||||||
if (handleExceptionsPartially && reason != null) {
|
|
||||||
exceptionOccurred.set(true);
|
|
||||||
throw new CloudRuntimeException(reason);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return null;
|
return null;
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
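The hunk above moves the per-host connect logic into a transaction callback, so the bookkeeping done while attaching a host to the shared pool is committed or rolled back as a unit and the loop can stop early once a fatal error is flagged. A minimal, self-contained sketch of that shape is below; `TxWork`, `inTransaction`, and `connectHostToPool` are hypothetical stand-ins for illustration, not CloudStack APIs.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConnectHostsSketch {

    /** Hypothetical stand-in for a transaction wrapper such as Transaction.execute(...). */
    interface TxWork {
        void run() throws Exception;
    }

    static void inTransaction(TxWork work) throws Exception {
        // A real wrapper would begin a DB transaction here ...
        try {
            work.run();
            // ... commit on success ...
        } catch (Exception e) {
            // ... and roll back on failure before rethrowing.
            throw e;
        }
    }

    /** Hypothetical per-host connect step that may fail for a given host. */
    static void connectHostToPool(long hostId, long poolId) throws Exception {
        if (hostId < 0) {
            throw new Exception("storage conflict on host " + hostId);
        }
    }

    public static void main(String[] args) {
        List<Long> hostIds = List.of(1L, 2L, -3L);
        long poolId = 42L;
        AtomicBoolean exceptionOccurred = new AtomicBoolean(false);
        List<Long> connectedHostIds = new ArrayList<>();

        for (long hostId : hostIds) {
            if (exceptionOccurred.get()) {
                break; // mirrors the early-return guard before each host is processed
            }
            try {
                // All pool-host bookkeeping for one host happens inside one transaction.
                inTransaction(() -> {
                    connectHostToPool(hostId, poolId);
                    connectedHostIds.add(hostId);
                });
            } catch (Exception e) {
                // The real code only flags fatal cases; this sketch flags every failure.
                exceptionOccurred.set(true);
                System.err.println("Unable to connect host " + hostId + ": " + e.getMessage());
            }
        }
        System.out.println("Connected hosts: " + connectedHostIds);
    }
}
```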
|
|||||||
@ -2605,7 +2605,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||||||
|
|
||||||
excludeLocalStorageIfNeeded(volumeToAttach);
|
excludeLocalStorageIfNeeded(volumeToAttach);
|
||||||
|
|
||||||
checkForDevicesInCopies(vmId, vm);
|
checkForVMSnapshots(vmId, vm);
|
||||||
|
|
||||||
|
checkForBackups(vm, true);
|
||||||
|
|
||||||
checkRightsToAttach(caller, volumeToAttach, vm);
|
checkRightsToAttach(caller, volumeToAttach, vm);
|
||||||
|
|
||||||
@ -2707,18 +2709,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void checkForDevicesInCopies(Long vmId, UserVmVO vm) {
|
private void checkForVMSnapshots(Long vmId, UserVmVO vm) {
|
||||||
// if target VM has associated VM snapshots
|
// if target VM has associated VM snapshots
|
||||||
List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
|
List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
|
||||||
if (vmSnapshots.size() > 0) {
|
if (vmSnapshots.size() > 0) {
|
||||||
throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have VM snapshots", vm.getName(), vm.getUuid()));
|
throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have VM snapshots", vm.getName(), vm.getUuid()));
|
||||||
}
|
}
|
||||||
|
|
||||||
// if target VM has backups
|
|
||||||
List<Backup> backups = backupDao.listByVmId(vm.getDataCenterId(), vm.getId());
|
|
||||||
if (vm.getBackupOfferingId() != null && !backups.isEmpty()) {
|
|
||||||
throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have any backups", vm.getName(), vm.getUuid()));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -2818,7 +2814,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||||||
return volumeToAttach;
|
return volumeToAttach;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void validateIfVmHasBackups(UserVmVO vm, boolean attach) {
|
protected void checkForBackups(UserVmVO vm, boolean attach) {
|
||||||
if ((vm.getBackupOfferingId() == null || CollectionUtils.isEmpty(vm.getBackupVolumeList())) || BooleanUtils.isTrue(BackupManager.BackupEnableAttachDetachVolumes.value())) {
|
if ((vm.getBackupOfferingId() == null || CollectionUtils.isEmpty(vm.getBackupVolumeList())) || BooleanUtils.isTrue(BackupManager.BackupEnableAttachDetachVolumes.value())) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -3038,7 +3034,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||||||
throw new InvalidParameterValueException("Unable to detach volume, please specify a VM that does not have VM snapshots");
|
throw new InvalidParameterValueException("Unable to detach volume, please specify a VM that does not have VM snapshots");
|
||||||
}
|
}
|
||||||
|
|
||||||
validateIfVmHasBackups(vm, false);
|
checkForBackups(vm, false);
|
||||||
|
|
||||||
AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
|
AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
|
||||||
if (asyncExecutionContext != null) {
|
if (asyncExecutionContext != null) {
|
||||||
|
|||||||
@ -194,7 +194,8 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
|
|||||||
|
|
||||||
if (!isAdmin && zoneIdList == null && !isRegionStore ) {
|
if (!isAdmin && zoneIdList == null && !isRegionStore ) {
|
||||||
// domain admin and user should also be able to register template on a region store
|
// domain admin and user should also be able to register template on a region store
|
||||||
throw new InvalidParameterValueException("Please specify a valid zone Id. Only admins can create templates in all zones.");
|
throw new InvalidParameterValueException("Template registered for 'All zones' can only be owned a Root Admin account. " +
|
||||||
|
"Please select specific zone(s).");
|
||||||
}
|
}
|
||||||
|
|
||||||
// check for the url format only when url is not null. url can be null in case of form based upload
|
// check for the url format only when url is not null. url can be null in case of form based upload
|
||||||
|
|||||||
@ -19,6 +19,7 @@ package org.apache.cloudstack.backup;
|
|||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.Comparator;
|
||||||
import java.util.Date;
|
import java.util.Date;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
@ -277,6 +278,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||||||
|
|
||||||
public static String createVolumeInfoFromVolumes(List<VolumeVO> vmVolumes) {
|
public static String createVolumeInfoFromVolumes(List<VolumeVO> vmVolumes) {
|
||||||
List<Backup.VolumeInfo> list = new ArrayList<>();
|
List<Backup.VolumeInfo> list = new ArrayList<>();
|
||||||
|
vmVolumes.sort(Comparator.comparing(VolumeVO::getDeviceId));
|
||||||
for (VolumeVO vol : vmVolumes) {
|
for (VolumeVO vol : vmVolumes) {
|
||||||
list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize()));
|
list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize()));
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1962,27 +1962,12 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||||||
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM);
|
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM);
|
||||||
List<String> destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap);
|
List<String> destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap);
|
||||||
ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO,
|
ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO,
|
||||||
Hypervisor.HypervisorType.KVM, destinationStoragePools, temporaryConvertLocation, ovfTemplateDirConvertLocation, false, false);
|
Hypervisor.HypervisorType.KVM, temporaryConvertLocation, ovfTemplateDirConvertLocation, false, false);
|
||||||
int timeoutSeconds = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60;
|
int timeoutSeconds = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60;
|
||||||
cmd.setWait(timeoutSeconds);
|
cmd.setWait(timeoutSeconds);
|
||||||
|
|
||||||
Answer convertAnswer;
|
return convertAndImportToKVM(cmd, convertHost, importHost, sourceVM,
|
||||||
try {
|
remoteInstanceTO, destinationStoragePools, temporaryConvertLocation);
|
||||||
convertAnswer = agentManager.send(convertHost.getId(), cmd);
|
|
||||||
} catch (AgentUnavailableException | OperationTimedoutException e) {
|
|
||||||
String err = String.format("Could not send the convert instance command to host %s due to: %s",
|
|
||||||
convertHost, e.getMessage());
|
|
||||||
logger.error(err, e);
|
|
||||||
throw new CloudRuntimeException(err);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!convertAnswer.getResult()) {
|
|
||||||
String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s",
|
|
||||||
sourceVM, convertHost, convertAnswer.getDetails());
|
|
||||||
logger.error(err);
|
|
||||||
throw new CloudRuntimeException(err);
|
|
||||||
}
|
|
||||||
return ((ConvertInstanceAnswer) convertAnswer).getConvertedInstance();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private UnmanagedInstanceTO convertVmwareInstanceToKVMAfterExportingOVFToConvertLocation(
|
private UnmanagedInstanceTO convertVmwareInstanceToKVMAfterExportingOVFToConvertLocation(
|
||||||
@ -1997,7 +1982,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||||||
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), sourceVMwareInstance.getPath(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName);
|
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), sourceVMwareInstance.getPath(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName);
|
||||||
List<String> destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap);
|
List<String> destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap);
|
||||||
ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO,
|
ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO,
|
||||||
Hypervisor.HypervisorType.KVM, destinationStoragePools, temporaryConvertLocation, null, false, true);
|
Hypervisor.HypervisorType.KVM, temporaryConvertLocation, null, false, true);
|
||||||
int timeoutSeconds = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60;
|
int timeoutSeconds = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60;
|
||||||
cmd.setWait(timeoutSeconds);
|
cmd.setWait(timeoutSeconds);
|
||||||
int noOfThreads = UnmanagedVMsManager.ThreadsOnKVMHostToImportVMwareVMFiles.value();
|
int noOfThreads = UnmanagedVMsManager.ThreadsOnKVMHostToImportVMwareVMFiles.value();
|
||||||
@ -2065,7 +2050,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||||||
List<StoragePoolVO> pools = new ArrayList<>();
|
List<StoragePoolVO> pools = new ArrayList<>();
|
||||||
pools.addAll(primaryDataStoreDao.findClusterWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem));
|
pools.addAll(primaryDataStoreDao.findClusterWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem));
|
||||||
pools.addAll(primaryDataStoreDao.findZoneWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getDataCenterId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem));
|
pools.addAll(primaryDataStoreDao.findZoneWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getDataCenterId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem));
|
||||||
List<String> diskOfferingTags = new ArrayList<>();
|
|
||||||
if (pools.isEmpty()) {
|
if (pools.isEmpty()) {
|
||||||
String msg = String.format("Cannot find suitable storage pools in the cluster %s for the conversion", destinationCluster.getName());
|
String msg = String.format("Cannot find suitable storage pools in the cluster %s for the conversion", destinationCluster.getName());
|
||||||
logger.error(msg);
|
logger.error(msg);
|
||||||
@ -2092,39 +2076,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||||||
logger.error(msg);
|
logger.error(msg);
|
||||||
throw new CloudRuntimeException(msg);
|
throw new CloudRuntimeException(msg);
|
||||||
}
|
}
|
||||||
diskOfferingTags.add(diskOffering.getTags());
|
if (getStoragePoolWithTags(pools, diskOffering.getTags()) == null) {
|
||||||
}
|
String msg = String.format("Cannot find suitable storage pool for disk offering %s", diskOffering.getName());
|
||||||
if (serviceOffering.getDiskOfferingId() != null) {
|
|
||||||
DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId());
|
|
||||||
if (diskOffering != null) {
|
|
||||||
diskOfferingTags.add(diskOffering.getTags());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pools = getPoolsWithMatchingTags(pools, diskOfferingTags);
|
|
||||||
if (pools.isEmpty()) {
|
|
||||||
String msg = String.format("Cannot find suitable storage pools in cluster %s for the conversion", destinationCluster);
|
|
||||||
logger.error(msg);
|
|
||||||
throw new CloudRuntimeException(msg);
|
|
||||||
}
|
|
||||||
return pools;
|
|
||||||
}
|
|
||||||
|
|
||||||
private List<StoragePoolVO> getPoolsWithMatchingTags(List<StoragePoolVO> pools, List<String> diskOfferingTags) {
|
|
||||||
if (diskOfferingTags.isEmpty()) {
|
|
||||||
return pools;
|
|
||||||
}
|
|
||||||
List<StoragePoolVO> poolsSupportingTags = new ArrayList<>(pools);
|
|
||||||
for (String tags : diskOfferingTags) {
|
|
||||||
boolean tagsMatched = false;
|
|
||||||
for (StoragePoolVO pool : pools) {
|
|
||||||
if (volumeApiService.doesStoragePoolSupportDiskOfferingTags(pool, tags)) {
|
|
||||||
poolsSupportingTags.add(pool);
|
|
||||||
tagsMatched = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!tagsMatched) {
|
|
||||||
String msg = String.format("Cannot find suitable storage pools for the conversion with disk offering tags %s", tags);
|
|
||||||
logger.error(msg);
|
logger.error(msg);
|
||||||
throw new CloudRuntimeException(msg);
|
throw new CloudRuntimeException(msg);
|
||||||
}
|
}
|
||||||
|
|||||||
@ -666,7 +666,6 @@ public class VolumeApiServiceImplTest {
|
|||||||
when(vm.getState()).thenReturn(State.Running);
|
when(vm.getState()).thenReturn(State.Running);
|
||||||
when(vm.getDataCenterId()).thenReturn(34L);
|
when(vm.getDataCenterId()).thenReturn(34L);
|
||||||
when(vm.getBackupOfferingId()).thenReturn(null);
|
when(vm.getBackupOfferingId()).thenReturn(null);
|
||||||
when(backupDaoMock.listByVmId(anyLong(), anyLong())).thenReturn(Collections.emptyList());
|
|
||||||
when(volumeDaoMock.findByInstanceAndType(anyLong(), any(Volume.Type.class))).thenReturn(new ArrayList<>(10));
|
when(volumeDaoMock.findByInstanceAndType(anyLong(), any(Volume.Type.class))).thenReturn(new ArrayList<>(10));
|
||||||
when(volumeDataFactoryMock.getVolume(9L)).thenReturn(volumeToAttach);
|
when(volumeDataFactoryMock.getVolume(9L)).thenReturn(volumeToAttach);
|
||||||
when(volumeToAttach.getState()).thenReturn(Volume.State.Uploaded);
|
when(volumeToAttach.getState()).thenReturn(Volume.State.Uploaded);
|
||||||
@ -1305,7 +1304,7 @@ public class VolumeApiServiceImplTest {
|
|||||||
try {
|
try {
|
||||||
UserVmVO vm = Mockito.mock(UserVmVO.class);
|
UserVmVO vm = Mockito.mock(UserVmVO.class);
|
||||||
when(vm.getBackupOfferingId()).thenReturn(1l);
|
when(vm.getBackupOfferingId()).thenReturn(1l);
|
||||||
volumeApiServiceImpl.validateIfVmHasBackups(vm, false);
|
volumeApiServiceImpl.checkForBackups(vm, false);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
Assert.assertEquals("Unable to detach volume, cannot detach volume from a VM that has backups. First remove the VM from the backup offering or set the global configuration 'backup.enable.attach.detach.of.volumes' to true.", e.getMessage());
|
Assert.assertEquals("Unable to detach volume, cannot detach volume from a VM that has backups. First remove the VM from the backup offering or set the global configuration 'backup.enable.attach.detach.of.volumes' to true.", e.getMessage());
|
||||||
}
|
}
|
||||||
@ -1316,7 +1315,7 @@ public class VolumeApiServiceImplTest {
|
|||||||
try {
|
try {
|
||||||
UserVmVO vm = Mockito.mock(UserVmVO.class);
|
UserVmVO vm = Mockito.mock(UserVmVO.class);
|
||||||
when(vm.getBackupOfferingId()).thenReturn(1l);
|
when(vm.getBackupOfferingId()).thenReturn(1l);
|
||||||
volumeApiServiceImpl.validateIfVmHasBackups(vm, true);
|
volumeApiServiceImpl.checkForBackups(vm, true);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
Assert.assertEquals("Unable to attach volume, please specify a VM that does not have any backups or set the global configuration 'backup.enable.attach.detach.of.volumes' to true.", e.getMessage());
|
Assert.assertEquals("Unable to attach volume, please specify a VM that does not have any backups or set the global configuration 'backup.enable.attach.detach.of.volumes' to true.", e.getMessage());
|
||||||
}
|
}
|
||||||
@ -1326,7 +1325,7 @@ public class VolumeApiServiceImplTest {
|
|||||||
public void validateIfVmHaveBackupsTestSuccessWhenVMDontHaveBackupOffering() {
|
public void validateIfVmHaveBackupsTestSuccessWhenVMDontHaveBackupOffering() {
|
||||||
UserVmVO vm = Mockito.mock(UserVmVO.class);
|
UserVmVO vm = Mockito.mock(UserVmVO.class);
|
||||||
when(vm.getBackupOfferingId()).thenReturn(null);
|
when(vm.getBackupOfferingId()).thenReturn(null);
|
||||||
volumeApiServiceImpl.validateIfVmHasBackups(vm, true);
|
volumeApiServiceImpl.checkForBackups(vm, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
|||||||
@ -700,11 +700,9 @@ public class UnmanagedVMsManagerImplTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
when(volumeApiService.doesStoragePoolSupportDiskOffering(any(StoragePool.class), any(DiskOffering.class))).thenReturn(true);
|
when(volumeApiService.doesStoragePoolSupportDiskOffering(any(StoragePool.class), any(DiskOffering.class))).thenReturn(true);
|
||||||
when(volumeApiService.doesStoragePoolSupportDiskOfferingTags(any(StoragePool.class), any())).thenReturn(true);
|
|
||||||
|
|
||||||
ConvertInstanceAnswer convertInstanceAnswer = mock(ConvertInstanceAnswer.class);
|
ConvertInstanceAnswer convertInstanceAnswer = mock(ConvertInstanceAnswer.class);
|
||||||
ImportConvertedInstanceAnswer convertImportedInstanceAnswer = mock(ImportConvertedInstanceAnswer.class);
|
ImportConvertedInstanceAnswer convertImportedInstanceAnswer = mock(ImportConvertedInstanceAnswer.class);
|
||||||
when(convertInstanceAnswer.getConvertedInstance()).thenReturn(instance);
|
|
||||||
when(convertInstanceAnswer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE);
|
when(convertInstanceAnswer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE);
|
||||||
Mockito.lenient().when(convertImportedInstanceAnswer.getConvertedInstance()).thenReturn(instance);
|
Mockito.lenient().when(convertImportedInstanceAnswer.getConvertedInstance()).thenReturn(instance);
|
||||||
Mockito.lenient().when(convertImportedInstanceAnswer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE);
|
Mockito.lenient().when(convertImportedInstanceAnswer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE);
|
||||||
|
|||||||
@ -398,7 +398,7 @@ CREATE TABLE `cloud`.`op_lock` (
|
|||||||
`waiters` int NOT NULL DEFAULT 0 COMMENT 'How many times the thread has acquired this lock (reentrant)',
|
`waiters` int NOT NULL DEFAULT 0 COMMENT 'How many times the thread has acquired this lock (reentrant)',
|
||||||
PRIMARY KEY (`key`),
|
PRIMARY KEY (`key`),
|
||||||
INDEX `i_op_lock__mac_ip_thread`(`mac`, `ip`, `thread`)
|
INDEX `i_op_lock__mac_ip_thread`(`mac`, `ip`, `thread`)
|
||||||
) ENGINE=Memory DEFAULT CHARSET=utf8;
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||||
|
|
||||||
CREATE TABLE `cloud`.`configuration` (
|
CREATE TABLE `cloud`.`configuration` (
|
||||||
`category` varchar(255) NOT NULL DEFAULT 'Advanced',
|
`category` varchar(255) NOT NULL DEFAULT 'Advanced',
|
||||||
@ -1793,7 +1793,7 @@ CREATE TABLE `cloud`.`op_nwgrp_work` (
|
|||||||
INDEX `i_op_nwgrp_work__taken`(`taken`),
|
INDEX `i_op_nwgrp_work__taken`(`taken`),
|
||||||
INDEX `i_op_nwgrp_work__step`(`step`),
|
INDEX `i_op_nwgrp_work__step`(`step`),
|
||||||
INDEX `i_op_nwgrp_work__seq_no`(`seq_no`)
|
INDEX `i_op_nwgrp_work__seq_no`(`seq_no`)
|
||||||
) ENGINE=MEMORY DEFAULT CHARSET=utf8;
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||||
|
|
||||||
CREATE TABLE `cloud`.`op_vm_ruleset_log` (
|
CREATE TABLE `cloud`.`op_vm_ruleset_log` (
|
||||||
`id` bigint unsigned UNIQUE NOT NULL AUTO_INCREMENT COMMENT 'id',
|
`id` bigint unsigned UNIQUE NOT NULL AUTO_INCREMENT COMMENT 'id',
|
||||||
|
|||||||
@ -68,6 +68,7 @@ function zero_disk() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function finalize() {
|
function finalize() {
|
||||||
|
depmod -a
|
||||||
configure_misc
|
configure_misc
|
||||||
configure_rundisk_size
|
configure_rundisk_size
|
||||||
configure_sudoers
|
configure_sudoers
|
||||||
|
|||||||
@ -34,6 +34,9 @@
|
|||||||
<span v-if="(resource.icon && resource.icon.base64image || images.template || images.iso || resourceIcon) && !['router', 'systemvm', 'volume'].includes($route.path.split('/')[1])">
|
<span v-if="(resource.icon && resource.icon.base64image || images.template || images.iso || resourceIcon) && !['router', 'systemvm', 'volume'].includes($route.path.split('/')[1])">
|
||||||
<resource-icon :image="getImage(resource.icon && resource.icon.base64image || images.template || images.iso || resourceIcon)" size="4x" style="margin-right: 5px"/>
|
<resource-icon :image="getImage(resource.icon && resource.icon.base64image || images.template || images.iso || resourceIcon)" size="4x" style="margin-right: 5px"/>
|
||||||
</span>
|
</span>
|
||||||
|
<span v-else-if="resource.vmtype === 'sharedfsvm'">
|
||||||
|
<file-text-outlined style="font-size: 36px;" />
|
||||||
|
</span>
|
||||||
<span v-else>
|
<span v-else>
|
||||||
<os-logo v-if="resource.ostypeid || resource.ostypename || ['guestoscategory'].includes($route.path.split('/')[1])" :osId="resource.ostypeid" :osName="resource.ostypename || resource.name" size="3x" @update-osname="setResourceOsType"/>
|
<os-logo v-if="resource.ostypeid || resource.ostypename || ['guestoscategory'].includes($route.path.split('/')[1])" :osId="resource.ostypeid" :osName="resource.ostypename || resource.name" size="3x" @update-osname="setResourceOsType"/>
|
||||||
<render-icon v-else-if="typeof $route.meta.icon ==='string'" style="font-size: 36px" :icon="$route.meta.icon" />
|
<render-icon v-else-if="typeof $route.meta.icon ==='string'" style="font-size: 36px" :icon="$route.meta.icon" />
|
||||||
@ -876,6 +879,7 @@ import UploadResourceIcon from '@/components/view/UploadResourceIcon'
|
|||||||
import eventBus from '@/config/eventBus'
|
import eventBus from '@/config/eventBus'
|
||||||
import ResourceIcon from '@/components/view/ResourceIcon'
|
import ResourceIcon from '@/components/view/ResourceIcon'
|
||||||
import ResourceLabel from '@/components/widgets/ResourceLabel'
|
import ResourceLabel from '@/components/widgets/ResourceLabel'
|
||||||
|
import { FileTextOutlined } from '@ant-design/icons-vue'
|
||||||
|
|
||||||
export default {
|
export default {
|
||||||
name: 'InfoCard',
|
name: 'InfoCard',
|
||||||
@ -887,7 +891,8 @@ export default {
|
|||||||
TooltipButton,
|
TooltipButton,
|
||||||
UploadResourceIcon,
|
UploadResourceIcon,
|
||||||
ResourceIcon,
|
ResourceIcon,
|
||||||
ResourceLabel
|
ResourceLabel,
|
||||||
|
FileTextOutlined
|
||||||
},
|
},
|
||||||
props: {
|
props: {
|
||||||
resource: {
|
resource: {
|
||||||
|
|||||||
@ -44,6 +44,9 @@
|
|||||||
<span v-if="record.icon && record.icon.base64image">
|
<span v-if="record.icon && record.icon.base64image">
|
||||||
<resource-icon :image="record.icon.base64image" size="2x"/>
|
<resource-icon :image="record.icon.base64image" size="2x"/>
|
||||||
</span>
|
</span>
|
||||||
|
<span v-else-if="record.vmtype === 'sharedfsvm'">
|
||||||
|
<file-text-outlined style="font-size: 18px;" />
|
||||||
|
</span>
|
||||||
<os-logo v-else :osId="record.ostypeid" :osName="record.osdisplayname" size="xl" />
|
<os-logo v-else :osId="record.ostypeid" :osName="record.osdisplayname" size="xl" />
|
||||||
</span>
|
</span>
|
||||||
<span style="min-width: 120px" >
|
<span style="min-width: 120px" >
|
||||||
@ -591,6 +594,7 @@ import { createPathBasedOnVmType } from '@/utils/plugins'
|
|||||||
import { validateLinks } from '@/utils/links'
|
import { validateLinks } from '@/utils/links'
|
||||||
import cronstrue from 'cronstrue/i18n'
|
import cronstrue from 'cronstrue/i18n'
|
||||||
import moment from 'moment-timezone'
|
import moment from 'moment-timezone'
|
||||||
|
import { FileTextOutlined } from '@ant-design/icons-vue'
|
||||||
|
|
||||||
export default {
|
export default {
|
||||||
name: 'ListView',
|
name: 'ListView',
|
||||||
@ -601,7 +605,8 @@ export default {
|
|||||||
CopyLabel,
|
CopyLabel,
|
||||||
TooltipButton,
|
TooltipButton,
|
||||||
ResourceIcon,
|
ResourceIcon,
|
||||||
ResourceLabel
|
ResourceLabel,
|
||||||
|
FileTextOutlined
|
||||||
},
|
},
|
||||||
props: {
|
props: {
|
||||||
columns: {
|
columns: {
|
||||||
|
|||||||
@ -978,7 +978,7 @@ export default {
|
|||||||
},
|
},
|
||||||
fetchVolumes (searchKeyword) {
|
fetchVolumes (searchKeyword) {
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
api('listvolumes', { listAll: true, isencrypted: searchKeyword }).then(json => {
|
api('listVolumes', { listAll: true, isencrypted: searchKeyword }).then(json => {
|
||||||
const volumes = json.listvolumesresponse.volume
|
const volumes = json.listvolumesresponse.volume
|
||||||
resolve({
|
resolve({
|
||||||
type: 'isencrypted',
|
type: 'isencrypted',
|
||||||
|
|||||||
@ -107,7 +107,7 @@ export const QUOTA_TYPES = [
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 29,
|
id: 29,
|
||||||
type: 'VPC'
|
type: 'BUCKET'
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 30,
|
id: 30,
|
||||||
@ -115,7 +115,7 @@ export const QUOTA_TYPES = [
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 31,
|
id: 31,
|
||||||
type: 'BACKUP_OBJECT'
|
type: 'VPC'
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|||||||
@ -845,7 +845,7 @@
|
|||||||
:deployButtonMenuOptions="deployMenuOptions"
|
:deployButtonMenuOptions="deployMenuOptions"
|
||||||
@handle-cancel="() => $router.back()"
|
@handle-cancel="() => $router.back()"
|
||||||
@handle-deploy="handleSubmit"
|
@handle-deploy="handleSubmit"
|
||||||
@handle-deploy-menu="handleSubmitAndStay" />
|
@handle-deploy-menu="(index, e) => handleSubmitAndStay(e)" />
|
||||||
</div>
|
</div>
|
||||||
</a-form>
|
</a-form>
|
||||||
</a-card>
|
</a-card>
|
||||||
@ -860,7 +860,7 @@
|
|||||||
:deployButtonMenuOptions="deployMenuOptions"
|
:deployButtonMenuOptions="deployMenuOptions"
|
||||||
@handle-cancel="() => $router.back()"
|
@handle-cancel="() => $router.back()"
|
||||||
@handle-deploy="handleSubmit"
|
@handle-deploy="handleSubmit"
|
||||||
@handle-deploy-menu="handleSubmitAndStay" />
|
@handle-deploy-menu="(index, e) => handleSubmitAndStay(e)" />
|
||||||
</template>
|
</template>
|
||||||
</info-card>
|
</info-card>
|
||||||
</a-affix>
|
</a-affix>
|
||||||
|
|||||||
@ -825,7 +825,7 @@
|
|||||||
:deployButtonMenuOptions="deployMenuOptions"
|
:deployButtonMenuOptions="deployMenuOptions"
|
||||||
@handle-cancel="() => $router.back()"
|
@handle-cancel="() => $router.back()"
|
||||||
@handle-deploy="handleSubmit"
|
@handle-deploy="handleSubmit"
|
||||||
@handle-deploy-menu="handleSubmitAndStay" />
|
@handle-deploy-menu="(index, e) => handleSubmitAndStay(e)" />
|
||||||
</div>
|
</div>
|
||||||
</a-form>
|
</a-form>
|
||||||
</a-card>
|
</a-card>
|
||||||
@ -840,7 +840,7 @@
|
|||||||
:deployButtonMenuOptions="deployMenuOptions"
|
:deployButtonMenuOptions="deployMenuOptions"
|
||||||
@handle-cancel="() => $router.back()"
|
@handle-cancel="() => $router.back()"
|
||||||
@handle-deploy="handleSubmit"
|
@handle-deploy="handleSubmit"
|
||||||
@handle-deploy-menu="handleSubmitAndStay" />
|
@handle-deploy-menu="(index, e) => handleSubmitAndStay(e)" />
|
||||||
</template>
|
</template>
|
||||||
</info-card>
|
</info-card>
|
||||||
</a-affix>
|
</a-affix>
|
||||||
|
|||||||
@ -86,7 +86,7 @@ export default {
|
|||||||
this.$emit('handle-deploy', e)
|
this.$emit('handle-deploy', e)
|
||||||
},
|
},
|
||||||
handleMenu (e) {
|
handleMenu (e) {
|
||||||
this.$emit('handle-deploy-menu', e.key - 1)
|
this.$emit('handle-deploy-menu', e.key - 1, e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -94,37 +94,36 @@ export default {
|
|||||||
|
|
||||||
<style lang="less" scoped>
|
<style lang="less" scoped>
|
||||||
|
|
||||||
.button-container {
|
|
||||||
display: flex;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
gap: 10px;
|
|
||||||
justify-content: flex-start;
|
|
||||||
}
|
|
||||||
|
|
||||||
.equal-size-button {
|
|
||||||
flex-grow: 1; /* Make each button grow equally */
|
|
||||||
min-width: 120px; /* Set a minimum width so that the buttons don't shrink too much */
|
|
||||||
}
|
|
||||||
|
|
||||||
@media (max-width: 600px) {
|
|
||||||
.button-container {
|
.button-container {
|
||||||
flex-direction: column;
|
display: flex;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
gap: 10px;
|
||||||
|
justify-content: flex-start;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
.equal-size-button {
|
||||||
|
flex-grow: 1; /* Make each button grow equally */
|
||||||
|
min-width: 120px; /* Set a minimum width so that the buttons don't shrink too much */
|
||||||
|
}
|
||||||
|
|
||||||
.btn-stay-on-page {
|
@media (max-width: 600px) {
|
||||||
&.ant-dropdown-menu-dark {
|
.button-container {
|
||||||
.ant-dropdown-menu-item:hover {
|
flex-direction: column;
|
||||||
background: transparent !important;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
</style>
|
</style>
|
||||||
|
|
||||||
<style lang="less">
|
<style lang="less">
|
||||||
|
|
||||||
.ant-btn-group > .ant-btn:first-child:not(:last-child) {
|
.btn-stay-on-page {
|
||||||
flex-grow: 1; /* Make each button grow equally */
|
&.ant-dropdown-menu-dark {
|
||||||
}
|
.ant-dropdown-menu-item:hover {
|
||||||
|
background: transparent !important;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
.ant-btn-group > .ant-btn:first-child:not(:last-child) {
|
||||||
|
flex-grow: 1; /* Make each button grow equally */
|
||||||
|
}
|
||||||
</style>
|
</style>
|
||||||
|
|||||||
@ -324,6 +324,9 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
|
|||||||
logger.info("Starting Usage Manager");
|
logger.info("Starting Usage Manager");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_usageJobDao.removeLastOpenJobsOwned(_hostname, 0);
|
||||||
|
Runtime.getRuntime().addShutdownHook(new AbandonJob());
|
||||||
|
|
||||||
// use the configured exec time and aggregation duration for scheduling the job
|
// use the configured exec time and aggregation duration for scheduling the job
|
||||||
_scheduledFuture =
|
_scheduledFuture =
|
||||||
_executor.scheduleAtFixedRate(this, _jobExecTime.getTimeInMillis() - System.currentTimeMillis(), _aggregationDuration * 60 * 1000, TimeUnit.MILLISECONDS);
|
_executor.scheduleAtFixedRate(this, _jobExecTime.getTimeInMillis() - System.currentTimeMillis(), _aggregationDuration * 60 * 1000, TimeUnit.MILLISECONDS);
|
||||||
@ -336,7 +339,6 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
|
|||||||
_sanity = _sanityExecutor.scheduleAtFixedRate(new SanityCheck(), 1, _sanityCheckInterval, TimeUnit.DAYS);
|
_sanity = _sanityExecutor.scheduleAtFixedRate(new SanityCheck(), 1, _sanityCheckInterval, TimeUnit.DAYS);
|
||||||
}
|
}
|
||||||
|
|
||||||
Runtime.getRuntime().addShutdownHook(new AbandonJob());
|
|
||||||
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
|
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
|
||||||
try {
|
try {
|
||||||
if (_heartbeatLock.lock(3)) { // 3 second timeout
|
if (_heartbeatLock.lock(3)) { // 3 second timeout
|
||||||
@@ -2262,19 +2264,17 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
             // the aggregation range away from executing the next job
             long now = System.currentTimeMillis();
             long timeToJob = _jobExecTime.getTimeInMillis() - now;
-            long timeSinceJob = 0;
+            long timeSinceLastSuccessJob = 0;
             long aggregationDurationMillis = _aggregationDuration * 60L * 1000L;
             long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
             if (lastSuccess > 0) {
-                timeSinceJob = now - lastSuccess;
+                timeSinceLastSuccessJob = now - lastSuccess;
             }
 
-            if ((timeSinceJob > 0) && (timeSinceJob > (aggregationDurationMillis - 100))) {
+            if ((timeSinceLastSuccessJob > 0) && (timeSinceLastSuccessJob > (aggregationDurationMillis - 100))) {
                 if (timeToJob > (aggregationDurationMillis / 2)) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("it's been " + timeSinceJob + " ms since last usage job and " + timeToJob +
-                            " ms until next job, scheduling an immediate job to catch up (aggregation duration is " + _aggregationDuration + " minutes)");
-                    }
+                    logger.debug("it's been {} ms since last usage job and {} ms until next job, scheduling an immediate job to catch up (aggregation duration is {} minutes)"
+                            , timeSinceLastSuccessJob, timeToJob, _aggregationDuration);
                     scheduleParse();
                 }
             }
@@ -2359,17 +2359,12 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
                 }
             }
         }
 
     private class AbandonJob extends Thread {
         @Override
         public void run() {
-            logger.info("exitting Usage Manager");
-            deleteOpenjob();
-        }
-        private void deleteOpenjob() {
-            UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
-            if (job != null) {
-                _usageJobDao.remove(job.getId());
-            }
+            logger.info("exiting Usage Manager");
+            _usageJobDao.removeLastOpenJobsOwned(_hostname, _pid);
         }
     }
 }
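The rename above from timeSinceJob to timeSinceLastSuccessJob makes the catch-up rule easier to read: an immediate usage job is scheduled only when more than one aggregation period has passed since the last successful job and the next regular run is still more than half a period away. Below is a standalone sketch of that decision, with plain parameters standing in for the DAO and scheduler state; the class and parameter names are illustrative only.

// Sketch only: mirrors the catch-up check above; not CloudStack code.
public final class CatchUpCheck {

    /**
     * @param nowMillis          current time in milliseconds
     * @param lastSuccessMillis  completion time of the last successful usage job, or 0 if none
     * @param nextRunMillis      time the next regular job is scheduled to start
     * @param aggregationMinutes configured aggregation interval in minutes
     * @return true if an immediate catch-up job should be scheduled
     */
    public static boolean shouldScheduleImmediateJob(long nowMillis, long lastSuccessMillis,
            long nextRunMillis, int aggregationMinutes) {
        long aggregationMillis = aggregationMinutes * 60L * 1000L;
        long timeSinceLastSuccessJob = lastSuccessMillis > 0 ? nowMillis - lastSuccessMillis : 0;
        long timeToJob = nextRunMillis - nowMillis;

        // Catch up only when we are already (almost) a full aggregation period behind
        // and the next regular run is still more than half a period away.
        return timeSinceLastSuccessJob > (aggregationMillis - 100)
                && timeToJob > (aggregationMillis / 2);
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Last success 90 minutes ago, next run in 45 minutes, 60-minute aggregation: catch up.
        System.out.println(shouldScheduleImmediateJob(now, now - 90 * 60_000L, now + 45 * 60_000L, 60));
    }
}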
@@ -1058,13 +1058,23 @@ public class NetUtils {
         return Integer.toString(portRange[0]) + ":" + Integer.toString(portRange[1]);
     }
 
+    /**
+     * Validates a domain name.
+     *
+     * <p>Domain names must satisfy the following constraints:
+     * <ul>
+     * <li>Length between 1 and 63 characters</li>
+     * <li>Contain only ASCII letters 'a' through 'z' (case-insensitive)</li>
+     * <li>Can include digits '0' through '9' and hyphens (-)</li>
+     * <li>Must not start or end with a hyphen</li>
+     * <li>If used as hostname, must not start with a digit</li>
+     * </ul>
+     *
+     * @param hostName The domain name to validate
+     * @param isHostName If true, verifies whether the domain name starts with a digit
+     * @return true if the domain name is valid, false otherwise
+     */
     public static boolean verifyDomainNameLabel(final String hostName, final boolean isHostName) {
-        // must be between 1 and 63 characters long and may contain only the ASCII letters 'a' through 'z' (in a
-        // case-insensitive manner),
-        // the digits '0' through '9', and the hyphen ('-').
-        // Can not start with a hyphen and digit, and must not end with a hyphen
-        // If it's a host name, don't allow to start with digit
-
         if (hostName.length() > 63 || hostName.length() < 1) {
             LOGGER.warn("Domain name label must be between 1 and 63 characters long");
             return false;
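The new Javadoc above states the label rules that verifyDomainNameLabel enforces; the visible hunk only shows the length check, the remaining checks live in the unchanged part of the method. As a rough illustration (not the NetUtils implementation itself), the same constraints can be condensed into one regular expression; the class name and regex below are assumptions made for the example.

import java.util.regex.Pattern;

// Illustrative sketch of the documented label rules; not NetUtils.verifyDomainNameLabel itself.
public final class DomainLabelCheck {

    // 1-63 characters, ASCII letters, digits and hyphens, no leading or trailing hyphen.
    private static final Pattern LABEL = Pattern.compile("^(?!-)[A-Za-z0-9-]{1,63}(?<!-)$");

    public static boolean isValidLabel(String label, boolean isHostName) {
        if (label == null || !LABEL.matcher(label).matches()) {
            return false;
        }
        // Host names additionally must not start with a digit.
        return !(isHostName && Character.isDigit(label.charAt(0)));
    }

    public static void main(String[] args) {
        System.out.println(isValidLabel("web-01", true));  // true
        System.out.println(isValidLabel("-bad", false));   // false: leading hyphen
        System.out.println(isValidLabel("1host", true));   // false as a host name: starts with a digit
    }
}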
@@ -253,12 +253,29 @@ public class BaseMO {
                 hostClusterPair = hostClusterNamesMap.get(hostMorValue);
             } else {
                 HostMO hostMO = new HostMO(_context, hostMor);
-                ClusterMO clusterMO = new ClusterMO(_context, hostMO.getHyperHostCluster());
-                hostClusterPair = new Pair<>(hostMO.getHostName(), clusterMO.getName());
+                String hostName = hostMO.getHostName();
+                String clusterName = getClusterNameFromHostIncludingStandaloneHosts(hostMO, hostName);
+                hostClusterPair = new Pair<>(hostName, clusterName);
                 hostClusterNamesMap.put(hostMorValue, hostClusterPair);
             }
             vm.setHostName(hostClusterPair.first());
             vm.setClusterName(hostClusterPair.second());
         }
     }
+
+    /**
+     * Return the cluster name of the host on the vCenter
+     * @return null in case the host is standalone (doesn't belong to a cluster), cluster name otherwise
+     */
+    private String getClusterNameFromHostIncludingStandaloneHosts(HostMO hostMO, String hostName) {
+        try {
+            ClusterMO clusterMO = new ClusterMO(_context, hostMO.getHyperHostCluster());
+            return clusterMO.getName();
+        } catch (Exception e) {
+            String msg = String.format("Cannot find a cluster for host %s, assuming standalone host, " +
+                    "setting its cluster name as empty", hostName);
+            logger.info(msg);
+            return null;
+        }
+    }
 }
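getHyperHostCluster() evidently fails for hosts that do not belong to any cluster, so the helper above treats that failure as "standalone host" and returns null, which then flows into the cached Pair and into vm.setClusterName(). Below is a small sketch of the same fallback-and-cache pattern in isolation, with a hypothetical ClusterResolver standing in for the HostMO/ClusterMO objects.

import java.util.AbstractMap.SimpleEntry;
import java.util.HashMap;
import java.util.Map;

// Sketch only: the resolver interface and class are hypothetical; the vSphere
// objects used in BaseMO are replaced so the example can run on its own.
public final class HostClusterLookup {

    /** Resolves the cluster of a host; throws if the host is standalone. */
    interface ClusterResolver {
        String clusterOf(String hostName) throws Exception;
    }

    private final Map<String, SimpleEntry<String, String>> cache = new HashMap<>();
    private final ClusterResolver resolver;

    HostClusterLookup(ClusterResolver resolver) {
        this.resolver = resolver;
    }

    /** Returns (hostName, clusterName); clusterName is null for standalone hosts. */
    SimpleEntry<String, String> hostAndCluster(String hostName) {
        return cache.computeIfAbsent(hostName, h -> {
            try {
                return new SimpleEntry<>(h, resolver.clusterOf(h));
            } catch (Exception e) {
                // Standalone host: cache it with a null cluster instead of failing the listing.
                return new SimpleEntry<>(h, null);
            }
        });
    }

    public static void main(String[] args) {
        HostClusterLookup lookup = new HostClusterLookup(host ->
                host.startsWith("standalone") ? failStandalone() : "Cluster-A");
        System.out.println(lookup.hostAndCluster("esx-01"));          // esx-01=Cluster-A
        System.out.println(lookup.hostAndCluster("standalone-esx"));  // standalone-esx=null
    }

    private static String failStandalone() {
        throw new IllegalStateException("host is not part of a cluster");
    }
}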