Primera pure patches & various small fixes (#10132)

Co-authored-by: GLOVER RENE <rg9975@cs419-mgmtserver.rg9975nprd.app.ecp.att.com>
Co-authored-by: Suresh Kumar Anaparti <sureshkumar.anaparti@gmail.com>
Rene Glover 2025-02-07 06:19:34 -06:00 committed by GitHub
parent c09720a19a
commit 3337f425ff
18 changed files with 163 additions and 40 deletions


@ -26,7 +26,7 @@ import com.cloud.utils.SerialVersionUID;
public class StorageAccessException extends RuntimeException {
private static final long serialVersionUID = SerialVersionUID.StorageAccessException;
public StorageAccessException(String message) {
super(message);
public StorageAccessException(String message, Exception causer) {
super(message, causer);
}
}

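Illustration (not part of the commit): the point of the new constructor is to keep the root cause attached when StorageAccessException wraps another failure. A minimal standalone sketch with a made-up root cause:

    // Minimal sketch: chaining the cause preserves the original stack trace in logs.
    public class CauseChainingDemo {
        static class StorageAccessException extends RuntimeException {
            StorageAccessException(String message) { super(message); }                         // old form: root cause is lost
            StorageAccessException(String message, Exception cause) { super(message, cause); } // new form
        }

        public static void main(String[] args) {
            try {
                throw new IllegalStateException("iSCSI login failed");                         // hypothetical root cause
            } catch (Exception e) {
                new StorageAccessException("Unable to grant access to the volume").printStackTrace();
                new StorageAccessException("Unable to grant access to the volume", e).printStackTrace();
                // only the second trace ends with "Caused by: java.lang.IllegalStateException: iSCSI login failed"
            }
        }
    }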

@ -1827,7 +1827,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host));
throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host), e);
}
}
@ -1867,7 +1867,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
volService.grantAccess(volFactory.getVolume(volumeId), host, volumeStore);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e);
}
}
@ -1915,7 +1915,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
volService.grantAccess(volFactory.getVolume(vol.getId()), host, store);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e);
}
} else {
grantVolumeAccessToHostIfNeeded(store, vol.getId(), host, volToString);


@ -40,6 +40,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
@ -1533,6 +1534,16 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
verifyFormat(templateInfo.getFormat());
}
// this blurb handles the case where the storage system can clone a volume from a template
String canCloneVolumeFromTemplate = templateInfo.getDataStore().getDriver().getCapabilities().get("CAN_CLONE_VOLUME_FROM_TEMPLATE");
if (canCloneVolumeFromTemplate != null && canCloneVolumeFromTemplate.toLowerCase().equals("true")) {
DataStoreDriver driver = templateInfo.getDataStore().getDriver();
driver.createAsync(volumeInfo.getDataStore(), volumeInfo, null);
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
driver.copyAsync(templateInfo, volumeInfo, null);
return;
}
HostVO hostVO = null;
final boolean computeClusterSupportsVolumeClone;
@ -1640,7 +1651,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
errMsg = "Create volume from template failed: " + ex.getMessage();
}
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
if (copyCmdAnswer == null) {
@ -2633,7 +2644,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
catch (Exception ex) {
errMsg = ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
if (copyCmdAnswer == null) {

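Illustration (not part of the commit): the new branch above is gated purely on a driver capability string. A standalone sketch of that check, with the CloudStack capabilities map replaced by a plain Map so it runs on its own:

    import java.util.HashMap;
    import java.util.Map;

    // A driver that can clone on the array would expose this flag via its getCapabilities() map.
    public class CapabilityGateDemo {
        public static void main(String[] args) {
            Map<String, String> driverCapabilities = new HashMap<>();
            driverCapabilities.put("CAN_CLONE_VOLUME_FROM_TEMPLATE", "true"); // advertised by the driver

            String canClone = driverCapabilities.get("CAN_CLONE_VOLUME_FROM_TEMPLATE");
            // equalsIgnoreCase("true") is equivalent to the toLowerCase().equals("true") used above
            if (canClone != null && canClone.equalsIgnoreCase("true")) {
                System.out.println("array-side path: driver.createAsync() then driver.copyAsync()");
            } else {
                System.out.println("fallback path: stage the copy through a host");
            }
        }
    }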

@ -1035,7 +1035,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId());
throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId(), e);
}
templateOnPrimary.processEvent(Event.CopyingRequested);
@ -1161,7 +1161,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId());
throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId(), e);
}
_volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false);
@ -1406,7 +1406,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId());
throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId(), e);
}
templateOnPrimary.processEvent(Event.CopyingRequested);


@ -71,7 +71,11 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
with(new WithComponentLifeCycle() {
@Override
public void with(ComponentLifecycle lifecycle) {
lifecycle.start();
try {
lifecycle.start();
} catch (Throwable e) {
log.warn("Unable to start component: " + lifecycle.getName(), e);
}
if (lifecycle instanceof ManagementBean) {
ManagementBean mbean = (ManagementBean)lifecycle;
@ -115,6 +119,9 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
} catch (ConfigurationException e) {
log.error("Failed to configure " + lifecycle.getName(), e);
throw new CloudRuntimeException(e);
} catch (Throwable e) {
log.error("Failed to configure " + lifecycle.getName(), e);
throw new CloudRuntimeException(e);
}
}
});


@ -108,10 +108,15 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App
while (iter.hasNext()) {
Object next = iter.next();
if (registry.register(next)) {
log.debug("Registered " + next);
} else {
iter.remove();
try {
if (registry.register(next)) {
log.debug("Registered " + next);
} else {
log.warn("Bean registration failed for " + next.toString());
iter.remove();
}
} catch (Throwable e) {
log.warn("Bean registration attempt resulted in an exception for " + next.toString(), e);
}
}
}

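Illustration (not part of the commit): with the try/catch blocks added above, one misbehaving component no longer aborts startup or bean registration, and a bean whose registration throws is left in the collection rather than removed. A standalone sketch with stand-ins for the Registry and the logger:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public class TolerantRegistrationDemo {
        interface Registry { boolean register(Object o); }

        public static void main(String[] args) {
            Registry registry = o -> {
                if ("bad".equals(o)) {
                    throw new IllegalStateException("driver failed to initialise"); // hypothetical failure
                }
                return !"unwanted".equals(o);
            };

            List<Object> beans = new ArrayList<>(List.of("good", "bad", "unwanted"));
            Iterator<Object> iter = beans.iterator();
            while (iter.hasNext()) {
                Object next = iter.next();
                try {
                    if (registry.register(next)) {
                        System.out.println("Registered " + next);
                    } else {
                        System.out.println("Bean registration failed for " + next);
                        iter.remove();                                              // only removed on a clean 'false'
                    }
                } catch (Throwable e) {
                    System.out.println("Bean registration attempt resulted in an exception for " + next + ": " + e);
                }
            }
            System.out.println("Remaining beans: " + beans);                        // [good, bad]
        }
    }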

@ -184,6 +184,7 @@ public class LibvirtVMDef {
guestDef.append("<entry name='manufacturer'>Apache Software Foundation</entry>\n");
guestDef.append("<entry name='product'>CloudStack " + _type.toString() + " Hypervisor</entry>\n");
guestDef.append("<entry name='uuid'>" + _uuid + "</entry>\n");
guestDef.append("<entry name='serial'>" + _uuid + "</entry>\n");
guestDef.append("</system>\n");
guestDef.append("</sysinfo>\n");
@ -222,7 +223,9 @@ public class LibvirtVMDef {
guestDef.append("<boot dev='" + bo + "'/>\n");
}
}
guestDef.append("<smbios mode='sysinfo'/>\n");
if (_arch == null || !_arch.equals("aarch64")) {
guestDef.append("<smbios mode='sysinfo'/>\n");
}
guestDef.append("</os>\n");
if (iothreads) {
guestDef.append(String.format("<iothreads>%s</iothreads>", NUMBER_OF_IOTHREADS));

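Illustration (not part of the commit): roughly the fragment the two LibvirtVMDef changes contribute to the generated guest XML, with a made-up UUID and the surrounding entries abbreviated:

    // Standalone sketch; the real strings are built inside LibvirtVMDef's guest definition.
    public class GuestDefSketch {
        public static void main(String[] args) {
            String uuid = "11111111-2222-3333-4444-555555555555"; // hypothetical
            String arch = "x86_64";
            StringBuilder guestDef = new StringBuilder();
            guestDef.append("<sysinfo type='smbios'>\n<system>\n");
            guestDef.append("<entry name='uuid'>" + uuid + "</entry>\n");
            guestDef.append("<entry name='serial'>" + uuid + "</entry>\n"); // new: SMBIOS serial now mirrors the UUID
            guestDef.append("</system>\n</sysinfo>\n");
            guestDef.append("<os>\n");
            if (arch == null || !arch.equals("aarch64")) {                  // new: skip <smbios> for aarch64 guests
                guestDef.append("<smbios mode='sysinfo'/>\n");
            }
            guestDef.append("</os>\n");
            System.out.print(guestDef);
        }
    }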

@ -124,7 +124,10 @@ public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWra
instance.setName(domain.getName());
instance.setCpuCores((int) LibvirtComputingResource.countDomainRunningVcpus(domain));
instance.setCpuSpeed(parser.getCpuTuneDef().getShares()/instance.getCpuCores());
if (parser.getCpuTuneDef() != null && instance.getCpuCores() != null) {
instance.setCpuSpeed(parser.getCpuTuneDef().getShares()/instance.getCpuCores());
}
if (parser.getCpuModeDef() != null) {
instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket());


@ -273,8 +273,13 @@ public class KVMStorageProcessor implements StorageProcessor {
String path = derivePath(primaryStore, destData, details);
if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
if (path == null) {
path = destTempl.getUuid();
}
if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
}
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
@ -338,6 +343,7 @@ public class KVMStorageProcessor implements StorageProcessor {
} else {
path = details != null ? details.get("managedStoreTarget") : null;
}
return path;
}
@ -418,7 +424,7 @@ public class KVMStorageProcessor implements StorageProcessor {
if (primaryPool.getType() == StoragePoolType.CLVM) {
templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath;
vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds());
} if (primaryPool.getType() == StoragePoolType.PowerFlex) {
} if (primaryPool.getType() == StoragePoolType.PowerFlex || primaryPool.getType() == StoragePoolType.FiberChannel) {
Map<String, String> details = primaryStore.getDetails();
String path = derivePath(primaryStore, destData, details);
@ -772,15 +778,19 @@ public class KVMStorageProcessor implements StorageProcessor {
KVMStoragePool secondaryStorage = null;
String path = null;
try {
// look for options indicating an overridden path or IQN. Used when snapshots have to be
// temporarily copied on the managed storage device before the actual copy to the target object
Map<String, String> details = cmd.getOptions();
String path = details != null ? details.get(DiskTO.PATH) : null;
path = details != null ? details.get(DiskTO.PATH) : null;
if (path == null) {
path = details != null ? details.get(DiskTO.IQN) : null;
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
path = srcData.getPath();
if (path == null) {
new CloudRuntimeException("The 'path' or 'iqn' field must be specified.");
}
}
}
@ -843,8 +853,6 @@ public class KVMStorageProcessor implements StorageProcessor {
loc.addFormat(info);
loc.save();
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path);
TemplateObjectTO newTemplate = new TemplateObjectTO();
newTemplate.setPath(templateFolder + File.separator + templateName + ".qcow2");
@ -864,6 +872,10 @@ public class KVMStorageProcessor implements StorageProcessor {
return new CopyCmdAnswer(ex.toString());
} finally {
if (path != null) {
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path);
}
if (secondaryStorage != null) {
secondaryStorage.delete();
}
@ -1039,7 +1051,9 @@ public class KVMStorageProcessor implements StorageProcessor {
command.add(NAME_OPTION, snapshotName);
command.add("-p", snapshotDestPath);
descName = UUID.randomUUID().toString();
if (isCreatedFromVmSnapshot) {
descName = UUID.randomUUID().toString();
}
command.add("-t", descName);
final String result = command.execute();

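Illustration (not part of the commit): two things change in the snapshot-copy hunks above: the working path gains a fallback to the source object's own path, and disconnectPhysicalDisk() moves into the finally block so the device is released even when the copy fails. A standalone sketch of the new lookup order (the string keys stand in for DiskTO.PATH and DiskTO.IQN):

    import java.util.Map;

    public class SnapshotPathLookup {
        // options PATH, then options IQN, then the source object's path (the new fallback)
        static String resolvePath(Map<String, String> options, String srcDataPath) {
            String path = options != null ? options.get("path") : null;
            if (path == null) {
                path = options != null ? options.get("iqn") : null;
                if (path == null) {
                    path = srcDataPath;
                }
            }
            return path;
        }

        public static void main(String[] args) {
            // with no overrides in the command options, the source path wins
            System.out.println(resolvePath(Map.of(), "/dev/mapper/36001405abcdef")); // hypothetical device node
        }
    }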

@ -160,6 +160,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool);
disk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
// validate we have a connection, if not we need to connect first.
if (!isConnected(address.getPath())) {
if (!connectPhysicalDisk(address, pool, null)) {
throw new CloudRuntimeException("Unable to connect to volume " + address.getPath());
}
}
long diskSize = getPhysicalDiskSize(address.getPath());
disk.setSize(diskSize);
disk.setVirtualSize(diskSize);
@ -197,6 +204,10 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
// we expect WWN values in the volumePath so need to convert it to an actual physical path
AddressInfo address = this.parseAndValidatePath(volumePath);
return connectPhysicalDisk(address, pool, details);
}
private boolean connectPhysicalDisk(AddressInfo address, KVMStoragePool pool, Map<String, String> details) {
// validate we have a connection id - we can't proceed without that
if (address.getConnectionId() == null) {
LOGGER.error("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid() + " - connection id is not set in provided path");
@ -508,6 +519,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
return false;
}
boolean isConnected(String path) {
// run a command to test if this is a binary device at this path
Script blockTest = new Script("/bin/test", LOGGER);
blockTest.add("-b", path);
blockTest.execute();
int rc = blockTest.getExitValue();
if (rc == 0) {
return true;
}
return false;
}
long getPhysicalDiskSize(String diskPath) {
if (StringUtils.isEmpty(diskPath)) {
return 0;

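Illustration (not part of the commit): the new isConnected() helper simply asks whether a block special file exists at the path before deciding whether connectPhysicalDisk() must be called first. An equivalent standalone probe (the adapter itself uses CloudStack's Script wrapper rather than ProcessBuilder):

    import java.io.IOException;

    public class BlockDeviceProbe {
        // "/bin/test -b <path>" exits 0 only when <path> is a block device
        static boolean isConnected(String path) throws IOException, InterruptedException {
            Process p = new ProcessBuilder("/bin/test", "-b", path).start();
            return p.waitFor() == 0;
        }

        public static void main(String[] args) throws Exception {
            String path = "/dev/mapper/36001405abcdef"; // hypothetical multipath device node
            System.out.println(path + (isConnected(path) ? " is connected" : " is not connected"));
        }
    }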

@ -362,6 +362,11 @@ public class KubernetesClusterActionWorker {
IpAddress address = ipAddressDao.findByUuid(detailsVO.getValue());
if (address == null || network.getVpcId() != address.getVpcId()) {
LOGGER.warn(String.format("Public IP with ID: %s linked to the Kubernetes cluster: %s is not usable", detailsVO.getValue(), kubernetesCluster.getName()));
if (address == null) {
LOGGER.warn(String.format("Public IP with ID: %s was not found by uuid", detailsVO.getValue()));
} else {
LOGGER.warn(String.format("Public IP with ID: %s was associated with vpc %d instead of %d", detailsVO.getValue(), address.getVpcId().longValue(), network.getVpcId().longValue()));
}
return null;
}
return address;


@ -192,7 +192,7 @@ public class KubernetesClusterUtil {
while (System.currentTimeMillis() < timeoutTime) {
try {
Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, user,
sshKeyFile, null, "sudo cat /etc/kubernetes/admin.conf",
sshKeyFile, null, "sudo cat /etc/kubernetes/user.conf 2>/dev/null || sudo cat /etc/kubernetes/admin.conf",
10000, 10000, 10000);
if (result.first() && StringUtils.isNotEmpty(result.second())) {


@ -145,16 +145,18 @@ public class PrimeraAdapter implements ProviderAdapter {
}
// determine volume type based on offering
// THIN: tpvv=true, reduce=false
// SPARSE: tpvv=true, reduce=true
// THICK: tpvv=false, tpZeroFill=true (not supported)
// tpvv -- thin provisioned virtual volume (no deduplication)
// reduce -- thin provisioned virtual volume (with deduplication and compression, also known as DECO)
// these are the only choices with newer Primera devices
// we will use THIN for the deduplicated/compressed type and SPARSE for thin-only without dedup/compress
// note: DECO/reduce type must be at least 16GB in size
if (diskOffering != null) {
if (diskOffering.getType() == ProvisioningType.THIN) {
request.setTpvv(true);
request.setReduce(false);
} else if (diskOffering.getType() == ProvisioningType.SPARSE) {
request.setTpvv(false);
request.setReduce(true);
} else if (diskOffering.getType() == ProvisioningType.SPARSE) {
request.setTpvv(true);
request.setReduce(false);
} else if (diskOffering.getType() == ProvisioningType.FAT) {
throw new RuntimeException("This storage provider does not support FAT provisioned volumes");
}
@ -165,8 +167,16 @@ public class PrimeraAdapter implements ProviderAdapter {
}
} else {
// default to deduplicated volume
request.setReduce(true);
request.setTpvv(false);
request.setReduce(true);
}
if (request.getReduce() == true) {
// check if sizeMiB is less than 16GB; if so, adjust up to 16GB. The AdaptiveDatastoreDriver will automatically
// update this on the cloudstack side to match
if (request.getSizeMiB() < 16 * 1024) {
request.setSizeMiB(16 * 1024);
}
}
request.setComment(ProviderVolumeNamer.generateObjectComment(context, dataIn));
@ -184,8 +194,11 @@ public class PrimeraAdapter implements ProviderAdapter {
if (host == null) {
throw new RuntimeException("Unable to find host " + hostname + " on storage provider");
}
request.setHostname(host.getName());
// check if we already have a vlun for requested host
Integer vlun = hasVlun(hostname, hostname);
if (vlun == null) {
request.setHostname(host.getName());
request.setVolumeName(dataIn.getExternalName());
request.setAutoLun(true);
// auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4
@ -197,7 +210,13 @@ public class PrimeraAdapter implements ProviderAdapter {
if (toks.length <2) {
throw new RuntimeException("Attach volume failed with invalid location response to vlun add command on storage provider. Provided location: " + location);
}
return toks[1];
try {
vlun = Integer.parseInt(toks[1]);
} catch (NumberFormatException e) {
throw new RuntimeException("VLUN attach request succeeded but the VLUN value is not a valid number: " + toks[1]);
}
}
return vlun.toString();
}
/**
@ -232,6 +251,20 @@ public class PrimeraAdapter implements ProviderAdapter {
}
}
private Integer hasVlun(String externalName, String hostname) {
PrimeraVlunList list = getVluns(externalName);
if (list != null && list.getMembers().size() > 0) {
for (PrimeraVlun vlun: list.getMembers()) {
if (hostname != null) {
if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) {
return vlun.getLun();
}
}
}
}
return null;
}
public void removeVlun(String name, Integer lunid, String hostString) {
// hostString can be a hostname OR "set:<hostsetname>". It is stored this way
// in the appliance and returned as the vlun's name/string.

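Illustration (not part of the commit): the remapped offering handling as the new comments describe it: THIN now selects the deduplicated/compressed (reduce/DECO) type with a 16 GiB floor, SPARSE selects plain thin (tpvv), FAT is rejected, and the default with no offering is also reduce. A standalone sketch; the booleans loosely mirror the fields on the Primera volume-create request:

    public class PrimeraProvisioningSketch {
        enum ProvisioningType { THIN, SPARSE, FAT }

        public static void main(String[] args) {
            ProvisioningType type = ProvisioningType.THIN;
            long sizeMiB = 8 * 1024;     // 8 GiB requested
            boolean tpvv = false;        // thin provisioned, no dedup/compression
            boolean reduce = true;       // DECO: dedup + compression (also the default)

            if (type == ProvisioningType.SPARSE) {
                tpvv = true;
                reduce = false;
            } else if (type == ProvisioningType.FAT) {
                throw new RuntimeException("This storage provider does not support FAT provisioned volumes");
            }
            if (reduce && sizeMiB < 16 * 1024) {
                sizeMiB = 16 * 1024;     // reduce/DECO volumes must be at least 16 GiB
            }
            System.out.printf("tpvv=%b reduce=%b sizeMiB=%d%n", tpvv, reduce, sizeMiB);
        }
    }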

@ -22,10 +22,18 @@
#
#############################################################################################
SCRIPT_NAME=$(basename "$0")
if [[ $(pgrep -f ${SCRIPT_NAME}) != "$$" ]]; then
echo "Another instance of ${SCRIPT_NAME} is already running! Exiting"
exit
fi
cd $(dirname $0)
for WWID in $(multipathd list maps status | awk '{ if ($4 == 0) { print substr($1,2); }}'); do
./removeVolume.sh ${WWID}
./disconnectVolume.sh ${WWID}
done
exit 0


@ -66,6 +66,9 @@ fi
logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices"
# Added to give time for the event to be fired to the server
sleep 10
echo "$(date): ${WWID} removed"
exit 0


@ -694,7 +694,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
for (SecurityChecker checker : _securityCheckers) {
if (checker.checkAccess(caller, entity, accessType, apiName)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName());
User user = CallContext.current().getCallingUser();
String userName = "";
if (user != null)
userName = user.getUsername();
s_logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName() + " on behalf of user " + userName);
}
granted = true;
break;


@ -129,8 +129,8 @@ import org.apache.cloudstack.storage.template.VnfTemplateManager;
import org.apache.cloudstack.userdata.UserDataManager;
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.cloudstack.vm.UnmanagedVMsManager;
import org.apache.cloudstack.vm.schedule.VMScheduleManager;
import org.apache.cloudstack.vm.UnmanagedVMsManager;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.math.NumberUtils;
@ -4406,7 +4406,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) {
Long rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES;
Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1);
if (rootDiskSize <= 0) {
throw new InvalidParameterValueException("Root disk size should be a positive number.");
}
rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES;
_volumeService.validateVolumeSizeInBytes(rootDiskSize);
return rootDiskSize;
} else {

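Illustration (not part of the commit): the intent of the added check is to reject a non-numeric or non-positive ROOT_DISK_SIZE detail before the GiB value is converted to bytes. A standalone sketch (Long.parseLong with a fallback stands in for CloudStack's NumbersUtil.parseLong, and the validation is collapsed into one helper):

    public class RootDiskSizeCheck {
        static final long GiB_TO_BYTES = 1024L * 1024L * 1024L;

        static long parseRootDiskSizeBytes(String value) {
            long sizeGiB;
            try {
                sizeGiB = Long.parseLong(value);
            } catch (NumberFormatException e) {
                sizeGiB = -1;                        // mirrors NumbersUtil.parseLong(value, -1)
            }
            if (sizeGiB <= 0) {
                throw new IllegalArgumentException("Root disk size should be a positive number.");
            }
            return sizeGiB * GiB_TO_BYTES;
        }

        public static void main(String[] args) {
            System.out.println(parseRootDiskSizeBytes("20"));  // 21474836480
            // parseRootDiskSizeBytes("abc") or parseRootDiskSizeBytes("-5") now throws
            // instead of silently producing a bogus size
        }
    }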

@ -77,14 +77,14 @@ public class LocalNfsSecondaryStorageResource extends NfsSecondaryStorageResourc
// Change permissions for the mountpoint - seems to bypass authentication
Script script = new Script(true, "chmod", _timeout, s_logger);
script.add("777", localRootPath);
script.add("1777", localRootPath);
String result = script.execute();
if (result != null) {
String errMsg = "Unable to set permissions for " + localRootPath + " due to " + result;
s_logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
s_logger.debug("Successfully set 777 permission for " + localRootPath);
s_logger.debug("Successfully set 1777 permission for " + localRootPath);
// XXX: Adding the check for creation of snapshots dir here. Might have
// to move it somewhere more logical later.