Mirror of https://github.com/apache/cloudstack.git

Commit 41e7a2a689: Merge release branch 4.19 to main
@@ -485,7 +485,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
sb.append("\tbind ").append(publicIP).append(":").append(publicPort);
result.add(sb.toString());
sb = new StringBuilder();
sb.append("\t").append("balance ").append(algorithm);
sb.append("\t").append("balance ").append(algorithm.toLowerCase());
result.add(sb.toString());

int i = 0;
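The only functional change in this hunk is that the balance algorithm is lowercased before it is written into the HAProxy configuration. A minimal sketch of the effect, with an assumed input value (HAProxy balance keywords such as roundrobin, leastconn and source are lowercase):

    // Illustrative input; the variable name and the append chain come from the hunk above.
    String algorithm = "RoundRobin";
    StringBuilder sb = new StringBuilder();
    sb.append("\t").append("balance ").append(algorithm.toLowerCase());
    // sb.toString() -> "\tbalance roundrobin", which HAProxy accepts;
    // without toLowerCase(), the emitted "balance RoundRobin" would likely not match a known algorithm.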
@@ -1973,16 +1973,8 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
return;
}

String msg;
String rootDiskController = controllerInfo.first();
String dataDiskController = controllerInfo.second();
String scsiDiskController;
String recommendedDiskController = null;

if (VmwareHelper.isControllerOsRecommended(dataDiskController) || VmwareHelper.isControllerOsRecommended(rootDiskController)) {
recommendedDiskController = vmMo.getRecommendedDiskController(null);
}
scsiDiskController = HypervisorHostHelper.getScsiController(new Pair<String, String>(rootDiskController, dataDiskController), recommendedDiskController);
Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(controllerInfo, vmMo, null, null);
String scsiDiskController = HypervisorHostHelper.getScsiController(chosenDiskControllers);
if (scsiDiskController == null) {
return;
}
@@ -2335,6 +2327,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}

int controllerKey;
Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(controllerInfo, vmMo, null, null);

//
// Setup ROOT/DATA disk devices
@@ -2359,10 +2352,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
}

VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context);
String diskController = getDiskController(vmMo, matchingExistingDisk, vol, controllerInfo, deployAsIs);
if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) {
diskController = vmMo.getRecommendedDiskController(null);
}
String diskController = getDiskController(vmMo, matchingExistingDisk, vol, chosenDiskControllers, deployAsIs);
if (DiskControllerType.getType(diskController) == DiskControllerType.ide) {
controllerKey = vmMo.getIDEControllerKey(ideUnitNumber);
if (vol.getType() == Volume.Type.DATADISK) {
@ -2846,27 +2836,10 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
|
||||
}
|
||||
|
||||
private Pair<String, String> getControllerInfoFromVmSpec(VirtualMachineTO vmSpec) throws CloudRuntimeException {
|
||||
String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER);
|
||||
String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER);
|
||||
|
||||
// If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault'
|
||||
// This helps avoid mix of different scsi subtype controllers in instance.
|
||||
if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
|
||||
dataDiskController = DiskControllerType.scsi.toString();
|
||||
}
|
||||
|
||||
// Validate the controller types
|
||||
dataDiskController = DiskControllerType.getType(dataDiskController).toString();
|
||||
rootDiskController = DiskControllerType.getType(rootDiskController).toString();
|
||||
|
||||
if (DiskControllerType.getType(rootDiskController) == DiskControllerType.none) {
|
||||
throw new CloudRuntimeException("Invalid root disk controller detected : " + rootDiskController);
|
||||
}
|
||||
if (DiskControllerType.getType(dataDiskController) == DiskControllerType.none) {
|
||||
throw new CloudRuntimeException("Invalid data disk controller detected : " + dataDiskController);
|
||||
}
|
||||
|
||||
return new Pair<>(rootDiskController, dataDiskController);
|
||||
String rootDiskControllerDetail = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER);
|
||||
String dataDiskControllerDetail = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER);
|
||||
VmwareHelper.validateDiskControllerDetails(rootDiskControllerDetail, dataDiskControllerDetail);
|
||||
return new Pair<>(rootDiskControllerDetail, dataDiskControllerDetail);
|
||||
}
|
||||
|
||||
private String getBootModeFromVmSpec(VirtualMachineTO vmSpec, boolean deployAsIs) {
|
||||
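The removed comment above documents the intent of the deleted special case: avoid mixing different SCSI subtype controllers in one instance. Under the new code path that intent is carried by VmwareHelper.chooseRequiredDiskControllers (added later in this diff), which first resolves osdefault to the hypervisor recommendation and then collapses both controllers onto the root disk controller when they share a bus type. A hedged sketch of the equivalent outcome, assuming a VirtualMachineMO vmMo whose recommended controller resolves to lsilogic:

    // Root controller set explicitly, data controller left as "osdefault".
    Pair<String, String> controllerInfo = new Pair<>("lsilogic", "osdefault");
    Pair<String, String> chosen = VmwareHelper.chooseRequiredDiskControllers(controllerInfo, vmMo, null, null);
    // "osdefault" is translated to the recommendation (lsilogic here), and since both
    // controllers then share the SCSI bus type, the root controller is used for both:
    // chosen.first() -> "lsilogic", chosen.second() -> "lsilogic"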
@ -3614,15 +3587,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
|
||||
return controllerType.toString();
|
||||
}
|
||||
|
||||
if (vol.getType() == Volume.Type.ROOT) {
|
||||
logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.first()
|
||||
+ ", based on root disk controller settings at global configuration setting.");
|
||||
return controllerInfo.first();
|
||||
} else {
|
||||
logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.second()
|
||||
+ ", based on default data disk controller setting i.e. Operating system recommended."); // Need to bring in global configuration setting & template level setting.
|
||||
return controllerInfo.second();
|
||||
}
|
||||
return VmwareHelper.getControllerBasedOnDiskType(controllerInfo, vol);
|
||||
}
|
||||
|
||||
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey,
|
||||
|
||||
@ -2101,15 +2101,18 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
||||
AttachAnswer answer = new AttachAnswer(disk);
|
||||
|
||||
if (isAttach) {
|
||||
String diskController = getLegacyVmDataDiskController();
|
||||
|
||||
String rootDiskControllerDetail = DiskControllerType.ide.toString();
|
||||
if (controllerInfo != null && StringUtils.isNotEmpty(controllerInfo.get(VmDetailConstants.ROOT_DISK_CONTROLLER))) {
|
||||
rootDiskControllerDetail = controllerInfo.get(VmDetailConstants.ROOT_DISK_CONTROLLER);
|
||||
}
|
||||
String dataDiskControllerDetail = getLegacyVmDataDiskController();
|
||||
if (controllerInfo != null && StringUtils.isNotEmpty(controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER))) {
|
||||
diskController = controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER);
|
||||
dataDiskControllerDetail = controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER);
|
||||
}
|
||||
|
||||
if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) {
|
||||
diskController = vmMo.getRecommendedDiskController(null);
|
||||
}
|
||||
VmwareHelper.validateDiskControllerDetails(rootDiskControllerDetail, dataDiskControllerDetail);
|
||||
Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(new Pair<>(rootDiskControllerDetail, dataDiskControllerDetail), vmMo, null, null);
|
||||
String diskController = VmwareHelper.getControllerBasedOnDiskType(chosenDiskControllers, disk);
|
||||
|
||||
vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs, diskController, storagePolicyId, volumeTO.getIopsReadRate() + volumeTO.getIopsWriteRate());
|
||||
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
|
||||
|
||||
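With this change, attach-time controller selection goes through the three helpers added to VmwareHelper later in this diff. A condensed sketch of the sequence; the method calls are taken from the hunk above, the numbered comments are added for orientation:

    // 1. Reject controller names that do not map to a known DiskControllerType.
    VmwareHelper.validateDiskControllerDetails(rootDiskControllerDetail, dataDiskControllerDetail);
    // 2. Resolve "osdefault" against the VM's recommended controller and collapse
    //    controllers that share a bus type onto the root disk controller.
    Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(
            new Pair<>(rootDiskControllerDetail, dataDiskControllerDetail), vmMo, null, null);
    // 3. Pick the root or data controller depending on the disk being attached.
    String diskController = VmwareHelper.getControllerBasedOnDiskType(chosenDiskControllers, disk);
    vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs, diskController, storagePolicyId,
            volumeTO.getIopsReadRate() + volumeTO.getIopsWriteRate());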
@@ -35,6 +35,7 @@ import javax.inject.Inject;

import com.cloud.network.dao.PublicIpQuarantineDao;
import com.cloud.network.vo.PublicIpQuarantineVO;
import com.cloud.resourcelimit.CheckedReservation;
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.annotation.AnnotationService;
@@ -53,6 +54,7 @@ import org.apache.cloudstack.region.PortableIp;
import org.apache.cloudstack.region.PortableIpDao;
import org.apache.cloudstack.region.PortableIpVO;
import org.apache.cloudstack.region.Region;
import org.apache.cloudstack.reservation.dao.ReservationDao;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.agent.AgentManager;
@@ -259,6 +261,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
@Inject
ResourceLimitService _resourceLimitMgr;

@Inject
ReservationDao reservationDao;
@Inject
NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao;
@Inject
@@ -1548,14 +1552,15 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage

logger.debug("Associating ip " + ipToAssoc + " to network " + network);

IPAddressVO ip = _ipAddressDao.findById(ipId);
//update ip address with networkId
ip.setAssociatedWithNetworkId(networkId);
ip.setSourceNat(isSourceNat);
_ipAddressDao.update(ipId, ip);

boolean success = false;
try {
IPAddressVO ip = null;
try (CheckedReservation publicIpReservation = new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) {
ip = _ipAddressDao.findById(ipId);
//update ip address with networkId
ip.setAssociatedWithNetworkId(networkId);
ip.setSourceNat(isSourceNat);
_ipAddressDao.update(ipId, ip);

success = applyIpAssociations(network, false);
if (success) {
logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network);
@@ -1563,6 +1568,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network);
}
return _ipAddressDao.findById(ipId);
} catch (Exception e) {
s_logger.error(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e);
throw new CloudRuntimeException(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e);
} finally {
if (!success && releaseOnFailure) {
if (ip != null) {
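The key change in this hunk is that the IP association now runs inside a CheckedReservation for ResourceType.public_ip, so the owner's public IP limit is checked up front and the reservation is released automatically when the try-with-resources block exits. A minimal, hedged sketch of the pattern; the constructor arguments are the ones used in the diff, the body is illustrative:

    try (CheckedReservation publicIpReservation =
            new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) {
        // Perform the association while the reservation is held.
        IPAddressVO ip = _ipAddressDao.findById(ipId);
        ip.setAssociatedWithNetworkId(networkId);
        ip.setSourceNat(isSourceNat);
        _ipAddressDao.update(ipId, ip);
    } catch (Exception e) {
        // Limit violations and association failures surface as a CloudRuntimeException.
        throw new CloudRuntimeException(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e);
    }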
@ -43,13 +43,14 @@ import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.configuration.ConfigurationManager;
|
||||
import com.cloud.configuration.ConfigurationManagerImpl;
|
||||
import com.cloud.bgp.BGPService;
|
||||
import com.cloud.dc.ASNumberVO;
|
||||
import com.cloud.dc.dao.ASNumberDao;
|
||||
import com.cloud.dc.Vlan;
|
||||
import com.cloud.network.dao.NsxProviderDao;
|
||||
import com.cloud.network.element.NsxProviderVO;
|
||||
import com.cloud.configuration.ConfigurationManagerImpl;
|
||||
import com.cloud.dc.ASNumberVO;
|
||||
import com.cloud.bgp.BGPService;
|
||||
import com.cloud.dc.dao.ASNumberDao;
|
||||
import com.cloud.resourcelimit.CheckedReservation;
|
||||
import com.google.common.collect.Sets;
|
||||
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
|
||||
import org.apache.cloudstack.alert.AlertService;
|
||||
@ -75,6 +76,7 @@ import org.apache.cloudstack.managed.context.ManagedContextRunnable;
|
||||
import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap;
|
||||
import org.apache.cloudstack.network.RoutedIpv4Manager;
|
||||
import org.apache.cloudstack.query.QueryService;
|
||||
import org.apache.cloudstack.reservation.dao.ReservationDao;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.apache.commons.lang3.ObjectUtils;
|
||||
@ -250,6 +252,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
|
||||
@Inject
|
||||
ResourceLimitService _resourceLimitMgr;
|
||||
@Inject
|
||||
ReservationDao reservationDao;
|
||||
@Inject
|
||||
VpcServiceMapDao _vpcSrvcDao;
|
||||
@Inject
|
||||
DataCenterDao _dcDao;
|
||||
@@ -3175,9 +3179,10 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc);

final boolean isSourceNatFinal = isSrcNatIpRequired(vpc.getVpcOfferingId()) && getExistingSourceNatInVpc(vpc.getAccountId(), vpcId, false) == null;
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
try (CheckedReservation publicIpReservation = new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) {
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
final IPAddressVO ip = _ipAddressDao.findById(ipId);
// update ip address with networkId
ip.setVpcId(vpcId);
@@ -3187,8 +3192,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis

// mark ip as allocated
_ipAddrMgr.markPublicIpAsAllocated(ip);
}
});
}
});
} catch (Exception e) {
s_logger.error("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e);
throw new CloudRuntimeException("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e);
}

logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc);
CallContext.current().putContextParameter(IpAddress.class, ipToAssoc.getUuid());
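VpcManagerImpl follows the same pattern, with the existing database transaction nested inside the reservation so that a failed limit check (presumably raised when the reservation is created) prevents the transaction from starting at all. The structure, reduced to its outline:

    try (CheckedReservation publicIpReservation =
            new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) {
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
                // associate the IP with the VPC and mark it as allocated (as in the hunk above)
            }
        });
    } catch (Exception e) {
        throw new CloudRuntimeException("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e);
    }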
@@ -18,13 +18,11 @@
# under the License.

. /etc/rc.d/init.d/functions

#set -x

usage() {
usage_content="
The tool for stopping/starting running system vms and domain routers
The tool for stopping/starting running system vms and domain routers. It requires integration API port to be enabled.

Usage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-n] [-t] [-l] [-z] [-v] [-i] [-j]

@@ -63,6 +61,7 @@ vmids=""
vmidsclause=""
withids=""
vmtable="vm_instance"
integrationport=0


@@ -104,6 +103,12 @@ do
done

integrationport=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "SELECT value FROM configuration WHERE name='integration.api.port'"`)
if [ $integrationport -le 0 ]; then
echo "Integration port is not enabled!"
exit
fi

prepare_ids_clause() {
if [[ ! -z "$vmidsclause" ]]; then
return
@@ -117,168 +122,167 @@ prepare_ids_clause() {

stop_start_system() {
prepare_ids_clause
secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from $vmtable where state=\"Running\" and type=\"SecondaryStorageVm\"$zone$vmidsclause"`)
console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from $vmtable where state=\"Running\" and type=\"ConsoleProxy\"$zone$vmidsclause"`)
length_secondary=(${#secondary[@]})
length_console=(${#console[@]})
secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from $vmtable where state=\"Running\" and type=\"SecondaryStorageVm\"$zone$vmidsclause"`)
console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from $vmtable where state=\"Running\" and type=\"ConsoleProxy\"$zone$vmidsclause"`)
length_secondary=(${#secondary[@]})
length_console=(${#console[@]})

echo -e "\nStopping and starting $length_secondary secondary storage vm(s)$inzone$withids..."
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Stopping and starting $length_secondary secondary storage vm(s)$inzone$withids..." >>$LOGFILE
echo -e "\nStopping and starting $length_secondary secondary storage vm(s)$inzone$withids..."
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Stopping and starting $length_secondary secondary storage vm(s)$inzone$withids..." >>$LOGFILE

for d in "${secondary[@]}"; do
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Stopping secondary storage vm with id $d" >>$LOGFILE
jobresult=$(send_request stopSystemVm $d)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to stop secondary storage vm with id $d \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to stop secondary storage vm with id $d" >>$LOGFILE
else
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Starting secondary storage vm with id $d" >>$LOGFILE
jobresult=$(send_request startSystemVm $d SSVM)
if [ "$jobresult" != "1" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to start secondary storage vm with id $d" >>$LOGFILE
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to start secondary storage vm with id $d" >>$LOGFILE
fi
fi
done
for d in "${secondary[@]}"; do
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Stopping secondary storage vm with id $d" >>$LOGFILE
jobresult=$(send_request stopSystemVm $d)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to stop secondary storage vm with id $d \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to stop secondary storage vm with id $d" >>$LOGFILE
else
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Starting secondary storage vm with id $d" >>$LOGFILE
jobresult=$(send_request startSystemVm $d SSVM)
if [ "$jobresult" != "1" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to start secondary storage vm with id $d" >>$LOGFILE
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to start secondary storage vm with id $d" >>$LOGFILE
fi
fi
done

if [ "$length_secondary" == "0" ];then
echo -e "No running secondary storage vms found \n"
else
echo -e "Done stopping and starting secondary storage vm(s)$inzone$withids"
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Done stopping and starting secondary storage vm(s)$inzone$withids." >>$LOGFILE
fi

echo -e "\nStopping and starting $length_console console proxy vm(s)$inzone$withids..."
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Stopping and starting $length_console console proxy vm(s)$inzone$withids..." >>$LOGFILE

for d in "${console[@]}"; do
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Stopping console proxy with id $d" >>$LOGFILE
jobresult=$(send_request stopSystemVm $d)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to stop console proxy vm with id $d \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to stop console proxy vm with id $d" >>$LOGFILE
if [ "$length_secondary" == "0" ];then
echo -e "No running secondary storage vms found \n"
else
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Starting console proxy vm with id $d" >>$LOGFILE
jobresult=$(send_request startSystemVm $d consoleProxy)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to start console proxy vm with id $d \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to start console proxy vm with id $d" >>$LOGFILE
fi
echo -e "Done stopping and starting secondary storage vm(s)$inzone$withids"
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Done stopping and starting secondary storage vm(s)$inzone$withids." >>$LOGFILE
fi
done

if [ "$length_console" == "0" ];then
echo -e "No running console proxy vms found \n"
else
echo "Done stopping and starting console proxy vm(s) $inzone$withids."
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] Done stopping and starting console proxy vm(s) $inzone$withids." >>$LOGFILE
fi
echo -e "\nStopping and starting $length_console console proxy vm(s)$inzone$withids..."
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Stopping and starting $length_console console proxy vm(s)$inzone$withids..." >>$LOGFILE

for d in "${console[@]}"; do
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Stopping console proxy with id $d" >>$LOGFILE
jobresult=$(send_request stopSystemVm $d)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to stop console proxy vm with id $d \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to stop console proxy vm with id $d" >>$LOGFILE
else
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Starting console proxy vm with id $d" >>$LOGFILE
jobresult=$(send_request startSystemVm $d consoleProxy)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to start console proxy vm with id $d \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to start console proxy vm with id $d" >>$LOGFILE
fi
fi
done

if [ "$length_console" == "0" ];then
echo -e "No running console proxy vms found \n"
else
echo "Done stopping and starting console proxy vm(s) $inzone$withids."
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] Done stopping and starting console proxy vm(s) $inzone$withids." >>$LOGFILE
fi
}

stop_start_router() {
prepare_ids_clause
router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone$vmidsclause"`)
length_router=(${#router[@]})
router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone$vmidsclause"`)
length_router=(${#router[@]})

echo -e "\nStopping and starting $length_router running routing vm(s)$inzone$withids... "
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Stopping and starting $length_router running routing vm(s)$inzone$withids... " >>$LOGFILE
echo -e "\nStopping and starting $length_router running routing vm(s)$inzone$withids... "
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Stopping and starting $length_router running routing vm(s)$inzone$withids... " >>$LOGFILE

#Spawn reboot router in parallel - run commands in <n> chunks - number of threads is configurable
#Spawn reboot router in parallel - run commands in <n> chunks - number of threads is configurable

pids=()
for d in "${router[@]}"; do
pids=()
for d in "${router[@]}"; do

reboot_router $d &
reboot_router $d &

pids=( "${pids[@]}" $! )
pids=( "${pids[@]}" $! )

length_pids=(${#pids[@]})
unfinishedPids=(${#pids[@]})
length_pids=(${#pids[@]})
unfinishedPids=(${#pids[@]})

if [ $maxthreads -gt $length_router ]; then
maxthreads=$length_router
fi
if [ $maxthreads -gt $length_router ]; then
maxthreads=$length_router
fi

if [ $length_pids -ge $maxthreads ]; then
while [ $unfinishedPids -gt 0 ]; do
sleep 10
count=0
for (( i = 0 ; i < $length_pids; i++ )); do
if ! ps ax | grep -v grep | grep ${pids[$i]} > /dev/null; then
count=`expr $count + 1`
fi
done
if [ $length_pids -ge $maxthreads ]; then
while [ $unfinishedPids -gt 0 ]; do
sleep 10
count=0
for (( i = 0 ; i < $length_pids; i++ )); do
if ! ps ax | grep -v grep | grep ${pids[$i]} > /dev/null; then
count=`expr $count + 1`
fi
done

if [ $count -eq $unfinishedPids ]; then
unfinishedPids=0
fi
if [ $count -eq $unfinishedPids ]; then
unfinishedPids=0
fi

done
done

#remove all elements from pids
if [ $unfinishedPids -eq 0 ]; then
pids=()
length_pids=(${#pids[@]})
fi
#remove all elements from pids
if [ $unfinishedPids -eq 0 ]; then
pids=()
length_pids=(${#pids[@]})
fi

fi
fi

done
done

if [ "$length_router" == "0" ];then
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] No running router vms found \n" >>$LOGFILE
else
while [ $unfinishedPids -gt 0 ]; do
sleep 10
done
if [ "$length_router" == "0" ];then
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] No running router vms found \n" >>$LOGFILE
else
while [ $unfinishedPids -gt 0 ]; do
sleep 10
done

echo -e "Done restarting router(s)$inzone$withids. \n"
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Done restarting router(s)$inzone$withids. \n" >>$LOGFILE
echo -e "Done restarting router(s)$inzone$withids. \n"
echo -e "[$(date "+%Y.%m.%d-%H.%M.%S")] Done restarting router(s)$inzone$withids. \n" >>$LOGFILE

fi
fi
}

stop_start_all() {
stop_start_system
stop_start_router
stop_start_system
stop_start_router
}

send_request(){
jobid=`curl -sS "http://$ms:8096/?command=$1&id=$2&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then
echo 2
return
fi
jobresult=$(query_async_job_result $jobid)
jobid=`curl -sS "http://$ms:$integrationport/?command=$1&id=$2&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then
echo 2
return
fi
jobresult=$(query_async_job_result $jobid)
if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to $1 id=$2; jobId is $jobid \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to $1 id=$2; jobId is $jobid" >>$LOGFILE
fi
echo $jobresult
echo $jobresult
}


reboot_router(){
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting router with id $1" >>$LOGFILE
jobid=`curl -sS "http://$ms:8096/?command=rebootRouter&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart domainRouter with id $1; unable to submit the job" >>$LOGFILE
echo 2
return
fi
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting router with id $1" >>$LOGFILE
jobid=`curl -sS "http://$ms:$integrationport/?command=rebootRouter&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart domainRouter with id $1; unable to submit the job" >>$LOGFILE
echo 2
return
fi

jobresult=$(query_async_job_result $jobid)

if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to restart domainRouter with id $1 \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart domainRouter with id $1; jobId $jobid" >>$LOGFILE
exit 0
else
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Successfully restarted domainRouter with id $1; jobId $jobid" >>$LOGFILE
exit 0
fi
jobresult=$(query_async_job_result $jobid)

if [ "$jobresult" != "1" ]; then
echo -e "ERROR: Failed to restart domainRouter with id $1 \n"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart domainRouter with id $1; jobId $jobid" >>$LOGFILE
exit 0
else
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Successfully restarted domainRouter with id $1; jobId $jobid" >>$LOGFILE
exit 0
fi
}

restart_networks(){
@@ -346,7 +350,7 @@ restart_networks(){
}

restart_network(){
jobid=`curl -sS "http://$ms:8096/?command=restartNetwork&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
jobid=`curl -sS "http://$ms:$integrationport/?command=restartNetwork&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart network with id $1; unable to submit the job" >>$LOGFILE
echo 2
@@ -367,7 +371,7 @@ restart_network(){
restart_vpc(){
echo -e "INFO: Restarting vpc with id $1"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting vpc with id $1" >>$LOGFILE
jobid=`curl -sS "http://$ms:8096/?command=restartVPC&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
jobid=`curl -sS "http://$ms:$integrationport/?command=restartVPC&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart vpc with id $1; unable to submit the job" >>$LOGFILE
echo 2
@@ -387,7 +391,7 @@ restart_vpc(){


restart_vpcs(){
vpcs=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vpc WHERE removed is null$zone"`)
vpcs=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from vpc WHERE removed is null$zone"`)
length_vpcs=(${#vpcs[@]})

echo -e "\nRestarting $length_vpcs vpcs... "
@@ -450,21 +454,21 @@ restart_vpcs(){
}

query_async_job_result() {
while [ 1 ]
do
jobstatus=`curl -sS "http://$ms:8096/?command=queryAsyncJobResult&jobId=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F, {'print $4'} | awk -F: {'print $2'}`
if [ "$jobstatus" != "0" ]; then
echo $jobstatus
break
fi
sleep 5
done
while [ 1 ]
do
jobstatus=`curl -sS "http://$ms:$integrationport/?command=queryAsyncJobResult&jobId=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F, {'print $7'} | awk -F: {'print $2'}`
if [ "$jobstatus" != "0" ]; then
echo $jobstatus
break
fi
sleep 5
done
}

if [ "$system$router$all$help$redundant$vpc" == "" ]
then
usage
exit
usage
exit
fi

if [ "$help" == "1" ]
@@ -1000,6 +1000,7 @@ const UI = {

clipboardClear() {
document.getElementById('noVNC_clipboard_text').value = "";
document.getElementById('noVNC_clipboard_text').focus();
},

clipboardSend() {
@ -26,7 +26,7 @@
|
||||
<menu-unfold-outlined class="trigger" v-if="collapsed" @click="toggle" />
|
||||
<menu-fold-outlined class="trigger" v-else @click="toggle" />
|
||||
</template>
|
||||
<project-menu v-if="device !== 'mobile'" />
|
||||
<project-menu />
|
||||
<saml-domain-switcher style="margin-left: 20px" />
|
||||
<user-menu :device="device"></user-menu>
|
||||
</div>
|
||||
@ -45,7 +45,7 @@
|
||||
<menu-unfold-outlined class="trigger" v-else @click="toggle" />
|
||||
</div>
|
||||
</div>
|
||||
<project-menu v-if="device !== 'mobile'" />
|
||||
<project-menu />
|
||||
<saml-domain-switcher style="margin-left: 20px" />
|
||||
<user-menu></user-menu>
|
||||
</div>
|
||||
|
||||
@ -47,7 +47,7 @@ export default {
|
||||
return filters
|
||||
},
|
||||
columns: () => {
|
||||
const fields = ['name', 'state', 'ipaddress']
|
||||
const fields = ['displayname', 'state', 'ipaddress']
|
||||
const metricsFields = ['cpunumber', 'cputotal', 'cpuused', 'memorytotal',
|
||||
{
|
||||
memoryused: (record) => {
|
||||
@ -77,7 +77,7 @@ export default {
|
||||
fields.push('zonename')
|
||||
return fields
|
||||
},
|
||||
searchFilters: ['name', 'zoneid', 'domainid', 'account', 'groupid', 'tags'],
|
||||
searchFilters: ['displayname', 'zoneid', 'domainid', 'account', 'groupid', 'tags'],
|
||||
details: () => {
|
||||
var fields = ['name', 'displayname', 'id', 'state', 'ipaddress', 'ip6address', 'templatename', 'ostypename',
|
||||
'serviceofferingname', 'isdynamicallyscalable', 'haenable', 'hypervisor', 'boottype', 'bootmode', 'account',
|
||||
|
||||
@ -591,7 +591,7 @@ export default {
|
||||
}).then(async json => {
|
||||
var lbNetworks = json.listnetworksresponse.network || []
|
||||
if (lbNetworks.length > 0) {
|
||||
this.publicLBExists = true
|
||||
this.publicLBExists = false
|
||||
for (var idx = 0; idx < lbNetworks.length; idx++) {
|
||||
const lbNetworkOffering = await this.getNetworkOffering(lbNetworks[idx].networkofferingid)
|
||||
const index = lbNetworkOffering.service.map(svc => { return svc.name }).indexOf('Lb')
|
||||
@ -618,16 +618,23 @@ export default {
|
||||
api('listNetworkOfferings', params).then(json => {
|
||||
this.networkOfferings = json.listnetworkofferingsresponse.networkoffering || []
|
||||
var filteredOfferings = []
|
||||
if (this.publicLBExists) {
|
||||
for (var index in this.networkOfferings) {
|
||||
const offering = this.networkOfferings[index]
|
||||
const idx = offering.service.map(svc => { return svc.name }).indexOf('Lb')
|
||||
if (idx === -1 || this.lbProviderMap.publicLb.vpc.indexOf(offering.service.map(svc => { return svc.provider[0].name })[idx]) === -1) {
|
||||
const vpcLbServiceIndex = this.resource.service.map(svc => { return svc.name }).indexOf('Lb')
|
||||
for (var index in this.networkOfferings) {
|
||||
const offering = this.networkOfferings[index]
|
||||
const idx = offering.service.map(svc => { return svc.name }).indexOf('Lb')
|
||||
if (this.publicLBExists && (idx === -1 || this.lbProviderMap.publicLb.vpc.indexOf(offering.service.map(svc => { return svc.provider[0].name })[idx]) === -1)) {
|
||||
filteredOfferings.push(offering)
|
||||
} else if (!this.publicLBExists && vpcLbServiceIndex > -1) {
|
||||
const vpcLbServiceProvider = vpcLbServiceIndex === -1 ? undefined : this.resource.service[vpcLbServiceIndex].provider[0].name
|
||||
const offeringLbServiceProvider = idx === -1 ? undefined : offering.service[idx].provider[0].name
|
||||
if (vpcLbServiceProvider && (!offeringLbServiceProvider || (offeringLbServiceProvider && vpcLbServiceProvider === offeringLbServiceProvider))) {
|
||||
filteredOfferings.push(offering)
|
||||
}
|
||||
} else {
|
||||
filteredOfferings.push(offering)
|
||||
}
|
||||
this.networkOfferings = filteredOfferings
|
||||
}
|
||||
this.networkOfferings = filteredOfferings
|
||||
if (this.isNsxEnabled) {
|
||||
this.networkOfferings = this.networkOfferings.filter(offering => offering.networkmode === (this.isOfferingNatMode ? 'NATTED' : 'ROUTED'))
|
||||
}
|
||||
|
||||
@@ -1574,15 +1574,8 @@ public class HypervisorHostHelper {

VmwareHelper.setBasicVmConfig(vmConfig, cpuCount, cpuSpeedMHz, cpuReservedMHz, memoryMB, memoryReserveMB, guestOsIdentifier, limitCpuUse, false);

String newRootDiskController = controllerInfo.first();
String newDataDiskController = controllerInfo.second();
String recommendedController = null;
if (VmwareHelper.isControllerOsRecommended(newRootDiskController) || VmwareHelper.isControllerOsRecommended(newDataDiskController)) {
recommendedController = host.getRecommendedDiskController(guestOsIdentifier);
}

Pair<String, String> updatedControllerInfo = new Pair<String, String>(newRootDiskController, newDataDiskController);
String scsiDiskController = HypervisorHostHelper.getScsiController(updatedControllerInfo, recommendedController);
Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(controllerInfo, null, host, guestOsIdentifier);
String scsiDiskController = HypervisorHostHelper.getScsiController(chosenDiskControllers);
// If there is requirement for a SCSI controller, ensure to create those.
if (scsiDiskController != null) {
int busNum = 0;
@@ -2256,19 +2249,11 @@ public class HypervisorHostHelper {
return morHyperHost;
}

public static String getScsiController(Pair<String, String> controllerInfo, String recommendedController) {
public static String getScsiController(Pair<String, String> controllerInfo) {
String rootDiskController = controllerInfo.first();
String dataDiskController = controllerInfo.second();

// If "osdefault" is specified as controller type, then translate to actual recommended controller.
if (VmwareHelper.isControllerOsRecommended(rootDiskController)) {
rootDiskController = recommendedController;
}
if (VmwareHelper.isControllerOsRecommended(dataDiskController)) {
dataDiskController = recommendedController;
}

String scsiDiskController = null; //If any of the controller provided is SCSI then return it's sub-type.
String scsiDiskController; //If any of the controller provided is SCSI then return it's sub-type.
if (isIdeController(rootDiskController) && isIdeController(dataDiskController)) {
//Default controllers would exist
return null;
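The inline comments above state the contract of the trimmed getScsiController: return the SCSI sub-type if either controller is SCSI, and null when both disks sit on IDE, since the default controllers already exist. A small illustration with assumed inputs; the expected results follow from those comments rather than from code shown in this hunk:

    HypervisorHostHelper.getScsiController(new Pair<>("ide", "ide"));      // expected: null
    HypervisorHostHelper.getScsiController(new Pair<>("lsilogic", "ide")); // expected: "lsilogic"
    HypervisorHostHelper.getScsiController(new Pair<>("ide", "pvscsi"));   // expected: "pvscsi"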
@@ -40,11 +40,14 @@ import javax.xml.datatype.DatatypeConfigurationException;
import javax.xml.datatype.DatatypeFactory;
import javax.xml.datatype.XMLGregorianCalendar;

import com.cloud.agent.api.to.DiskTO;
import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.DatastoreFile;
import com.cloud.hypervisor.vmware.mo.DistributedVirtualSwitchMO;
import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper;
import com.cloud.serializer.GsonHelper;
import com.cloud.storage.Volume;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils;
import com.vmware.vim25.DatastoreInfo;
import com.vmware.vim25.DistributedVirtualPort;
@@ -1064,4 +1067,76 @@ public class VmwareHelper {
}
return vmdkAbsFile;
}

/**
* Validates an instance's <code>rootDiskController</code> and <code>dataDiskController</code> details. Throws a
* <code>CloudRuntimeException</code> if they are invalid.
*/
public static void validateDiskControllerDetails(String rootDiskControllerDetail, String dataDiskControllerDetail) {
rootDiskControllerDetail = DiskControllerType.getType(rootDiskControllerDetail).toString();
if (DiskControllerType.getType(rootDiskControllerDetail) == DiskControllerType.none) {
throw new CloudRuntimeException(String.format("[%s] is not a valid root disk controller", rootDiskControllerDetail));
}
dataDiskControllerDetail = DiskControllerType.getType(dataDiskControllerDetail).toString();
if (DiskControllerType.getType(dataDiskControllerDetail) == DiskControllerType.none) {
throw new CloudRuntimeException(String.format("[%s] is not a valid data disk controller", dataDiskControllerDetail));
}
}
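A short usage sketch for the validator; "floppy" is an invented invalid value, and the outcome assumes DiskControllerType.getType() maps unknown names to none, as the removed inline validation in VmwareResource implied:

    VmwareHelper.validateDiskControllerDetails("scsi", "osdefault");   // recognised names, passes
    VmwareHelper.validateDiskControllerDetails("floppy", "osdefault");
    // -> CloudRuntimeException: "[none] is not a valid root disk controller"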
/**
* Based on an instance's <code>rootDiskController</code> and <code>dataDiskController</code> details, returns a pair
* containing the disk controllers that should be used for root disk and the data disks, respectively.
*
* @param controllerInfo pair containing the root disk and data disk controllers, respectively.
* @param vmMo virtual machine to derive the recommended disk controllers from. If not null, <code>host</code> and <code>guestOsIdentifier</code> will be ignored.
* @param host host to derive the recommended disk controllers from. Must be provided with <code>guestOsIdentifier</code>.
* @param guestOsIdentifier used to derive the recommended disk controllers from the host.
*/
public static Pair<String, String> chooseRequiredDiskControllers(Pair<String, String> controllerInfo, VirtualMachineMO vmMo,
VmwareHypervisorHost host, String guestOsIdentifier) throws Exception {
String recommendedDiskControllerClassName = vmMo != null ? vmMo.getRecommendedDiskController(null) : host.getRecommendedDiskController(guestOsIdentifier);
String recommendedDiskController = DiskControllerType.getType(recommendedDiskControllerClassName).toString();

String convertedRootDiskController = controllerInfo.first();
if (isControllerOsRecommended(convertedRootDiskController)) {
convertedRootDiskController = recommendedDiskController;
}

String convertedDataDiskController = controllerInfo.second();
if (isControllerOsRecommended(convertedDataDiskController)) {
convertedDataDiskController = recommendedDiskController;
}

if (diskControllersShareTheSameBusType(convertedRootDiskController, convertedDataDiskController)) {
s_logger.debug("Root and data disk controllers share the same bus type; therefore, we will only use the controllers specified for the root disk.");
return new Pair<>(convertedRootDiskController, convertedRootDiskController);
}

return new Pair<>(convertedRootDiskController, convertedDataDiskController);
}
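When called from HypervisorHostHelper (earlier in this diff) there is no VirtualMachineMO yet, so the host plus guest OS identifier branch supplies the recommendation. A hedged sketch, assuming the host recommends lsilogic for the given guest OS:

    Pair<String, String> chosen = VmwareHelper.chooseRequiredDiskControllers(
            new Pair<>("osdefault", "osdefault"), null, host, guestOsIdentifier);
    // Both "osdefault" entries resolve to the host recommendation:
    // chosen.first() -> "lsilogic", chosen.second() -> "lsilogic"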

protected static boolean diskControllersShareTheSameBusType(String rootDiskController, String dataDiskController) {
DiskControllerType rootDiskControllerType = DiskControllerType.getType(rootDiskController);
DiskControllerType dataDiskControllerType = DiskControllerType.getType(dataDiskController);
if (rootDiskControllerType.equals(dataDiskControllerType)) {
return true;
}
List<DiskControllerType> scsiDiskControllers = List.of(DiskControllerType.scsi, DiskControllerType.lsilogic, DiskControllerType.lsisas1068,
DiskControllerType.buslogic ,DiskControllerType.pvscsi);
return scsiDiskControllers.contains(rootDiskControllerType) && scsiDiskControllers.contains(dataDiskControllerType);
}
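Given the list above, the bus-type check treats every SCSI flavour as one family. A few illustrative inputs and the results the method would return:

    diskControllersShareTheSameBusType("lsilogic", "pvscsi");   // true  (both SCSI subtypes)
    diskControllersShareTheSameBusType("ide", "ide");           // true  (identical types)
    diskControllersShareTheSameBusType("ide", "lsilogic");      // false (different bus types)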

/**
* Identifies whether the disk is a root or data disk, and returns the controller from the provided pair that should
* be used for the disk.
* @param controllerInfo pair containing the root disk and data disk controllers, respectively.
*/
public static String getControllerBasedOnDiskType(Pair<String, String> controllerInfo, DiskTO disk) {
if (disk.getType() == Volume.Type.ROOT || disk.getDiskSeq() == 0) {
s_logger.debug(String.format("Choosing disk controller [%s] for the root disk.", controllerInfo.first()));
return controllerInfo.first();
}
s_logger.debug(String.format("Choosing disk controller [%s] for the data disks.", controllerInfo.second()));
return controllerInfo.second();
}
}