Merge release branch 4.19 to main

This commit is contained in:
João Jandre 2024-09-19 15:02:29 -03:00
commit 41e7a2a689
12 changed files with 290 additions and 233 deletions

View File

@ -485,7 +485,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
sb.append("\tbind ").append(publicIP).append(":").append(publicPort); sb.append("\tbind ").append(publicIP).append(":").append(publicPort);
result.add(sb.toString()); result.add(sb.toString());
sb = new StringBuilder(); sb = new StringBuilder();
sb.append("\t").append("balance ").append(algorithm); sb.append("\t").append("balance ").append(algorithm.toLowerCase());
result.add(sb.toString()); result.add(sb.toString());
int i = 0; int i = 0;

View File

@ -1973,16 +1973,8 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
return; return;
} }
String msg; Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(controllerInfo, vmMo, null, null);
String rootDiskController = controllerInfo.first(); String scsiDiskController = HypervisorHostHelper.getScsiController(chosenDiskControllers);
String dataDiskController = controllerInfo.second();
String scsiDiskController;
String recommendedDiskController = null;
if (VmwareHelper.isControllerOsRecommended(dataDiskController) || VmwareHelper.isControllerOsRecommended(rootDiskController)) {
recommendedDiskController = vmMo.getRecommendedDiskController(null);
}
scsiDiskController = HypervisorHostHelper.getScsiController(new Pair<String, String>(rootDiskController, dataDiskController), recommendedDiskController);
if (scsiDiskController == null) { if (scsiDiskController == null) {
return; return;
} }
@ -2335,6 +2327,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
} }
int controllerKey; int controllerKey;
Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(controllerInfo,vmMo, null, null);
// //
// Setup ROOT/DATA disk devices // Setup ROOT/DATA disk devices
@ -2359,10 +2352,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
} }
VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context); VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol, hyperHost, context);
String diskController = getDiskController(vmMo, matchingExistingDisk, vol, controllerInfo, deployAsIs); String diskController = getDiskController(vmMo, matchingExistingDisk, vol, chosenDiskControllers, deployAsIs);
if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) {
diskController = vmMo.getRecommendedDiskController(null);
}
if (DiskControllerType.getType(diskController) == DiskControllerType.ide) { if (DiskControllerType.getType(diskController) == DiskControllerType.ide) {
controllerKey = vmMo.getIDEControllerKey(ideUnitNumber); controllerKey = vmMo.getIDEControllerKey(ideUnitNumber);
if (vol.getType() == Volume.Type.DATADISK) { if (vol.getType() == Volume.Type.DATADISK) {
@ -2846,27 +2836,10 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
} }
private Pair<String, String> getControllerInfoFromVmSpec(VirtualMachineTO vmSpec) throws CloudRuntimeException { private Pair<String, String> getControllerInfoFromVmSpec(VirtualMachineTO vmSpec) throws CloudRuntimeException {
String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER); String rootDiskControllerDetail = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER);
String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER); String dataDiskControllerDetail = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER);
VmwareHelper.validateDiskControllerDetails(rootDiskControllerDetail, dataDiskControllerDetail);
// If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault' return new Pair<>(rootDiskControllerDetail, dataDiskControllerDetail);
// This helps avoid mix of different scsi subtype controllers in instance.
if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
dataDiskController = DiskControllerType.scsi.toString();
}
// Validate the controller types
dataDiskController = DiskControllerType.getType(dataDiskController).toString();
rootDiskController = DiskControllerType.getType(rootDiskController).toString();
if (DiskControllerType.getType(rootDiskController) == DiskControllerType.none) {
throw new CloudRuntimeException("Invalid root disk controller detected : " + rootDiskController);
}
if (DiskControllerType.getType(dataDiskController) == DiskControllerType.none) {
throw new CloudRuntimeException("Invalid data disk controller detected : " + dataDiskController);
}
return new Pair<>(rootDiskController, dataDiskController);
} }
private String getBootModeFromVmSpec(VirtualMachineTO vmSpec, boolean deployAsIs) { private String getBootModeFromVmSpec(VirtualMachineTO vmSpec, boolean deployAsIs) {
@ -3614,15 +3587,7 @@ public class VmwareResource extends ServerResourceBase implements StoragePoolRes
return controllerType.toString(); return controllerType.toString();
} }
if (vol.getType() == Volume.Type.ROOT) { return VmwareHelper.getControllerBasedOnDiskType(controllerInfo, vol);
logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.first()
+ ", based on root disk controller settings at global configuration setting.");
return controllerInfo.first();
} else {
logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.second()
+ ", based on default data disk controller setting i.e. Operating system recommended."); // Need to bring in global configuration setting & template level setting.
return controllerInfo.second();
}
} }
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey,

View File

@ -2101,15 +2101,18 @@ public class VmwareStorageProcessor implements StorageProcessor {
AttachAnswer answer = new AttachAnswer(disk); AttachAnswer answer = new AttachAnswer(disk);
if (isAttach) { if (isAttach) {
String diskController = getLegacyVmDataDiskController(); String rootDiskControllerDetail = DiskControllerType.ide.toString();
if (controllerInfo != null && StringUtils.isNotEmpty(controllerInfo.get(VmDetailConstants.ROOT_DISK_CONTROLLER))) {
rootDiskControllerDetail = controllerInfo.get(VmDetailConstants.ROOT_DISK_CONTROLLER);
}
String dataDiskControllerDetail = getLegacyVmDataDiskController();
if (controllerInfo != null && StringUtils.isNotEmpty(controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER))) { if (controllerInfo != null && StringUtils.isNotEmpty(controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER))) {
diskController = controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER); dataDiskControllerDetail = controllerInfo.get(VmDetailConstants.DATA_DISK_CONTROLLER);
} }
if (DiskControllerType.getType(diskController) == DiskControllerType.osdefault) { VmwareHelper.validateDiskControllerDetails(rootDiskControllerDetail, dataDiskControllerDetail);
diskController = vmMo.getRecommendedDiskController(null); Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(new Pair<>(rootDiskControllerDetail, dataDiskControllerDetail), vmMo, null, null);
} String diskController = VmwareHelper.getControllerBasedOnDiskType(chosenDiskControllers, disk);
vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs, diskController, storagePolicyId, volumeTO.getIopsReadRate() + volumeTO.getIopsWriteRate()); vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs, diskController, storagePolicyId, volumeTO.getIopsReadRate() + volumeTO.getIopsWriteRate());
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();

View File

@ -35,6 +35,7 @@ import javax.inject.Inject;
import com.cloud.network.dao.PublicIpQuarantineDao; import com.cloud.network.dao.PublicIpQuarantineDao;
import com.cloud.network.vo.PublicIpQuarantineVO; import com.cloud.network.vo.PublicIpQuarantineVO;
import com.cloud.resourcelimit.CheckedReservation;
import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.AnnotationService;
@ -53,6 +54,7 @@ import org.apache.cloudstack.region.PortableIp;
import org.apache.cloudstack.region.PortableIpDao; import org.apache.cloudstack.region.PortableIpDao;
import org.apache.cloudstack.region.PortableIpVO; import org.apache.cloudstack.region.PortableIpVO;
import org.apache.cloudstack.region.Region; import org.apache.cloudstack.region.Region;
import org.apache.cloudstack.reservation.dao.ReservationDao;
import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.CollectionUtils;
import com.cloud.agent.AgentManager; import com.cloud.agent.AgentManager;
@ -259,6 +261,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
@Inject @Inject
ResourceLimitService _resourceLimitMgr; ResourceLimitService _resourceLimitMgr;
@Inject
ReservationDao reservationDao;
@Inject @Inject
NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao; NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao;
@Inject @Inject
@ -1548,14 +1552,15 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
logger.debug("Associating ip " + ipToAssoc + " to network " + network); logger.debug("Associating ip " + ipToAssoc + " to network " + network);
IPAddressVO ip = _ipAddressDao.findById(ipId); boolean success = false;
IPAddressVO ip = null;
try (CheckedReservation publicIpReservation = new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) {
ip = _ipAddressDao.findById(ipId);
//update ip address with networkId //update ip address with networkId
ip.setAssociatedWithNetworkId(networkId); ip.setAssociatedWithNetworkId(networkId);
ip.setSourceNat(isSourceNat); ip.setSourceNat(isSourceNat);
_ipAddressDao.update(ipId, ip); _ipAddressDao.update(ipId, ip);
boolean success = false;
try {
success = applyIpAssociations(network, false); success = applyIpAssociations(network, false);
if (success) { if (success) {
logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network);
@ -1563,6 +1568,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network);
} }
return _ipAddressDao.findById(ipId); return _ipAddressDao.findById(ipId);
} catch (Exception e) {
s_logger.error(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e);
throw new CloudRuntimeException(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e);
} finally { } finally {
if (!success && releaseOnFailure) { if (!success && releaseOnFailure) {
if (ip != null) { if (ip != null) {

View File

@ -43,13 +43,14 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException; import javax.naming.ConfigurationException;
import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.ConfigurationManagerImpl;
import com.cloud.bgp.BGPService;
import com.cloud.dc.ASNumberVO;
import com.cloud.dc.dao.ASNumberDao;
import com.cloud.dc.Vlan; import com.cloud.dc.Vlan;
import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.NsxProviderDao;
import com.cloud.network.element.NsxProviderVO; import com.cloud.network.element.NsxProviderVO;
import com.cloud.configuration.ConfigurationManagerImpl; import com.cloud.resourcelimit.CheckedReservation;
import com.cloud.dc.ASNumberVO;
import com.cloud.bgp.BGPService;
import com.cloud.dc.dao.ASNumberDao;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.alert.AlertService;
@ -75,6 +76,7 @@ import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap;
import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.network.RoutedIpv4Manager;
import org.apache.cloudstack.query.QueryService; import org.apache.cloudstack.query.QueryService;
import org.apache.cloudstack.reservation.dao.ReservationDao;
import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.EnumUtils;
import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.ObjectUtils;
@ -250,6 +252,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
@Inject @Inject
ResourceLimitService _resourceLimitMgr; ResourceLimitService _resourceLimitMgr;
@Inject @Inject
ReservationDao reservationDao;
@Inject
VpcServiceMapDao _vpcSrvcDao; VpcServiceMapDao _vpcSrvcDao;
@Inject @Inject
DataCenterDao _dcDao; DataCenterDao _dcDao;
@ -3175,6 +3179,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc);
final boolean isSourceNatFinal = isSrcNatIpRequired(vpc.getVpcOfferingId()) && getExistingSourceNatInVpc(vpc.getAccountId(), vpcId, false) == null; final boolean isSourceNatFinal = isSrcNatIpRequired(vpc.getVpcOfferingId()) && getExistingSourceNatInVpc(vpc.getAccountId(), vpcId, false) == null;
try (CheckedReservation publicIpReservation = new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) {
Transaction.execute(new TransactionCallbackNoReturn() { Transaction.execute(new TransactionCallbackNoReturn() {
@Override @Override
public void doInTransactionWithoutResult(final TransactionStatus status) { public void doInTransactionWithoutResult(final TransactionStatus status) {
@ -3189,6 +3194,10 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
_ipAddrMgr.markPublicIpAsAllocated(ip); _ipAddrMgr.markPublicIpAsAllocated(ip);
} }
}); });
} catch (Exception e) {
s_logger.error("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e);
throw new CloudRuntimeException("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e);
}
logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc);
CallContext.current().putContextParameter(IpAddress.class, ipToAssoc.getUuid()); CallContext.current().putContextParameter(IpAddress.class, ipToAssoc.getUuid());

View File

@ -18,13 +18,11 @@
# under the License. # under the License.
. /etc/rc.d/init.d/functions
#set -x #set -x
usage() { usage() {
usage_content=" usage_content="
The tool for stopping/starting running system vms and domain routers The tool for stopping/starting running system vms and domain routers. It requires integration API port to be enabled.
Usage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-n] [-t] [-l] [-z] [-v] [-i] [-j] Usage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-n] [-t] [-l] [-z] [-v] [-i] [-j]
@ -63,6 +61,7 @@ vmids=""
vmidsclause="" vmidsclause=""
withids="" withids=""
vmtable="vm_instance" vmtable="vm_instance"
integrationport=0
@ -104,6 +103,12 @@ do
done done
integrationport=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "SELECT value FROM configuration WHERE name='integration.api.port'"`)
if [ $integrationport -le 0 ]; then
echo "Integration port is not enabled!"
exit
fi
prepare_ids_clause() { prepare_ids_clause() {
if [[ ! -z "$vmidsclause" ]]; then if [[ ! -z "$vmidsclause" ]]; then
return return
@ -117,8 +122,8 @@ prepare_ids_clause() {
stop_start_system() { stop_start_system() {
prepare_ids_clause prepare_ids_clause
secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from $vmtable where state=\"Running\" and type=\"SecondaryStorageVm\"$zone$vmidsclause"`) secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from $vmtable where state=\"Running\" and type=\"SecondaryStorageVm\"$zone$vmidsclause"`)
console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from $vmtable where state=\"Running\" and type=\"ConsoleProxy\"$zone$vmidsclause"`) console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from $vmtable where state=\"Running\" and type=\"ConsoleProxy\"$zone$vmidsclause"`)
length_secondary=(${#secondary[@]}) length_secondary=(${#secondary[@]})
length_console=(${#console[@]}) length_console=(${#console[@]})
@ -178,7 +183,7 @@ fi
stop_start_router() { stop_start_router() {
prepare_ids_clause prepare_ids_clause
router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone$vmidsclause"`) router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone$vmidsclause"`)
length_router=(${#router[@]}) length_router=(${#router[@]})
echo -e "\nStopping and starting $length_router running routing vm(s)$inzone$withids... " echo -e "\nStopping and starting $length_router running routing vm(s)$inzone$withids... "
@ -245,7 +250,7 @@ stop_start_router
} }
send_request(){ send_request(){
jobid=`curl -sS "http://$ms:8096/?command=$1&id=$2&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}` jobid=`curl -sS "http://$ms:$integrationport/?command=$1&id=$2&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then if [ "$jobid" == "" ]; then
echo 2 echo 2
return return
@ -261,7 +266,7 @@ send_request(){
reboot_router(){ reboot_router(){
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting router with id $1" >>$LOGFILE echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting router with id $1" >>$LOGFILE
jobid=`curl -sS "http://$ms:8096/?command=rebootRouter&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}` jobid=`curl -sS "http://$ms:$integrationport/?command=rebootRouter&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart domainRouter with id $1; unable to submit the job" >>$LOGFILE echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart domainRouter with id $1; unable to submit the job" >>$LOGFILE
echo 2 echo 2
@ -278,7 +283,6 @@ reboot_router(){
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Successfully restarted domainRouter with id $1; jobId $jobid" >>$LOGFILE echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Successfully restarted domainRouter with id $1; jobId $jobid" >>$LOGFILE
exit 0 exit 0
fi fi
} }
restart_networks(){ restart_networks(){
@ -346,7 +350,7 @@ restart_networks(){
} }
restart_network(){ restart_network(){
jobid=`curl -sS "http://$ms:8096/?command=restartNetwork&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}` jobid=`curl -sS "http://$ms:$integrationport/?command=restartNetwork&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart network with id $1; unable to submit the job" >>$LOGFILE echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart network with id $1; unable to submit the job" >>$LOGFILE
echo 2 echo 2
@ -367,7 +371,7 @@ restart_network(){
restart_vpc(){ restart_vpc(){
echo -e "INFO: Restarting vpc with id $1" echo -e "INFO: Restarting vpc with id $1"
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting vpc with id $1" >>$LOGFILE echo "[$(date "+%Y.%m.%d-%H.%M.%S")] INFO: Restarting vpc with id $1" >>$LOGFILE
jobid=`curl -sS "http://$ms:8096/?command=restartVPC&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}` jobid=`curl -sS "http://$ms:$integrationport/?command=restartVPC&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}`
if [ "$jobid" == "" ]; then if [ "$jobid" == "" ]; then
echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart vpc with id $1; unable to submit the job" >>$LOGFILE echo "[$(date "+%Y.%m.%d-%H.%M.%S")] ERROR: Failed to restart vpc with id $1; unable to submit the job" >>$LOGFILE
echo 2 echo 2
@ -387,7 +391,7 @@ restart_vpc(){
restart_vpcs(){ restart_vpcs(){
vpcs=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vpc WHERE removed is null$zone"`) vpcs=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select uuid from vpc WHERE removed is null$zone"`)
length_vpcs=(${#vpcs[@]}) length_vpcs=(${#vpcs[@]})
echo -e "\nRestarting $length_vpcs vpcs... " echo -e "\nRestarting $length_vpcs vpcs... "
@ -452,7 +456,7 @@ restart_vpcs(){
query_async_job_result() { query_async_job_result() {
while [ 1 ] while [ 1 ]
do do
jobstatus=`curl -sS "http://$ms:8096/?command=queryAsyncJobResult&jobId=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F, {'print $4'} | awk -F: {'print $2'}` jobstatus=`curl -sS "http://$ms:$integrationport/?command=queryAsyncJobResult&jobId=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F, {'print $7'} | awk -F: {'print $2'}`
if [ "$jobstatus" != "0" ]; then if [ "$jobstatus" != "0" ]; then
echo $jobstatus echo $jobstatus
break break

View File

@ -1000,6 +1000,7 @@ const UI = {
clipboardClear() { clipboardClear() {
document.getElementById('noVNC_clipboard_text').value = ""; document.getElementById('noVNC_clipboard_text').value = "";
document.getElementById('noVNC_clipboard_text').focus();
}, },
clipboardSend() { clipboardSend() {

View File

@ -26,7 +26,7 @@
<menu-unfold-outlined class="trigger" v-if="collapsed" @click="toggle" /> <menu-unfold-outlined class="trigger" v-if="collapsed" @click="toggle" />
<menu-fold-outlined class="trigger" v-else @click="toggle" /> <menu-fold-outlined class="trigger" v-else @click="toggle" />
</template> </template>
<project-menu v-if="device !== 'mobile'" /> <project-menu />
<saml-domain-switcher style="margin-left: 20px" /> <saml-domain-switcher style="margin-left: 20px" />
<user-menu :device="device"></user-menu> <user-menu :device="device"></user-menu>
</div> </div>
@ -45,7 +45,7 @@
<menu-unfold-outlined class="trigger" v-else @click="toggle" /> <menu-unfold-outlined class="trigger" v-else @click="toggle" />
</div> </div>
</div> </div>
<project-menu v-if="device !== 'mobile'" /> <project-menu />
<saml-domain-switcher style="margin-left: 20px" /> <saml-domain-switcher style="margin-left: 20px" />
<user-menu></user-menu> <user-menu></user-menu>
</div> </div>

View File

@ -47,7 +47,7 @@ export default {
return filters return filters
}, },
columns: () => { columns: () => {
const fields = ['name', 'state', 'ipaddress'] const fields = ['displayname', 'state', 'ipaddress']
const metricsFields = ['cpunumber', 'cputotal', 'cpuused', 'memorytotal', const metricsFields = ['cpunumber', 'cputotal', 'cpuused', 'memorytotal',
{ {
memoryused: (record) => { memoryused: (record) => {
@ -77,7 +77,7 @@ export default {
fields.push('zonename') fields.push('zonename')
return fields return fields
}, },
searchFilters: ['name', 'zoneid', 'domainid', 'account', 'groupid', 'tags'], searchFilters: ['displayname', 'zoneid', 'domainid', 'account', 'groupid', 'tags'],
details: () => { details: () => {
var fields = ['name', 'displayname', 'id', 'state', 'ipaddress', 'ip6address', 'templatename', 'ostypename', var fields = ['name', 'displayname', 'id', 'state', 'ipaddress', 'ip6address', 'templatename', 'ostypename',
'serviceofferingname', 'isdynamicallyscalable', 'haenable', 'hypervisor', 'boottype', 'bootmode', 'account', 'serviceofferingname', 'isdynamicallyscalable', 'haenable', 'hypervisor', 'boottype', 'bootmode', 'account',

View File

@ -591,7 +591,7 @@ export default {
}).then(async json => { }).then(async json => {
var lbNetworks = json.listnetworksresponse.network || [] var lbNetworks = json.listnetworksresponse.network || []
if (lbNetworks.length > 0) { if (lbNetworks.length > 0) {
this.publicLBExists = true this.publicLBExists = false
for (var idx = 0; idx < lbNetworks.length; idx++) { for (var idx = 0; idx < lbNetworks.length; idx++) {
const lbNetworkOffering = await this.getNetworkOffering(lbNetworks[idx].networkofferingid) const lbNetworkOffering = await this.getNetworkOffering(lbNetworks[idx].networkofferingid)
const index = lbNetworkOffering.service.map(svc => { return svc.name }).indexOf('Lb') const index = lbNetworkOffering.service.map(svc => { return svc.name }).indexOf('Lb')
@ -618,16 +618,23 @@ export default {
api('listNetworkOfferings', params).then(json => { api('listNetworkOfferings', params).then(json => {
this.networkOfferings = json.listnetworkofferingsresponse.networkoffering || [] this.networkOfferings = json.listnetworkofferingsresponse.networkoffering || []
var filteredOfferings = [] var filteredOfferings = []
if (this.publicLBExists) { const vpcLbServiceIndex = this.resource.service.map(svc => { return svc.name }).indexOf('Lb')
for (var index in this.networkOfferings) { for (var index in this.networkOfferings) {
const offering = this.networkOfferings[index] const offering = this.networkOfferings[index]
const idx = offering.service.map(svc => { return svc.name }).indexOf('Lb') const idx = offering.service.map(svc => { return svc.name }).indexOf('Lb')
if (idx === -1 || this.lbProviderMap.publicLb.vpc.indexOf(offering.service.map(svc => { return svc.provider[0].name })[idx]) === -1) { if (this.publicLBExists && (idx === -1 || this.lbProviderMap.publicLb.vpc.indexOf(offering.service.map(svc => { return svc.provider[0].name })[idx]) === -1)) {
filteredOfferings.push(offering)
} else if (!this.publicLBExists && vpcLbServiceIndex > -1) {
const vpcLbServiceProvider = vpcLbServiceIndex === -1 ? undefined : this.resource.service[vpcLbServiceIndex].provider[0].name
const offeringLbServiceProvider = idx === -1 ? undefined : offering.service[idx].provider[0].name
if (vpcLbServiceProvider && (!offeringLbServiceProvider || (offeringLbServiceProvider && vpcLbServiceProvider === offeringLbServiceProvider))) {
filteredOfferings.push(offering)
}
} else {
filteredOfferings.push(offering) filteredOfferings.push(offering)
} }
} }
this.networkOfferings = filteredOfferings this.networkOfferings = filteredOfferings
}
if (this.isNsxEnabled) { if (this.isNsxEnabled) {
this.networkOfferings = this.networkOfferings.filter(offering => offering.networkmode === (this.isOfferingNatMode ? 'NATTED' : 'ROUTED')) this.networkOfferings = this.networkOfferings.filter(offering => offering.networkmode === (this.isOfferingNatMode ? 'NATTED' : 'ROUTED'))
} }

View File

@ -1574,15 +1574,8 @@ public class HypervisorHostHelper {
VmwareHelper.setBasicVmConfig(vmConfig, cpuCount, cpuSpeedMHz, cpuReservedMHz, memoryMB, memoryReserveMB, guestOsIdentifier, limitCpuUse, false); VmwareHelper.setBasicVmConfig(vmConfig, cpuCount, cpuSpeedMHz, cpuReservedMHz, memoryMB, memoryReserveMB, guestOsIdentifier, limitCpuUse, false);
String newRootDiskController = controllerInfo.first(); Pair<String, String> chosenDiskControllers = VmwareHelper.chooseRequiredDiskControllers(controllerInfo, null, host, guestOsIdentifier);
String newDataDiskController = controllerInfo.second(); String scsiDiskController = HypervisorHostHelper.getScsiController(chosenDiskControllers);
String recommendedController = null;
if (VmwareHelper.isControllerOsRecommended(newRootDiskController) || VmwareHelper.isControllerOsRecommended(newDataDiskController)) {
recommendedController = host.getRecommendedDiskController(guestOsIdentifier);
}
Pair<String, String> updatedControllerInfo = new Pair<String, String>(newRootDiskController, newDataDiskController);
String scsiDiskController = HypervisorHostHelper.getScsiController(updatedControllerInfo, recommendedController);
// If there is requirement for a SCSI controller, ensure to create those. // If there is requirement for a SCSI controller, ensure to create those.
if (scsiDiskController != null) { if (scsiDiskController != null) {
int busNum = 0; int busNum = 0;
@ -2256,19 +2249,11 @@ public class HypervisorHostHelper {
return morHyperHost; return morHyperHost;
} }
public static String getScsiController(Pair<String, String> controllerInfo, String recommendedController) { public static String getScsiController(Pair<String, String> controllerInfo) {
String rootDiskController = controllerInfo.first(); String rootDiskController = controllerInfo.first();
String dataDiskController = controllerInfo.second(); String dataDiskController = controllerInfo.second();
// If "osdefault" is specified as controller type, then translate to actual recommended controller. String scsiDiskController; //If any of the controller provided is SCSI then return it's sub-type.
if (VmwareHelper.isControllerOsRecommended(rootDiskController)) {
rootDiskController = recommendedController;
}
if (VmwareHelper.isControllerOsRecommended(dataDiskController)) {
dataDiskController = recommendedController;
}
String scsiDiskController = null; //If any of the controller provided is SCSI then return it's sub-type.
if (isIdeController(rootDiskController) && isIdeController(dataDiskController)) { if (isIdeController(rootDiskController) && isIdeController(dataDiskController)) {
//Default controllers would exist //Default controllers would exist
return null; return null;

View File

@ -40,11 +40,14 @@ import javax.xml.datatype.DatatypeConfigurationException;
import javax.xml.datatype.DatatypeFactory; import javax.xml.datatype.DatatypeFactory;
import javax.xml.datatype.XMLGregorianCalendar; import javax.xml.datatype.XMLGregorianCalendar;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.hypervisor.vmware.mo.ClusterMO; import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.DatastoreFile; import com.cloud.hypervisor.vmware.mo.DatastoreFile;
import com.cloud.hypervisor.vmware.mo.DistributedVirtualSwitchMO; import com.cloud.hypervisor.vmware.mo.DistributedVirtualSwitchMO;
import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper;
import com.cloud.serializer.GsonHelper; import com.cloud.serializer.GsonHelper;
import com.cloud.storage.Volume;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.NetUtils;
import com.vmware.vim25.DatastoreInfo; import com.vmware.vim25.DatastoreInfo;
import com.vmware.vim25.DistributedVirtualPort; import com.vmware.vim25.DistributedVirtualPort;
@ -1064,4 +1067,76 @@ public class VmwareHelper {
} }
return vmdkAbsFile; return vmdkAbsFile;
} }
/**
 * Validates an instance's <code>rootDiskController</code> and <code>dataDiskController</code> details. Throws a
 * <code>CloudRuntimeException</code> if they are invalid.
 *
 * @param rootDiskControllerDetail root disk controller name taken from the instance details.
 * @param dataDiskControllerDetail data disk controller name taken from the instance details.
 * @throws CloudRuntimeException if either value does not map to a known disk controller type.
 */
public static void validateDiskControllerDetails(String rootDiskControllerDetail, String dataDiskControllerDetail) {
    // Check the original values directly so the exception message reports what the caller actually provided;
    // DiskControllerType.getType() maps unknown values to "none", and converting the parameter to that string
    // before validating (as was done previously) would make every failure report "[none]" instead of the bad input.
    if (DiskControllerType.getType(rootDiskControllerDetail) == DiskControllerType.none) {
        throw new CloudRuntimeException(String.format("[%s] is not a valid root disk controller", rootDiskControllerDetail));
    }
    if (DiskControllerType.getType(dataDiskControllerDetail) == DiskControllerType.none) {
        throw new CloudRuntimeException(String.format("[%s] is not a valid data disk controller", dataDiskControllerDetail));
    }
}
/**
 * Resolves the disk controllers to use for an instance's root disk and data disks.
 * Any controller specified as "osdefault" is replaced by the hypervisor's recommendation; if the resulting
 * root and data controllers share the same bus type, the root disk controller is used for both.
 *
 * @param controllerInfo pair containing the root disk and data disk controllers, respectively.
 * @param vmMo virtual machine to derive the recommended disk controllers from. If not null, <code>host</code> and <code>guestOsIdentifier</code> will be ignored.
 * @param host host to derive the recommended disk controllers from. Must be provided with <code>guestOsIdentifier</code>.
 * @param guestOsIdentifier used to derive the recommended disk controllers from the host.
 * @return pair with the controllers chosen for the root disk and the data disks, respectively.
 */
public static Pair<String, String> chooseRequiredDiskControllers(Pair<String, String> controllerInfo, VirtualMachineMO vmMo,
                                                                 VmwareHypervisorHost host, String guestOsIdentifier) throws Exception {
    // Prefer the VM's own recommendation; fall back to the host + guest OS identifier when no VM is given.
    String recommendedClassName = (vmMo != null) ? vmMo.getRecommendedDiskController(null) : host.getRecommendedDiskController(guestOsIdentifier);
    String recommendedController = DiskControllerType.getType(recommendedClassName).toString();

    String rootController = controllerInfo.first();
    String dataController = controllerInfo.second();
    if (isControllerOsRecommended(rootController)) {
        rootController = recommendedController;
    }
    if (isControllerOsRecommended(dataController)) {
        dataController = recommendedController;
    }

    if (diskControllersShareTheSameBusType(rootController, dataController)) {
        s_logger.debug("Root and data disk controllers share the same bus type; therefore, we will only use the controllers specified for the root disk.");
        return new Pair<>(rootController, rootController);
    }
    return new Pair<>(rootController, dataController);
}
/**
 * Returns true when the two controller names resolve to the same controller type, or when both belong to
 * the SCSI family (scsi, lsilogic, lsisas1068, buslogic, pvscsi) and therefore share the SCSI bus.
 */
protected static boolean diskControllersShareTheSameBusType(String rootDiskController, String dataDiskController) {
    DiskControllerType rootType = DiskControllerType.getType(rootDiskController);
    DiskControllerType dataType = DiskControllerType.getType(dataDiskController);
    if (rootType == dataType) {
        return true;
    }
    // Different types can still share a bus: every SCSI sub-type rides the same SCSI bus.
    List<DiskControllerType> scsiFamily = List.of(DiskControllerType.scsi, DiskControllerType.lsilogic,
            DiskControllerType.lsisas1068, DiskControllerType.buslogic, DiskControllerType.pvscsi);
    return scsiFamily.contains(rootType) && scsiFamily.contains(dataType);
}
/**
 * Identifies whether the disk is a root or data disk, and returns the controller from the provided pair that should
 * be used for the disk.
 *
 * @param controllerInfo pair containing the root disk and data disk controllers, respectively.
 * @param disk disk whose type (or sequence number 0) marks it as the root disk.
 * @return the controller name chosen for the given disk.
 */
public static String getControllerBasedOnDiskType(Pair<String, String> controllerInfo, DiskTO disk) {
    // A disk is treated as root either by its declared type or by occupying sequence slot 0.
    // NOTE(review): if getDiskSeq() returns a boxed value, a null here would NPE on unboxing — TODO confirm callers always set it.
    boolean isRootDisk = disk.getType() == Volume.Type.ROOT || disk.getDiskSeq() == 0;
    if (isRootDisk) {
        s_logger.debug(String.format("Choosing disk controller [%s] for the root disk.", controllerInfo.first()));
        return controllerInfo.first();
    }
    s_logger.debug(String.format("Choosing disk controller [%s] for the data disks.", controllerInfo.second()));
    return controllerInfo.second();
}
} }