mirror of
https://github.com/apache/cloudstack.git
synced 2025-10-26 08:42:29 +01:00
server: NPE checks and improved case checking
- Pool allocation checks for both root and data disks
- NPE checks to avoid adding a null object to a collection or attempting to migrate a null VM
- HA work retries need to be incremented, and the work given up when the maximum number of retries is exceeded
- VM creation should validate the IP address format for both IPv4 and IPv6
- If userdata is not supported by a network, then fail early if userdata, an SSH key, or a password-enabled template is passed/used

Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com> (cherry picked from commit 24435dd6bc2424da18277ca00229d1d3bb0ec284) Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>
This commit is contained in:
parent
06e353e5c7
commit
ca99603321
@ -1215,7 +1215,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
|
||||
// volume is ready and the pool should be reused.
|
||||
// In this case, also check if rest of the volumes are ready and can
|
||||
// be reused.
|
||||
if (plan.getPoolId() != null) {
|
||||
if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) {
|
||||
s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId());
|
||||
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
|
||||
StoragePool pool = null;
|
||||
|
||||
@ -638,6 +638,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
|
||||
_haDao.update(work.getId(), work);
|
||||
|
||||
VMInstanceVO vm = _instanceDao.findById(vmId);
|
||||
if (vm == null) {
|
||||
return null;
|
||||
}
|
||||
// First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency.
|
||||
_itMgr.migrateAway(vm.getUuid(), srcHostId);
|
||||
return null;
|
||||
@ -757,7 +760,10 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
|
||||
List<HaWorkVO> works = _haDao.findTakenWorkItems(WorkType.Migration);
|
||||
List<VMInstanceVO> vms = new ArrayList<VMInstanceVO>(works.size());
|
||||
for (HaWorkVO work : works) {
|
||||
vms.add(_instanceDao.findById(work.getInstanceId()));
|
||||
VMInstanceVO vm = _instanceDao.findById(work.getInstanceId());
|
||||
if (vm != null) {
|
||||
vms.add(vm);
|
||||
}
|
||||
}
|
||||
return vms;
|
||||
}
|
||||
@ -917,6 +923,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
|
||||
} else {
|
||||
s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10));
|
||||
work.setTimeToTry(nextTime);
|
||||
work.setTimesTried(work.getTimesTried() + 1);
|
||||
work.setServerId(null);
|
||||
work.setDateTaken(null);
|
||||
}
|
||||
@ -927,6 +934,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
|
||||
|
||||
s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10));
|
||||
work.setTimeToTry(nextTime);
|
||||
work.setTimesTried(work.getTimesTried() + 1);
|
||||
work.setServerId(null);
|
||||
work.setDateTaken(null);
|
||||
|
||||
@ -935,6 +943,10 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai
|
||||
VMInstanceVO vm = _instanceDao.findById(work.getInstanceId());
|
||||
work.setUpdateTime(vm.getUpdated());
|
||||
work.setPreviousState(vm.getState());
|
||||
if (!Step.Done.equals(work.getStep()) && work.getTimesTried() >= _maxRetries) {
|
||||
s_logger.warn("Giving up, retries max times for work: " + work);
|
||||
work.setStep(Step.Done);
|
||||
}
|
||||
}
|
||||
_haDao.update(work.getId(), work);
|
||||
} catch (final Throwable th) {
|
||||
|
||||
@ -1016,6 +1016,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
||||
|
||||
NicProfile profile = new NicProfile(null, null);
|
||||
if (ipAddress != null) {
|
||||
if (!(NetUtils.isValidIp(ipAddress) || NetUtils.isValidIpv6(ipAddress))) {
|
||||
throw new InvalidParameterValueException("Invalid format for IP address parameter: " + ipAddress);
|
||||
}
|
||||
profile = new NicProfile(ipAddress, null);
|
||||
}
|
||||
|
||||
@ -2892,6 +2895,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
||||
}
|
||||
|
||||
profile.setDefaultNic(true);
|
||||
if (!_networkModel.areServicesSupportedInNetwork(network.getId(), new Service[]{Service.UserData})) {
|
||||
if ((userData != null) && (!userData.isEmpty())) {
|
||||
throw new InvalidParameterValueException("Unable to deploy VM as UserData is provided while deploying the VM, but there is no support for " + Network.Service.UserData.getName() + " service in the default network " + network.getId());
|
||||
}
|
||||
|
||||
if ((sshPublicKey != null) && (!sshPublicKey.isEmpty())) {
|
||||
throw new InvalidParameterValueException("Unable to deploy VM as SSH keypair is provided while deploying the VM, but there is no support for " + Network.Service.UserData.getName() + " service in the default network " + network.getId());
|
||||
}
|
||||
|
||||
if (template.getEnablePassword()) {
|
||||
throw new InvalidParameterValueException("Unable to deploy VM as template " + template.getId() + " is password enabled, but there is no support for " + Network.Service.UserData.getName() + " service in the default network " + network.getId());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
networks.add(new Pair<NetworkVO, NicProfile>(network, profile));
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user