mirror of https://github.com/apache/cloudstack.git
Merge remote-tracking branch 'origin/4.13'
commit 3ca5be40d4
@@ -702,18 +702,32 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ
         // If any router is running then send save password command otherwise
         // save the password in DB
+        boolean savePasswordResult = true;
+        boolean isVrRunning = false;
         for (final VirtualRouter router : routers) {
             if (router.getState() == State.Running) {
                 final boolean result = networkTopology.savePasswordToRouter(network, nic, uservm, router);
-                if (result) {
-                    // Explicit password reset, while VM hasn't generated a password yet.
-                    final UserVmVO userVmVO = _userVmDao.findById(vm.getId());
-                    userVmVO.setUpdateParameters(false);
-                    _userVmDao.update(userVmVO.getId(), userVmVO);
-                }
                 if (!result) {
                     s_logger.error("Unable to save password for VM " + vm.getInstanceName() +
                             " on router " + router.getInstanceName());
                     return false;
                 }
-                return result;
+                isVrRunning = true;
+                savePasswordResult = savePasswordResult && result;
             }
         }

+        // return the result only if one of the vr is running
+        if (isVrRunning) {
+            if (savePasswordResult) {
+                // Explicit password reset, while VM hasn't generated a password yet.
+                final UserVmVO userVmVO = _userVmDao.findById(vm.getId());
+                userVmVO.setUpdateParameters(false);
+                _userVmDao.update(userVmVO.getId(), userVmVO);
+            }
+            return savePasswordResult;
+        }
+
         final String password = (String) uservm.getParameter(VirtualMachineProfile.Param.VmPassword);
         final String password_encrypted = DBEncryptionUtil.encrypt(password);
         final UserVmVO userVmVO = _userVmDao.findById(vm.getId());
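The hunk above changes the password handling so that the password is pushed to every running virtual router rather than only the first one found, and is persisted in the database only when no router is running. A rough, self-contained sketch of that control flow follows; the Router type and the pushPasswordToRouter/savePasswordInDb helpers are hypothetical stand-ins for illustration, not CloudStack APIs.

    import java.util.List;

    // Hypothetical stand-ins, used only to illustrate the control flow of the hunk above.
    enum State { Running, Stopped }

    record Router(String name, State state) {}

    class PasswordSaveSketch {

        // Stand-in for networkTopology.savePasswordToRouter(...): push the password to one VR.
        private boolean pushPasswordToRouter(Router router, String password) {
            System.out.println("Saving password on router " + router.name());
            return true;
        }

        // Stand-in for the DB fallback path (in CloudStack the encrypted password is stored via the user VM DAO).
        private void savePasswordInDb(String password) {
            System.out.println("No running router; saving password in the DB");
        }

        boolean savePassword(List<Router> routers, String password) {
            boolean savePasswordResult = true;
            boolean isVrRunning = false;

            // Visit every Running router and AND the per-router results together.
            for (Router router : routers) {
                if (router.state() == State.Running) {
                    boolean result = pushPasswordToRouter(router, password);
                    if (!result) {
                        System.err.println("Unable to save password on router " + router.name());
                    }
                    isVrRunning = true;
                    savePasswordResult = savePasswordResult && result;
                }
            }

            // Report the aggregated result only if at least one router was running ...
            if (isVrRunning) {
                return savePasswordResult;
            }

            // ... otherwise fall back to persisting the password in the database.
            savePasswordInDb(password);
            return true;
        }
    }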
@@ -1120,7 +1120,9 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
         for (final DomainRouterVO router : routers) {
             final List<Long> routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId());

-            for (final Long routerGuestNtwkId : routerGuestNtwkIds) {
+            final Long vpcId = router.getVpcId();
+            if (vpcId != null || routerGuestNtwkIds.size() > 0) {
+                Long routerGuestNtwkId = vpcId != null ? vpcId : routerGuestNtwkIds.get(0);
                 if (router.getRedundantState() == RedundantState.MASTER) {
                     if (networkRouterMaps.containsKey(routerGuestNtwkId)) {
                         final DomainRouterVO dupRouter = networkRouterMaps.get(routerGuestNtwkId);
@@ -1129,7 +1131,6 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
                         final String context = "Virtual router (name: " + router.getHostName() + ", id: " + router.getId() + " and router (name: " + dupRouter.getHostName()
                                 + ", id: " + router.getId() + ") are both in MASTER state! If the problem persist, restart both of routers. ";
                         _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context);
-                        _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, dupRouter.getDataCenterId(), dupRouter.getPodIdToDeployIn(), title, context);
                         s_logger.warn(context);
                     } else {
                         networkRouterMaps.put(routerGuestNtwkId, router);
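Together, the two hunks above change the duplicate-MASTER detection so that a VPC router is bucketed under its VPC id (a single VPC VR serves several guest networks) rather than being checked once per guest network, while a non-VPC router is keyed by its first guest network id. Reduced to a self-contained sketch, with a hypothetical RouterInfo record standing in for the relevant DomainRouterVO fields:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical stand-in for the DomainRouterVO fields the check relies on.
    record RouterInfo(long id, Long vpcId, List<Long> guestNetworkIds, boolean master) {}

    class DuplicateMasterCheckSketch {
        static void check(List<RouterInfo> routers) {
            Map<Long, RouterInfo> mastersByKey = new HashMap<>();
            for (RouterInfo router : routers) {
                if (router.vpcId() == null && router.guestNetworkIds().isEmpty()) {
                    continue; // nothing to key this router by
                }
                // Key by the VPC id for VPC routers, otherwise by the first guest network id.
                Long key = router.vpcId() != null ? router.vpcId() : router.guestNetworkIds().get(0);
                if (router.master()) {
                    RouterInfo dup = mastersByKey.putIfAbsent(key, router);
                    if (dup != null) {
                        System.err.println("Routers " + dup.id() + " and " + router.id()
                                + " both report MASTER for key " + key + "; an alert would be raised here.");
                    }
                }
            }
        }
    }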
@ -1213,8 +1214,14 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
|
||||
|
||||
updateSite2SiteVpnConnectionState(routers);
|
||||
|
||||
List<NetworkVO> networks = _networkDao.listVpcNetworks();
|
||||
s_logger.debug("Found " + networks.size() + " VPC networks to update Redundant State. ");
|
||||
List<NetworkVO> networks = new ArrayList<>();
|
||||
for (Vpc vpc : _vpcDao.listAll()) {
|
||||
List<NetworkVO> vpcNetworks = _networkDao.listByVpc(vpc.getId());
|
||||
if (vpcNetworks.size() > 0) {
|
||||
networks.add(vpcNetworks.get(0));
|
||||
}
|
||||
}
|
||||
s_logger.debug("Found " + networks.size() + " VPC's to update Redundant State. ");
|
||||
pushToUpdateQueue(networks);
|
||||
|
||||
networks = _networkDao.listRedundantNetworks();
|
||||
|
||||
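The hunk above builds the redundant-state update queue from one representative network per VPC instead of every VPC network, presumably because the redundant router pair, and therefore its MASTER/BACKUP state, is tracked per VPC rather than per tier. A minimal sketch of that selection, with hypothetical NetworkInfo/VpcInfo stand-ins:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-ins for the network and VPC views used by the manager.
    record NetworkInfo(long id, long vpcId) {}
    record VpcInfo(long id, List<NetworkInfo> networks) {}

    class VpcRedundantStateQueueSketch {
        // Pick at most one tier per VPC to represent the whole VPC in the update queue.
        static List<NetworkInfo> networksToQueue(List<VpcInfo> vpcs) {
            List<NetworkInfo> networks = new ArrayList<>();
            for (VpcInfo vpc : vpcs) {
                if (!vpc.networks().isEmpty()) {
                    networks.add(vpc.networks().get(0));
                }
            }
            return networks;
        }
    }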
@@ -174,21 +174,18 @@ class CsDhcp(CsDataBag):

     def add(self, entry):
         self.add_host(entry['ipv4_address'], entry['host_name'])
-        # Lease time set to effectively infinite (36000+ days) since we properly control all DHCP/DNS config via CloudStack.
+        # Lease time set to "infinite" since we properly control all DHCP/DNS config via CloudStack.
         # Infinite time helps avoid some edge cases which could cause DHCPNAK being sent to VMs since
         # (RHEL) system lose routes when they receive DHCPNAK.
         # When VM is expunged, its active lease and DHCP/DNS config is properly removed from related files in VR,
         # so the infinite duration of lease does not cause any issues or garbage.
         # There will be soon a PR which also regenerates the /var/lib/misc/dnsmasq.leases (active lease DB file)
         # in the new VR (when restarting network with cleanup), which will help around RHEL edge cases (described above)
         # for the VMs who are already running in productions systems with 30d lease time.
-        lease = randint(870000, 870010)
+        lease = 'infinite'

         if entry['default_entry']:
-            self.cloud.add("%s,%s,%s,%sh" % (entry['mac_address'],
-                                             entry['ipv4_address'],
-                                             entry['host_name'],
-                                             lease))
+            self.cloud.add("%s,%s,%s,%s" % (entry['mac_address'],
+                                            entry['ipv4_address'],
+                                            entry['host_name'],
+                                            lease))
         else:
             tag = entry['ipv4_address'].replace(".", "_")
-            self.cloud.add("%s,set:%s,%s,%s,%sh" % (entry['mac_address'],
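For reference, dnsmasq accepts the literal lease time "infinite" in dhcp-host entries, so with the new format string (no trailing "h" hour suffix) a generated entry in the dhcp-hosts file the router feeds to dnsmasq would look roughly like the line below; the MAC address, IP and hostname are made-up values.

    02:00:4c:1f:00:01,10.1.1.15,vm-example,infinite

Previously the same entry would have carried a fixed hour count produced by randint, for example a trailing field of 870005h.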