Two fixes to the out-of-swap process termination code. First, start killing
processes a little earlier to avoid a deadlock. Second, when calculating the
'largest process', do not just count RSS; count the RSS + SWAP used by the
process instead. Without this the code tended to kill small, inconsequential
processes like, oh, sshd, rather than one of the many 'eatmem 200MB' I run on
a whim :-). This fix has been extensively tested on -stable and somewhat
tested on -current and will be MFCd in a few days.

Shamed into fixing this by: ps
parent d520fe23e8
commit ff2b5645b5

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=77948
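To see why the sizing change matters, here is a rough, hypothetical illustration (not part of the commit; the process names echo the ones in the message, but every number is invented): a memory hog that has been mostly paged out looks tiny by RSS alone, yet is clearly the largest process once its swap use is counted.

/* Hypothetical illustration only -- not kernel code; all numbers invented. */
#include <stdio.h>

int
main(void)
{
	/* Approximate page counts, assuming 4K pages. */
	int sshd_rss = 2000, sshd_swap = 0;		/* ~8MB resident */
	int eatmem_rss = 500, eatmem_swap = 50000;	/* ~200MB, mostly swapped out */

	/* Old metric (RSS only): sshd looks "bigger" and would be killed. */
	printf("RSS only:   sshd=%d  eatmem=%d\n", sshd_rss, eatmem_rss);

	/* New metric (RSS + swap): eatmem is correctly the largest process. */
	printf("RSS + swap: sshd=%d  eatmem=%d\n",
	    sshd_rss + sshd_swap, eatmem_rss + eatmem_swap);
	return (0);
}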
@@ -225,6 +225,41 @@ vmspace_free(vm)
 	}
 }
 
+/*
+ * vmspace_swap_count() - count the approximate swap useage in pages for a
+ *			  vmspace.
+ *
+ *	Swap useage is determined by taking the proportional swap used by
+ *	VM objects backing the VM map.  To make up for fractional losses,
+ *	if the VM object has any swap use at all the associated map entries
+ *	count for at least 1 swap page.
+ */
+int
+vmspace_swap_count(struct vmspace *vmspace)
+{
+	vm_map_t map = &vmspace->vm_map;
+	vm_map_entry_t cur;
+	int count = 0;
+
+	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
+		vm_object_t object;
+
+		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
+		    (object = cur->object.vm_object) != NULL &&
+		    object->type == OBJT_SWAP
+		) {
+			int n = (cur->end - cur->start) / PAGE_SIZE;
+
+			if (object->un_pager.swp.swp_bcount) {
+				count += object->un_pager.swp.swp_bcount * SWAP_META_PAGES * n /
+				    object->size + 1;
+			}
+		}
+	}
+	return(count);
+}
+
+
 /*
  * vm_map_create:
  *
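A rough worked example of the estimate above, with invented numbers (SWAP_META_PAGES is build-dependent, so 16 here is only an assumption): swp_bcount * SWAP_META_PAGES approximates the object's swapped pages, the n / object->size factor prorates that to the portion of the object this map entry maps, and the trailing + 1 makes any swapping object count for at least one page.

/* Hypothetical walk-through of the proration in vmspace_swap_count(). */
#include <stdio.h>

int
main(void)
{
	int swp_bcount = 10;		/* swap meta blocks held by the object */
	int swap_meta_pages = 16;	/* pages per meta block (assumed value) */
	int object_size = 1000;		/* object size in pages */
	int n = 500;			/* pages covered by this map entry */

	/* ~160 swapped pages, half of which are attributed to this entry. */
	int count = swp_bcount * swap_meta_pages * n / object_size + 1;

	printf("estimated swap pages charged to this entry: %d\n", count);	/* 81 */
	return (0);
}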
@@ -365,6 +365,7 @@ int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
 void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
 int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
 int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
+int vmspace_swap_count __P((struct vmspace *vmspace));
 
 #endif
 #endif				/* _VM_MAP_ */
@@ -1139,8 +1139,8 @@ vm_pageout_scan(int pass)
 	}
 
 	/*
-	 * make sure that we have swap space -- if we are low on memory and
-	 * swap -- then kill the biggest process.
+	 * If we are out of swap and were not able to reach our paging
+	 * target, kill the largest process.
 	 *
 	 * We keep the process bigproc locked once we find it to keep anyone
 	 * from messing with it; however, there is a possibility of
@@ -1149,7 +1149,11 @@ vm_pageout_scan(int pass)
 	 * lock while walking this list.  To avoid this, we don't block on
 	 * the process lock but just skip a process if it is already locked.
 	 */
-	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
+	if ((vm_swap_size < 64 && vm_page_count_min()) ||
+	    (swap_pager_full && vm_paging_target() > 0)) {
+#if 0
+	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
+#endif
 		mtx_unlock(&vm_mtx);
 		bigproc = NULL;
 		bigsize = 0;
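For the 'kill a little earlier' half of the fix, here is a simplified sketch of the old and new triggers with the kernel state passed in as plain parameters (an illustration only, not the kernel code): the old test waited for the free-page count to hit its hard minimum, while the new one also fires as soon as the swap pager reports full and the pageout pass has missed its target.

/* Simplified sketch of the trigger change -- not the actual kernel code. */
#include <stdio.h>

static int
kill_trigger_old(int vm_swap_size, int swap_pager_full, int page_count_min)
{
	return ((vm_swap_size < 64 || swap_pager_full) && page_count_min);
}

static int
kill_trigger_new(int vm_swap_size, int swap_pager_full,
    int page_count_min, int paging_target)
{
	return ((vm_swap_size < 64 && page_count_min) ||
	    (swap_pager_full && paging_target > 0));
}

int
main(void)
{
	/*
	 * Hypothetical state: swap is exhausted and the pageout pass missed
	 * its target, but free pages have not yet reached the hard minimum.
	 */
	int vm_swap_size = 0, swap_pager_full = 1;
	int page_count_min = 0, paging_target = 32;

	printf("old trigger fires: %d\n",
	    kill_trigger_old(vm_swap_size, swap_pager_full, page_count_min));
	printf("new trigger fires: %d\n",
	    kill_trigger_new(vm_swap_size, swap_pager_full, page_count_min,
	    paging_target));
	return (0);
}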
@@ -1184,7 +1188,8 @@ vm_pageout_scan(int pass)
 			/*
 			 * get the process size
 			 */
-			size = vmspace_resident_count(p->p_vmspace);
+			size = vmspace_resident_count(p->p_vmspace) +
+				vmspace_swap_count(p->p_vmspace);
 			/*
 			 * if the this process is bigger than the biggest one
 			 * remember it.