From ff2b5645b505b75fccf4719513374271a33ed80c Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Sat, 9 Jun 2001 18:06:58 +0000 Subject: [PATCH] Two fixes to the out-of-swap process termination code. First, start killing processes a little earlier to avoid a deadlock. Second, when calculating the 'largest process' do not just count RSS. Instead count the RSS + SWAP used by the process. Without this the code tended to kill small inconsequential processes like, oh, sshd, rather than one of the many 'eatmem 200MB' I run on a whim :-). This fix has been extensively tested on -stable and somewhat tested on -current and will be MFCd in a few days. Shamed into fixing this by: ps --- sys/vm/vm_map.c | 35 +++++++++++++++++++++++++++++++++++ sys/vm/vm_map.h | 1 + sys/vm/vm_pageout.c | 11 ++++++++--- 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 428c1943f2ca..d04081908e5c 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -225,6 +225,41 @@ vmspace_free(vm) } } +/* + * vmspace_swap_count() - count the approximate swap usage in pages for a + * vmspace. + * + * Swap usage is determined by taking the proportional swap used by + * VM objects backing the VM map. To make up for fractional losses, + * if the VM object has any swap use at all the associated map entries + * count for at least 1 swap page. 
+ */ +int +vmspace_swap_count(struct vmspace *vmspace) +{ + vm_map_t map = &vmspace->vm_map; + vm_map_entry_t cur; + int count = 0; + + for (cur = map->header.next; cur != &map->header; cur = cur->next) { + vm_object_t object; + + if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 && + (object = cur->object.vm_object) != NULL && + object->type == OBJT_SWAP + ) { + int n = (cur->end - cur->start) / PAGE_SIZE; + + if (object->un_pager.swp.swp_bcount) { + count += object->un_pager.swp.swp_bcount * SWAP_META_PAGES * n / + object->size + 1; + } + } + } + return(count); +} + + /* * vm_map_create: * diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h index 241a80cefaa5..5ea3ccf993c9 100644 --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -365,6 +365,7 @@ int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *)); void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t)); int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int)); int vm_map_growstack __P((struct proc *p, vm_offset_t addr)); +int vmspace_swap_count __P((struct vmspace *vmspace)); #endif #endif /* _VM_MAP_ */ diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index c1e9fece8263..dd96cb250e46 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -1139,8 +1139,8 @@ vm_pageout_scan(int pass) } /* - * make sure that we have swap space -- if we are low on memory and - * swap -- then kill the biggest process. + * If we are out of swap and were not able to reach our paging + * target, kill the largest process. * * We keep the process bigproc locked once we find it to keep anyone * from messing with it; however, there is a possibility of @@ -1149,7 +1149,11 @@ vm_pageout_scan(int pass) * lock while walking this list. To avoid this, we don't block on * the process lock but just skip a process if it is already locked. 
*/ + if ((vm_swap_size < 64 && vm_page_count_min()) || + (swap_pager_full && vm_paging_target() > 0)) { +#if 0 if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) { +#endif mtx_unlock(&vm_mtx); bigproc = NULL; bigsize = 0; @@ -1184,7 +1188,8 @@ vm_pageout_scan(int pass) /* * get the process size */ - size = vmspace_resident_count(p->p_vmspace); + size = vmspace_resident_count(p->p_vmspace) + + vmspace_swap_count(p->p_vmspace); /* * if the this process is bigger than the biggest one * remember it.