Move the code for doing out-of-memory kills from vm_pageout_scan()

into the separate function vm_pageout_oom(). Supply a parameter for
vm_pageout_oom() describing a reason for the call.

Call vm_pageout_oom() from swp_pager_meta_build() when the swap zone
is exhausted.

Reviewed by:	alc
Tested by:	pho, jhb
MFC after:	2 weeks
This commit is contained in:
Konstantin Belousov 2008-09-29 19:45:12 +00:00
parent 051bc8ead2
commit 2025d69ba7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=183474
3 changed files with 88 additions and 72 deletions

View File

@ -1711,9 +1711,12 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
if (swap == NULL) {
mtx_unlock(&swhash_mtx);
VM_OBJECT_UNLOCK(object);
if (uma_zone_exhausted(swap_zone))
panic("swap zone exhausted, increase kern.maxswzone\n");
VM_WAIT;
if (uma_zone_exhausted(swap_zone)) {
printf("swap zone exhausted, increase kern.maxswzone\n");
vm_pageout_oom(VM_OOM_SWAPZ);
pause("swzonex", 10);
} else
VM_WAIT;
VM_OBJECT_LOCK(object);
goto retry;
}

View File

@ -681,9 +681,6 @@ vm_pageout_scan(int pass)
struct vm_page marker;
int page_shortage, maxscan, pcount;
int addl_page_shortage, addl_page_shortage_init;
struct proc *p, *bigproc;
struct thread *td;
vm_offset_t size, bigsize;
vm_object_t object;
int actcount;
int vnodes_skipped = 0;
@ -1174,7 +1171,22 @@ vm_pageout_scan(int pass)
* doing this on the first pass in order to give ourselves a
* chance to flush out dirty vnode-backed pages and to allow
* active pages to be moved to the inactive queue and reclaimed.
*
*/
if (pass != 0 &&
((swap_pager_avail < 64 && vm_page_count_min()) ||
(swap_pager_full && vm_paging_target() > 0)))
vm_pageout_oom(VM_OOM_MEM);
}
void
vm_pageout_oom(int shortage)
{
struct proc *p, *bigproc;
vm_offset_t size, bigsize;
struct thread *td;
/*
* We keep the process bigproc locked once we find it to keep anyone
* from messing with it; however, there is a possibility of
* deadlock if process B is bigproc and one of its child processes
@ -1182,75 +1194,72 @@ vm_pageout_scan(int pass)
* lock while walking this list. To avoid this, we don't block on
* the process lock but just skip a process if it is already locked.
*/
if (pass != 0 &&
((swap_pager_avail < 64 && vm_page_count_min()) ||
(swap_pager_full && vm_paging_target() > 0))) {
bigproc = NULL;
bigsize = 0;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
int breakout;
bigproc = NULL;
bigsize = 0;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
int breakout;
if (PROC_TRYLOCK(p) == 0)
continue;
/*
* If this is a system or protected process, skip it.
*/
if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
(p->p_flag & P_PROTECTED) ||
((p->p_pid < 48) && (swap_pager_avail != 0))) {
PROC_UNLOCK(p);
continue;
}
/*
* If the process is in a non-running type state,
* don't touch it. Check all the threads individually.
*/
breakout = 0;
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (!TD_ON_RUNQ(td) &&
!TD_IS_RUNNING(td) &&
!TD_IS_SLEEPING(td)) {
thread_unlock(td);
breakout = 1;
break;
}
if (PROC_TRYLOCK(p) == 0)
continue;
/*
* If this is a system or protected process, skip it.
*/
if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
(p->p_flag & P_PROTECTED) ||
((p->p_pid < 48) && (swap_pager_avail != 0))) {
PROC_UNLOCK(p);
continue;
}
/*
* If the process is in a non-running type state,
* don't touch it. Check all the threads individually.
*/
breakout = 0;
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
if (!TD_ON_RUNQ(td) &&
!TD_IS_RUNNING(td) &&
!TD_IS_SLEEPING(td)) {
thread_unlock(td);
breakout = 1;
break;
}
if (breakout) {
PROC_UNLOCK(p);
continue;
}
/*
* get the process size
*/
if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
PROC_UNLOCK(p);
continue;
}
size = vmspace_swap_count(p->p_vmspace);
vm_map_unlock_read(&p->p_vmspace->vm_map);
thread_unlock(td);
}
if (breakout) {
PROC_UNLOCK(p);
continue;
}
/*
* get the process size
*/
if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
PROC_UNLOCK(p);
continue;
}
size = vmspace_swap_count(p->p_vmspace);
vm_map_unlock_read(&p->p_vmspace->vm_map);
if (shortage == VM_OOM_MEM)
size += vmspace_resident_count(p->p_vmspace);
/*
* if this process is bigger than the biggest one
* remember it.
*/
if (size > bigsize) {
if (bigproc != NULL)
PROC_UNLOCK(bigproc);
bigproc = p;
bigsize = size;
} else
PROC_UNLOCK(p);
}
sx_sunlock(&allproc_lock);
if (bigproc != NULL) {
killproc(bigproc, "out of swap space");
sched_nice(bigproc, PRIO_MIN);
PROC_UNLOCK(bigproc);
wakeup(&cnt.v_free_count);
}
/*
* if this process is bigger than the biggest one
* remember it.
*/
if (size > bigsize) {
if (bigproc != NULL)
PROC_UNLOCK(bigproc);
bigproc = p;
bigsize = size;
} else
PROC_UNLOCK(p);
}
sx_sunlock(&allproc_lock);
if (bigproc != NULL) {
killproc(bigproc, "out of swap space");
sched_nice(bigproc, PRIO_MIN);
PROC_UNLOCK(bigproc);
wakeup(&cnt.v_free_count);
}
}

View File

@ -83,6 +83,9 @@ extern int vm_pageout_page_count;
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2
#define VM_OOM_MEM 1
#define VM_OOM_SWAPZ 2
/*
* Exported routines.
*/
@ -100,5 +103,6 @@ extern void vm_waitpfault(void);
#ifdef _KERNEL
boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
int vm_pageout_flush(vm_page_t *, int, int);
void vm_pageout_oom(int shortage);
#endif
#endif /* _VM_VM_PAGEOUT_H_ */