Move the code for doing the out-of-memory kill from vm_pageout_scan()
into the separate function vm_pageout_oom(). Supply a parameter for vm_pageout_oom() describing the reason for the call. Call vm_pageout_oom() from swp_pager_meta_build() when the swap zone is exhausted.

Reviewed by:	alc
Tested by:	pho, jhb
MFC after:	2 weeks
This commit is contained in:
parent
7e5e6d6f27
commit
de9e891748
@@ -1711,8 +1711,11 @@ retry:
|
||||
if (swap == NULL) {
|
||||
mtx_unlock(&swhash_mtx);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
if (uma_zone_exhausted(swap_zone))
|
||||
panic("swap zone exhausted, increase kern.maxswzone\n");
|
||||
if (uma_zone_exhausted(swap_zone)) {
|
||||
printf("swap zone exhausted, increase kern.maxswzone\n");
|
||||
vm_pageout_oom(VM_OOM_SWAPZ);
|
||||
pause("swzonex", 10);
|
||||
} else
|
||||
VM_WAIT;
|
||||
VM_OBJECT_LOCK(object);
|
||||
goto retry;
|
||||
|
@@ -681,9 +681,6 @@ vm_pageout_scan(int pass)
|
||||
struct vm_page marker;
|
||||
int page_shortage, maxscan, pcount;
|
||||
int addl_page_shortage, addl_page_shortage_init;
|
||||
struct proc *p, *bigproc;
|
||||
struct thread *td;
|
||||
vm_offset_t size, bigsize;
|
||||
vm_object_t object;
|
||||
int actcount;
|
||||
int vnodes_skipped = 0;
|
||||
@@ -1174,7 +1171,22 @@ unlock_and_continue:
|
||||
* doing this on the first pass in order to give ourselves a
|
||||
* chance to flush out dirty vnode-backed pages and to allow
|
||||
* active pages to be moved to the inactive queue and reclaimed.
|
||||
*
|
||||
*/
|
||||
if (pass != 0 &&
|
||||
((swap_pager_avail < 64 && vm_page_count_min()) ||
|
||||
(swap_pager_full && vm_paging_target() > 0)))
|
||||
vm_pageout_oom(VM_OOM_MEM);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
vm_pageout_oom(int shortage)
|
||||
{
|
||||
struct proc *p, *bigproc;
|
||||
vm_offset_t size, bigsize;
|
||||
struct thread *td;
|
||||
|
||||
/*
|
||||
* We keep the process bigproc locked once we find it to keep anyone
|
||||
* from messing with it; however, there is a possibility of
|
||||
* deadlock if process B is bigproc and one of it's child processes
|
||||
@@ -1182,9 +1194,6 @@ unlock_and_continue:
|
||||
* lock while walking this list. To avoid this, we don't block on
|
||||
* the process lock but just skip a process if it is already locked.
|
||||
*/
|
||||
if (pass != 0 &&
|
||||
((swap_pager_avail < 64 && vm_page_count_min()) ||
|
||||
(swap_pager_full && vm_paging_target() > 0))) {
|
||||
bigproc = NULL;
|
||||
bigsize = 0;
|
||||
sx_slock(&allproc_lock);
|
||||
@@ -1231,6 +1240,7 @@ unlock_and_continue:
|
||||
}
|
||||
size = vmspace_swap_count(p->p_vmspace);
|
||||
vm_map_unlock_read(&p->p_vmspace->vm_map);
|
||||
if (shortage == VM_OOM_MEM)
|
||||
size += vmspace_resident_count(p->p_vmspace);
|
||||
/*
|
||||
* if the this process is bigger than the biggest one
|
||||
@@ -1251,7 +1261,6 @@ unlock_and_continue:
|
||||
PROC_UNLOCK(bigproc);
|
||||
wakeup(&cnt.v_free_count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -83,6 +83,9 @@ extern int vm_pageout_page_count;
|
||||
#define VM_SWAP_NORMAL 1
|
||||
#define VM_SWAP_IDLE 2
|
||||
|
||||
#define VM_OOM_MEM 1
|
||||
#define VM_OOM_SWAPZ 2
|
||||
|
||||
/*
|
||||
* Exported routines.
|
||||
*/
|
||||
@@ -100,5 +103,6 @@ extern void vm_waitpfault(void);
|
||||
#ifdef _KERNEL
|
||||
boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
|
||||
int vm_pageout_flush(vm_page_t *, int, int);
|
||||
void vm_pageout_oom(int shortage);
|
||||
#endif
|
||||
#endif /* _VM_VM_PAGEOUT_H_ */
|
||||
|
Loading…
x
Reference in New Issue
Block a user