Support an optional, sysctl-enabled feature of idle process swapout. This
is apparently useful for large shell systems, or systems with long-running
idle processes.  To enable the feature:

	sysctl -w vm.swap_idle_enabled=1

Please note that some of the other vm sysctl variables have been renamed
to be more accurate.
Submitted by:	Much of it from Matt Dillon <dillon@best.net>
This commit is contained in:
John Dyson 1997-12-06 02:23:36 +00:00
parent 0ee6e540f5
commit ceb0cf87e8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=31563
4 changed files with 91 additions and 33 deletions

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
* $Id: vm_extern.h,v 1.33 1997/04/07 07:16:04 peter Exp $
* $Id: vm_extern.h,v 1.34 1997/04/13 01:48:33 dyson Exp $
*/
#ifndef _VM_EXTERN_H_
@ -74,7 +74,7 @@ vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, b
void munmapfd __P((struct proc *, int));
int pager_cache __P((vm_object_t, boolean_t));
int swaponvp __P((struct proc *, struct vnode *, dev_t , u_long));
void swapout_procs __P((void));
void swapout_procs __P((int));
int useracc __P((caddr_t, int, int));
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, int));
void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));

View File

@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.66 1997/09/01 03:17:16 bde Exp $
* $Id: vm_glue.c,v 1.67 1997/11/07 08:53:42 phk Exp $
*/
#include "opt_rlimit.h"
@ -71,6 +71,7 @@
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/unistd.h>
@ -372,6 +373,22 @@ scheduler(dummy)
(((p)->p_lock == 0) && \
((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)
/*
* Swap_idle_threshold1 is the guaranteed swapped in time for a process
*/
int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
CTLFLAG_RW, &swap_idle_threshold1, 0, "");
/*
* Swap_idle_threshold2 is the time that a process can be idle before
* it will be swapped out, if idle swapping is enabled.
*/
int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
CTLFLAG_RW, &swap_idle_threshold2, 0, "");
/*
* Swapout is driven by the pageout daemon. Very simple, we find eligible
* procs and unwire their u-areas. We try to always "swap" at least one
@ -381,7 +398,7 @@ scheduler(dummy)
* if any, otherwise the longest-resident process.
*/
void
swapout_procs()
swapout_procs(int action)
{
register struct proc *p;
struct proc *outp, *outp2;
@ -411,11 +428,22 @@ swapout_procs()
continue;
/*
* do not swapout a process waiting on a critical
* event of some kind
* Do not swapout a process waiting on a critical
* event of some kind. Also guarantee swap_idle_threshold1
* time in memory.
*/
if (((p->p_priority & 0x7f) < PSOCK) ||
(p->p_slptime <= 10))
(p->p_slptime < swap_idle_threshold1))
continue;
/*
* If the system is under memory stress, or if we are swapping
* idle processes >= swap_idle_threshold2, then swap the process
* out.
*/
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
(p->p_slptime < swap_idle_threshold2)))
continue;
++vm->vm_refcnt;
@ -436,11 +464,15 @@ swapout_procs()
* If the process has been asleep for awhile and had
* most of its pages taken away already, swap it out.
*/
swapout(p);
vm_map_deallocate(&vm->vm_map);
vmspace_free(vm);
didswap++;
goto retry;
if ((action & VM_SWAP_NORMAL) ||
((action & VM_SWAP_IDLE) &&
(p->p_slptime > swap_idle_threshold2))) {
swapout(p);
vm_map_deallocate(&vm->vm_map);
vmspace_free(vm);
didswap++;
goto retry;
}
}
}
/*

View File

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.101 1997/12/04 19:00:56 dyson Exp $
* $Id: vm_pageout.c,v 1.102 1997/12/05 05:41:06 dyson Exp $
*/
/*
@ -144,11 +144,13 @@ int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
int defer_swap_pageouts=0;
int disable_swap_pageouts=0;
int vm_maxlaunder=100;
int max_page_launder=100;
#if defined(NO_SWAPPING)
int vm_swapping_enabled=0;
int vm_swap_enabled=0;
int vm_swap_idle_enabled=0;
#else
int vm_swapping_enabled=1;
int vm_swap_enabled=1;
int vm_swap_idle_enabled=0;
#endif
SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
@ -167,21 +169,25 @@ SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");
#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
CTLFLAG_RD, &vm_swapping_enabled, 0, "");
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swapping_enabled,
CTLFLAG_RW, &vm_swapping_enabled, 0, "");
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
CTLFLAG_RW, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
#endif
SYSCTL_INT(_vm, OID_AUTO, defer_swap_pageouts,
SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
CTLFLAG_RW, &defer_swap_pageouts, 0, "");
SYSCTL_INT(_vm, OID_AUTO, disable_swap_pageouts,
SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
CTLFLAG_RW, &disable_swap_pageouts, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_maxlaunder,
CTLFLAG_RW, &vm_maxlaunder, 0, "");
SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
CTLFLAG_RW, &max_page_launder, 0, "");
#define VM_PAGEOUT_PAGE_COUNT 16
@ -619,10 +625,10 @@ vm_pageout_scan()
pages_freed = 0;
addl_page_shortage = 0;
if (vm_maxlaunder == 0)
vm_maxlaunder = 1;
maxlaunder = (cnt.v_inactive_target > vm_maxlaunder) ?
vm_maxlaunder : cnt.v_inactive_target;
if (max_page_launder == 0)
max_page_launder = 1;
maxlaunder = (cnt.v_inactive_target > max_page_launder) ?
max_page_launder : cnt.v_inactive_target;
rescan0:
maxscan = cnt.v_inactive_count;
@ -956,6 +962,20 @@ vm_pageout_scan()
}
splx(s);
#if !defined(NO_SWAPPING)
/*
* Idle process swapout -- run once per second.
*/
if (vm_swap_idle_enabled) {
static long lsec;
if (time.tv_sec != lsec) {
vm_pageout_req_swapout |= VM_SWAP_IDLE;
vm_req_vmdaemon();
lsec = time.tv_sec;
}
}
#endif
/*
* If we didn't get enough free pages, and we have skipped a vnode
* in a writeable object, wakeup the sync daemon. And kick swapout
@ -971,10 +991,10 @@ vm_pageout_scan()
}
}
#if !defined(NO_SWAPPING)
if (vm_swapping_enabled &&
if (vm_swap_enabled &&
(cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
vm_req_vmdaemon();
vm_pageout_req_swapout = 1;
vm_pageout_req_swapout |= VM_SWAP_NORMAL;
}
#endif
}
@ -1188,7 +1208,7 @@ vm_pageout()
if (vm_pageout_stats_free_max == 0)
vm_pageout_stats_free_max = 25;
vm_maxlaunder = (cnt.v_page_count > 1800 ? 32 : 16);
max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
swap_pager_swap_init();
/*
@ -1257,7 +1277,7 @@ vm_daemon()
while (TRUE) {
tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
if (vm_pageout_req_swapout) {
swapout_procs();
swapout_procs(vm_pageout_req_swapout);
vm_pageout_req_swapout = 0;
}
/*

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_pageout.h,v 1.20 1997/02/22 09:48:34 peter Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@ -83,6 +83,12 @@ extern int vm_pageout_pages_needed;
#define VM_PAGEOUT_SYNC 1
#define VM_PAGEOUT_FORCE 2
/*
* Swap out requests
*/
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2
/*
* Exported routines.
*/