Add hysteresis to alpha version of vm_page_zero_idle().
commit d718be7be7
parent faa273d5c2
@@ -38,7 +38,7 @@
  *
  * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
  * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- * $Id: vm_machdep.c,v 1.8 1999/01/26 02:49:51 julian Exp $
+ * $Id: vm_machdep.c,v 1.9 1999/02/08 00:37:35 dillon Exp $
  */
 /*
  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -447,29 +447,30 @@ int
 vm_page_zero_idle()
 {
         static int free_rover;
+        static int zero_state;
         vm_page_t m;
         int s;
 
         /*
-         * XXX
-         * We stop zeroing pages when there are sufficent prezeroed pages.
-         * This threshold isn't really needed, except we want to
-         * bypass unneeded calls to vm_page_list_find, and the
-         * associated cache flush and latency.  The pre-zero will
-         * still be called when there are significantly more
-         * non-prezeroed pages than zeroed pages.  The threshold
-         * of half the number of reserved pages is arbitrary, but
-         * approximately the right amount.  Eventually, we should
-         * perhaps interrupt the zero operation when a process
-         * is found to be ready to run.
+         * Attempt to maintain approximately 1/2 of our free pages in a
+         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
+         * generally zeroing a page when the system is near steady-state.
+         * Otherwise we might get 'flutter' during disk I/O / IPC or
+         * fast sleeps.  We also do not want to be continuously zeroing
+         * pages because doing so may flush our L1 and L2 caches too much.
          */
-        if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
-                return (0);
+
+        if (zero_state && vm_page_zero_count >= cnt.v_free_count / 3)
+                return(0);
+        if (vm_page_zero_count >= cnt.v_free_count / 2)
+                return(0);
+
 #ifdef SMP
         if (try_mplock()) {
 #endif
                 s = splvm();
                 m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
+                zero_state = 0;
                 if (m != NULL && (m->flags & PG_ZERO) == 0) {
                         --(*vm_page_queues[m->queue].lcnt);
                         TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
@@ -490,6 +491,8 @@ vm_page_zero_idle()
                             pageq);
                         ++vm_page_zero_count;
                         ++cnt_prezero;
+                        if (vm_page_zero_count >= cnt.v_free_count / 2)
+                                zero_state = 1;
                 }
                 free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
                 splx(s);
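The hysteresis added here is a two-threshold latch: the idle loop zeroes pages until the zeroed pool reaches 1/2 of the free count (zero_state is set), then stays off until the pool drains below 1/3 (the first early return fires for anything in between). The following stand-alone C sketch models that band with assumed page counts; it only illustrates the latch logic from the diff, it is not kernel code, and every name in it apart from the mirrored thresholds is hypothetical.

#include <stdio.h>

/* Latch: set once the zeroed pool reaches the 1/2 high-water mark. */
static int zero_state;

/*
 * Decide whether the idle loop should zero another page.  Mirrors the
 * two checks added at the top of vm_page_zero_idle(): once latched,
 * stay off until the pool drains below the 1/3 low-water mark.
 */
static int
want_to_zero(int free_count, int zero_count)
{
        if (zero_state && zero_count >= free_count / 3)
                return (0);             /* latched off inside the band */
        if (zero_count >= free_count / 2)
                return (0);             /* high-water mark reached */
        return (1);
}

/* Zero one page; mirrors the bookkeeping in the second and third hunks. */
static int
zero_one_page(int free_count, int zero_count)
{
        zero_state = 0;                 /* "zero_state = 0;" in the diff */
        ++zero_count;                   /* "++vm_page_zero_count;" */
        if (zero_count >= free_count / 2)
                zero_state = 1;         /* latch at the high-water mark */
        return (zero_count);
}

int
main(void)
{
        int free_count = 600;           /* assumed free-page pool size */
        int zero_count = 0;

        while (want_to_zero(free_count, zero_count))
                zero_count = zero_one_page(free_count, zero_count);
        printf("filled to %d zeroed pages\n", zero_count);     /* 300 */

        zero_count = 280;               /* small drain: still above 200 */
        printf("restart after small drain? %d\n",
            want_to_zero(free_count, zero_count));             /* 0 */

        zero_count = 150;               /* large drain: below 1/3 = 200 */
        printf("restart after large drain? %d\n",
            want_to_zero(free_count, zero_count));             /* 1 */
        return (0);
}

The width of the band is what prevents the 'flutter' the new comment describes: a transient burst of allocations during disk I/O or IPC can consume and return pages without ever re-waking the zeroer, so the L1 and L2 caches are not repeatedly flushed by back-to-back zeroing passes.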