Rip out the PQ_ZERO queue. PQ_ZERO functionality is now combined with
    PQ_FREE. There is little operational difference other than the kernel
    being a few kilobytes smaller and the code being more readable.

    * vm_page_select_free() has been *greatly* simplified (see the sketch
      after this list).
    * The PQ_ZERO page queue and its supporting structures have been removed.
    * vm_page_zero_idle() has been revamped (see below).
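
    As a minimal sketch of the simplified selector (condensed from the
    committed vm_page.c below; the real body assigns to a local before
    returning, but is otherwise the same):

	static __inline vm_page_t
	vm_page_select_free(vm_object_t object, vm_pindex_t pindex,
	    boolean_t prefer_zero)
	{
		/* One queue now: PQ_FREE, colored by (pindex + pg_color). */
		return (vm_page_list_find(PQ_FREE,
		    (pindex + object->pg_color) & PQ_L2_MASK, prefer_zero));
	}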

    PG_ZERO setting and clearing has been migrated from vm_page_alloc()
    to vm_page_free[_zero]() and will eventually be guaranteed to remain
    tracked throughout a page's life (if it isn't already).
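
    In sketch form, the free-side inlines now own the flag (ANSI-style
    signatures here for brevity; the committed vm_page.h versions below
    use K&R declarations):

	static __inline void
	vm_page_free(vm_page_t m)
	{
		/* Temporary safety: force the flag off on a normal free. */
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_free_toq(m);
	}

	static __inline void
	vm_page_free_zero(vm_page_t m)
	{
		/* Caller asserts the page is already zeroed. */
		vm_page_flag_set(m, PG_ZERO);
		vm_page_free_toq(m);
	}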

    When a page is freed, PG_ZERO pages are appended to the appropriate
    tailq in the PQ_FREE queue while non-PG_ZERO pages are prepended.
    When locating a new free page, PG_ZERO selection operates from within
    vm_page_list_find() (taking the page from the end of the queue instead
    of the beginning) and then only occurs in the nominal critical-path
    case.  If the nominal case misses, both normal and zero-page allocation
    devolve into the same _vm_page_list_find() selection code without any
    specific zero-page optimizations.
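
    A condensed sketch of both halves (names as committed; queue indexing
    elided):

	/* Freeing: zeroed pages go to the tail, other pages to the head. */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
	}

	/* Allocating: a zero-preferring caller scans from the tail. */
	if (prefer_zero)
		m = TAILQ_LAST(vm_page_queues[PQ_FREE + index].pl, pglist);
	else
		m = TAILQ_FIRST(vm_page_queues[PQ_FREE + index].pl);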

    Additionally, vm_page_zero_idle() has been revamped.  Hysteresis has been
    added and zero-page tracking adjusted to conform with the other changes.
    Currently hysteresis is set at 1/3 (low water) and 1/2 (high water) of
    the number of free pages.  We may wish to increase both parameters as
    time permits.  The hysteresis is designed to avoid silly zeroing in
    borderline allocation/free situations.
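
    In outline (condensed from the committed i386 vm_page_zero_idle()
    below):

	static int zero_state;	/* nonzero after the high watermark hits */

	if (zero_state && vm_page_zero_count >= cnt.v_free_count / 3)
		return (0);	/* stay idle until we drop below 1/3 */
	if (vm_page_zero_count >= cnt.v_free_count / 2)
		return (0);	/* at or above the 1/2 high watermark */
	zero_state = 0;		/* actively zeroing again */
	/* ... select and zero one free page, bump vm_page_zero_count ... */
	if (vm_page_zero_count >= cnt.v_free_count / 2)
		zero_state = 1;
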
Matthew Dillon 1999-02-08 00:37:36 +00:00
parent 5313b05fe0
commit faa273d5c2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=43752
8 changed files with 133 additions and 165 deletions

View File

@@ -38,7 +38,7 @@
  *
  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- *	$Id: vm_machdep.c,v 1.7 1998/12/30 10:38:58 dfr Exp $
+ *	$Id: vm_machdep.c,v 1.8 1999/01/26 02:49:51 julian Exp $
  */
 
 /*
  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -469,8 +469,8 @@ vm_page_zero_idle()
 	if (try_mplock()) {
 #endif
 		s = splvm();
-		m = vm_page_list_find(PQ_FREE, free_rover);
-		if (m != NULL) {
+		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
+		if (m != NULL && (m->flags & PG_ZERO) == 0) {
 			--(*vm_page_queues[m->queue].lcnt);
 			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
 			m->queue = PQ_NONE;
@@ -483,14 +483,15 @@ vm_page_zero_idle()
 			get_mplock();
 #endif
 			(void)splvm();
-			m->queue = PQ_ZERO + m->pc;
+			vm_page_flag_set(m, PG_ZERO);
+			m->queue = PQ_FREE + m->pc;
 			++(*vm_page_queues[m->queue].lcnt);
-			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m,
+			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
 			    pageq);
-			free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
 			++vm_page_zero_count;
 			++cnt_prezero;
 		}
+		free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
 		splx(s);
 #ifdef SMP
 		rel_mplock();

View File

@@ -38,7 +38,7 @@
  *
  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- *	$Id: vm_machdep.c,v 1.115 1999/01/06 23:05:37 julian Exp $
+ *	$Id: vm_machdep.c,v 1.116 1999/01/29 08:36:42 dillon Exp $
  */
 
 #include "npx.h"
@@ -590,31 +590,32 @@ int
 vm_page_zero_idle()
 {
 	static int free_rover;
+	static int zero_state;
 	vm_page_t m;
 	int s;
 
 	/*
-	 * XXX
-	 * We stop zeroing pages when there are sufficent prezeroed pages.
-	 * This threshold isn't really needed, except we want to
-	 * bypass unneeded calls to vm_page_list_find, and the
-	 * associated cache flush and latency.  The pre-zero will
-	 * still be called when there are significantly more
-	 * non-prezeroed pages than zeroed pages.  The threshold
-	 * of half the number of reserved pages is arbitrary, but
-	 * approximately the right amount.  Eventually, we should
-	 * perhaps interrupt the zero operation when a process
-	 * is found to be ready to run.
+	 * Attempt to maintain approximately 1/2 of our free pages in a
+	 * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
+	 * generally zeroing a page when the system is near steady-state.
+	 * Otherwise we might get 'flutter' during disk I/O / IPC or
+	 * fast sleeps.  We also do not want to be continuously zeroing
+	 * pages because doing so may flush our L1 and L2 caches too much.
 	 */
-	if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
-		return (0);
+	if (zero_state && vm_page_zero_count >= cnt.v_free_count / 3)
+		return(0);
+	if (vm_page_zero_count >= cnt.v_free_count / 2)
+		return(0);
 #ifdef SMP
 	if (try_mplock()) {
 #endif
 		s = splvm();
 		__asm __volatile("sti" : : : "memory");
-		m = vm_page_list_find(PQ_FREE, free_rover);
-		if (m != NULL) {
+		zero_state = 0;
+		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
+		if (m != NULL && (m->flags & PG_ZERO) == 0) {
 			--(*vm_page_queues[m->queue].lcnt);
 			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
 			m->queue = PQ_NONE;
@@ -627,14 +628,17 @@ vm_page_zero_idle()
 			get_mplock();
 #endif
 			(void)splvm();
-			m->queue = PQ_ZERO + m->pc;
+			vm_page_flag_set(m, PG_ZERO);
+			m->queue = PQ_FREE + m->pc;
 			++(*vm_page_queues[m->queue].lcnt);
-			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m,
+			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
 			    pageq);
-			free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
 			++vm_page_zero_count;
 			++cnt_prezero;
+			if (vm_page_zero_count >= cnt.v_free_count / 2)
+				zero_state = 1;
 		}
+		free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
 		splx(s);
 		__asm __volatile("cli" : : : "memory");
 #ifdef SMP

View File

@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
- *	$Id: vm_page.c,v 1.123 1999/01/28 00:57:57 dillon Exp $
+ *	$Id: vm_page.c,v 1.124 1999/02/07 20:45:15 dillon Exp $
  */
 
 /*
@@ -87,8 +87,6 @@
 #include <vm/vm_extern.h>
 
 static void vm_page_queue_init __P((void));
-static vm_page_t _vm_page_select_free __P((vm_object_t object,
-			vm_pindex_t pindex, int prefqueue));
 static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));
 
 /*
@@ -102,7 +100,6 @@ static int vm_page_hash_mask;		/* Mask for hash function */
 static volatile int vm_page_bucket_generation;
 
 struct pglist vm_page_queue_free[PQ_L2_SIZE] = {{0}};
-struct pglist vm_page_queue_zero[PQ_L2_SIZE] = {{0}};
 struct pglist vm_page_queue_active = {0};
 struct pglist vm_page_queue_inactive = {0};
 struct pglist vm_page_queue_cache[PQ_L2_SIZE] = {{0}};
@@ -122,10 +119,6 @@ vm_page_queue_init(void) {
 		vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
 		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
 	}
-	for(i=0;i<PQ_L2_SIZE;i++) {
-		vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
-		vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
-	}
 	vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
 	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
@@ -726,7 +719,8 @@ vm_page_select_cache(object, pindex)
 	while (TRUE) {
 		m = vm_page_list_find(
 		    PQ_CACHE,
-		    (pindex + object->pg_color) & PQ_L2_MASK
+		    (pindex + object->pg_color) & PQ_L2_MASK,
+		    FALSE
 		);
 		if (m && ((m->flags & PG_BUSY) || m->busy ||
 			   m->hold_count || m->wire_count)) {
@@ -749,65 +743,18 @@
  */
 static __inline vm_page_t
-vm_page_select_free(vm_object_t object, vm_pindex_t pindex, int prefqueue)
+vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
 {
-	vm_page_t m;
-	int otherq = (prefqueue == PQ_ZERO) ? PQ_FREE : PQ_ZERO;
+	vm_page_t m;
 
-#if PQ_L2_SIZE > 1
-	int i = (pindex + object->pg_color) & PQ_L2_MASK;
-
-	if ((m = TAILQ_FIRST(vm_page_queues[prefqueue+i].pl)) == NULL &&
-	    (m = TAILQ_FIRST(vm_page_queues[otherq+i].pl)) == NULL
-	) {
-		m = _vm_page_select_free(object, pindex, prefqueue);
-	}
-#else
-	if ((m = TAILQ_FIRST(vm_page_queues[prefqueue].pl)) == NULL)
-		m = TAILQ_FIRST(vm_page_queues[otherq].pl);
-#endif
-	return(m);
-}
-
-#if PQ_L2_SIZE > 1
-static vm_page_t
-_vm_page_select_free(object, pindex, prefqueue)
-	vm_object_t object;
-	vm_pindex_t pindex;
-	int prefqueue;
-{
-	int i;
-	int index;
-	vm_page_t m = NULL;
-	struct vpgqueues *pq;
-	struct vpgqueues *po;
-
-	if (prefqueue == PQ_ZERO) {
-		pq = &vm_page_queues[PQ_ZERO];
-		po = &vm_page_queues[PQ_FREE];
-	} else {
-		pq = &vm_page_queues[PQ_FREE];
-		po = &vm_page_queues[PQ_ZERO];
-	}
-
-	index = pindex + object->pg_color;
-	for(i = PQ_L2_SIZE / 2; i > 0; --i) {
-		if ((m = TAILQ_FIRST(pq[(index+i) & PQ_L2_MASK].pl)) != NULL)
-			break;
-		if ((m = TAILQ_FIRST(po[(index+i) & PQ_L2_MASK].pl)) != NULL)
-			break;
-		if ((m = TAILQ_FIRST(pq[(index-i) & PQ_L2_MASK].pl)) != NULL)
-			break;
-		if ((m = TAILQ_FIRST(po[(index-i) & PQ_L2_MASK].pl)) != NULL)
-			break;
-	}
+	m = vm_page_list_find(
+	    PQ_FREE,
+	    (pindex + object->pg_color) & PQ_L2_MASK,
+	    prefer_zero
+	);
 	return(m);
 }
-#endif
 
 /*
  * vm_page_alloc:
  *
@@ -859,7 +806,7 @@ vm_page_alloc(object, pindex, page_req)
 	case VM_ALLOC_NORMAL:
 		if (cnt.v_free_count >= cnt.v_free_reserved) {
-			m = vm_page_select_free(object, pindex, PQ_FREE);
+			m = vm_page_select_free(object, pindex, FALSE);
 			KASSERT(m != NULL, ("vm_page_alloc(NORMAL): missing page on free queue\n"));
 		} else {
 			m = vm_page_select_cache(object, pindex);
@@ -878,7 +825,7 @@ vm_page_alloc(object, pindex, page_req)
 	case VM_ALLOC_ZERO:
 		if (cnt.v_free_count >= cnt.v_free_reserved) {
-			m = vm_page_select_free(object, pindex, PQ_ZERO);
+			m = vm_page_select_free(object, pindex, TRUE);
 			KASSERT(m != NULL, ("vm_page_alloc(ZERO): missing page on free queue\n"));
 		} else {
 			m = vm_page_select_cache(object, pindex);
@@ -899,7 +846,7 @@ vm_page_alloc(object, pindex, page_req)
 		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
 		    ((cnt.v_cache_count == 0) &&
 		     (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
-			m = vm_page_select_free(object, pindex, PQ_FREE);
+			m = vm_page_select_free(object, pindex, FALSE);
 			KASSERT(m != NULL, ("vm_page_alloc(SYSTEM): missing page on free queue\n"));
 		} else {
 			m = vm_page_select_cache(object, pindex);
@@ -918,7 +865,7 @@ vm_page_alloc(object, pindex, page_req)
 	case VM_ALLOC_INTERRUPT:
 		if (cnt.v_free_count > 0) {
-			m = vm_page_select_free(object, pindex, PQ_FREE);
+			m = vm_page_select_free(object, pindex, FALSE);
 			KASSERT(m != NULL, ("vm_page_alloc(INTERRUPT): missing page on free queue\n"));
 		} else {
 			splx(s);
@@ -963,7 +910,7 @@ vm_page_alloc(object, pindex, page_req)
 	(*pq->lcnt)--;
 	oldobject = NULL;
-	if (qtype == PQ_ZERO) {
+	if (m->flags & PG_ZERO) {
 		vm_page_zero_count--;
 		m->flags = PG_ZERO | PG_BUSY;
 	} else {
@@ -1182,7 +1129,7 @@ vm_page_free_wakeup()
  */
 
 void
-vm_page_free_toq(vm_page_t m, int queue)
+vm_page_free_toq(vm_page_t m)
 {
 	int s;
 	struct vpgqueues *pq;
@@ -1265,27 +1212,29 @@ vm_page_free_toq(vm_page_t m, int queue)
 	pmap_page_is_free(m);
 #endif
 
-	m->queue = queue + m->pc;
+	m->queue = PQ_FREE + m->pc;
 	pq = &vm_page_queues[m->queue];
 	++(*pq->lcnt);
 	++(*pq->cnt);
-	if (queue == PQ_ZERO) {
-		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
-		++vm_page_zero_count;
-	} else {
-		/*
-		 * If the pageout process is grabbing the page, it is likely
-		 * that the page is NOT in the cache.  It is more likely that
-		 * the page will be partially in the cache if it is being
-		 * explicitly freed.
-		 */
-		if (curproc == pageproc) {
-			TAILQ_INSERT_TAIL(pq->pl, m, pageq);
-		} else {
-			TAILQ_INSERT_HEAD(pq->pl, m, pageq);
-		}
-	}
+
+	/*
+	 * Put zero'd pages on the end ( where we look for zero'd pages
+	 * first ) and non-zerod pages at the head.
+	 */
+	if (m->flags & PG_ZERO) {
+		TAILQ_INSERT_TAIL(pq->pl, m, pageq);
+		++vm_page_zero_count;
+	} else if (curproc == pageproc) {
+		/*
+		 * If the pageout daemon is freeing pages, the pages are
+		 * likely to NOT be in the L1 or L2 caches due to their age.
+		 * For now we do not try to do anything special with this
+		 * info.
+		 */
+		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
+	} else {
+		TAILQ_INSERT_HEAD(pq->pl, m, pageq);
+	}
 
 	vm_page_free_wakeup();
@@ -1640,7 +1589,7 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
 			int pqtype;
 			phys = VM_PAGE_TO_PHYS(&pga[i]);
 			pqtype = pga[i].queue - pga[i].pc;
-			if (((pqtype == PQ_ZERO) || (pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
+			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
 			    (phys >= low) && (phys < high) &&
 			    ((phys & (alignment - 1)) == 0) &&
 			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
@@ -1724,7 +1673,7 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
 			pqtype = pga[i].queue - pga[i].pc;
 			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
 			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
-			    ((pqtype != PQ_ZERO) && (pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
+			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
 				start++;
 				goto again;
 			}
@@ -1843,12 +1792,6 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 	}
 	db_printf("\n");
 
-	db_printf("PQ_ZERO:");
-	for(i=0;i<PQ_L2_SIZE;i++) {
-		db_printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
-	}
-	db_printf("\n");
-
 	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
 	    *vm_page_queues[PQ_ACTIVE].lcnt,
 	    *vm_page_queues[PQ_INACTIVE].lcnt);

View File

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_page.h,v 1.53 1999/01/24 05:57:50 dillon Exp $
+ * $Id: vm_page.h,v 1.54 1999/02/07 20:45:15 dillon Exp $
  */
 
 /*
@@ -139,7 +139,7 @@ struct vm_page {
 /*
  * Page coloring parameters
  */
-/* Each of PQ_FREE, PQ_ZERO and PQ_CACHE have PQ_HASH_SIZE entries */
+/* Each of PQ_FREE, and PQ_CACHE have PQ_HASH_SIZE entries */
 
 /* Define one of the following */
 #if defined(PQ_HUGECACHE)
@@ -186,11 +186,11 @@ struct vm_page {
 #define PQ_NONE 0
 #define PQ_FREE	1
-#define PQ_ZERO (1 + PQ_L2_SIZE)
-#define PQ_INACTIVE (1 + 2*PQ_L2_SIZE)
-#define PQ_ACTIVE (2 + 2*PQ_L2_SIZE)
-#define PQ_CACHE (3 + 2*PQ_L2_SIZE)
-#define PQ_COUNT (3 + 3*PQ_L2_SIZE)
+/* #define PQ_ZERO (1 + PQ_L2_SIZE) */
+#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
+#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
+#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
+#define PQ_COUNT (3 + 2*PQ_L2_SIZE)
 
 extern struct vpgqueues {
 	struct pglist *pl;
@@ -253,7 +253,6 @@ extern struct vpgqueues {
  */
 
 extern struct pglist vm_page_queue_free[PQ_L2_SIZE];	/* memory free queue */
-extern struct pglist vm_page_queue_zero[PQ_L2_SIZE];	/* zeroed memory free queue */
 extern struct pglist vm_page_queue_active;		/* active memory queue */
 extern struct pglist vm_page_queue_inactive;		/* inactive memory queue */
 extern struct pglist vm_page_queue_cache[PQ_L2_SIZE];	/* cache memory queue */
@@ -406,7 +405,7 @@ int vm_page_queue_index __P((vm_offset_t, int));
 int vm_page_sleep(vm_page_t m, char *msg, char *busy);
 int vm_page_asleep(vm_page_t m, char *msg, char *busy);
 #endif
-void vm_page_free_toq(vm_page_t m, int queue);
+void vm_page_free_toq(vm_page_t m);
 
 /*
  * Keep page from being freed by the page daemon
@@ -483,12 +482,18 @@ vm_page_copy(src_m, dest_m)
  * vm_page_free:
  *
  *	Free a page
+ *
+ *	The clearing of PG_ZERO is a temporary safety until the code can be
+ *	reviewed to determine that PG_ZERO is being properly cleared on
+ *	write faults or maps.  PG_ZERO was previously cleared in
+ *	vm_page_alloc().
  */
 static __inline void
 vm_page_free(m)
 	vm_page_t m;
 {
-	vm_page_free_toq(m, PQ_FREE);
+	vm_page_flag_clear(m, PG_ZERO);
+	vm_page_free_toq(m);
 }
 
 /*
@@ -500,7 +505,8 @@ static __inline void
 vm_page_free_zero(m)
 	vm_page_t m;
 {
-	vm_page_free_toq(m, PQ_ZERO);
+	vm_page_flag_set(m, PG_ZERO);
+	vm_page_free_toq(m);
 }
 
 /*
@@ -552,16 +558,24 @@ vm_page_dirty(vm_page_t m)
 }
 
 static __inline vm_page_t
-vm_page_list_find(int basequeue, int index)
+vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
 {
 	vm_page_t m;
 
 #if PQ_L2_SIZE > 1
-	m = TAILQ_FIRST(vm_page_queues[basequeue+index].pl);
+	if (prefer_zero) {
+		m = TAILQ_LAST(vm_page_queues[basequeue+index].pl, pglist);
+	} else {
+		m = TAILQ_FIRST(vm_page_queues[basequeue+index].pl);
+	}
 	if (m == NULL)
 		m = _vm_page_list_find(basequeue, index);
 #else
-	m = TAILQ_FIRST(vm_page_queues[basequeue].pl);
+	if (prefer_zero) {
+		m = TAILQ_LAST(vm_page_queues[basequeue].pl, pglist);
+	} else {
+		m = TAILQ_FIRST(vm_page_queues[basequeue].pl);
+	}
 #endif
 	return(m);
 }

View File

@@ -65,7 +65,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pageout.c,v 1.134 1999/01/24 06:04:52 dillon Exp $
+ * $Id: vm_pageout.c,v 1.135 1999/02/07 21:48:23 dillon Exp $
  */
 
 /*
@@ -1079,7 +1079,7 @@ vm_pageout_scan()
 	while (cnt.v_free_count < cnt.v_free_reserved) {
 		static int cache_rover = 0;
 
-		m = vm_page_list_find(PQ_CACHE, cache_rover);
+		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
 		if (!m)
 			break;
 		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {