Add page coloring support. Several levels of coloring are provided.

The default level works with minimal overhead, but one can also enable
full, efficient use of a 512K cache.  (Parameters can also be generated
to support arbitrary cache sizes.)
John Dyson 1996-09-08 20:44:49 +00:00
parent 49ee17cbb8
commit 5070c7f8c5
15 changed files with 472 additions and 134 deletions
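
(Editorial note, not part of the commit.) The cache-size levels map directly
onto color counts: with 4K pages and a physically indexed, direct-mapped
cache, physical pages that lie cache_size/page_size pages apart alias to the
same cache lines, so that quotient is the number of usable colors. A minimal
sketch of the arithmetic, assuming 4K pages:

#include <stdio.h>

int
main(void)
{
	long page_size = 4096;
	long caches[] = { 64 * 1024, 256 * 1024, 512 * 1024 };
	int i;

	/* prints 16, 64 and 128: the color counts selected by
	 * PQ_NORMALCACHE, PQ_MEDIUMCACHE and PQ_LARGECACHE in vm_page.h */
	for (i = 0; i < 3; i++)
		printf("%ldK cache -> %ld colors\n",
		    caches[i] / 1024, caches[i] / page_size);
	return (0);
}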

File: pmap.c

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.114 1996/07/30 03:08:49 dyson Exp $
* $Id: pmap.c,v 1.115 1996/09/08 16:57:34 dyson Exp $
*/
/*
@ -1105,8 +1105,7 @@ pmap_growkernel(vm_offset_t addr)
if (!nkpg) {
vm_offset_t ptpkva = (vm_offset_t) vtopte(addr);
/*
* This index is bogus, but out of the way, the old
* value of zero is bad for page coloring.
* This index is bogus, but out of the way
*/
vm_pindex_t ptpidx = (ptpkva >> PAGE_SHIFT);
nkpg = vm_page_alloc(kernel_object,
@ -1977,7 +1976,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if (p->queue == PQ_CACHE)
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);
p->flags |= PG_BUSY;
mpte = pmap_enter_quick(pmap,
@ -1998,7 +1997,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if (p->queue == PQ_CACHE)
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);
p->flags |= PG_BUSY;
mpte = pmap_enter_quick(pmap,
@ -2091,7 +2090,7 @@ pmap_prefault(pmap, addra, entry, object)
(m->busy == 0) &&
(m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if (m->queue == PQ_CACHE) {
if ((m->queue - m->pc) == PQ_CACHE) {
vm_page_deactivate(m);
}
m->flags |= PG_BUSY;
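
(Editorial note.) The recurring test change in this file follows from the new
queue layout: each queue class now spans PQ_L2_SIZE per-color sub-queues, and
m->queue stores the class base plus the page's color m->pc, so membership
tests must subtract the color first. A hypothetical helper (not in the
commit) expressing the idiom:

/*
 * Hypothetical helper: m->queue holds base + m->pc, so subtracting
 * the color recovers the queue class.
 */
static __inline int
page_is_cached(vm_page_t m)
{
	return ((m->queue - m->pc) == PQ_CACHE);
}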

File: vm_machdep.c

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.66 1996/06/20 08:07:30 davidg Exp $
* $Id: vm_machdep.c,v 1.67 1996/07/12 04:11:10 bde Exp $
*/
#include "npx.h"
@ -825,14 +825,18 @@ grow(p, sp)
int
vm_page_zero_idle() {
vm_page_t m;
static int free_rover = 0;
if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
(m = TAILQ_FIRST(&vm_page_queue_free))) {
TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
(m = vm_page_list_find(PQ_FREE, free_rover))) {
--(*vm_page_queues[m->queue].lcnt);
TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
enable_intr();
pmap_zero_page(VM_PAGE_TO_PHYS(m));
disable_intr();
TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
m->queue = PQ_ZERO;
m->queue = PQ_ZERO + m->pc;
++(*vm_page_queues[m->queue].lcnt);
TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
++vm_page_zero_count;
return 1;
}
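
(Editorial note.) The idle-loop zeroer now keeps a static rover and advances
it by PQ_PRIME3 after each page. Because the stride is odd and the number of
colors is a power of two, the two are coprime and the rover visits every
color before repeating. A standalone sketch of that walk, assuming the
PQ_LARGECACHE parameters:

#include <stdio.h>

/* PQ_LARGECACHE parameters from the vm_page.h hunk in this commit */
#define PQ_PRIME3	17
#define PQ_L2_SIZE	128
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

int
main(void)
{
	int rover = 0, visited = 0, seen[PQ_L2_SIZE] = { 0 };

	do {
		if (!seen[rover]++)
			visited++;
		rover = (rover + PQ_PRIME3) & PQ_L2_MASK;
	} while (rover != 0);
	/* prints 128: the odd stride cycles through all colors */
	printf("colors visited: %d\n", visited);
	return (0);
}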

File: vfs_bio.c

@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: vfs_bio.c,v 1.96 1996/08/21 21:55:18 dyson Exp $
* $Id: vfs_bio.c,v 1.97 1996/09/06 05:37:53 gibbs Exp $
*/
/*
@ -402,7 +402,7 @@ bawrite(struct buf * bp)
* Ordered write.
* Start output on a buffer, but only wait for it to complete if the
* output device cannot guarantee ordering in some other way. Devices
* that can perform asyncronous ordered writes will set the B_ASYNC
* that can perform asynchronous ordered writes will set the B_ASYNC
* flag in their strategy routine.
* The buffer is released when the output completes.
*/
@ -1324,7 +1324,7 @@ allocbuf(struct buf * bp, int size)
goto doretry;
} else {
if ((curproc != pageproc) &&
(m->queue == PQ_CACHE) &&
((m->queue - m->pc) == PQ_CACHE) &&
((cnt.v_free_count + cnt.v_cache_count) <
(cnt.v_free_min + cnt.v_cache_min))) {
pagedaemon_wakeup();

File: pmap.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: pmap.h,v 1.13 1996/07/27 04:22:12 dyson Exp $
* $Id: pmap.h,v 1.14 1996/07/30 03:08:04 dyson Exp $
*/
/*
@ -121,6 +121,7 @@ void pmap_qremove __P((vm_offset_t, int));
void pmap_reference __P((pmap_t));
void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_remove_pages __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
void pmap_prefault __P((pmap_t pmap, vm_offset_t addra,
vm_map_entry_t entry, vm_object_t object));

File: swap_pager.c

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.69 1996/07/27 03:23:51 dyson Exp $
* $Id: swap_pager.c,v 1.70 1996/07/30 03:08:05 dyson Exp $
*/
/*
@ -566,7 +566,8 @@ swap_pager_reclaim()
* see if any blocks associated with a pager have been
* allocated but not used (written)
*/
if (object->paging_in_progress == 0) {
if ((object->flags & OBJ_DEAD) == 0 &&
(object->paging_in_progress == 0)) {
for (i = 0; i < object->un_pager.swp.swp_nblocks; i++) {
sw_blk_t swb = &object->un_pager.swp.swp_blocks[i];

File: vm_fault.c

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.55 1996/07/28 01:14:01 dyson Exp $
* $Id: vm_fault.c,v 1.56 1996/07/30 03:08:07 dyson Exp $
*/
/*
@ -287,7 +287,7 @@ RetryFault:;
/*
* Mark page busy for other processes, and the pagedaemon.
*/
if ((queue == PQ_CACHE) &&
if (((queue - m->pc) == PQ_CACHE) &&
(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
vm_page_activate(m);
UNLOCK_AND_DEALLOCATE;

File: vm_meter.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
* $Id: vm_meter.c,v 1.14 1996/03/11 06:11:40 hsu Exp $
* $Id: vm_meter.c,v 1.15 1996/05/18 03:37:47 dyson Exp $
*/
#include <sys/param.h>
@ -194,18 +194,18 @@ vmtotal SYSCTL_HANDLER_ARGS
for (object = TAILQ_FIRST(&vm_object_list);
object != NULL;
object = TAILQ_NEXT(object, object_list)) {
totalp->t_vm += num_pages(object->size);
totalp->t_vm += object->size;
totalp->t_rm += object->resident_page_count;
if (object->flags & OBJ_ACTIVE) {
totalp->t_avm += num_pages(object->size);
totalp->t_avm += object->size;
totalp->t_arm += object->resident_page_count;
}
if (object->ref_count > 1) {
if (object->shadow_count > 1) {
/* shared object */
totalp->t_vmshr += num_pages(object->size);
totalp->t_vmshr += object->size;
totalp->t_rmshr += object->resident_page_count;
if (object->flags & OBJ_ACTIVE) {
totalp->t_avmshr += num_pages(object->size);
totalp->t_avmshr += object->size;
totalp->t_armshr += object->resident_page_count;
}
}

File: vm_object.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.78 1996/07/30 03:08:14 dyson Exp $
* $Id: vm_object.c,v 1.79 1996/08/21 21:56:19 dyson Exp $
*/
/*
@ -149,6 +149,7 @@ extern int vm_pageout_page_count;
static long object_collapses;
static long object_bypasses;
static int next_index;
static void
_vm_object_allocate(type, size, object)
@ -167,6 +168,8 @@ _vm_object_allocate(type, size, object)
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->shadow_count = 0;
object->pg_color = next_index;
next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
object->handle = NULL;
object->paging_offset = (vm_ooffset_t) 0;
object->backing_object = NULL;
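
(Editorial note.) The two added lines give every new object a starting color,
advanced by PQ_PRIME1 per object, so that even small objects are spread
evenly across the colors. A quick demonstration of the resulting sequence,
assuming the PQ_NORMALCACHE parameters:

#include <stdio.h>

/* PQ_NORMALCACHE parameters from the vm_page.h hunk in this commit */
#define PQ_PRIME1	5
#define PQ_L2_SIZE	16
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

int
main(void)
{
	int next_index = 0, i;

	/* prints 0 5 10 15 4 9 14 3 8 13 2 7 12 1 6 11: sixteen
	 * consecutive objects receive sixteen distinct starting colors */
	for (i = 0; i < PQ_L2_SIZE; i++) {
		printf("%d ", next_index);
		next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
	}
	printf("\n");
	return (0);
}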
@ -494,7 +497,8 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
pi = p->pindex;
if (((p->flags & PG_CLEANCHK) == 0) ||
(pi < tstart) || (pi >= tend) ||
(p->valid == 0) || (p->queue == PQ_CACHE)) {
(p->valid == 0) ||
((p->queue - p->pc) == PQ_CACHE)) {
p->flags &= ~PG_CLEANCHK;
continue;
}
@ -521,7 +525,7 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
if ((tp->flags & PG_BUSY) ||
(tp->flags & PG_CLEANCHK) == 0)
break;
if (tp->queue == PQ_CACHE) {
if((tp->queue - tp->pc) == PQ_CACHE) {
tp->flags &= ~PG_CLEANCHK;
break;
}
@ -545,7 +549,7 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
if ((tp->flags & PG_BUSY) ||
(tp->flags & PG_CLEANCHK) == 0)
break;
if (tp->queue == PQ_CACHE) {
if((tp->queue - tp->pc) == PQ_CACHE) {
tp->flags &= ~PG_CLEANCHK;
break;
}
@ -830,7 +834,8 @@ vm_object_qcollapse(object)
next = TAILQ_NEXT(p, listq);
if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
(p->queue == PQ_CACHE) || !p->valid || p->hold_count || p->wire_count || p->busy) {
((p->queue - p->pc) == PQ_CACHE) ||
!p->valid || p->hold_count || p->wire_count || p->busy) {
p = next;
continue;
}
@ -1484,4 +1489,93 @@ vm_object_print(iobject, full, dummy3, dummy4)
printf("\n");
indent -= 2;
}
void
vm_object_print_pages()
{
vm_object_t object;
int nl = 0;
int c;
for (object = TAILQ_FIRST(&vm_object_list);
object != NULL;
object = TAILQ_NEXT(object, object_list)) {
vm_pindex_t idx, fidx;
vm_pindex_t osize;
vm_offset_t pa = -1, padiff;
int rcount;
vm_page_t m;
db_printf("new object: 0x%x\n", object);
if ( nl > 18) {
c = cngetc();
if (c != ' ')
return;
nl = 0;
}
nl++;
rcount = 0;
fidx = 0;
osize = object->size;
if (osize > 128)
osize = 128;
for(idx=0;idx<osize;idx++) {
m = vm_page_lookup(object, idx);
if (m == NULL) {
if (rcount) {
db_printf(" index(%d)run(%d)pa(0x%x)\n",
fidx, rcount, pa);
if ( nl > 18) {
c = cngetc();
if (c != ' ')
return;
nl = 0;
}
nl++;
rcount = 0;
}
continue;
}
if (rcount &&
(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
++rcount;
continue;
}
if (rcount) {
padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
padiff >>= PAGE_SHIFT;
padiff &= PQ_L2_MASK;
if (padiff == 0) {
pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
++rcount;
continue;
}
db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
db_printf("pd(%d)\n", padiff);
if ( nl > 18) {
c = cngetc();
if (c != ' ')
return;
nl = 0;
}
nl++;
}
fidx = idx;
pa = VM_PAGE_TO_PHYS(m);
rcount = 1;
}
if (rcount) {
db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
if ( nl > 18) {
c = cngetc();
if (c != ' ')
return;
nl = 0;
}
nl++;
}
}
}
#endif /* DDB */

File: vm_object.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.28 1996/05/19 07:36:50 dyson Exp $
* $Id: vm_object.h,v 1.29 1996/08/21 21:56:21 dyson Exp $
*/
/*
@ -94,6 +94,7 @@ struct vm_object {
vm_size_t size; /* Object size */
int ref_count; /* How many refs?? */
int shadow_count; /* how many objects that this is a shadow for */
int pg_color; /* color of first page in obj */
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
u_short behavior; /* see below */

File: vm_page.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.61 1996/07/27 03:24:05 dyson Exp $
* $Id: vm_page.c,v 1.62 1996/07/30 03:08:15 dyson Exp $
*/
/*
@ -91,6 +91,8 @@
extern void DDB_print_page_info __P((void));
#endif
static void vm_page_queue_init(void);
/*
* Associated with each page of user-allocatable memory is a
* page structure.
@ -100,25 +102,49 @@ static struct pglist *vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count; /* How big is array? */
static int vm_page_hash_mask; /* Mask for hash function */
struct pglist vm_page_queue_free;
struct pglist vm_page_queue_zero;
struct pglist vm_page_queue_free[PQ_L2_SIZE];
struct pglist vm_page_queue_zero[PQ_L2_SIZE];
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;
struct pglist vm_page_queue_cache[PQ_L2_SIZE];
int no_queue;
struct {
struct pglist *pl;
int *cnt;
} vm_page_queues[PQ_CACHE+1] = {
{NULL, &no_queue},
{ &vm_page_queue_free, &cnt.v_free_count},
{ &vm_page_queue_zero, &cnt.v_free_count},
{ &vm_page_queue_inactive, &cnt.v_inactive_count},
{ &vm_page_queue_active, &cnt.v_active_count},
{ &vm_page_queue_cache, &cnt.v_cache_count}
};
struct vpgqueues vm_page_queues[PQ_COUNT];
int pqcnt[PQ_COUNT];
static void
vm_page_queue_init(void) {
int i;
vm_page_queues[PQ_NONE].pl = NULL;
vm_page_queues[PQ_NONE].cnt = &no_queue;
for(i=0;i<PQ_L2_SIZE;i++) {
vm_page_queues[PQ_FREE+i].pl = &vm_page_queue_free[i];
vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
}
for(i=0;i<PQ_L2_SIZE;i++) {
vm_page_queues[PQ_ZERO+i].pl = &vm_page_queue_zero[i];
vm_page_queues[PQ_ZERO+i].cnt = &cnt.v_free_count;
}
vm_page_queues[PQ_INACTIVE].pl = &vm_page_queue_inactive;
vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
vm_page_queues[PQ_ACTIVE].pl = &vm_page_queue_active;
vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
for(i=0;i<PQ_L2_SIZE;i++) {
vm_page_queues[PQ_CACHE+i].pl = &vm_page_queue_cache[i];
vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
}
for(i=0;i<PQ_COUNT;i++) {
if (vm_page_queues[i].pl) {
TAILQ_INIT(vm_page_queues[i].pl);
} else if (i != 0) {
panic("vm_page_queue_init: queue %d is null", i);
}
vm_page_queues[i].lcnt = &pqcnt[i];
}
}
vm_page_t vm_page_array;
static int vm_page_array_size;
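
(Editorial note.) vm_page_queue_init builds one flat array of queues: index 0
is PQ_NONE, followed by PQ_L2_SIZE free queues, PQ_L2_SIZE zero queues, the
single inactive and active queues, and PQ_L2_SIZE cache queues. A sketch that
prints the resulting layout for the default color count of 16:

#include <stdio.h>

/* default (PQ_NORMALCACHE) constants from vm_page.h in this commit */
#define PQ_L2_SIZE	16
#define PQ_FREE		1
#define PQ_ZERO		(1 + PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_ACTIVE	(2 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(3 + 2*PQ_L2_SIZE)
#define PQ_COUNT	(3 + 3*PQ_L2_SIZE)

int
main(void)
{
	/* prints: FREE 1..16 ZERO 17..32 INACTIVE 33 ACTIVE 34
	 * CACHE 35..50 COUNT 51 */
	printf("FREE %d..%d ZERO %d..%d INACTIVE %d ACTIVE %d "
	    "CACHE %d..%d COUNT %d\n",
	    PQ_FREE, PQ_FREE + PQ_L2_SIZE - 1,
	    PQ_ZERO, PQ_ZERO + PQ_L2_SIZE - 1,
	    PQ_INACTIVE, PQ_ACTIVE,
	    PQ_CACHE, PQ_CACHE + PQ_L2_SIZE - 1, PQ_COUNT);
	return (0);
}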
@ -228,11 +254,7 @@ vm_page_startup(starta, enda, vaddr)
* and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_zero);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
TAILQ_INIT(&vm_page_queue_cache);
vm_page_queue_init();
/*
* Allocate (and initialize) the hash table buckets.
@ -350,10 +372,12 @@ vm_page_startup(starta, enda, vaddr)
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
m->queue = PQ_FREE;
m->flags = 0;
m->phys_addr = pa;
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
m->queue = PQ_FREE + m->pc;
TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
++(*vm_page_queues[m->queue].lcnt);
pa += PAGE_SIZE;
}
}
@ -385,7 +409,7 @@ vm_page_hash(object, pindex)
* The object and page must be locked, and must be splhigh.
*/
__inline void
void
vm_page_insert(m, object, pindex)
register vm_page_t m;
register vm_object_t object;
@ -434,7 +458,7 @@ vm_page_insert(m, object, pindex)
* The object and page must be locked, and at splhigh.
*/
__inline void
void
vm_page_remove(m)
register vm_page_t m;
{
@ -525,7 +549,7 @@ vm_page_rename(m, new_object, new_pindex)
/*
* vm_page_unqueue without any wakeup
*/
__inline void
void
vm_page_unqueue_nowakeup(m)
vm_page_t m;
{
@ -534,14 +558,14 @@ vm_page_unqueue_nowakeup(m)
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
--(*vm_page_queues[queue].lcnt);
}
}
/*
* vm_page_unqueue must be called at splhigh();
*/
__inline void
void
vm_page_unqueue(m)
vm_page_t m;
{
@ -550,7 +574,8 @@ vm_page_unqueue(m)
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
if (queue == PQ_CACHE) {
--(*vm_page_queues[queue].lcnt);
if ((m->queue - m->pc) == PQ_CACHE) {
if ((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_free_reserved + cnt.v_cache_min))
pagedaemon_wakeup();
@ -558,6 +583,129 @@ vm_page_unqueue(m)
}
}
/*
* Find a page on the specified queue with color optimization.
*/
vm_page_t
vm_page_list_find(basequeue, index)
int basequeue, index;
{
int i,j;
vm_page_t m;
int hindex;
#if PQ_L2_SIZE > 1
index &= PQ_L2_MASK;
/*
* These are special-cased because of clock arithmetic
*/
for(i = 0; i < 2; i++) {
if (m = TAILQ_FIRST(vm_page_queues[basequeue +
((index + (i*PQ_L2_SIZE)/2)&PQ_L2_MASK)].pl))
return m;
}
for(j = 0; j < PQ_L1_SIZE; j++) {
for(i = PQ_L2_SIZE/PQ_L1_SIZE; i > 0; i -= PQ_L1_SIZE) {
hindex = (index + (i+j)) & PQ_L2_MASK;
m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
if (m)
return m;
hindex = (index - (i+j)) & PQ_L2_MASK;
m = TAILQ_FIRST(vm_page_queues[basequeue + hindex].pl);
if (m)
return m;
}
}
return NULL;
#else
return TAILQ_FIRST(vm_page_queues[basequeue].pl);
#endif
}
/*
* Find a free or zero page, with specified preference.
*/
vm_page_t
vm_page_select_free(object, pindex, prefqueue)
vm_object_t object;
vm_pindex_t pindex;
int prefqueue;
{
int i,j,k;
vm_page_t m;
int index, hindex;
int oqueuediff;
if (prefqueue == PQ_ZERO)
oqueuediff = PQ_FREE - PQ_ZERO;
else
oqueuediff = PQ_ZERO - PQ_FREE;
#if PQ_L2_SIZE > 1
index = pindex + object->pg_color;
/*
* These are special-cased because of clock arithmetic
*/
for(i = 0; i < 2; i++) {
hindex = prefqueue +
((index + (i*PQ_L2_SIZE/2)) & PQ_L2_MASK);
if (m = TAILQ_FIRST(vm_page_queues[hindex].pl))
return m;
if (m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl))
return m;
}
for(j = 0; j < PQ_L1_SIZE; j++) {
for(i = PQ_L2_SIZE/PQ_L1_SIZE - PQ_L1_SIZE;
(i + j) > 0;
i -= PQ_L1_SIZE) {
int iandj = i + j;
for(k = iandj; k >= -iandj; k -= 2*iandj) {
hindex = prefqueue + ((index + k) & PQ_L2_MASK);
if (m = TAILQ_FIRST(vm_page_queues[hindex].pl))
return m;
if (m = TAILQ_FIRST(vm_page_queues[hindex + oqueuediff].pl))
return m;
}
}
}
#else
if (m = TAILQ_FIRST(vm_page_queues[prefqueue].pl))
return m;
else
return TAILQ_FIRST(vm_page_queues[prefqueue + oqueuediff].pl);
#endif
return NULL;
}
/*
* Find a page of the proper color for a given pindex.
*/
vm_page_t
vm_page_select(object, pindex, basequeue)
vm_object_t object;
vm_pindex_t pindex;
int basequeue;
{
int index;
switch(basequeue) {
case PQ_NONE:
case PQ_INACTIVE:
case PQ_ACTIVE:
return TAILQ_FIRST(vm_page_queues[basequeue].pl);
default:
index = (pindex + object->pg_color) & PQ_L2_MASK;
return vm_page_list_find(basequeue, index);
}
}
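
(Editorial note.) vm_page_select derives the preferred color from the page's
index within its object plus the object's starting color, so consecutive
pages of one object walk through consecutive colors and therefore distinct
cache bins. A one-line restatement of that mapping, using the commit's own
names:

/*
 * Preferred color for a page, as computed in vm_page_select and
 * vm_page_select_free above.
 */
static __inline int
preferred_color(vm_object_t object, vm_pindex_t pindex)
{
	return ((pindex + object->pg_color) & PQ_L2_MASK);
}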
/*
* vm_page_alloc:
*
@ -598,13 +746,11 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = TAILQ_FIRST(&vm_page_queue_free);
if (m == NULL) {
--vm_page_zero_count;
m = TAILQ_FIRST(&vm_page_queue_zero);
}
m = vm_page_select_free(object, pindex, PQ_FREE);
if (m == NULL)
panic("vm_page_alloc(NORMAL): missing page on free queue\n");
} else {
m = TAILQ_FIRST(&vm_page_queue_cache);
m = vm_page_select(object, pindex, PQ_CACHE);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@ -619,14 +765,11 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_ZERO:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = TAILQ_FIRST(&vm_page_queue_zero);
if (m) {
--vm_page_zero_count;
m = vm_page_select_free(object, pindex, PQ_ZERO);
if (m == NULL)
panic("vm_page_alloc(ZERO): missing page on free queue\n");
} else {
m = TAILQ_FIRST(&vm_page_queue_free);
}
} else {
m = TAILQ_FIRST(&vm_page_queue_cache);
m = vm_page_select(object, pindex, PQ_CACHE);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@ -643,13 +786,11 @@ vm_page_alloc(object, pindex, page_req)
if ((cnt.v_free_count >= cnt.v_free_reserved) ||
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
m = TAILQ_FIRST(&vm_page_queue_free);
if (m == NULL) {
--vm_page_zero_count;
m = TAILQ_FIRST(&vm_page_queue_zero);
}
m = vm_page_select_free(object, pindex, PQ_FREE);
if (m == NULL)
panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
} else {
m = TAILQ_FIRST(&vm_page_queue_cache);
m = vm_page_select(object, pindex, PQ_CACHE);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@ -664,11 +805,7 @@ vm_page_alloc(object, pindex, page_req)
case VM_ALLOC_INTERRUPT:
if (cnt.v_free_count > 0) {
m = TAILQ_FIRST(&vm_page_queue_free);
if (m == NULL) {
--vm_page_zero_count;
m = TAILQ_FIRST(&vm_page_queue_zero);
}
m = vm_page_select_free(object, pindex, PQ_FREE);
} else {
splx(s);
pagedaemon_wakeup();
@ -681,11 +818,14 @@ vm_page_alloc(object, pindex, page_req)
}
queue = m->queue;
if (queue == PQ_ZERO)
--vm_page_zero_count;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
if (queue == PQ_ZERO) {
--(*vm_page_queues[queue].lcnt);
if ((m->queue - m->pc) == PQ_ZERO) {
m->flags = PG_ZERO|PG_BUSY;
} else if (queue == PQ_CACHE) {
} else if ((m->queue - m->pc) == PQ_CACHE) {
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@ -733,14 +873,15 @@ vm_page_activate(m)
if (m->queue == PQ_ACTIVE)
panic("vm_page_activate: already active");
if (m->queue == PQ_CACHE)
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;
vm_page_unqueue(m);
if (m->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
++(*vm_page_queues[PQ_ACTIVE].lcnt);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
cnt.v_active_count++;
@ -757,12 +898,12 @@ vm_page_freechk_and_unqueue(m)
{
if (m->busy ||
(m->flags & PG_BUSY) ||
(m->queue == PQ_FREE) ||
((m->queue - m->pc) == PQ_FREE) ||
(m->hold_count != 0)) {
printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
m->pindex, m->busy,
(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
if (m->queue == PQ_FREE)
if ((m->queue - m->pc) == PQ_FREE)
panic("vm_page_free: freeing free page");
else
panic("vm_page_free: freeing busy page");
@ -835,8 +976,9 @@ vm_page_free(m)
return;
}
m->queue = PQ_FREE;
m->queue = PQ_FREE + m->pc;
++(*vm_page_queues[m->queue].lcnt);
++(*vm_page_queues[m->queue].cnt);
/*
* If the pageout process is grabbing the page, it is likely
* that the page is NOT in the cache. It is more likely that
@ -844,12 +986,10 @@ vm_page_free(m)
* explicitly freed.
*/
if (curproc == pageproc) {
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
} else {
TAILQ_INSERT_HEAD(&vm_page_queue_free, m, pageq);
TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
}
cnt.v_free_count++;
vm_page_free_wakeup();
splx(s);
}
@ -869,11 +1009,12 @@ vm_page_free_zero(m)
return;
}
m->queue = PQ_ZERO;
m->queue = PQ_ZERO + m->pc;
++(*vm_page_queues[m->queue].lcnt);
++(*vm_page_queues[m->queue].cnt);
TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m, pageq);
++vm_page_zero_count;
cnt.v_free_count++;
vm_page_free_wakeup();
splx(s);
}
@ -899,6 +1040,7 @@ vm_page_wire(m)
splx(s);
cnt.v_wire_count++;
}
++(*vm_page_queues[PQ_NONE].lcnt);
m->wire_count++;
m->flags |= PG_MAPPED;
}
@ -926,6 +1068,7 @@ vm_page_unwire(m)
cnt.v_wire_count--;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
++(*vm_page_queues[PQ_ACTIVE].lcnt);
cnt.v_active_count++;
}
splx(s);
@ -959,11 +1102,12 @@ vm_page_deactivate(m)
s = splvm();
if (m->wire_count == 0 && m->hold_count == 0) {
if (m->queue == PQ_CACHE)
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;
vm_page_unqueue(m);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m->queue = PQ_INACTIVE;
++(*vm_page_queues[PQ_INACTIVE].lcnt);
cnt.v_inactive_count++;
}
splx(s);
@ -984,7 +1128,7 @@ vm_page_cache(m)
printf("vm_page_cache: attempting to cache busy page\n");
return;
}
if (m->queue == PQ_CACHE)
if ((m->queue - m->pc) == PQ_CACHE)
return;
vm_page_protect(m, VM_PROT_NONE);
@ -993,8 +1137,9 @@ vm_page_cache(m)
}
s = splvm();
vm_page_unqueue_nowakeup(m);
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
m->queue = PQ_CACHE;
m->queue = PQ_CACHE + m->pc;
++(*vm_page_queues[m->queue].lcnt);
TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
cnt.v_cache_count++;
vm_page_free_wakeup();
splx(s);
@ -1114,7 +1259,8 @@ contigmalloc(size, type, flags, low, high, alignment, boundary)
*/
for (i = start; i < cnt.v_page_count; i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
if ((pga[i].queue == PQ_FREE) &&
if (((pga[i].queue >= PQ_FREE) &&
(pga[i].queue < (PQ_FREE + PQ_L2_SIZE))) &&
(phys >= low) && (phys < high) &&
((phys & (alignment - 1)) == 0) &&
(((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
@ -1137,7 +1283,8 @@ contigmalloc(size, type, flags, low, high, alignment, boundary)
for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
if ((VM_PAGE_TO_PHYS(&pga[i]) !=
(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
(pga[i].queue != PQ_FREE)) {
((pga[i].queue < PQ_FREE) ||
(pga[i].queue >= (PQ_FREE + PQ_L2_SIZE)))) {
start++;
goto again;
}
@ -1157,7 +1304,8 @@ contigmalloc(size, type, flags, low, high, alignment, boundary)
for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];
TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
--(*vm_page_queues[m->queue].lcnt);
cnt.v_free_count--;
m->valid = VM_PAGE_BITS_ALL;
m->flags = 0;
@ -1201,4 +1349,31 @@ DDB_print_page_info(void)
printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
void
DDB_print_pageq_info(void)
{
int i;
printf("PQ_FREE:");
for(i=0;i<PQ_L2_SIZE;i++) {
printf(" %d", *vm_page_queues[PQ_FREE + i].lcnt);
}
printf("\n");
printf("PQ_CACHE:");
for(i=0;i<PQ_L2_SIZE;i++) {
printf(" %d", *vm_page_queues[PQ_CACHE + i].lcnt);
}
printf("\n");
printf("PQ_ZERO:");
for(i=0;i<PQ_L2_SIZE;i++) {
printf(" %d", *vm_page_queues[PQ_ZERO + i].lcnt);
}
printf("\n");
printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
*vm_page_queues[PQ_ACTIVE].lcnt,
*vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif

File: vm_page.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.30 1996/07/27 03:24:06 dyson Exp $
* $Id: vm_page.h,v 1.31 1996/07/30 03:08:17 dyson Exp $
*/
/*
@ -107,8 +107,9 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P) */
vm_pindex_t pindex; /* offset into object (O,P) */
vm_offset_t phys_addr; /* physical address of page */
u_short queue:4, /* page queue index */
flags:12; /* see below */
u_short queue; /* page queue index */
u_short flags, /* see below */
pc; /* page color */
u_short wire_count; /* wired down maps refs (P) */
short hold_count; /* page hold count */
u_char act_count; /* page usage count */
@ -119,12 +120,62 @@ struct vm_page {
u_char dirty; /* map of dirty DEV_BSIZE chunks */
};
/*
* Page coloring parameters
*/
/* Each of PQ_FREE, PQ_ZERO and PQ_CACHE has PQ_L2_SIZE entries */
/* Define one of the following */
#if defined(PQ_LARGECACHE)
#define PQ_PRIME1 31 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 17 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128 /* A number of colors opt for 512K cache */
#define PQ_L1_SIZE 2 /* Two page L1 cache */
#endif
#if defined(PQ_MEDIUMCACHE)
#define PQ_PRIME1 13 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 5 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64 /* A number of colors opt for 256K cache */
#define PQ_L1_SIZE 2 /* Two page L1 cache */
#endif
/*
* Use 'options PQ_NOOPT' to disable page coloring
*/
#if defined(PQ_NOOPT)
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_PRIME3 1
#define PQ_L2_SIZE 1
#define PQ_L1_SIZE 1
#endif
#if defined(PQ_NORMALCACHE) || !defined(PQ_L2_SIZE)
#define PQ_PRIME1 5 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 11 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16 /* A reasonable number of colors (opt for 64K cache) */
#define PQ_L1_SIZE 2 /* Two page L1 cache */
#endif
#define PQ_L2_MASK (PQ_L2_SIZE - 1)
#define PQ_NONE 0
#define PQ_FREE 1
#define PQ_ZERO 2
#define PQ_INACTIVE 3
#define PQ_ACTIVE 4
#define PQ_CACHE 5
#define PQ_ZERO (1 + PQ_L2_SIZE)
#define PQ_INACTIVE (1 + 2*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 2*PQ_L2_SIZE)
#define PQ_CACHE (3 + 2*PQ_L2_SIZE)
#define PQ_COUNT (3 + 3*PQ_L2_SIZE)
extern struct vpgqueues {
struct pglist *pl;
int *cnt;
int *lcnt;
} vm_page_queues[PQ_COUNT];
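
(Editorial note.) Each page records its own color in the new pc field,
assigned in vm_page_startup (see the vm_page.c hunk above) from the physical
address: consecutive physical pages get consecutive colors, wrapping every
PQ_L2_SIZE pages, which is exactly the interval at which a direct-mapped
cache aliases. A sketch of that derivation:

/*
 * Color of a physical page: the low bits of the physical page
 * number select the color, as in vm_page_startup in this commit.
 */
static __inline u_short
phys_to_color(vm_offset_t pa)
{
	return ((pa >> PAGE_SHIFT) & PQ_L2_MASK);
}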
/*
* These are the flags defined for vm_page.
@ -148,7 +199,7 @@ struct vm_page {
#define ACT_DECLINE 1
#define ACT_ADVANCE 3
#define ACT_INIT 5
#define ACT_MAX 32
#define ACT_MAX 64
#define PFCLUSTER_BEHIND 3
#define PFCLUSTER_AHEAD 3
@ -180,11 +231,11 @@ struct vm_page {
*
*/
extern struct pglist vm_page_queue_free; /* memory free queue */
extern struct pglist vm_page_queue_zero; /* zeroed memory free queue */
extern struct pglist vm_page_queue_free[PQ_L2_SIZE];/* memory free queue */
extern struct pglist vm_page_queue_zero[PQ_L2_SIZE];/* zeroed memory free queue */
extern struct pglist vm_page_queue_active; /* active memory queue */
extern struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern struct pglist vm_page_queue_cache; /* cache memory queue */
extern struct pglist vm_page_queue_cache[PQ_L2_SIZE];/* cache memory queue */
extern int vm_page_zero_count;
@ -259,6 +310,9 @@ static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t vm_page_list_find __P((int, int));
int vm_page_queue_index __P((vm_offset_t, int));
vm_page_t vm_page_select __P((vm_object_t, vm_pindex_t, int));
/*
* Keep page from being freed by the page daemon

File: vm_pageout.c

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.83 1996/07/27 03:24:08 dyson Exp $
* $Id: vm_pageout.c,v 1.84 1996/07/30 03:08:19 dyson Exp $
*/
/*
@ -256,7 +256,8 @@ vm_pageout_clean(m, sync)
}
p = vm_page_lookup(object, pindex + i);
if (p) {
if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy) {
if (((p->queue - p->pc) == PQ_CACHE) ||
(p->flags & PG_BUSY) || p->busy) {
forward_okay = FALSE;
goto do_backward;
}
@ -290,7 +291,8 @@ vm_pageout_clean(m, sync)
}
p = vm_page_lookup(object, pindex - i);
if (p) {
if ((p->queue == PQ_CACHE) || (p->flags & PG_BUSY) || p->busy) {
if (((p->queue - p->pc) == PQ_CACHE) ||
(p->flags & PG_BUSY) || p->busy) {
backward_okay = FALSE;
continue;
}
@ -831,9 +833,11 @@ vm_pageout_scan()
* code to be guaranteed space.
*/
while (cnt.v_free_count < cnt.v_free_reserved) {
m = TAILQ_FIRST(&vm_page_queue_cache);
static int cache_rover = 0;
m = vm_page_list_find(PQ_CACHE, cache_rover);
if (!m)
break;
cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
vm_page_free(m);
cnt.v_dfree++;
}
@ -928,7 +932,7 @@ vm_size_t count;
cnt.v_interrupt_free_min;
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (count / 768);
cnt.v_free_min += cnt.v_free_reserved;
cnt.v_free_min += cnt.v_free_reserved + PQ_L2_SIZE;
return 1;
}

File: vm_pager.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.22 1996/05/03 21:01:53 phk Exp $
* $Id: vm_pager.c,v 1.23 1996/05/18 03:38:05 dyson Exp $
*/
/*
@ -298,6 +298,7 @@ getpbuf()
bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
bp->b_qindex = QUEUE_NONE;
bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
bp->b_vnbufs.le_next = NOLIST;
return bp;
@ -323,6 +324,7 @@ trypbuf()
bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
bp->b_qindex = QUEUE_NONE;
bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
bp->b_vnbufs.le_next = NOLIST;
return bp;