Several bugfixes/improvements:

	1) Make it much less likely to miss a wakeup in vm_page_free_wakeup
	2) Create a new entry point into pmap: pmap_ts_referenced, eliminates
	   the need to scan the pv lists twice in many cases.  Perhaps there
	   is a lot more to do here to work on minimizing pv list manipulation
	3) Minor improvements to vm_pageout including the use of pmap_ts_ref.
	4) Major changes and code improvement to pmap.  This code has had
	   several serious bugs in page table page manipulation.  In order
	   to simplify the problem, and hopefully solve it once and for all,
	   page table pages are no longer "managed" with the pv list stuff.
	   Page table pages are only (mapped and held/wired) or
	   (free and unused) now.  Page table pages are never inactive,
	   active or cached.  These changes have probably fixed the
	   hold count problems, but if they haven't, then the code is
	   simpler anyway for future bugfixing.
	5) The pmap code has been sorely in need of re-organization, and I
	   have taken a first (of probably many) steps.  Please tell me
	   if you have any ideas.
This commit is contained in:
John Dyson 1996-06-17 03:35:40 +00:00
parent 6875d5c1b3
commit ef743ce6ed
5 changed files with 1294 additions and 1009 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the * any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes. * rights to redistribute these changes.
* *
* $Id: pmap.h,v 1.9 1996/03/28 04:54:50 dyson Exp $ * $Id: pmap.h,v 1.10 1996/05/19 07:36:44 dyson Exp $
*/ */
/* /*
@ -101,6 +101,7 @@ void pmap_growkernel __P((vm_offset_t));
void pmap_init __P((vm_offset_t, vm_offset_t)); void pmap_init __P((vm_offset_t, vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t pa)); boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa)); boolean_t pmap_is_referenced __P((vm_offset_t pa));
boolean_t pmap_ts_referenced __P((vm_offset_t pa));
void pmap_kenter __P((vm_offset_t, vm_offset_t)); void pmap_kenter __P((vm_offset_t, vm_offset_t));
void pmap_kremove __P((vm_offset_t)); void pmap_kremove __P((vm_offset_t));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int)); vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.56 1996/06/12 06:52:06 dyson Exp $ * $Id: vm_page.c,v 1.57 1996/06/16 20:37:31 dyson Exp $
*/ */
/* /*
@ -802,12 +802,13 @@ vm_page_free_wakeup()
* high water mark. And wakeup scheduler process if we have * high water mark. And wakeup scheduler process if we have
* lots of memory. this process will swapin processes. * lots of memory. this process will swapin processes.
*/ */
if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) { if (vm_pages_needed &&
((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
wakeup(&cnt.v_free_count); wakeup(&cnt.v_free_count);
vm_pages_needed = 0;
} }
} }
/* /*
* vm_page_free: * vm_page_free:
* *

View File

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the * any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes. * rights to redistribute these changes.
* *
* $Id: vm_pageout.c,v 1.75 1996/05/29 06:33:30 dyson Exp $ * $Id: vm_pageout.c,v 1.76 1996/05/31 00:38:04 dyson Exp $
*/ */
/* /*
@ -416,6 +416,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
rcount = object->resident_page_count; rcount = object->resident_page_count;
p = TAILQ_FIRST(&object->memq); p = TAILQ_FIRST(&object->memq);
while (p && (rcount-- > 0)) { while (p && (rcount-- > 0)) {
int refcount;
next = TAILQ_NEXT(p, listq); next = TAILQ_NEXT(p, listq);
cnt.v_pdpages++; cnt.v_pdpages++;
if (p->wire_count != 0 || if (p->wire_count != 0 ||
@ -426,13 +427,27 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
p = next; p = next;
continue; continue;
} }
refcount = 0;
if ((p->flags & PG_REFERENCED) == 0) {
refcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
if (refcount) {
p->flags |= PG_REFERENCED;
}
} else {
pmap_clear_reference(VM_PAGE_TO_PHYS(p));
}
if ((p->queue != PQ_ACTIVE) && (p->flags & PG_REFERENCED)) {
vm_page_activate(p);
}
/* /*
* if a page is active, not wired and is in the processes * if a page is active, not wired and is in the processes
* pmap, then deactivate the page. * pmap, then deactivate the page.
*/ */
if (p->queue == PQ_ACTIVE) { if (p->queue == PQ_ACTIVE) {
if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) && if ((p->flags & PG_REFERENCED) == 0) {
(p->flags & PG_REFERENCED) == 0) {
vm_page_protect(p, VM_PROT_NONE); vm_page_protect(p, VM_PROT_NONE);
if (!map_remove_only) if (!map_remove_only)
vm_page_deactivate(p); vm_page_deactivate(p);
@ -448,12 +463,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
} }
} }
} else { } else {
/*
* Move the page to the bottom of the queue.
*/
pmap_clear_reference(VM_PAGE_TO_PHYS(p));
p->flags &= ~PG_REFERENCED; p->flags &= ~PG_REFERENCED;
s = splvm(); s = splvm();
TAILQ_REMOVE(&vm_page_queue_active, p, pageq); TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq); TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
@ -576,14 +586,15 @@ vm_pageout_scan()
continue; continue;
} }
if (((m->flags & PG_REFERENCED) == 0) &&
pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
m->flags |= PG_REFERENCED;
}
if (m->object->ref_count == 0) { if (m->object->ref_count == 0) {
m->flags &= ~PG_REFERENCED; m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m)); pmap_clear_reference(VM_PAGE_TO_PHYS(m));
} else if (((m->flags & PG_REFERENCED) == 0) &&
pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
vm_page_activate(m);
continue;
} }
if ((m->flags & PG_REFERENCED) != 0) { if ((m->flags & PG_REFERENCED) != 0) {
m->flags &= ~PG_REFERENCED; m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m)); pmap_clear_reference(VM_PAGE_TO_PHYS(m));
@ -596,6 +607,7 @@ vm_pageout_scan()
} else if (m->dirty != 0) { } else if (m->dirty != 0) {
m->dirty = VM_PAGE_BITS_ALL; m->dirty = VM_PAGE_BITS_ALL;
} }
if (m->valid == 0) { if (m->valid == 0) {
vm_page_protect(m, VM_PROT_NONE); vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m); vm_page_free(m);
@ -706,9 +718,12 @@ vm_pageout_scan()
page_shortage += addl_page_shortage; page_shortage += addl_page_shortage;
} }
rescan1:
pcount = cnt.v_active_count; pcount = cnt.v_active_count;
m = TAILQ_FIRST(&vm_page_queue_active); m = TAILQ_FIRST(&vm_page_queue_active);
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) { while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
int refcount;
if (m->queue != PQ_ACTIVE) { if (m->queue != PQ_ACTIVE) {
#if defined(DIAGNOSTIC) #if defined(DIAGNOSTIC)
@ -721,7 +736,7 @@ vm_pageout_scan()
else else
printf("object type: %d\n", m->object->type); printf("object type: %d\n", m->object->type);
#endif #endif
break; goto rescan1;
} }
next = TAILQ_NEXT(m, pageq); next = TAILQ_NEXT(m, pageq);
@ -745,35 +760,33 @@ vm_pageout_scan()
* page for eligbility... * page for eligbility...
*/ */
cnt.v_pdpages++; cnt.v_pdpages++;
if ((m->flags & PG_REFERENCED) == 0) {
if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { refcount = 0;
pmap_clear_reference(VM_PAGE_TO_PHYS(m)); if (m->object->ref_count != 0) {
m->flags |= PG_REFERENCED; if (m->flags & PG_REFERENCED) {
refcount += 1;
} }
} else { refcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
} }
if ( (m->object->ref_count != 0) &&
(m->flags & PG_REFERENCED) ) {
m->flags &= ~PG_REFERENCED; m->flags &= ~PG_REFERENCED;
if (refcount && m->object->ref_count != 0) {
s = splvm(); s = splvm();
TAILQ_REMOVE(&vm_page_queue_active, m, pageq); TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq); TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
splx(s); splx(s);
} else { } else {
m->flags &= ~PG_REFERENCED;
if (page_shortage > 0) {
--page_shortage; --page_shortage;
vm_page_protect(m, VM_PROT_NONE);
if (m->dirty == 0) if (m->dirty == 0)
vm_page_test_dirty(m); vm_page_test_dirty(m);
if (m->dirty == 0) { if ((m->object->ref_count == 0) && (m->dirty == 0)) {
vm_page_cache(m); vm_page_cache(m);
} else { } else {
vm_page_protect(m, VM_PROT_NONE);
vm_page_deactivate(m); vm_page_deactivate(m);
} }
} }
}
m = next; m = next;
} }