Keep page-table pages from ever being sensed as dirty. This should fix
some problems with the page-table page management code, since it can't
deal with the notion of page-table pages being paged out or in transit.
Also, clean up some stylistic issues per suggestions from Stephen McKay.
dyson 1996-06-05 03:31:49 +00:00
parent 2209006899
commit 0d44d496d0
4 changed files with 223 additions and 106 deletions
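
For orientation, here is a minimal sketch of the rule the pmap.c hunks below enforce in pmap_remove_all() and pmap_testbit(). The helper name is hypothetical; the constants and range checks are the ones added in the diff.

/*
 * Hypothetical helper, not part of the commit: mappings that fall inside
 * the recursive page-table window (UPT_MIN_ADDRESS .. UPT_MAX_ADDRESS)
 * are skipped, so a page-table page is never sensed as dirty.
 */
static void
mark_dirty_if_pageable(vm_offset_t va, vm_page_t m)
{
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
		return;			/* page-table page: leave dirty bits alone */
	if (va < clean_sva || va >= clean_eva)
		m->dirty = VM_PAGE_BITS_ALL;	/* ordinary pageable mapping */
}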

pmap.c

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.97 1996/06/01 19:19:21 dyson Exp $
* $Id: pmap.c,v 1.98 1996/06/02 22:28:53 dyson Exp $
*/
/*
@@ -661,7 +661,7 @@ pmap_pinit(pmap)
pmap->pm_count = 1;
}
static __inline int
static int
pmap_release_free_page(pmap, p)
struct pmap *pmap;
vm_page_t p;
@@ -687,6 +687,7 @@ pmap_release_free_page(pmap, p)
panic("pmap_release: freeing held page table page");
#endif
/*
* HACK ALERT!!!
* If this failure happens, we must clear the page, because
* there is likely a mapping still valid. This condition
* is an error, but at least this zero operation will mitigate
@@ -708,12 +709,8 @@ pmap_release_free_page(pmap, p)
pmap_kremove((vm_offset_t) pmap->pm_pdir);
}
vm_page_free(p);
TAILQ_REMOVE(&vm_page_queue_free, p, pageq);
TAILQ_INSERT_HEAD(&vm_page_queue_zero, p, pageq);
p->queue = PQ_ZERO;
vm_page_free_zero(p);
splx(s);
++vm_page_zero_count;
return 1;
}
@@ -728,6 +725,21 @@ pmap_release(pmap)
{
vm_page_t p,n,ptdpg;
vm_object_t object = pmap->pm_pteobj;
int s;
if (object->ref_count != 1)
panic("pmap_release: pteobj reference count != 1");
/*
* Wait until any (bogus) paging activity on this object is
* complete.
*/
s = splvm();
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
tsleep(object,PVM,"pmrlob",0);
}
splx(s);
ptdpg = NULL;
retry:
@@ -737,9 +749,14 @@ pmap_release(pmap)
ptdpg = p;
continue;
}
if ((p->flags & PG_BUSY) || p->busy)
continue;
if (!pmap_release_free_page(pmap, p))
goto retry;
}
if (ptdpg == NULL)
panic("pmap_release: missing page table directory page");
pmap_release_free_page(pmap, ptdpg);
vm_object_deallocate(object);
@@ -873,9 +890,10 @@ get_pv_entry()
}
/*
* this *strange* allocation routine *statistically* eliminates the
* *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure.
* This *strange* allocation routine eliminates the possibility of a malloc
* failure (*FATAL*) for a pv_entry_t data structure.
* also -- this code is MUCH MUCH faster than the malloc equiv...
* We really need to do the slab allocator thingie here.
*/
static void
pmap_alloc_pv_entry()
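
As an aside, the free-list scheme that comment alludes to can be sketched roughly as follows; NPVPPG, pv_freelist, and pv_entry_count are illustrative assumptions here, not necessarily the identifiers pmap.c actually uses.

/*
 * Illustrative sketch only: carve one freshly allocated kernel page into
 * pv_entry structures and thread them onto a free list, so get_pv_entry()
 * can hand out entries without ever calling malloc().
 */
#define NPVPPG	(PAGE_SIZE / sizeof(struct pv_entry))	/* entries per page */

static void
pv_fill_freelist(vm_offset_t newpage)	/* kernel VA of a fresh page */
{
	pv_entry_t pv = (pv_entry_t) newpage;
	int i;

	for (i = 0; i < NPVPPG; i++, pv++) {
		pv->pv_next = pv_freelist;	/* assumed free-list head */
		pv_freelist = pv;
	}
	pv_entry_count += NPVPPG;		/* assumed bookkeeping counter */
}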
@@ -1249,6 +1267,10 @@ pmap_remove_all(pa)
printf("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, tpte);
}
#endif
if ((va >= UPT_MIN_ADDRESS) &&
(va < UPT_MAX_ADDRESS))
continue;
if (va < clean_sva || va >= clean_eva) {
m->dirty = VM_PAGE_BITS_ALL;
}
@@ -1453,8 +1475,6 @@ _pmap_allocpte(pmap, va, ptepindex)
pv->pv_ptem = NULL;
ptepa = VM_PAGE_TO_PHYS(m);
pmap->pm_pdir[ptepindex] =
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_MANAGED);
ppv = pa_to_pvh(ptepa);
#if defined(PMAP_DIAGNOSTIC)
if (*ppv)
@@ -1462,15 +1482,15 @@ _pmap_allocpte(pmap, va, ptepindex)
#endif
*ppv = pv;
splx(s);
pmap_update_1pg(pteva);
} else {
#if defined(PMAP_DIAGNOSTIC)
if (VM_PAGE_TO_PHYS(m) != (ptepa & PG_FRAME))
panic("pmap_allocpte: mismatch");
#endif
pmap->pm_pdir[ptepindex] =
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_MANAGED);
}
pmap_update();
pmap->pm_pdir[ptepindex] =
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_MANAGED);
m->flags |= PG_MAPPED;
return m;
}
@@ -2136,7 +2156,12 @@ pmap_testbit(pa, bit)
* modified.
*/
if (bit & (PG_A|PG_M)) {
if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
if ((pv->pv_va >= UPT_MIN_ADDRESS) &&
(pv->pv_va < UPT_MAX_ADDRESS)) {
continue;
}
if ((pv->pv_va >= clean_sva) &&
(pv->pv_va < clean_eva)) {
continue;
}
}

vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.52 1996/05/24 05:20:15 dyson Exp $
* $Id: vm_page.c,v 1.53 1996/05/31 00:38:03 dyson Exp $
*/
/*
@@ -141,6 +141,10 @@ static inline __pure int
vm_page_hash __P((vm_object_t object, vm_pindex_t pindex))
__pure2;
static void vm_page_unqueue_nowakeup __P((vm_page_t m));
static int vm_page_freechk_and_unqueue __P((vm_page_t m));
static void vm_page_free_wakeup __P((void));
/*
* vm_set_page_size:
*
@@ -519,24 +523,40 @@ vm_page_rename(m, new_object, new_pindex)
splx(s);
}
/*
* vm_page_unqueue without any wakeup
*/
static __inline void
vm_page_unqueue_nowakeup(m)
vm_page_t m;
{
int queue = m->queue;
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
}
}
/*
* vm_page_unqueue must be called at splhigh();
*/
__inline void
vm_page_unqueue(vm_page_t m)
vm_page_unqueue(m)
vm_page_t m;
{
int queue = m->queue;
if (queue == PQ_NONE)
return;
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
if (queue == PQ_CACHE) {
if ((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_free_reserved + cnt.v_cache_min))
pagedaemon_wakeup();
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
TAILQ_REMOVE(vm_page_queues[queue].pl, m, pageq);
--(*vm_page_queues[queue].cnt);
if (queue == PQ_CACHE) {
if ((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_free_reserved + cnt.v_cache_min))
pagedaemon_wakeup();
}
}
return;
}
/*
@@ -726,6 +746,69 @@ vm_page_activate(m)
splx(s);
}
/*
* helper routine for vm_page_free and vm_page_free_zero
*/
static int
vm_page_freechk_and_unqueue(m)
vm_page_t m;
{
if (m->busy ||
(m->flags & PG_BUSY) ||
(m->queue == PQ_FREE) ||
(m->hold_count != 0)) {
printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
m->pindex, m->busy,
(m->flags & PG_BUSY) ? 1 : 0, m->hold_count);
if (m->queue == PQ_FREE)
panic("vm_page_free: freeing free page");
else
panic("vm_page_free: freeing busy page");
}
vm_page_remove(m);
vm_page_unqueue_nowakeup(m);
if ((m->flags & PG_FICTITIOUS) != 0) {
return 0;
}
if (m->wire_count != 0) {
if (m->wire_count > 1) {
panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
m->wire_count, m->pindex);
}
m->wire_count = 0;
}
return 1;
}
/*
* helper routine for vm_page_free and vm_page_free_zero
*/
static __inline void
vm_page_free_wakeup()
{
/*
* if pageout daemon needs pages, then tell it that there are
* some free.
*/
if (vm_pageout_pages_needed) {
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
cnt.v_free_count++;
/*
* wakeup processes that are waiting on memory if we hit a
* high water mark. And wakeup scheduler process if we have
* lots of memory. this process will swapin processes.
*/
if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
wakeup(&cnt.v_free_count);
}
}
/*
* vm_page_free:
*
@@ -739,74 +822,57 @@ vm_page_free(m)
register vm_page_t m;
{
int s;
int flags = m->flags;
s = splvm();
if (m->busy || (flags & PG_BUSY) || (m->queue == PQ_FREE)) {
printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d)\n",
m->pindex, m->busy, (flags & PG_BUSY) ? 1 : 0);
if (m->queue == PQ_FREE)
panic("vm_page_free: freeing free page");
else
panic("vm_page_free: freeing busy page");
}
if (m->hold_count) {
panic("freeing held page, count=%d, pindex=%d(0x%x)",
m->hold_count, m->pindex, m->pindex);
}
vm_page_remove(m);
vm_page_unqueue(m);
if ((flags & PG_FICTITIOUS) == 0) {
if (m->wire_count) {
if (m->wire_count > 1) {
printf("vm_page_free: wire count > 1 (%d)", m->wire_count);
panic("vm_page_free: invalid wire count");
}
cnt.v_wire_count--;
m->wire_count = 0;
}
m->queue = PQ_FREE;
/*
* If the pageout process is grabbing the page, it is likely
* that the page is NOT in the cache. It is more likely that
* the page will be partially in the cache if it is being
* explicitly freed.
*/
if (curproc == pageproc) {
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
} else {
TAILQ_INSERT_HEAD(&vm_page_queue_free, m, pageq);
}
splx(s);
/*
* if pageout daemon needs pages, then tell it that there are
* some free.
*/
if (vm_pageout_pages_needed) {
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
cnt.v_free_count++;
/*
* wakeup processes that are waiting on memory if we hit a
* high water mark. And wakeup scheduler process if we have
* lots of memory. this process will swapin processes.
*/
if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
wakeup(&cnt.v_free_count);
}
} else {
splx(s);
}
cnt.v_tfree++;
if (!vm_page_freechk_and_unqueue(m)) {
splx(s);
return;
}
m->queue = PQ_FREE;
/*
* If the pageout process is grabbing the page, it is likely
* that the page is NOT in the cache. It is more likely that
* the page will be partially in the cache if it is being
* explicitly freed.
*/
if (curproc == pageproc) {
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
} else {
TAILQ_INSERT_HEAD(&vm_page_queue_free, m, pageq);
}
splx(s);
vm_page_free_wakeup();
}
void
vm_page_free_zero(m)
register vm_page_t m;
{
int s;
s = splvm();
cnt.v_tfree++;
if (!vm_page_freechk_and_unqueue(m)) {
splx(s);
return;
}
m->queue = PQ_ZERO;
TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
++vm_page_zero_count;
splx(s);
vm_page_free_wakeup();
}
/*
* vm_page_wire:
@@ -922,7 +988,7 @@ vm_page_cache(m)
panic("vm_page_cache: caching a dirty page, pindex: %d", m->pindex);
}
s = splvm();
vm_page_unqueue(m);
vm_page_unqueue_nowakeup(m);
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
m->queue = PQ_CACHE;
cnt.v_cache_count++;

vm_page.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.25 1996/01/30 23:02:38 mpp Exp $
* $Id: vm_page.h,v 1.26 1996/05/18 04:00:18 dyson Exp $
*/
/*
@@ -242,6 +242,7 @@ void vm_page_cache __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_free_zero __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
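
A brief usage note on the new prototype: a caller that knows a page's contents are already all zeroes, as pmap_release_free_page() does for page-table pages in the pmap.c hunk earlier, can free it onto the zero queue instead of the ordinary free list. In this hedged sketch, page_is_zeroed stands in for the caller's own knowledge and is not a real flag.

	/* Hypothetical caller of the new interface. */
	if (page_is_zeroed)
		vm_page_free_zero(p);	/* page goes onto vm_page_queue_zero */
	else
		vm_page_free(p);	/* page goes onto vm_page_queue_free */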