Change all manual settings of vm_page_t->dirty = VM_PAGE_BITS_ALL
to use the vm_page_dirty() inline.

The inline can thus do sanity checks (or not) over all cases.
Matthew Dillon 1999-01-24 06:04:52 +00:00
parent 84ef5a8f66
commit 7dbf82dc13
7 changed files with 30 additions and 32 deletions
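For context, vm_page_dirty() replaces the open-coded assignment with a single inline (in vm_page.h), so any sanity checking lives in one place. Below is a minimal sketch of what such an inline can look like; the exact KASSERT is an assumption on my part, not a quote of the committed header (the message's "(or not)" suggests the check may be compiled out):

static __inline void
vm_page_dirty(vm_page_t m)
{
	/*
	 * Plausible sanity check (assumed, not quoted from the commit):
	 * a page being dirtied should never be sitting on a cache queue,
	 * where it could be reclaimed without ever being written back.
	 */
	KASSERT((m->queue - m->pc) != PQ_CACHE,
	    ("vm_page_dirty: page in cache!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

Centralizing the store behind the inline means every call site converted below inherits that check, and any future bookkeeping, automatically.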

pmap.c (alpha)

@@ -43,7 +43,7 @@
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
* with some ideas from NetBSD's alpha pmap
-  * $Id: pmap.c,v 1.12 1998/10/28 13:36:49 dg Exp $
+  * $Id: pmap.c,v 1.13 1999/01/21 08:29:02 dillon Exp $
*/
/*
@@ -1068,9 +1068,9 @@ pmap_swapout_proc(p)
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_swapout_proc: upage already missing???");
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_unwire(m, 0);
- pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
+ pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
}
}
@@ -2980,7 +2980,7 @@ pmap_emulate_reference(struct proc *p, vm_offset_t v, int user, int write)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
if (write) {
ppv->pv_flags |= PV_TABLE_MOD;
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
faultoff |= PG_FOW;
}
pmap_changebit(pa, faultoff, FALSE);

pmap.c (i386)

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
-  * $Id: pmap.c,v 1.219 1999/01/12 00:17:53 eivind Exp $
+  * $Id: pmap.c,v 1.220 1999/01/21 08:29:03 dillon Exp $
*/
/*
@@ -1070,7 +1070,7 @@ pmap_swapout_proc(p)
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_swapout_proc: upage already missing???");
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_unwire(m, 0);
pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
}
@@ -1787,7 +1787,7 @@ pmap_remove_pte(pmap, ptq, va)
}
#endif
if (pmap_track_modified(va))
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
if (oldpte & PG_A)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
@@ -1989,7 +1989,7 @@ pmap_remove_all(pa)
}
#endif
if (pmap_track_modified(pv->pv_va))
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
if (!update_needed &&
((!curproc || (&curproc->p_vmspace->vm_pmap == pv->pv_pmap)) ||
@@ -2087,7 +2087,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (pmap_track_modified(i386_ptob(sindex))) {
if (ppv == NULL)
ppv = pa_to_pvh(pbits);
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
pbits &= ~PG_M;
}
}
@@ -2231,7 +2231,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
if ((origpte & PG_M) && pmap_track_modified(va)) {
pv_table_t *ppv;
ppv = pa_to_pvh(opa);
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
pa |= PG_MANAGED;
}
@@ -3015,7 +3015,7 @@ pmap_remove_pages(pmap, sva, eva)
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
@@ -3145,7 +3145,7 @@ pmap_changebit(pa, bit, setem)
changed = 1;
if (bit == PG_RW) {
if (pbits & PG_M) {
- ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(ppv->pv_vm_page);
}
*(int *)pte = pbits & ~(PG_M|PG_RW);
} else {

swap_pager.c

@@ -64,7 +64,7 @@
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
*
-  * $Id: swap_pager.c,v 1.109 1999/01/21 09:33:07 dillon Exp $
+  * $Id: swap_pager.c,v 1.110 1999/01/24 02:32:14 dillon Exp $
*/
#include <sys/param.h>
@@ -1098,7 +1098,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
blk + j,
0
);
- mreq->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(mreq);
rtvals[i+j] = VM_PAGER_OK;
vm_page_flag_set(mreq, PG_SWAPINPROG);
@@ -1319,7 +1319,7 @@ swp_pager_async_iodone(bp)
* so it doesn't clog the inactive list,
* then finish the I/O.
*/
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
vm_page_activate(m);
vm_page_io_finish(m);
}

vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-  * $Id: vm_fault.c,v 1.96 1999/01/23 06:00:27 dillon Exp $
+  * $Id: vm_fault.c,v 1.97 1999/01/24 00:55:04 dillon Exp $
*/
/*
@@ -760,7 +760,7 @@ RetryFault:;
* any swap backing since the page is now dirty.
*/
if (fault_flags & VM_FAULT_DIRTY) {
- fs.m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(fs.m);
vm_pager_page_unswapped(fs.m);
}
}

vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-  * $Id: vm_map.c,v 1.140 1999/01/21 08:29:10 dillon Exp $
+  * $Id: vm_map.c,v 1.141 1999/01/21 09:40:48 dillon Exp $
*/
/*
@@ -2225,7 +2225,7 @@ vm_map_split(entry)
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_rename(m, new_object, idx);
- /* page automatically made dirty by rename */
+ /* page automatically made dirty by rename and cache handled */
vm_page_busy(m);
}
@@ -3036,10 +3036,8 @@ vm_freeze_copyopts(object, froma, toa)
vm_page_protect(m_in, VM_PROT_NONE);
pmap_copy_page(VM_PAGE_TO_PHYS(m_in), VM_PAGE_TO_PHYS(m_out));
m_out->valid = m_in->valid;
- m_out->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m_out);
vm_page_activate(m_out);
vm_page_wakeup(m_in);
}
vm_page_wakeup(m_out);

vm_pageout.c

@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-  * $Id: vm_pageout.c,v 1.132 1999/01/24 01:06:31 dillon Exp $
+  * $Id: vm_pageout.c,v 1.133 1999/01/24 01:33:22 dillon Exp $
*/
/*
@@ -784,7 +784,7 @@ vm_pageout_scan()
if (m->dirty == 0) {
vm_page_test_dirty(m);
} else {
- m->dirty = VM_PAGE_BITS_ALL;
+ vm_page_dirty(m);
}
/*
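The vm_pageout_scan() hunk above keeps a two-way distinction: a page with no recorded dirty bits is probed with vm_page_test_dirty(), which consults the pmap modified bit, while a page already marked partially dirty is forced fully dirty through the new inline. As a hedged sketch, assuming the classic definition of that helper (this diff does not show it):

void
vm_page_test_dirty(vm_page_t m)
{
	/*
	 * If the MMU recorded a write that the vm_page_t does not yet
	 * reflect, promote the page to fully dirty via the same inline.
	 */
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		vm_page_dirty(m);
	}
}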