Cosmetic changes to the PAGE_XXX macros to make them consistent with
the other objects in vm.
This commit is contained in:
Doug Rabson 1998-09-04 08:06:57 +00:00
parent bd2bbbf2f3
commit e69763a315
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=38799
18 changed files with 167 additions and 141 deletions

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.70 1998/08/24 18:23:18 phk Exp $
* $Id: spec_vnops.c,v 1.71 1998/08/25 17:48:54 phk Exp $
*/
#include <sys/param.h>
@ -843,7 +843,7 @@ spec_getpages(ap)
} else {
vm_page_deactivate(m);
}
PAGE_WAKEUP(m);
vm_page_wakeup(m);
} else {
vm_page_free(m);
}

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_exec.c,v 1.84 1998/07/15 06:19:33 bde Exp $
* $Id: kern_exec.c,v 1.85 1998/08/24 08:39:38 dfr Exp $
*/
#include <sys/param.h>
@ -370,7 +370,7 @@ exec_map_first_page(imgp)
break;
if (ma[i]->valid)
break;
PAGE_SET_FLAG(ma[i], PG_BUSY);
vm_page_busy(ma[i]);
} else {
ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL);
if (ma[i] == NULL)
@ -393,7 +393,7 @@ exec_map_first_page(imgp)
}
vm_page_wire(ma[0]);
PAGE_WAKEUP(ma[0]);
vm_page_wakeup(ma[0]);
splx(s);
pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));

View File

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.172 1998/08/25 14:41:42 phk Exp $
* $Id: vfs_bio.c,v 1.173 1998/08/28 20:07:13 luoqi Exp $
*/
/*
@ -644,7 +644,7 @@ brelse(struct buf * bp)
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
if (m == bogus_page) {
obj = (vm_object_t) vp->v_object;
@ -831,9 +831,9 @@ vfs_vmio_release(bp)
vm_page_cache(m);
else
vm_page_deactivate(m);
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
} else if (m->hold_count == 0) {
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
}
@ -843,7 +843,7 @@ vfs_vmio_release(bp)
* act_count.
*/
m->act_count = 0;
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
}
}
}
@ -1353,7 +1353,7 @@ vfs_setdirty(struct buf *bp) {
* by users through the VM system.
*/
for (i = 0; i < bp->b_npages; i++) {
PAGE_CLEAR_FLAG(bp->b_pages[i], PG_ZERO);
vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
vm_page_test_dirty(bp->b_pages[i]);
}
@ -1785,13 +1785,13 @@ allocbuf(struct buf * bp, int size)
}
vm_page_wire(m);
PAGE_CLEAR_FLAG(m, PG_BUSY);
vm_page_flag_clear(m, PG_BUSY);
bp->b_flags &= ~B_CACHE;
} else if (m->flags & PG_BUSY) {
s = splvm();
if (m->flags & PG_BUSY) {
PAGE_SET_FLAG(m, PG_WANTED);
vm_page_flag_set(m, PG_WANTED);
tsleep(m, PVM, "pgtblk", 0);
}
splx(s);
@ -1808,7 +1808,7 @@ allocbuf(struct buf * bp, int size)
bytesinpage = newbsize - toff;
if (bp->b_flags & B_CACHE)
vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
vm_page_wire(m);
}
bp->b_pages[pageindex] = m;
@ -1990,7 +1990,7 @@ biodone(register struct buf * bp)
if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
vfs_page_set_valid(bp, foff, i, m);
}
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
/*
* when debugging new filesystems or buffer I/O methods, this
@ -2020,7 +2020,7 @@ biodone(register struct buf * bp)
#endif
panic("biodone: page busy < 0\n");
}
PAGE_BWAKEUP(m);
vm_page_io_finish(m);
vm_object_pip_subtract(obj, 1);
foff += resid;
iosize -= resid;
@ -2122,8 +2122,8 @@ vfs_unbusy_pages(struct buf * bp)
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}
vm_object_pip_subtract(obj, 1);
PAGE_CLEAR_FLAG(m, PG_ZERO);
PAGE_BWAKEUP(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_io_finish(m);
}
if (obj->paging_in_progress == 0 &&
(obj->flags & OBJ_PIPWNT)) {
@ -2244,10 +2244,10 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
vm_page_t m = bp->b_pages[i];
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
if ((bp->b_flags & B_CLUSTER) == 0) {
vm_object_pip_add(obj, 1);
PAGE_BUSY(m);
vm_page_io_start(m);
}
vm_page_protect(m, VM_PROT_NONE);
@ -2323,7 +2323,7 @@ vfs_bio_clrbuf(struct buf *bp) {
}
}
bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
PAGE_CLEAR_FLAG(bp->b_pages[i], PG_ZERO);
vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
}
bp->b_resid = 0;
} else {
@ -2361,10 +2361,10 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
}
vm_page_wire(p);
p->valid = VM_PAGE_BITS_ALL;
PAGE_CLEAR_FLAG(p, PG_ZERO);
vm_page_flag_clear(p, PG_ZERO);
pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
bp->b_pages[index] = p;
PAGE_WAKEUP(p);
vm_page_wakeup(p);
}
bp->b_npages = index;
}
@ -2391,7 +2391,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
#endif
bp->b_pages[index] = NULL;
pmap_kremove(pg);
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
vm_page_unwire(p);
vm_page_free(p);
}

View File

@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.68 1998/08/13 08:09:07 dfr Exp $
* $Id: vfs_cluster.c,v 1.69 1998/08/24 08:39:39 dfr Exp $
*/
#include "opt_debug_cluster.h"
@ -417,7 +417,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
PAGE_BUSY(m);
vm_page_io_start(m);
vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages-1] != m)) {
@ -782,7 +782,7 @@ cluster_wbuild(vp, size, start_lbn, len)
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
PAGE_BUSY(m);
vm_page_io_start(m);
vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages - 1] != m)) {

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: devfs_vnops.c,v 1.58 1998/07/30 17:40:44 bde Exp $
* $Id: devfs_vnops.c,v 1.59 1998/08/16 01:21:51 bde Exp $
*/
@ -2030,7 +2030,7 @@ devfs_getpages(struct vop_getpages_args *ap)
} else {
vm_page_deactivate(m);
}
PAGE_WAKEUP(m);
vm_page_wakeup(m);
} else {
vm_page_free(m);
}

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.70 1998/08/24 18:23:18 phk Exp $
* $Id: spec_vnops.c,v 1.71 1998/08/25 17:48:54 phk Exp $
*/
#include <sys/param.h>
@ -843,7 +843,7 @@ spec_getpages(ap)
} else {
vm_page_deactivate(m);
}
PAGE_WAKEUP(m);
vm_page_wakeup(m);
} else {
vm_page_free(m);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.58 1998/06/01 11:32:53 peter Exp $
* $Id: nfs_bio.c,v 1.59 1998/06/14 15:51:59 bde Exp $
*/
@ -173,7 +173,7 @@ nfs_getpages(ap)
vm_page_activate(m);
else
vm_page_deactivate(m);
PAGE_WAKEUP(m);
vm_page_wakeup(m);
} else {
vnode_pager_freepage(m);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.58 1998/06/01 11:32:53 peter Exp $
* $Id: nfs_bio.c,v 1.59 1998/06/14 15:51:59 bde Exp $
*/
@ -173,7 +173,7 @@ nfs_getpages(ap)
vm_page_activate(m);
else
vm_page_deactivate(m);
PAGE_WAKEUP(m);
vm_page_wakeup(m);
} else {
vnode_pager_freepage(m);
}

View File

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.99 1998/08/13 08:05:13 dfr Exp $
* $Id: swap_pager.c,v 1.100 1998/08/24 08:39:37 dfr Exp $
*/
/*
@ -1104,7 +1104,7 @@ swap_pager_getpages(object, m, count, reqpage)
if (rv == VM_PAGER_OK) {
for (i = 0; i < count; i++) {
m[i]->dirty = 0;
PAGE_CLEAR_FLAG(m[i], PG_ZERO);
vm_page_flag_clear(m[i], PG_ZERO);
if (i != reqpage) {
/*
* whether or not to leave the page
@ -1123,7 +1123,7 @@ swap_pager_getpages(object, m, count, reqpage)
* is ok to use
*/
m[i]->valid = VM_PAGE_BITS_ALL;
PAGE_WAKEUP(m[i]);
vm_page_wakeup(m[i]);
}
}
@ -1587,7 +1587,7 @@ swap_pager_finish(spc)
printf("swap_pager_finish: I/O error, clean of page %lx failed\n",
(u_long) VM_PAGE_TO_PHYS(ma[i]));
ma[i]->dirty = VM_PAGE_BITS_ALL;
PAGE_BWAKEUP(ma[i]);
vm_page_io_finish(ma[i]);
}
vm_object_pip_subtract(object, spc->spc_count);
@ -1658,7 +1658,7 @@ swap_pager_iodone(bp)
/*
* we wakeup any processes that are waiting on these pages.
*/
PAGE_BWAKEUP(ma[i]);
vm_page_io_finish(ma[i]);
}
}

View File

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.86 1998/08/06 08:33:19 dfr Exp $
* $Id: vm_fault.c,v 1.87 1998/08/24 08:39:37 dfr Exp $
*/
/*
@ -117,7 +117,7 @@ struct faultstate {
static void
release_page(struct faultstate *fs)
{
PAGE_WAKEUP(fs->m);
vm_page_wakeup(fs->m);
vm_page_deactivate(fs->m);
fs->m = NULL;
}
@ -291,7 +291,7 @@ RetryFault:;
if ((fs.m->flags & PG_BUSY) ||
(fs.m->busy &&
(fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
PAGE_SET_FLAG(fs.m, PG_WANTED | PG_REFERENCED);
vm_page_flag_set(fs.m, PG_WANTED | PG_REFERENCED);
cnt.v_intrans++;
tsleep(fs.m, PSWP, "vmpfw", 0);
}
@ -314,7 +314,7 @@ RetryFault:;
goto RetryFault;
}
PAGE_SET_FLAG(fs.m, PG_BUSY);
vm_page_busy(fs.m);
if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
fs.m->object != kernel_object && fs.m->object != kmem_object) {
goto readrest;
@ -607,7 +607,7 @@ RetryFault:;
vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
fs.first_m = fs.m;
fs.first_m->dirty = VM_PAGE_BITS_ALL;
PAGE_SET_FLAG(fs.first_m, PG_BUSY);
vm_page_busy(fs.first_m);
fs.m = NULL;
cnt.v_cow_optim++;
} else {
@ -705,7 +705,7 @@ RetryFault:;
*/
if (prot & VM_PROT_WRITE) {
PAGE_SET_FLAG(fs.m, PG_WRITEABLE);
vm_page_flag_set(fs.m, PG_WRITEABLE);
vm_object_set_flag(fs.m->object,
OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
/*
@ -720,14 +720,14 @@ RetryFault:;
unlock_things(&fs);
fs.m->valid = VM_PAGE_BITS_ALL;
PAGE_CLEAR_FLAG(fs.m, PG_ZERO);
vm_page_flag_clear(fs.m, PG_ZERO);
pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
pmap_prefault(fs.map->pmap, vaddr, fs.entry);
}
PAGE_SET_FLAG(fs.m, PG_MAPPED|PG_REFERENCED);
vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
if (fault_flags & VM_FAULT_HOLD)
vm_page_hold(fs.m);
@ -756,7 +756,7 @@ RetryFault:;
* Unlock everything, and return
*/
PAGE_WAKEUP(fs.m);
vm_page_wakeup(fs.m);
vm_object_deallocate(fs.first_object);
return (KERN_SUCCESS);
@ -967,16 +967,16 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* Enter it in the pmap...
*/
PAGE_CLEAR_FLAG(dst_m, PG_ZERO);
vm_page_flag_clear(dst_m, PG_ZERO);
pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
prot, FALSE);
PAGE_SET_FLAG(dst_m, PG_WRITEABLE|PG_MAPPED);
vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);
/*
* Mark it no longer busy, and put it on the active list.
*/
vm_page_activate(dst_m);
PAGE_WAKEUP(dst_m);
vm_page_wakeup(dst_m);
}
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.48 1998/06/21 14:53:41 bde Exp $
* $Id: vm_kern.c,v 1.49 1998/08/24 08:39:37 dfr Exp $
*/
/*
@ -181,7 +181,7 @@ kmem_alloc(map, size)
VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((mem->flags & PG_ZERO) == 0)
vm_page_zero_fill(mem);
PAGE_CLEAR_FLAG(mem, (PG_BUSY | PG_ZERO));
vm_page_flag_clear(mem, (PG_BUSY | PG_ZERO));
mem->valid = VM_PAGE_BITS_ALL;
}
@ -332,7 +332,7 @@ kmem_malloc(map, size, waitflag)
vm_map_unlock(map);
return (0);
}
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
@ -358,10 +358,10 @@ kmem_malloc(map, size, waitflag)
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
vm_page_wire(m);
PAGE_WAKEUP(m);
vm_page_wakeup(m);
pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
VM_PROT_ALL, 1);
PAGE_SET_FLAG(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
}
vm_map_unlock(map);

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.133 1998/08/06 08:33:19 dfr Exp $
* $Id: vm_map.c,v 1.134 1998/08/24 08:39:37 dfr Exp $
*/
/*
@ -1990,16 +1990,16 @@ vm_map_split(entry)
if (m == NULL)
continue;
if (m->flags & PG_BUSY) {
PAGE_SET_FLAG(m, PG_WANTED);
vm_page_flag_set(m, PG_WANTED);
tsleep(m, PVM, "spltwt", 0);
goto retry;
}
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_rename(m, new_object, idx);
m->dirty = VM_PAGE_BITS_ALL;
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
}
if (orig_object->type == OBJT_SWAP) {
@ -2018,7 +2018,7 @@ vm_map_split(entry)
for (idx = 0; idx < size; idx++) {
m = vm_page_lookup(new_object, idx);
if (m) {
PAGE_WAKEUP(m);
vm_page_wakeup(m);
}
}
@ -2808,9 +2808,9 @@ vm_freeze_copyopts(object, froma, toa)
vm_page_activate(m_out);
PAGE_WAKEUP(m_in);
vm_page_wakeup(m_in);
}
PAGE_WAKEUP(m_out);
vm_page_wakeup(m_out);
}
object->shadow_count--;

View File

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.81 1998/07/15 02:32:35 bde Exp $
* $Id: vm_mmap.c,v 1.82 1998/08/24 08:39:37 dfr Exp $
*/
/*
@ -747,7 +747,7 @@ mincore(p, uap)
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
PAGE_SET_FLAG(m, PG_REFERENCED);
vm_page_flag_set(m, PG_REFERENCED);
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.126 1998/08/06 08:33:19 dfr Exp $
* $Id: vm_object.c,v 1.127 1998/08/24 08:39:37 dfr Exp $
*/
/*
@ -461,7 +461,7 @@ vm_object_terminate(object)
if (p->busy || (p->flags & PG_BUSY))
printf("vm_object_terminate: freeing busy page\n");
#endif
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
vm_page_free(p);
cnt.v_pfree++;
}
@ -550,7 +550,7 @@ vm_object_page_clean(object, start, end, flags)
}
for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
PAGE_SET_FLAG(p, PG_CLEANCHK);
vm_page_flag_set(p, PG_CLEANCHK);
vm_page_protect(p, VM_PROT_READ);
}
@ -569,19 +569,19 @@ vm_object_page_clean(object, start, end, flags)
(pi < tstart) || (pi >= tend) ||
(p->valid == 0) ||
((p->queue - p->pc) == PQ_CACHE)) {
PAGE_CLEAR_FLAG(p, PG_CLEANCHK);
vm_page_flag_clear(p, PG_CLEANCHK);
continue;
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) == 0) {
PAGE_CLEAR_FLAG(p, PG_CLEANCHK);
vm_page_flag_clear(p, PG_CLEANCHK);
continue;
}
s = splvm();
while ((p->flags & PG_BUSY) || p->busy) {
PAGE_SET_FLAG(p, PG_WANTED | PG_REFERENCED);
vm_page_flag_set(p, PG_WANTED | PG_REFERENCED);
tsleep(p, PVM, "vpcwai", 0);
if (object->generation != curgeneration) {
splx(s);
@ -597,12 +597,12 @@ vm_object_page_clean(object, start, end, flags)
(tp->busy != 0))
break;
if((tp->queue - tp->pc) == PQ_CACHE) {
PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
vm_page_flag_clear(tp, PG_CLEANCHK);
break;
}
vm_page_test_dirty(tp);
if ((tp->dirty & tp->valid) == 0) {
PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
vm_page_flag_clear(tp, PG_CLEANCHK);
break;
}
maf[ i - 1 ] = tp;
@ -622,12 +622,12 @@ vm_object_page_clean(object, start, end, flags)
(tp->busy != 0))
break;
if((tp->queue - tp->pc) == PQ_CACHE) {
PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
vm_page_flag_clear(tp, PG_CLEANCHK);
break;
}
vm_page_test_dirty(tp);
if ((tp->dirty & tp->valid) == 0) {
PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
vm_page_flag_clear(tp, PG_CLEANCHK);
break;
}
mab[ i - 1 ] = tp;
@ -641,14 +641,14 @@ vm_object_page_clean(object, start, end, flags)
for(i=0;i<maxb;i++) {
int index = (maxb - i) - 1;
ma[index] = mab[i];
PAGE_CLEAR_FLAG(ma[index], PG_CLEANCHK);
vm_page_flag_clear(ma[index], PG_CLEANCHK);
}
PAGE_CLEAR_FLAG(p, PG_CLEANCHK);
vm_page_flag_clear(p, PG_CLEANCHK);
ma[maxb] = p;
for(i=0;i<maxf;i++) {
int index = (maxb + i) + 1;
ma[index] = maf[i];
PAGE_CLEAR_FLAG(ma[index], PG_CLEANCHK);
vm_page_flag_clear(ma[index], PG_CLEANCHK);
}
runlen = maxb + maxf + 1;
@ -657,7 +657,7 @@ vm_object_page_clean(object, start, end, flags)
for (i = 0; i<runlen; i++) {
if (ma[i]->valid & ma[i]->dirty) {
vm_page_protect(ma[i], VM_PROT_READ);
PAGE_SET_FLAG(ma[i], PG_CLEANCHK);
vm_page_flag_set(ma[i], PG_CLEANCHK);
}
}
if (object->generation != curgeneration)
@ -941,7 +941,7 @@ vm_object_qcollapse(object)
p = next;
continue;
}
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
new_pindex = p->pindex - backing_offset_index;
if (p->pindex < backing_offset_index ||
@ -1066,7 +1066,7 @@ vm_object_collapse(object)
while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {
new_pindex = p->pindex - backing_offset_index;
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
/*
* If the parent has a page here, or if this
@ -1216,7 +1216,7 @@ vm_object_collapse(object)
p = TAILQ_NEXT(p, listq)) {
new_pindex = p->pindex - backing_offset_index;
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
/*
* If the parent has a page here, or if this
@ -1232,24 +1232,24 @@ vm_object_collapse(object)
pp = vm_page_lookup(object, new_pindex);
if ((pp == NULL) || (pp->flags & PG_BUSY) || pp->busy) {
PAGE_WAKEUP(p);
vm_page_wakeup(p);
return;
}
PAGE_SET_FLAG(pp, PG_BUSY);
vm_page_busy(pp);
if ((pp->valid == 0) &&
!vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
/*
* Page still needed. Can't go any
* further.
*/
PAGE_WAKEUP(pp);
PAGE_WAKEUP(p);
vm_page_wakeup(pp);
vm_page_wakeup(p);
return;
}
PAGE_WAKEUP(pp);
vm_page_wakeup(pp);
}
PAGE_WAKEUP(p);
vm_page_wakeup(p);
}
/*
@ -1341,7 +1341,7 @@ vm_object_page_remove(object, start, end, clean_only)
continue;
}
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
vm_page_protect(p, VM_PROT_NONE);
vm_page_free(p);
}
@ -1374,7 +1374,7 @@ vm_object_page_remove(object, start, end, clean_only)
}
}
PAGE_SET_FLAG(p, PG_BUSY);
vm_page_busy(p);
vm_page_protect(p, VM_PROT_NONE);
vm_page_free(p);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.105 1998/07/26 18:15:20 dfr Exp $
* $Id: vm_page.c,v 1.106 1998/08/24 08:39:38 dfr Exp $
*/
/*
@ -403,7 +403,7 @@ vm_page_insert(m, object, pindex)
*/
TAILQ_INSERT_TAIL(&object->memq, m, listq);
PAGE_SET_FLAG(m, PG_TABLED);
vm_page_flag_set(m, PG_TABLED);
m->object->page_hint = m;
m->object->generation++;
@ -446,9 +446,9 @@ vm_page_remove(m)
}
#endif
PAGE_CLEAR_FLAG(m, PG_BUSY);
vm_page_flag_clear(m, PG_BUSY);
if (m->flags & PG_WANTED) {
PAGE_CLEAR_FLAG(m, PG_WANTED);
vm_page_flag_clear(m, PG_WANTED);
wakeup(m);
}
@ -484,7 +484,7 @@ vm_page_remove(m)
object->generation++;
m->object = NULL;
PAGE_CLEAR_FLAG(m, PG_TABLED);
vm_page_flag_clear(m, PG_TABLED);
}
/*
@ -940,7 +940,7 @@ vm_page_alloc(object, pindex, page_req)
m->flags = PG_ZERO | PG_BUSY;
} else if (qtype == PQ_CACHE) {
oldobject = m->object;
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@ -1011,7 +1011,7 @@ vm_page_sleep(vm_page_t m, char *msg, char *busy) {
int s;
s = splvm();
if ((busy && *busy) || (m->flags & PG_BUSY)) {
PAGE_SET_FLAG(m, PG_WANTED);
vm_page_flag_set(m, PG_WANTED);
tsleep(m, PVM, msg, 0);
slept = 1;
}
@ -1247,7 +1247,7 @@ vm_page_wire(m)
}
(*vm_page_queues[PQ_NONE].lcnt)++;
m->wire_count++;
PAGE_SET_FLAG(m, PG_MAPPED);
vm_page_flag_set(m, PG_MAPPED);
}
/*
@ -1384,7 +1384,7 @@ vm_page_grab(object, pindex, allocflags)
s = splvm();
while ((object->generation == generation) &&
(m->busy || (m->flags & PG_BUSY))) {
PAGE_SET_FLAG(m, PG_WANTED | PG_REFERENCED);
vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
tsleep(m, PVM, "pgrbwt", 0);
if ((allocflags & VM_ALLOC_RETRY) == 0) {
splx(s);
@ -1394,7 +1394,7 @@ vm_page_grab(object, pindex, allocflags)
splx(s);
goto retrylookup;
} else {
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
return m;
}
}
@ -1633,7 +1633,7 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
pqtype = m->queue - m->pc;
if (pqtype == PQ_CACHE) {
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
vm_page_free(m);
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.44 1998/08/24 08:39:38 dfr Exp $
* $Id: vm_page.h,v 1.45 1998/09/01 17:12:19 wollman Exp $
*/
/*
@ -281,31 +281,57 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
* Functions implemented as macros
*/
#define PAGE_SET_FLAG(m, bits) atomic_set_short(&(m)->flags, bits)
#define PAGE_CLEAR_FLAG(m, bits) atomic_clear_short(&(m)->flags, bits)
#define PAGE_ASSERT_WAIT(m, interruptible) { \
PAGE_SET_FLAG(m, PG_WANTED); \
assert_wait((int) (m), (interruptible)); \
/*
 * Atomically set the given flag bits in the page's flags field.
 * Inline replacement for the former PAGE_SET_FLAG() macro.
 */
static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
atomic_set_short(&(m)->flags, bits);
}
#define PAGE_WAKEUP(m) { \
PAGE_CLEAR_FLAG(m, PG_BUSY); \
if ((m)->flags & PG_WANTED) { \
PAGE_CLEAR_FLAG(m, PG_WANTED); \
wakeup((m)); \
} \
/*
 * Atomically clear the given flag bits in the page's flags field.
 * Inline replacement for the former PAGE_CLEAR_FLAG() macro.
 */
static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
atomic_clear_short(&(m)->flags, bits);
}
#define PAGE_BUSY(m) atomic_add_char(&(m)->busy, 1)
#if 0
/*
 * Mark the page as wanted and register a wait on it.
 * Compiled out (#if 0) — kept only as the inline form of the old
 * PAGE_ASSERT_WAIT() macro; no caller in this commit uses it.
 */
static __inline void
vm_page_assert_wait(vm_page_t m, int interruptible)
{
vm_page_flag_set(m, PG_WANTED);
assert_wait((int) m, interruptible);
}
#endif
#define PAGE_BWAKEUP(m) { \
atomic_subtract_char(&(m)->busy, 1); \
if (((m)->flags & PG_WANTED) && (m)->busy == 0) { \
PAGE_CLEAR_FLAG(m, PG_WANTED); \
wakeup((m)); \
} \
/*
 * Set PG_BUSY on the page, claiming it for exclusive use.
 * Inline replacement for the former PAGE_SET_FLAG(m, PG_BUSY) idiom.
 */
static __inline void
vm_page_busy(vm_page_t m)
{
vm_page_flag_set(m, PG_BUSY);
}
/*
 * Clear PG_BUSY and, if anyone was waiting for the page (PG_WANTED),
 * clear that flag and wake the sleepers.  Inline replacement for the
 * former PAGE_WAKEUP() macro.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
vm_page_flag_clear(m, PG_BUSY);
if (m->flags & PG_WANTED) {
vm_page_flag_clear(m, PG_WANTED);
wakeup(m);
}
}
/*
 * Atomically increment the page's busy (I/O in progress) count.
 * Inline replacement for the former PAGE_BUSY() macro.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
atomic_add_char(&(m)->busy, 1);
}
/*
 * Atomically decrement the page's busy (I/O in progress) count; once it
 * reaches zero, wake any waiters that set PG_WANTED.  Inline replacement
 * for the former PAGE_BWAKEUP() macro.
 */
static __inline void
vm_page_io_finish(vm_page_t m)
{
atomic_subtract_char(&m->busy, 1);
if ((m->flags & PG_WANTED) && m->busy == 0) {
vm_page_flag_clear(m, PG_WANTED);
wakeup(m);
}
}
@ -380,11 +406,11 @@ vm_page_protect(vm_page_t mem, int prot)
if (prot == VM_PROT_NONE) {
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
PAGE_CLEAR_FLAG(mem, PG_WRITEABLE|PG_MAPPED);
vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
PAGE_CLEAR_FLAG(mem, PG_WRITEABLE);
vm_page_flag_clear(mem, PG_WRITEABLE);
}
}

View File

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.124 1998/08/06 08:33:19 dfr Exp $
* $Id: vm_pageout.c,v 1.125 1998/08/24 08:39:38 dfr Exp $
*/
/*
@ -362,7 +362,7 @@ vm_pageout_flush(mc, count, flags)
int i;
for (i = 0; i < count; i++) {
PAGE_BUSY(mc[i]);
vm_page_io_start(mc[i]);
vm_page_protect(mc[i], VM_PROT_READ);
}
@ -413,7 +413,7 @@ vm_pageout_flush(mc, count, flags)
*/
if (pageout_status[i] != VM_PAGER_PEND) {
vm_object_pip_wakeup(object);
PAGE_BWAKEUP(mt);
vm_page_io_finish(mt);
}
}
return numpagedout;
@ -476,7 +476,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
if (actcount) {
PAGE_SET_FLAG(p, PG_REFERENCED);
vm_page_flag_set(p, PG_REFERENCED);
} else if (p->flags & PG_REFERENCED) {
actcount = 1;
}
@ -485,7 +485,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
(p->flags & PG_REFERENCED)) {
vm_page_activate(p);
p->act_count += actcount;
PAGE_CLEAR_FLAG(p, PG_REFERENCED);
vm_page_flag_clear(p, PG_REFERENCED);
} else if (p->queue == PQ_ACTIVE) {
if ((p->flags & PG_REFERENCED) == 0) {
p->act_count -= min(p->act_count, ACT_DECLINE);
@ -500,7 +500,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
}
} else {
vm_page_activate(p);
PAGE_CLEAR_FLAG(p, PG_REFERENCED);
vm_page_flag_clear(p, PG_REFERENCED);
if (p->act_count < (ACT_MAX - ACT_ADVANCE))
p->act_count += ACT_ADVANCE;
s = splvm();
@ -599,7 +599,7 @@ vm_pageout_page_free(vm_page_t m) {
vbusy(vp);
}
PAGE_SET_FLAG(m, PG_BUSY);
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
vm_object_deallocate(object);
@ -683,7 +683,7 @@ vm_pageout_scan()
* If the object is not being used, we ignore previous references.
*/
if (m->object->ref_count == 0) {
PAGE_CLEAR_FLAG(m, PG_REFERENCED);
vm_page_flag_clear(m, PG_REFERENCED);
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
/*
@ -708,7 +708,7 @@ vm_pageout_scan()
* inactive queue again.
*/
if ((m->flags & PG_REFERENCED) != 0) {
PAGE_CLEAR_FLAG(m, PG_REFERENCED);
vm_page_flag_clear(m, PG_REFERENCED);
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
m->act_count += (actcount + ACT_ADVANCE + 1);
@ -906,7 +906,7 @@ vm_pageout_scan()
/*
* Since we have "tested" this bit, we need to clear it now.
*/
PAGE_CLEAR_FLAG(m, PG_REFERENCED);
vm_page_flag_clear(m, PG_REFERENCED);
/*
* Only if an object is currently being used, do we use the
@ -1095,7 +1095,7 @@ vm_pageout_page_stats()
actcount = 0;
if (m->flags & PG_REFERENCED) {
PAGE_CLEAR_FLAG(m, PG_REFERENCED);
vm_page_flag_clear(m, PG_REFERENCED);
actcount += 1;
}

View File

@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.95 1998/08/24 08:39:38 dfr Exp $
* $Id: vnode_pager.c,v 1.96 1998/08/25 13:47:37 luoqi Exp $
*/
/*
@ -440,7 +440,7 @@ vnode_pager_input_smlfs(object, m)
}
vm_pager_unmap_page(kva);
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
if (error) {
return VM_PAGER_ERROR;
}
@ -504,7 +504,7 @@ vnode_pager_input_old(object, m)
}
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->dirty = 0;
PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_flag_clear(m, PG_ZERO);
return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
@ -771,7 +771,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_set_validclean(mt, 0, nvalid);
}
PAGE_CLEAR_FLAG(mt, PG_ZERO);
vm_page_flag_clear(mt, PG_ZERO);
if (i != reqpage) {
/*
@ -791,7 +791,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_activate(mt);
else
vm_page_deactivate(mt);
PAGE_WAKEUP(mt);
vm_page_wakeup(mt);
} else {
vnode_pager_freepage(mt);
}