Add the (inline) function vm_page_undirty for clearing the dirty bitmask
of a vm_page.

Use it.

Submitted by:	dillon
Alan Cox 1999-08-17 04:02:34 +00:00
parent 0e568d4b12
commit 2c28a10540
9 changed files with 35 additions and 23 deletions
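
Before the per-file diffs, a minimal self-contained sketch of what the commit does: vm_page.h gains a one-line inline, and every other hunk replaces a direct write of the dirty bitmask with a call to it. The struct below is a simplified stand-in for the kernel's struct vm_page (the real one lives in sys/vm/vm_page.h), and main() exists only so the sketch compiles and runs outside the kernel.

#include <stdio.h>

#define VM_PAGE_BITS_ALL 0xff	/* one bit per DEV_BSIZE chunk of the page */

/* Simplified stand-in for the kernel's struct vm_page. */
struct vm_page {
	unsigned char valid;
	unsigned char dirty;
};
typedef struct vm_page *vm_page_t;

/* The new helper: clear the dirty bitmask of a vm_page. */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

int
main(void)
{
	struct vm_page pg;

	pg.valid = 0;
	pg.dirty = VM_PAGE_BITS_ALL;

	/* Old call sites wrote "m->dirty = 0;" directly; now they do: */
	vm_page_undirty(&pg);
	pg.valid = VM_PAGE_BITS_ALL;

	printf("dirty=%#x valid=%#x\n", pg.dirty, pg.valid);
	return (0);
}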

spec_vnops.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
- * $Id: spec_vnops.c,v 1.95 1999/08/13 16:29:21 phk Exp $
+ * $Id: spec_vnops.c,v 1.96 1999/08/14 11:40:45 phk Exp $
*/
#include <sys/param.h>
@@ -849,7 +849,7 @@ spec_getpages(ap)
if (nextoff <= nread) {
m->valid = VM_PAGE_BITS_ALL;
- m->dirty = 0;
+ vm_page_undirty(m);
} else if (toff < nread) {
/*
* Since this is a VM request, we have to supply the
@@ -859,7 +859,7 @@ spec_getpages(ap)
vm_page_set_validclean(m, 0, nread - toff);
} else {
m->valid = 0;
- m->dirty = 0;
+ vm_page_undirty(m);
}
if (i != ap->a_reqpage) {

devfs_vnops.c

@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: devfs_vnops.c,v 1.75 1999/06/26 02:46:17 mckusick Exp $
+ * $Id: devfs_vnops.c,v 1.76 1999/08/08 18:42:50 phk Exp $
*/
@@ -2004,13 +2004,13 @@ devfs_getpages(struct vop_getpages_args *ap)
if (nextoff <= nread) {
m->valid = VM_PAGE_BITS_ALL;
- m->dirty = 0;
+ vm_page_undirty(m);
} else if (toff < nread) {
int nvalid = ((nread + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
} else {
m->valid = 0;
- m->dirty = 0;
+ vm_page_undirty(m);
}
if (i != ap->a_reqpage) {

spec_vnops.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
- * $Id: spec_vnops.c,v 1.95 1999/08/13 16:29:21 phk Exp $
+ * $Id: spec_vnops.c,v 1.96 1999/08/14 11:40:45 phk Exp $
*/
#include <sys/param.h>
@@ -849,7 +849,7 @@ spec_getpages(ap)
if (nextoff <= nread) {
m->valid = VM_PAGE_BITS_ALL;
- m->dirty = 0;
+ vm_page_undirty(m);
} else if (toff < nread) {
/*
* Since this is a VM request, we have to supply the
@@ -859,7 +859,7 @@ spec_getpages(ap)
vm_page_set_validclean(m, 0, nread - toff);
} else {
m->valid = 0;
- m->dirty = 0;
+ vm_page_undirty(m);
}
if (i != ap->a_reqpage) {

nfs_bio.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
- * $Id: nfs_bio.c,v 1.74 1999/06/26 02:46:29 mckusick Exp $
+ * $Id: nfs_bio.c,v 1.75 1999/08/12 18:04:39 dt Exp $
*/
@@ -185,7 +185,7 @@ nfs_getpages(ap)
* Read operation filled an entire page
*/
m->valid = VM_PAGE_BITS_ALL;
- m->dirty = 0;
+ vm_page_undirty(m);
} else if (size > toff) {
/*
* Read operation filled a partial page.
@@ -313,7 +313,7 @@ nfs_putpages(ap)
int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
- pages[i]->dirty = 0;
+ vm_page_undirty(pages[i]);
}
if (must_commit)
nfs_clearcommit(vp->v_mount);

nfs_bio.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
- * $Id: nfs_bio.c,v 1.74 1999/06/26 02:46:29 mckusick Exp $
+ * $Id: nfs_bio.c,v 1.75 1999/08/12 18:04:39 dt Exp $
*/
@@ -185,7 +185,7 @@ nfs_getpages(ap)
* Read operation filled an entire page
*/
m->valid = VM_PAGE_BITS_ALL;
- m->dirty = 0;
+ vm_page_undirty(m);
} else if (size > toff) {
/*
* Read operation filled a partial page.
@@ -313,7 +313,7 @@ nfs_putpages(ap)
int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
- pages[i]->dirty = 0;
+ vm_page_undirty(pages[i]);
}
if (must_commit)
nfs_clearcommit(vp->v_mount);

ufs_readwrite.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
- * $Id: ufs_readwrite.c,v 1.61 1999/07/25 02:07:16 bde Exp $
+ * $Id: ufs_readwrite.c,v 1.62 1999/08/01 06:04:52 alc Exp $
*/
#define BLKSIZE(a, b, c) blksize(a, b, c)
@@ -669,7 +669,7 @@ ffs_getpages(ap)
if (reqblkno == -1) {
if ((mreq->flags & PG_ZERO) == 0)
vm_page_zero_fill(mreq);
- mreq->dirty = 0;
+ vm_page_undirty(mreq);
mreq->valid = VM_PAGE_BITS_ALL;
return VM_PAGER_OK;
} else {

swap_pager.c

@@ -64,7 +64,7 @@
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
*
- * $Id: swap_pager.c,v 1.120 1999/06/27 22:08:38 peter Exp $
+ * $Id: swap_pager.c,v 1.121 1999/07/16 05:11:35 alc Exp $
*/
#include <sys/param.h>
@@ -1631,7 +1631,7 @@ swp_pager_async_iodone(bp)
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->valid = VM_PAGE_BITS_ALL;
- m->dirty = 0;
+ vm_page_undirty(m);
vm_page_flag_clear(m, PG_ZERO);
/*

vm_page.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.67 1999/08/15 01:16:53 mjacob Exp $
+ * $Id: vm_page.h,v 1.68 1999/08/15 05:36:43 alc Exp $
*/
/*
@@ -572,6 +572,18 @@ vm_page_dirty(vm_page_t m)
m->dirty = VM_PAGE_BITS_ALL;
}
+ /*
+ * vm_page_undirty:
+ *
+ *	Set page to not be dirty. Note: does not clear pmap modify bits
+ */
+ static __inline void
+ vm_page_undirty(vm_page_t m)
+ {
+ 	m->dirty = 0;
+ }
static __inline vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
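
The note in the new comment matters: vm_page_undirty() clears only the machine-independent dirty bitmask in struct vm_page; any modify bit latched by the MMU in the pmap layer is left alone. That is why the swap_pager hunk above and the vnode_pager hunks below keep their explicit pmap_clear_modify() calls next to the new helper. A toy userland model of that pairing, with pmap_clear_modify() reduced to a stub over a fake modified flag, might look like this:

#include <stdbool.h>
#include <stdio.h>

#define VM_PAGE_BITS_ALL 0xff

/*
 * Toy page: "dirty" is the machine-independent bitmask, "pmap_modified"
 * stands in for the modify bit the MMU would keep in the pmap layer.
 */
struct toy_page {
	unsigned char valid;
	unsigned char dirty;
	bool pmap_modified;
};

static __inline void
vm_page_undirty(struct toy_page *m)
{
	m->dirty = 0;		/* clears the MI bitmask only */
}

/* Stub standing in for the real pmap_clear_modify(). */
static void
pmap_clear_modify(struct toy_page *m)
{
	m->pmap_modified = false;
}

int
main(void)
{
	struct toy_page pg = { 0, VM_PAGE_BITS_ALL, true };

	/* Pattern from the swp_pager_async_iodone() and vnode_pager hunks
	 * after a read has (re)filled the page: clear both layers. */
	pmap_clear_modify(&pg);
	pg.valid = VM_PAGE_BITS_ALL;
	vm_page_undirty(&pg);

	printf("dirty=%#x pmap_modified=%d\n", pg.dirty, pg.pmap_modified);
	return (0);
}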

vnode_pager.c

@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.111 1999/06/26 02:46:50 mckusick Exp $
+ * $Id: vnode_pager.c,v 1.112 1999/07/01 19:53:43 peter Exp $
*/
/*
@@ -511,7 +511,7 @@ vnode_pager_input_old(object, m)
vm_pager_unmap_page(kva);
}
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
- m->dirty = 0;
+ vm_page_undirty(m);
vm_page_flag_clear(m, PG_ZERO);
if (!error)
m->valid = VM_PAGE_BITS_ALL;
@@ -773,7 +773,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* Read filled up entire page.
*/
mt->valid = VM_PAGE_BITS_ALL;
- mt->dirty = 0;
+ vm_page_undirty(mt); /* should be an assert? XXX */
pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
} else {
/*