Catch a case spotted by Tor where mmapped files could leave garbage in the
unallocated parts of the last page when the file ended on a frag
but not a page boundary.
Delimited by tags PRE_MATT_MMAP_EOF and POST_MATT_MMAP_EOF,
in files alpha/alpha/pmap.c i386/i386/pmap.c nfs/nfs_bio.c vm/pmap.h
    vm/vm_page.c vm/vm_page.h vm/vnode_pager.c miscfs/specfs/spec_vnops.c
    ufs/ufs/ufs_readwrite.c kern/vfs_bio.c

Submitted by: Matt Dillon <dillon@freebsd.org>
Reviewed by: Alan Cox <alc@freebsd.org>
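
To make the failure mode concrete, here is a minimal userland sketch (not part
of the commit; it assumes DEV_BSIZE = 512 and PAGE_SIZE = 4096) of the
arithmetic the fix depends on. When a file ends on a frag but not a page
boundary, only part of the last page is backed by file data; the tail of the
last partially-filled DEV_BSIZE chunk and every chunk after it hold whatever
was in the page before, and that is the region the new
pmap_zero_page_area()/vm_page_zero_invalid() path clears.

        /* Hypothetical demo, not from the commit. */
        #include <sys/types.h>
        #include <stdio.h>

        #define DEV_BSIZE       512
        #define PAGE_SIZE       4096

        int
        main(void)
        {
                off_t filesize = 13 * 1024 + 100;   /* ends mid-frag, mid-page */
                int valid = filesize % PAGE_SIZE;   /* file bytes in last page */
                /* round up to the DEV_BSIZE granularity of the valid bitmap */
                int rounded = (valid + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

                printf("bytes 0-%d: file data\n", valid - 1);
                printf("bytes %d-%d: sub-DEV_BSIZE tail, must be zeroed\n",
                    valid, rounded - 1);
                printf("bytes %d-%d: whole invalid chunks, must be zeroed\n",
                    rounded, PAGE_SIZE - 1);
                return (0);
        }

For a 13412-byte file this reports file data in bytes 0-1123, a sub-DEV_BSIZE
tail in 1124-1535, and whole invalid chunks in 1536-4095; before this commit
those last two ranges could be exposed to user code through the mapping.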
Committed by Julian Elischer, 1999-04-05 19:38:30 +00:00
commit 8d17e69460, parent 7b9e192e28
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=45347
13 changed files with 440 additions and 86 deletions

View File

@ -43,7 +43,7 @@
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
* with some ideas from NetBSD's alpha pmap
* $Id: pmap.c,v 1.14 1999/01/24 06:04:50 dillon Exp $
* $Id: pmap.c,v 1.15 1999/02/19 14:25:32 luoqi Exp $
*/
/*
@ -2523,11 +2523,11 @@ pmap_kernel()
}
/*
* pmap_zero_page zeros the specified (machine independent)
* page by mapping the page into virtual memory and using
* bzero to clear its contents, one machine dependent page
* at a time.
* pmap_zero_page zeros the specified hardware page by
* mapping it into virtual memory and using bzero to clear
* its contents.
*/
void
pmap_zero_page(vm_offset_t pa)
{
@ -2535,6 +2535,22 @@ pmap_zero_page(vm_offset_t pa)
bzero((caddr_t) va, PAGE_SIZE);
}
/*
* pmap_zero_page_area zeros the specified hardware page by
* mapping it into virtual memory and using bzero to clear
* its contents.
*
* off and size must reside within a single page.
*/
void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
vm_offset_t va = ALPHA_PHYS_TO_K0SEG(pa);
bzero((char *)(caddr_t)va + off, size);
}
/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.225 1999/03/13 07:31:29 alc Exp $
* $Id: pmap.c,v 1.226 1999/04/02 17:59:38 alc Exp $
*/
/*
@ -2818,10 +2818,8 @@ pmap_kernel()
}
/*
* pmap_zero_page zeros the specified (machine independent)
* page by mapping the page into virtual memory and using
* bzero to clear its contents, one machine dependent page
* at a time.
* pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*/
void
pmap_zero_page(phys)
@ -2867,6 +2865,58 @@ pmap_zero_page(phys)
#endif
}
/*
* pmap_zero_page_area zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*
* off and size may not cover an area beyond a single hardware page.
*/
void
pmap_zero_page_area(phys, off, size)
vm_offset_t phys;
int off;
int size;
{
#ifdef SMP
#if !defined(MAX_PERF)
if (*(int *) prv_CMAP3)
panic("pmap_zero_page: prv_CMAP3 busy");
#endif
*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(&prv_CPAGE3);
#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
i686_pagezero(&prv_CPAGE3);
else
#endif
bzero((char *)&prv_CPAGE3 + off, size);
*(int *) prv_CMAP3 = 0;
#else
#if !defined(MAX_PERF)
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP2 busy");
#endif
*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
if (cpu_class == CPUCLASS_386) {
invltlb();
} else {
invlpg((u_int)CADDR2);
}
#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
i686_pagezero(CADDR2);
else
#endif
bzero((char *)CADDR2 + off, size);
*(int *) CMAP2 = 0;
#endif
}
/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.80 1999/01/27 22:42:07 dillon Exp $
* $Id: spec_vnops.c,v 1.81 1999/02/25 05:22:30 dillon Exp $
*/
#include <sys/param.h>
@ -866,8 +866,12 @@ spec_getpages(ap)
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else if (toff < nread) {
int nvalid = ((nread + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
/*
* Since this is a VM request, we have to supply the
* unaligned offset to allow vm_page_set_validclean()
* to zero sub-DEV_BSIZE'd portions of the page.
*/
vm_page_set_validclean(m, 0, nread - toff);
} else {
m->valid = 0;
m->dirty = 0;
@ -894,6 +898,12 @@ spec_getpages(ap)
}
} else if (m->valid) {
gotreqpage = 1;
/*
* Since this is a VM request, we need to make the
* entire page presentable by zeroing invalid sections.
*/
if (m->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(m, FALSE);
}
}
if (!gotreqpage) {

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.225 1999/03/13 07:31:29 alc Exp $
* $Id: pmap.c,v 1.226 1999/04/02 17:59:38 alc Exp $
*/
/*
@ -2818,10 +2818,8 @@ pmap_kernel()
}
/*
* pmap_zero_page zeros the specified (machine independent)
* page by mapping the page into virtual memory and using
* bzero to clear its contents, one machine dependent page
* at a time.
* pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*/
void
pmap_zero_page(phys)
@ -2867,6 +2865,58 @@ pmap_zero_page(phys)
#endif
}
/*
* pmap_zero_page_area zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*
* off and size may not cover an area beyond a single hardware page.
*/
void
pmap_zero_page_area(phys, off, size)
vm_offset_t phys;
int off;
int size;
{
#ifdef SMP
#if !defined(MAX_PERF)
if (*(int *) prv_CMAP3)
panic("pmap_zero_page: prv_CMAP3 busy");
#endif
*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
cpu_invlpg(&prv_CPAGE3);
#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
i686_pagezero(&prv_CPAGE3);
else
#endif
bzero((char *)&prv_CPAGE3 + off, size);
*(int *) prv_CMAP3 = 0;
#else
#if !defined(MAX_PERF)
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP2 busy");
#endif
*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
if (cpu_class == CPUCLASS_386) {
invltlb();
} else {
invlpg((u_int)CADDR2);
}
#if defined(I686_CPU)
if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
i686_pagezero(CADDR2);
else
#endif
bzero((char *)CADDR2 + off, size);
*(int *) CMAP2 = 0;
#endif
}
/*
* pmap_copy_page copies the specified (machine independent)
* page by mapping the page into virtual memory and using

View File

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.202 1999/03/12 02:24:56 julian Exp $
* $Id: vfs_bio.c,v 1.203 1999/03/19 10:17:44 bde Exp $
*/
/*
@ -2489,21 +2489,37 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
struct vnode *vp = bp->b_vp;
vm_ooffset_t soff, eoff;
/*
* Start and end offsets in buffer. eoff - soff may not cross a
* page boundary or cross the end of the buffer.
*/
soff = off;
eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
if (eoff > bp->b_offset + bp->b_bufsize)
eoff = bp->b_offset + bp->b_bufsize;
if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
vm_ooffset_t sv, ev;
vm_page_set_invalid(m,
(vm_offset_t) (soff & PAGE_MASK),
(vm_offset_t) (eoff - soff));
/*
* bp->b_validoff and bp->b_validend restrict the valid range
* that we can set. Note that these offsets are not DEV_BSIZE
* aligned. vm_page_set_validclean() must know what
* sub-DEV_BSIZE ranges to clear.
*/
#if 0
sv = (bp->b_offset + bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
ev = (bp->b_offset + bp->b_validend + (DEV_BSIZE - 1)) &
~(DEV_BSIZE - 1);
#endif
sv = bp->b_offset + bp->b_validoff;
ev = bp->b_offset + bp->b_validend;
soff = qmax(sv, soff);
eoff = qmin(ev, eoff);
}
if (eoff > soff)
vm_page_set_validclean(m,
(vm_offset_t) (soff & PAGE_MASK),

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.80 1999/01/27 22:42:07 dillon Exp $
* $Id: spec_vnops.c,v 1.81 1999/02/25 05:22:30 dillon Exp $
*/
#include <sys/param.h>
@ -866,8 +866,12 @@ spec_getpages(ap)
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else if (toff < nread) {
int nvalid = ((nread + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
/*
* Since this is a VM request, we have to supply the
* unaligned offset to allow vm_page_set_validclean()
* to zero sub-DEV_BSIZE'd portions of the page.
*/
vm_page_set_validclean(m, 0, nread - toff);
} else {
m->valid = 0;
m->dirty = 0;
@ -894,6 +898,12 @@ spec_getpages(ap)
}
} else if (m->valid) {
gotreqpage = 1;
/*
* Since this is a VM request, we need to make the
* entire page presentable by zeroing invalid sections.
*/
if (m->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(m, FALSE);
}
}
if (!gotreqpage) {

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.66 1999/01/21 08:29:07 dillon Exp $
* $Id: nfs_bio.c,v 1.67 1999/03/12 02:24:58 julian Exp $
*/
@ -144,6 +144,12 @@ nfs_getpages(ap)
return VM_PAGER_ERROR;
}
/*
* Calculate the number of bytes read and validate only that number
* of bytes. Note that due to pending writes, size may be 0. This
* does not mean that the remaining data is invalid!
*/
size = count - uio.uio_resid;
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
@ -154,11 +160,19 @@ nfs_getpages(ap)
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
/*
* Read operation filled an entire page
*/
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else {
int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
} else if (size > toff) {
/*
* Read operation filled a partial page, set valid
* bits properly. validclean will zero out
* any cruft in the buffer when setting a valid bit,
* if the size is not DEV_BSIZE aligned.
*/
vm_page_set_validclean(m, 0, size - toff);
}
if (i != ap->a_reqpage) {
@ -183,6 +197,13 @@ nfs_getpages(ap)
} else {
vnode_pager_freepage(m);
}
} else {
/*
* This page is being mapped, clear out any other
* cruft in the invalid areas of the page.
*/
if (m->valid && m->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(m, FALSE);
}
}
return 0;
@ -784,8 +805,16 @@ nfs_write(ap)
}
np->n_flag |= NMODIFIED;
/*
* If dirtyend exceeds file size, chop it down. If this
* creates a reverse-indexed or degenerate situation with
* dirtyoff/end, 0 them.
*/
if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
if (bp->b_dirtyoff >= bp->b_dirtyend)
bp->b_dirtyoff = bp->b_dirtyend = 0;
/*
* If the new write will leave a contiguous dirty
@ -838,13 +867,20 @@ nfs_write(ap)
*/
nfs_prot_buf(bp, on, n);
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
} else {
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
/*
* Only update dirtyoff/dirtyend if not a degenerate
* condition.
*/
if (n) {
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
} else {
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
}
}
/*
* To avoid code complexity, we may have to throw away
* previously valid ranges when merging the new dirty range

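A rough illustration of the new dirtyend clamp in nfs_write() (hypothetical
numbers, not taken from the commit): if the buffer extends past n_size, the
bytes beyond EOF must not stay marked dirty, or the later dirty-range merge
would push garbage past the end of the file.

        #include <stdio.h>

        #define DEV_BSIZE 512

        int
        main(void)
        {
                /* hypothetical buffer: starts at block 16 (byte 8192), 2048 dirty bytes */
                long long b_blkno = 16;
                long long b_dirtyoff = 0, b_dirtyend = 2048;
                long long n_size = 8192 + 1000;         /* file ends 1000 bytes in */

                if (b_blkno * DEV_BSIZE + b_dirtyend > n_size)
                        b_dirtyend = n_size - b_blkno * DEV_BSIZE;
                if (b_dirtyoff >= b_dirtyend)
                        b_dirtyoff = b_dirtyend = 0;

                printf("dirty range after clamp: [%lld, %lld)\n",
                    b_dirtyoff, b_dirtyend);            /* prints [0, 1000) */
                return (0);
        }

The second test catches the degenerate case the comment above mentions: if the
clamp leaves dirtyoff at or past dirtyend, both are reset to 0, matching the
guard added around the dirtyoff/dirtyend update later in nfs_write().
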
View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.66 1999/01/21 08:29:07 dillon Exp $
* $Id: nfs_bio.c,v 1.67 1999/03/12 02:24:58 julian Exp $
*/
@ -144,6 +144,12 @@ nfs_getpages(ap)
return VM_PAGER_ERROR;
}
/*
* Calculate the number of bytes read and validate only that number
* of bytes. Note that due to pending writes, size may be 0. This
* does not mean that the remaining data is invalid!
*/
size = count - uio.uio_resid;
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
@ -154,11 +160,19 @@ nfs_getpages(ap)
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
/*
* Read operation filled an entire page
*/
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else {
int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
} else if (size > toff) {
/*
* Read operation filled a partial page, set valid
* bits properly. validclean will zero out
* any cruft in the buffer when setting a valid bit,
* if the size is not DEV_BSIZE aligned.
*/
vm_page_set_validclean(m, 0, size - toff);
}
if (i != ap->a_reqpage) {
@ -183,6 +197,13 @@ nfs_getpages(ap)
} else {
vnode_pager_freepage(m);
}
} else {
/*
* This page is being mapped, clear out any other
* cruft in the invalid areas of the page.
*/
if (m->valid && m->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(m, FALSE);
}
}
return 0;
@ -784,8 +805,16 @@ nfs_write(ap)
}
np->n_flag |= NMODIFIED;
/*
* If dirtyend exceeds file size, chop it down. If this
* creates a reverse-indexed or degenerate situation with
* dirtyoff/end, 0 them.
*/
if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
if (bp->b_dirtyoff >= bp->b_dirtyend)
bp->b_dirtyoff = bp->b_dirtyend = 0;
/*
* If the new write will leave a contiguous dirty
@ -838,13 +867,20 @@ nfs_write(ap)
*/
nfs_prot_buf(bp, on, n);
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
} else {
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
/*
* Only update dirtyoff/dirtyend if not a degenerate
* condition.
*/
if (n) {
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
} else {
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
}
}
/*
* To avoid code complexity, we may have to throw away
* previously valid ranges when merging the new dirty range

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
* $Id: ufs_readwrite.c,v 1.56 1999/01/21 08:29:09 dillon Exp $
* $Id: ufs_readwrite.c,v 1.57 1999/01/28 00:57:56 dillon Exp $
*/
#define BLKSIZE(a, b, c) blksize(a, b, c)
@ -189,6 +189,13 @@ READ(ap)
lbn = lblkno(fs, uio->uio_offset);
nextlbn = lbn + 1;
/*
* size of buffer. The buffer representing the
* end of the file is rounded up to the size of
* the block type ( fragment or full block,
* depending ).
*/
size = BLKSIZE(fs, ip, lbn);
blkoffset = blkoff(fs, uio->uio_offset);
@ -536,11 +543,14 @@ ffs_getpages(ap)
firstindex = ap->a_m[0]->pindex;
/*
* if ANY DEV_BSIZE blocks are valid on a large filesystem block
* then, the entire page is valid --
* if ANY DEV_BSIZE blocks are valid on a large filesystem block,
* then the entire page is valid. Since the page may be mapped,
* user programs might reference data beyond the actual end of file
* occurring within the page. We have to zero that data.
*/
if (mreq->valid) {
mreq->valid = VM_PAGE_BITS_ALL;
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
for (i = 0; i < pcount; i++) {
if (i != ap->a_reqpage) {
vm_page_free(ap->a_m[i]);
@ -568,6 +578,7 @@ ffs_getpages(ap)
(firstindex != 0) && (firstindex <= vp->v_lastr) &&
((firstindex + pcount) > vp->v_lastr)) ||
(obj->behavior == OBJ_SEQUENTIAL)) {
struct uio auio;
struct iovec aiov;
int error;
@ -620,8 +631,8 @@ ffs_getpages(ap)
if (mreq->valid == 0)
return VM_PAGER_ERROR;
mreq->valid = VM_PAGE_BITS_ALL;
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
return VM_PAGER_OK;
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: pmap.h,v 1.27 1998/02/01 20:08:39 bde Exp $
* $Id: pmap.h,v 1.28 1998/07/26 18:15:20 dfr Exp $
*/
/*
@ -129,6 +129,7 @@ void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_remove_pages __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_zero_page __P((vm_offset_t));
void pmap_zero_page_area __P((vm_offset_t, int off, int size));
void pmap_prefault __P((pmap_t, vm_offset_t, vm_map_entry_t));
int pmap_mincore __P((pmap_t pmap, vm_offset_t addr));
void pmap_new_proc __P((struct proc *p));

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.127 1999/02/24 21:26:26 dillon Exp $
* $Id: vm_page.c,v 1.128 1999/03/19 05:21:03 alc Exp $
*/
/*
@ -146,15 +146,6 @@ static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count = 0;
/*
* map of contiguous valid DEV_BSIZE chunks in a page
* (this list is valid for page sizes upto 16*DEV_BSIZE)
*/
static u_short vm_page_dev_bsize_chunks[] = {
0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
static void vm_page_free_wakeup __P((void));
@ -1442,30 +1433,41 @@ vm_page_grab(object, pindex, allocflags)
}
/*
* mapping function for valid bits or for dirty bits in
* Mapping function for valid bits or for dirty bits in
* a page. May not block.
*
* Inputs are required to range within a page.
*/
__inline int
vm_page_bits(int base, int size)
{
u_short chunk;
int first_bit;
int last_bit;
if ((base == 0) && (size >= PAGE_SIZE))
return VM_PAGE_BITS_ALL;
KASSERT(
base + size <= PAGE_SIZE,
("vm_page_bits: illegal base/size %d/%d", base, size)
);
size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
base &= PAGE_MASK;
if (size > PAGE_SIZE - base) {
size = PAGE_SIZE - base;
}
if (size == 0) /* handle degenerate case */
return(0);
base = base / DEV_BSIZE;
chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
return (chunk << base) & VM_PAGE_BITS_ALL;
first_bit = base >> DEV_BSHIFT;
last_bit = (base + size - 1) >> DEV_BSHIFT;
return ((2 << last_bit) - (1 << first_bit));
}
/*
* set a page valid and clean. May not block.
*
* In order to maintain consistency due to the DEV_BSIZE granularity
* of the valid bits, we have to zero non-DEV_BSIZE aligned portions of
* the page at the beginning and end of the valid range when the
* associated valid bits are not already set.
*
* (base + size) must be less than or equal to PAGE_SIZE.
*/
void
vm_page_set_validclean(m, base, size)
@ -1473,10 +1475,57 @@ vm_page_set_validclean(m, base, size)
int base;
int size;
{
int pagebits = vm_page_bits(base, size);
int pagebits;
int frag;
int endoff;
if (size == 0) /* handle degenerate case */
return;
/*
* If the base is not DEV_BSIZE aligned and the valid
* bit is clear, we have to zero out a portion of the
* first block.
*/
if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
(m->valid & (1 << (base >> DEV_BSHIFT))) == 0
) {
pmap_zero_page_area(
VM_PAGE_TO_PHYS(m),
frag,
base - frag
);
}
/*
* If the ending offset is not DEV_BSIZE aligned and the
* valid bit is clear, we have to zero out a portion of
* the last block.
*/
endoff = base + size;
if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
(m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
) {
pmap_zero_page_area(
VM_PAGE_TO_PHYS(m),
endoff,
DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
);
}
/*
* Set valid, clear dirty bits. If validating the entire
* page we can safely clear the pmap modify bit.
*/
pagebits = vm_page_bits(base, size);
m->valid |= pagebits;
m->dirty &= ~pagebits;
if( base == 0 && size == PAGE_SIZE)
if (base == 0 && size == PAGE_SIZE)
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}
@ -1498,8 +1547,65 @@ vm_page_set_invalid(m, base, size)
}
/*
* is (partial) page valid? May not block.
* vm_page_zero_invalid()
*
* The kernel assumes that the invalid portions of a page contain
* garbage, but such pages can be mapped into memory by user code.
* When this occurs, we must zero out the non-valid portions of the
* page so user code sees what it expects.
*
* Pages are most often semi-valid when the end of a file is mapped
* into memory and the file's size is not page aligned.
*/
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
int b;
int i;
/*
* Scan the valid bits looking for invalid sections that
* must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the
* valid bit may be set ) have already been zeroed by
* vm_page_set_validclean().
*/
for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
if (i == (PAGE_SIZE / DEV_BSIZE) ||
(m->valid & (1 << i))
) {
if (i > b) {
pmap_zero_page_area(
VM_PAGE_TO_PHYS(m),
b << DEV_BSHIFT,
(i - b) << DEV_BSHIFT
);
}
b = i + 1;
}
}
/*
* setvalid is TRUE when we can safely set the zero'd areas
* as being valid. We can do this if there are no cache consistency
* issues, e.g. it is ok to do with UFS, but not ok to do with NFS.
*/
if (setvalid)
m->valid = VM_PAGE_BITS_ALL;
}
/*
* vm_page_is_valid:
*
* Is (partial) page valid? Note that the case where size == 0
* will return FALSE in the degenerate case where the page is
* entirely invalid, and TRUE otherwise.
*
* May not block.
*/
int
vm_page_is_valid(m, base, size)
vm_page_t m;

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.57 1999/03/14 20:40:15 julian Exp $
* $Id: vm_page.h,v 1.58 1999/03/15 05:09:48 julian Exp $
*/
/*
@ -415,6 +415,7 @@ int vm_page_queue_index __P((vm_offset_t, int));
int vm_page_sleep(vm_page_t m, char *msg, char *busy);
int vm_page_asleep(vm_page_t m, char *msg, char *busy);
#endif
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
/*

View File

@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.104 1999/02/27 23:39:28 alc Exp $
* $Id: vnode_pager.c,v 1.105 1999/03/27 02:39:01 eivind Exp $
*/
/*
@ -624,23 +624,21 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
cnt.v_vnodepgsin++;
return vnode_pager_input_smlfs(object, m[reqpage]);
}
/*
* if ANY DEV_BSIZE blocks are valid on a large filesystem block
* then, the entire page is valid --
* XXX no it isn't
* If we have a completely valid page available to us, we can
* clean up and return. Otherwise we have to re-read the
* media.
*/
if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
m[reqpage]->valid = 0;
if (m[reqpage]->valid) {
m[reqpage]->valid = VM_PAGE_BITS_ALL;
if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++) {
if (i != reqpage)
vnode_pager_freepage(m[i]);
}
return VM_PAGER_OK;
}
m[reqpage]->valid = 0;
/*
* here on direct device I/O
@ -773,12 +771,25 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
mt = m[i];
if (nextoff <= size) {
/*
* Read filled up entire page.
*/
mt->valid = VM_PAGE_BITS_ALL;
mt->dirty = 0;
pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
} else {
int nvalid = ((size + DEV_BSIZE - 1) - tfoff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(mt, 0, nvalid);
/*
* Read did not fill up entire page. Since this
* is getpages, the page may be mapped, so we have
* to zero the invalid portions of the page even
* though we aren't setting them valid.
*
* Currently we do not set the entire page valid,
* we just try to clear the piece that we couldn't
* read.
*/
vm_page_set_validclean(mt, 0, size - tfoff);
vm_page_zero_invalid(mt, FALSE);
}
vm_page_flag_clear(mt, PG_ZERO);