This mega-commit is meant to fix numerous interrelated problems.  There
has been some bitrot, and there were incorrect assumptions in the vfs_bio
code.  These problems have manifested themselves worst on NFS-type
filesystems, but they can still affect local filesystems under certain
circumstances.  Most of the problems have involved mmap consistency, and
as a side effect they broke the vfs.ioopt code.  This code might have been
committed separately, but almost everything is interrelated.

1)	Allow prefaulting (in pmap_object_init_pt) of buffer-busy pages that
	are fully valid.
2)	Rather than deactivating erroneously read initial (header) pages in
	kern_exec, we now free them.
3)	Fix the rundown of non-VMIO buffers that are in an inconsistent
	(missing vp) state.
4)	Fix the disassociation of pages from buffers in brelse.  The previous
	code had rotted and was faulty in a couple of important circumstances.
5)	Remove a gratuitous buffer wakeup in vfs_vmio_release.
6)	Remove a crufty and currently unused cluster mechanism for VBLK
	files in vfs_bio_awrite.  When the code is functional, I'll add back
	a cleaner version.
7)	The page busy-count wakeups associated with buffer cache usage were
	incorrectly cleaned up in a previous commit by me.  Revert to the
	original, correct version, but with a cleaner implementation.
8)	The cluster read code now tries more aggressively (without breaking
	the heuristics) to keep data associated with buffers when it is
	presumed that the read data (buffers) will soon be needed.
9)	Change filesystem lockmgr locks so that they use LK_NOPAUSE.  The
	delay-loop waiting is not useful for filesystem locks, due to the
	length of the time intervals involved.
10)	Correct and clean up spec_getpages.
11)	Implement fully functional nfs_getpages and nfs_putpages.
12)	Fix nfs_write so that modifications are coherent with the NFS data on
	the server disk (at least as well as NFS seems to allow.)
13)	Properly support MS_INVALIDATE on NFS.
14)	Properly pass down MS_INVALIDATE to lower levels of the VM code from
	vm_map_clean.
15)	Better support the notion of pages being busy but valid, so that
	fewer in-transit waits occur.  (Use p->busy more for pageouts instead
	of PG_BUSY.)  Since such a page is fully valid, it is still usable
	for reads; the first sketch after this list illustrates the protocol.
16)	It is possible (in error) for cached pages to be busy.  Make the
	page allocation code handle that case correctly.  (It should probably
	be a printf or panic, but I want the system to handle coding errors
	robustly.  I'll probably add a printf.)
17)	Correct the design and usage of vm_page_sleep.  It didn't handle
	consistency problems very well, so make the design a little less
	lofty.  After vm_page_sleep, if it ever blocked, it is still
	important to relookup the page (if the object generation count
	changed) and to verify its status (always); see the second sketch
	after this list.
18)	In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19)	Push the page-busy increment for writes, and the VM_PROT_READ
	downgrade, into vm_pageout_flush.
20)	Fix vm_pager_put_pages and its descendants to support an int flag
	instead of a boolean, so that we can pass down the invalidate bit
	(the third sketch after this list shows how it propagates).
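
The three sketches below are editorial illustrations, not part of the
commit: they restate, in condensed and hypothetical form, patterns that
the diff introduces.  The first sketch shows the pageout busy protocol
from items 15 and 19: a page being written out keeps its p->busy count
raised and is only downgraded to read-only, instead of being locked out
with PG_BUSY, so the still-valid page remains usable for reads while the
write is in transit.

	/*
	 * Illustrative sketch only (condensed from the diff): busy protocol
	 * for a page that is being paged out.
	 */
	m->busy++;				/* count the in-transit write */
	vm_page_protect(m, VM_PROT_READ);	/* revoke write access only */
	/* ... start the pageout I/O on the page ... */

	/* On I/O completion (biodone, vfs_unbusy_pages, vm_pageout_flush): */
	PAGE_BWAKEUP(m);	/* --m->busy; wakes PG_WANTED waiters at zero */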
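
The second sketch illustrates item 17: vm_page_sleep() now just reports
whether it blocked, and the caller is responsible for re-looking up the
page and re-verifying its state afterwards.  The wait-channel string and
surrounding control flow here are hypothetical.

	/*
	 * Illustrative caller sketch only: if vm_page_sleep() blocked, the
	 * object may have changed, so look the page up again and recheck.
	 */
again:
	m = vm_page_lookup(object, pindex);
	if (m == NULL)
		return;				/* page went away while we slept */
	if (vm_page_sleep(m, "pgwait", &m->busy))
		goto again;			/* blocked: start over */
	/* page is present and not busy here; safe to operate on it */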
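
The third sketch shows how items 14, 19 and 20 fit together: the old
boolean "sync" argument becomes an int flag word, so MS_INVALIDATE can be
carried from vm_map_clean down to the pager.  The constants are the ones
introduced by the diff; the glue code is condensed for illustration.

	/* vm_map_clean(): turn syncio/invalidate into object-clean flags */
	flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
	flags |= invalidate ? OBJPC_INVAL : 0;
	vm_object_page_clean(object, start, end, flags);

	/* vm_object_page_clean(): turn those into pager put flags */
	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
	vm_pageout_flush(ma, runlen, pagerflags);

	/* nfs_putpages() can then choose a stable or unstable NFS write */
	iomode = (ap->a_sync & VM_PAGER_PUT_SYNC) ?
	    NFSV3WRITE_FILESYNC : NFSV3WRITE_UNSTABLE;
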
John Dyson 1998-03-07 21:37:31 +00:00
parent 051b1b1a74
commit 8f9110f6a1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=34206
36 changed files with 766 additions and 426 deletions

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.186 1998/02/12 22:00:01 bde Exp $
* $Id: pmap.c,v 1.187 1998/03/01 04:18:54 dyson Exp $
*/
/*
@ -2383,7 +2383,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
continue;
}
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);
@ -2404,7 +2403,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.38 1998/03/03 22:56:26 tegge Exp $
* $Id: smp.h,v 1.39 1998/03/05 21:45:44 tegge Exp $
*
*/
@ -67,11 +67,11 @@ extern u_int mpintr_lock;
/* functions in mplock.s */
void get_mplock __P((void));
void rel_mplock __P((void));
int try_mplock __P((void));
int try_mplock __P((void));
#ifdef RECURSIVE_MPINTRLOCK
void get_mpintrlock __P((void));
void rel_mpintrlock __P((void));
int try_mpintrlock __P((void));
int try_mpintrlock __P((void));
#endif /* RECURSIVE_MPINTRLOCK */
/* global data in apic_vector.s */

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.56 1998/02/06 12:13:43 eivind Exp $
* $Id: spec_vnops.c,v 1.57 1998/03/04 06:44:59 dyson Exp $
*/
#include <sys/param.h>
@ -723,8 +723,10 @@ spec_getpages(ap)
daddr_t blkno;
struct buf *bp;
vm_ooffset_t offset;
int toff, nextoff, nread;
struct vnode *vp = ap->a_vp;
int blksiz;
int gotreqpage;
error = 0;
pcount = round_page(ap->a_count) / PAGE_SIZE;
@ -788,8 +790,6 @@ spec_getpages(ap)
/* Do the input. */
VOP_STRATEGY(bp);
if (bp->b_flags & B_ASYNC)
return (VM_PAGER_PEND);
s = splbio();
@ -799,12 +799,19 @@ spec_getpages(ap)
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
error = EIO;
if ((bp->b_flags & B_ERROR) != 0) {
if (bp->b_error)
error = bp->b_error;
else
error = EIO;
}
if (!error && ap->a_count != pcount * PAGE_SIZE)
bzero((caddr_t)kva + ap->a_count,
PAGE_SIZE * pcount - ap->a_count);
nread = size - bp->b_resid;
if (nread < ap->a_count) {
bzero((caddr_t)kva + nread,
ap->a_count - nread);
}
pmap_qremove(kva, pcount);
/*
@ -812,36 +819,53 @@ spec_getpages(ap)
*/
relpbuf(bp);
for (i = 0; i < pcount; i++) {
ap->a_m[i]->dirty = 0;
ap->a_m[i]->valid = VM_PAGE_BITS_ALL;
ap->a_m[i]->flags &= ~PG_ZERO;
if (i != ap->a_reqpage) {
/*
* Whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere (it already is in the object). Result:
* It appears that emperical results show that
* deactivating pages is best.
*/
gotreqpage = 0;
for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = ap->a_m[i];
m->flags &= ~PG_ZERO;
if (nextoff <= nread) {
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else if (toff < nread) {
int nvalid = ((nread + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
} else {
m->valid = 0;
m->dirty = 0;
}
if (i != ap->a_reqpage) {
/*
* Just in case someone was asking for this page we
* now tell them that it is ok to use.
*/
if (!error) {
if (ap->a_m[i]->flags & PG_WANTED)
vm_page_activate(ap->a_m[i]);
else
vm_page_deactivate(ap->a_m[i]);
PAGE_WAKEUP(ap->a_m[i]);
} else
vnode_pager_freepage(ap->a_m[i]);
if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
if (m->valid) {
if (m->flags & PG_WANTED) {
vm_page_activate(m);
} else {
vm_page_deactivate(m);
}
PAGE_WAKEUP(m);
} else {
vm_page_free(m);
}
} else {
vm_page_free(m);
}
} else if (m->valid) {
gotreqpage = 1;
}
}
if (error)
printf("spec_getpages: I/O read error\n");
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
if (!gotreqpage) {
printf("spec_getpages: I/O read failure: (code=%d)\n", error);
return VM_PAGER_ERROR;
}
return VM_PAGER_OK;
}
/* ARGSUSED */

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.186 1998/02/12 22:00:01 bde Exp $
* $Id: pmap.c,v 1.187 1998/03/01 04:18:54 dyson Exp $
*/
/*
@ -2383,7 +2383,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
continue;
}
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);
@ -2404,7 +2403,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.38 1998/03/03 22:56:26 tegge Exp $
* $Id: smp.h,v 1.39 1998/03/05 21:45:44 tegge Exp $
*
*/
@ -67,11 +67,11 @@ extern u_int mpintr_lock;
/* functions in mplock.s */
void get_mplock __P((void));
void rel_mplock __P((void));
int try_mplock __P((void));
int try_mplock __P((void));
#ifdef RECURSIVE_MPINTRLOCK
void get_mpintrlock __P((void));
void rel_mpintrlock __P((void));
int try_mpintrlock __P((void));
int try_mpintrlock __P((void));
#endif /* RECURSIVE_MPINTRLOCK */
/* global data in apic_vector.s */

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_exec.c,v 1.78 1998/02/25 13:08:06 bde Exp $
* $Id: kern_exec.c,v 1.79 1998/03/02 05:47:55 peter Exp $
*/
#include <sys/param.h>
@ -389,8 +389,7 @@ exec_map_first_page(imgp)
if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
vm_page_protect(ma[0], VM_PROT_NONE);
vm_page_deactivate(ma[0]);
PAGE_WAKEUP(ma[0]);
vm_page_free(ma[0]);
splx(s);
return EIO;
}

View File

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.152 1998/03/01 04:18:42 dyson Exp $
* $Id: vfs_bio.c,v 1.153 1998/03/04 03:17:30 dyson Exp $
*/
/*
@ -138,7 +138,7 @@ SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
&kvafreespace, 0, "");
static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
struct bqueues bufqueues[BUFFER_QUEUES] = {0};
extern int vm_swap_size;
@ -520,7 +520,7 @@ brelse(struct buf * bp)
relpbuf(bp);
return;
}
/* anyone need a "free" block? */
s = splbio();
/* anyone need this block? */
@ -538,10 +538,11 @@ brelse(struct buf * bp)
if (bp->b_flags & B_DELWRI)
--numdirtybuffers;
bp->b_flags &= ~(B_DELWRI | B_CACHE);
if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
if ((bp->b_flags & B_VMIO) == 0) {
if (bp->b_bufsize)
allocbuf(bp, 0);
brelvp(bp);
if (bp->b_vp)
brelvp(bp);
}
}
@ -571,78 +572,72 @@ brelse(struct buf * bp)
&& bp->b_validend == bp->b_bufsize))
#endif
) {
vm_ooffset_t foff;
vm_object_t obj;
int i, resid;
int i, j, resid;
vm_page_t m;
off_t foff;
vm_pindex_t poff;
vm_object_t obj;
struct vnode *vp;
int iototal = bp->b_bufsize;
int blksize;
vp = bp->b_vp;
#if !defined(MAX_PERF)
if (!vp)
panic("brelse: missing vp");
#endif
if (vp->v_type == VBLK)
blksize = DEV_BSIZE;
else
blksize = vp->v_mount->mnt_stat.f_iosize;
if (bp->b_npages) {
vm_pindex_t poff;
obj = (vm_object_t) vp->v_object;
if (vp->v_type == VBLK)
foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
poff = OFF_TO_IDX(foff);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
m = vm_page_lookup(obj, poff + i);
resid = bp->b_bufsize;
foff = -1LL;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
obj = (vm_object_t) vp->v_object;
foff = (off_t) bp->b_lblkno * blksize;
poff = OFF_TO_IDX(foff);
for (j = i; j < bp->b_npages; j++) {
m = bp->b_pages[j];
if (m == bogus_page) {
m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
if (!m) {
panic("brelse: page missing\n");
}
if (!m) {
panic("brelse: page missing\n");
}
#endif
bp->b_pages[i] = m;
pmap_qenter(trunc_page(bp->b_data),
bp->b_pages, bp->b_npages);
}
resid = IDX_TO_OFF(m->pindex+1) - foff;
if (resid > iototal)
resid = iototal;
if (resid > 0) {
/*
* Don't invalidate the page if the local machine has already
* modified it. This is the lesser of two evils, and should
* be fixed.
*/
if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
vm_page_test_dirty(m);
if (m->dirty == 0) {
vm_page_set_invalid(m, (vm_offset_t) foff, resid);
if (m->valid == 0)
vm_page_protect(m, VM_PROT_NONE);
}
}
if (resid >= PAGE_SIZE) {
if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
bp->b_flags |= B_INVAL;
}
} else {
if (!vm_page_is_valid(m,
(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
bp->b_flags |= B_INVAL;
}
bp->b_pages[j] = m;
}
}
foff += resid;
iototal -= resid;
if ((bp->b_flags & B_INVAL) == 0) {
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}
break;
}
if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
if ((blksize & PAGE_MASK) == 0) {
vm_page_set_invalid(m, 0, resid);
} else {
if (foff == -1LL)
foff = (off_t) bp->b_lblkno * blksize;
vm_page_set_invalid(m, (vm_offset_t) foff, resid);
}
}
resid -= PAGE_SIZE;
}
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
} else if (bp->b_flags & B_VMIO) {
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
}
#if !defined(MAX_PERF)
@ -755,6 +750,7 @@ vfs_vmio_release(bp)
m = bp->b_pages[i];
bp->b_pages[i] = NULL;
vm_page_unwire(m);
/*
* We don't mess with busy pages, it is
* the responsibility of the process that
@ -765,11 +761,6 @@ vfs_vmio_release(bp)
if (m->wire_count == 0) {
if (m->flags & PG_WANTED) {
m->flags &= ~PG_WANTED;
wakeup(m);
}
/*
* If this is an async free -- we cannot place
* pages onto the cache queue. If it is an
@ -895,33 +886,6 @@ vfs_bio_awrite(struct buf * bp)
return nwritten;
}
}
#if 0
else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
((size = bp->b_bufsize) >= PAGE_SIZE)) {
maxcl = MAXPHYS / size;
for (i = 1; i < maxcl; i++) {
if ((bpa = gbincore(vp, lblkno + i)) &&
((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
(B_DELWRI | B_CLUSTEROK)) &&
(bpa->b_bufsize == size)) {
if (bpa->b_blkno !=
bp->b_blkno + ((i * size) >> DEV_BSHIFT))
break;
} else {
break;
}
}
ncl = i;
/*
* this is a possible cluster write
*/
if (ncl != 1) {
nwritten = cluster_wbuild(vp, size, lblkno, ncl);
splx(s);
return nwritten;
}
}
#endif
bremfree(bp);
splx(s);
@ -1362,7 +1326,7 @@ struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
struct buf *bp;
int s;
int i, s;
struct bufhashhdr *bh;
int maxsize;
int generation;
@ -1474,9 +1438,10 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
} else {
bp->b_flags &= ~B_VMIO;
}
splx(s);
allocbuf(bp, size);
splx(s);
#ifdef PC98
/*
* 1024byte/sector support
@ -1660,6 +1625,8 @@ allocbuf(struct buf * bp, int size)
int pageindex, curbpnpages;
struct vnode *vp;
int bsize;
int orig_validoff = bp->b_validoff;
int orig_validend = bp->b_validend;
vp = bp->b_vp;
@ -1676,8 +1643,9 @@ allocbuf(struct buf * bp, int size)
off = (vm_ooffset_t) bp->b_lblkno * bsize;
curbpnpages = bp->b_npages;
doretry:
bp->b_validoff = orig_validoff;
bp->b_validend = orig_validend;
bp->b_flags |= B_CACHE;
bp->b_validoff = bp->b_validend = 0;
for (toff = 0; toff < newbsize; toff += tinc) {
int bytesinpage;
@ -1705,14 +1673,11 @@ allocbuf(struct buf * bp, int size)
vm_pageout_deficit += (desiredpages - bp->b_npages);
goto doretry;
}
/*
* Normally it is unwise to clear PG_BUSY without
* PAGE_WAKEUP -- but it is okay here, as there is
* no chance for blocking between here and vm_page_alloc
*/
m->flags &= ~PG_BUSY;
vm_page_wire(m);
m->flags &= ~PG_BUSY;
bp->b_flags &= ~B_CACHE;
} else if (m->flags & PG_BUSY) {
s = splvm();
if (m->flags & PG_BUSY) {
@ -1935,14 +1900,13 @@ biodone(register struct buf * bp)
#endif
panic("biodone: page busy < 0\n");
}
m->flags |= PG_BUSY;
--m->busy;
PAGE_WAKEUP(m);
PAGE_BWAKEUP(m);
--obj->paging_in_progress;
foff += resid;
iosize -= resid;
}
if (obj && obj->paging_in_progress == 0 &&
if (obj &&
(obj->paging_in_progress == 0) &&
(obj->flags & OBJ_PIPWNT)) {
obj->flags &= ~OBJ_PIPWNT;
wakeup(obj);
@ -2038,9 +2002,7 @@ vfs_unbusy_pages(struct buf * bp)
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}
--obj->paging_in_progress;
m->flags |= PG_BUSY;
--m->busy;
PAGE_WAKEUP(m);
PAGE_BWAKEUP(m);
}
if (obj->paging_in_progress == 0 &&
(obj->flags & OBJ_PIPWNT)) {
@ -2271,6 +2233,7 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
goto tryagain;
}
vm_page_wire(p);
p->valid = VM_PAGE_BITS_ALL;
pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
bp->b_pages[index] = p;
PAGE_WAKEUP(p);

View File

@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.54 1998/02/04 22:32:39 eivind Exp $
* $Id: vfs_cluster.c,v 1.55 1998/02/06 12:13:30 eivind Exp $
*/
#include "opt_debug_cluster.h"
@ -150,17 +150,12 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
(i == (maxra - 1)))
tbp->b_flags |= B_RAM;
#if 0
if (tbp->b_usecount == 0) {
/*
* Make sure that the soon-to-be used readaheads
* are still there. The getblk/bqrelse pair will
* boost the priority of the buffer.
*/
tbp = getblk(vp, lblkno+i, size, 0, 0);
bqrelse(tbp);
if ((tbp->b_usecount < 5) &&
((tbp->b_flags & B_BUSY) == 0) &&
(tbp->b_qindex == QUEUE_LRU)) {
TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist);
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist);
}
#endif
}
splx(s);
if (i >= maxra) {
@ -215,7 +210,6 @@ cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
* if we have been doing sequential I/O, then do some read-ahead
*/
rbp = NULL;
/* if (seqcount && (lblkno < (origblkno + maxra))) { */
if (seqcount && (lblkno < (origblkno + seqcount))) {
/*
* we now build the read-ahead buffer if it is desirable.

View File

@ -342,7 +342,7 @@ vop_sharedlock(ap)
return (0);
MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
M_VNODE, M_WAITOK);
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
}
switch (flags & LK_TYPE_MASK) {
case LK_DRAIN:
@ -410,7 +410,7 @@ vop_nolock(ap)
return (0);
MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
M_VNODE, M_WAITOK);
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
}
switch (flags & LK_TYPE_MASK) {
case LK_DRAIN:

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.135 1998/03/01 04:18:44 dyson Exp $
* $Id: vfs_subr.c,v 1.136 1998/03/01 23:07:45 dyson Exp $
*/
/*
@ -181,7 +181,7 @@ vfs_busy(mp, flags, interlkp, p)
}
return (ENOENT);
}
lkflags = LK_SHARED;
lkflags = LK_SHARED | LK_NOPAUSE;
if (interlkp)
lkflags |= LK_INTERLOCK;
if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
@ -224,7 +224,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
return (ENODEV);
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, p);
LIST_INIT(&mp->mnt_vnodelist);
mp->mnt_vfc = vfsp;

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
* $Id: vfs_syscalls.c,v 1.92 1998/02/08 01:41:33 dyson Exp $
* $Id: vfs_syscalls.c,v 1.93 1998/02/15 04:17:09 dyson Exp $
*/
/* For 4.3 integer FS ID compatibility */
@ -244,7 +244,7 @@ mount(p, uap)
mp = (struct mount *)malloc((u_long)sizeof(struct mount),
M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, p);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;
@ -2158,9 +2158,9 @@ fsync(p, uap)
if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp))
return (error);
vp = (struct vnode *)fp->f_data;
if ((error = vn_lock(vp, LK_EXCLUSIVE|LK_RETRY, p)) == NULL) {
if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) == NULL) {
if (vp->v_object) {
vm_object_page_clean(vp->v_object, 0, 0 ,0);
vm_object_page_clean(vp->v_object, 0, 0, FALSE);
}
error = VOP_FSYNC(vp, fp->f_cred,
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.135 1998/03/01 04:18:44 dyson Exp $
* $Id: vfs_subr.c,v 1.136 1998/03/01 23:07:45 dyson Exp $
*/
/*
@ -181,7 +181,7 @@ vfs_busy(mp, flags, interlkp, p)
}
return (ENOENT);
}
lkflags = LK_SHARED;
lkflags = LK_SHARED | LK_NOPAUSE;
if (interlkp)
lkflags |= LK_INTERLOCK;
if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
@ -224,7 +224,7 @@ vfs_rootmountalloc(fstypename, devname, mpp)
return (ENODEV);
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, p);
LIST_INIT(&mp->mnt_vnodelist);
mp->mnt_vfc = vfsp;

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
* $Id: vfs_syscalls.c,v 1.92 1998/02/08 01:41:33 dyson Exp $
* $Id: vfs_syscalls.c,v 1.93 1998/02/15 04:17:09 dyson Exp $
*/
/* For 4.3 integer FS ID compatibility */
@ -244,7 +244,7 @@ mount(p, uap)
mp = (struct mount *)malloc((u_long)sizeof(struct mount),
M_MOUNT, M_WAITOK);
bzero((char *)mp, (u_long)sizeof(struct mount));
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
(void)vfs_busy(mp, LK_NOWAIT, 0, p);
mp->mnt_op = vfsp->vfc_vfsops;
mp->mnt_vfc = vfsp;
@ -2158,9 +2158,9 @@ fsync(p, uap)
if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp))
return (error);
vp = (struct vnode *)fp->f_data;
if ((error = vn_lock(vp, LK_EXCLUSIVE|LK_RETRY, p)) == NULL) {
if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p)) == NULL) {
if (vp->v_object) {
vm_object_page_clean(vp->v_object, 0, 0 ,0);
vm_object_page_clean(vp->v_object, 0, 0, FALSE);
}
error = VOP_FSYNC(vp, fp->f_cred,
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95
* $Id: spec_vnops.c,v 1.56 1998/02/06 12:13:43 eivind Exp $
* $Id: spec_vnops.c,v 1.57 1998/03/04 06:44:59 dyson Exp $
*/
#include <sys/param.h>
@ -723,8 +723,10 @@ spec_getpages(ap)
daddr_t blkno;
struct buf *bp;
vm_ooffset_t offset;
int toff, nextoff, nread;
struct vnode *vp = ap->a_vp;
int blksiz;
int gotreqpage;
error = 0;
pcount = round_page(ap->a_count) / PAGE_SIZE;
@ -788,8 +790,6 @@ spec_getpages(ap)
/* Do the input. */
VOP_STRATEGY(bp);
if (bp->b_flags & B_ASYNC)
return (VM_PAGER_PEND);
s = splbio();
@ -799,12 +799,19 @@ spec_getpages(ap)
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
error = EIO;
if ((bp->b_flags & B_ERROR) != 0) {
if (bp->b_error)
error = bp->b_error;
else
error = EIO;
}
if (!error && ap->a_count != pcount * PAGE_SIZE)
bzero((caddr_t)kva + ap->a_count,
PAGE_SIZE * pcount - ap->a_count);
nread = size - bp->b_resid;
if (nread < ap->a_count) {
bzero((caddr_t)kva + nread,
ap->a_count - nread);
}
pmap_qremove(kva, pcount);
/*
@ -812,36 +819,53 @@ spec_getpages(ap)
*/
relpbuf(bp);
for (i = 0; i < pcount; i++) {
ap->a_m[i]->dirty = 0;
ap->a_m[i]->valid = VM_PAGE_BITS_ALL;
ap->a_m[i]->flags &= ~PG_ZERO;
if (i != ap->a_reqpage) {
/*
* Whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere (it already is in the object). Result:
* It appears that emperical results show that
* deactivating pages is best.
*/
gotreqpage = 0;
for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = ap->a_m[i];
m->flags &= ~PG_ZERO;
if (nextoff <= nread) {
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else if (toff < nread) {
int nvalid = ((nread + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
} else {
m->valid = 0;
m->dirty = 0;
}
if (i != ap->a_reqpage) {
/*
* Just in case someone was asking for this page we
* now tell them that it is ok to use.
*/
if (!error) {
if (ap->a_m[i]->flags & PG_WANTED)
vm_page_activate(ap->a_m[i]);
else
vm_page_deactivate(ap->a_m[i]);
PAGE_WAKEUP(ap->a_m[i]);
} else
vnode_pager_freepage(ap->a_m[i]);
if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
if (m->valid) {
if (m->flags & PG_WANTED) {
vm_page_activate(m);
} else {
vm_page_deactivate(m);
}
PAGE_WAKEUP(m);
} else {
vm_page_free(m);
}
} else {
vm_page_free(m);
}
} else if (m->valid) {
gotreqpage = 1;
}
}
if (error)
printf("spec_getpages: I/O read error\n");
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
if (!gotreqpage) {
printf("spec_getpages: I/O read failure: (code=%d)\n", error);
return VM_PAGER_ERROR;
}
return VM_PAGER_OK;
}
/* ARGSUSED */

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.50 1998/02/06 12:13:55 eivind Exp $
* $Id: nfs_bio.c,v 1.51 1998/03/06 09:46:43 msmith Exp $
*/
@ -65,6 +65,7 @@
static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
struct proc *p));
static void nfs_prot_buf __P((struct buf *bp, int off, int n));
extern int nfs_numasync;
extern struct nfsstats nfsstats;
@ -76,58 +77,153 @@ int
nfs_getpages(ap)
struct vop_getpages_args *ap;
{
int i, pcount, error;
int i, error, nextoff, size, toff, npages;
struct uio uio;
struct iovec iov;
vm_page_t m;
vm_offset_t kva;
struct buf *bp;
if ((ap->a_vp->v_object) == NULL) {
printf("nfs_getpages: called with non-merged cache vnode??\n");
return EOPNOTSUPP;
}
m = ap->a_m[ap->a_reqpage];
kva = vm_pager_map_page(m);
/*
* We use only the kva address for the buffer, but this is extremely
* convienient and fast.
*/
bp = getpbuf();
npages = btoc(ap->a_count);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = PAGE_SIZE;
iov.iov_len = ap->a_count;
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
uio.uio_offset = IDX_TO_OFF(m->pindex);
uio.uio_resid = PAGE_SIZE;
uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
uio.uio_procp = curproc;
error = nfs_readrpc(ap->a_vp, &uio, curproc->p_ucred);
vm_pager_unmap_page(kva);
pmap_qremove(kva, npages);
pcount = round_page(ap->a_count) / PAGE_SIZE;
for (i = 0; i < pcount; i++) {
relpbuf(bp);
if (error && (uio.uio_resid == ap->a_count))
return VM_PAGER_ERROR;
size = ap->a_count - uio.uio_resid;
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = ap->a_m[i];
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else {
int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
}
if (i != ap->a_reqpage) {
vnode_pager_freepage(ap->a_m[i]);
/*
* Whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere (it already is in the object). Result:
* It appears that emperical results show that
* deactivating pages is best.
*/
/*
* Just in case someone was asking for this page we
* now tell them that it is ok to use.
*/
if (!error) {
if (m->flags & PG_WANTED)
vm_page_activate(m);
else
vm_page_deactivate(m);
PAGE_WAKEUP(m);
} else {
vnode_pager_freepage(m);
}
}
}
if (error && (uio.uio_resid == PAGE_SIZE))
return VM_PAGER_ERROR;
return 0;
}
/*
* put page routine
*
* XXX By default, wimp out... note that a_offset is ignored (and always
* XXX has been).
* Vnode op for VM putpages.
*/
int
nfs_putpages(ap)
struct vop_putpages_args *ap;
{
return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
ap->a_sync, ap->a_rtvals);
struct uio uio;
struct iovec iov;
vm_page_t m;
vm_offset_t kva;
struct buf *bp;
int iomode, must_commit, i, error, npages;
int *rtvals;
rtvals = ap->a_rtvals;
npages = btoc(ap->a_count);
for (i = 0; i < npages; i++) {
rtvals[i] = VM_PAGER_AGAIN;
}
/*
* We use only the kva address for the buffer, but this is extremely
* convienient and fast.
*/
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = ap->a_count;
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_WRITE;
uio.uio_procp = curproc;
if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
iomode = NFSV3WRITE_UNSTABLE;
else
iomode = NFSV3WRITE_FILESYNC;
error = nfs_writerpc(ap->a_vp, &uio,
curproc->p_ucred, &iomode, &must_commit);
pmap_qremove(kva, npages);
relpbuf(bp);
if (!error) {
int nwritten = round_page(ap->a_count - uio.uio_resid) / PAGE_SIZE;
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
ap->a_m[i]->dirty = 0;
}
if (must_commit)
nfs_clearcommit(ap->a_vp->v_mount);
}
return ap->a_rtvals[0];
}
/*
@ -464,7 +560,7 @@ nfs_bioread(vp, uio, ioflag, cred, getpages)
};
if (n > 0) {
error = uiomove(bp->b_data + on, (int)n, uio);
error = uiomove(bp->b_data + on, (int)n, uio);
}
switch (vp->v_type) {
case VREG:
@ -484,6 +580,24 @@ nfs_bioread(vp, uio, ioflag, cred, getpages)
return (error);
}
static void
nfs_prot_buf(bp, off, n)
struct buf *bp;
int off;
int n;
{
int pindex, boff, end;
if ((bp->b_flags & B_VMIO) == 0)
return;
end = round_page(off + n);
for (boff = trunc_page(off); boff < end; boff += PAGE_SIZE) {
pindex = boff >> PAGE_SHIFT;
vm_page_protect(bp->b_pages[pindex], VM_PROT_NONE);
}
}
/*
* Vnode op for write using bio
*/
@ -648,12 +762,19 @@ nfs_write(ap)
goto again;
}
}
error = uiomove((char *)bp->b_data + on, n, uio);
if (error) {
bp->b_flags |= B_ERROR;
brelse(bp);
return (error);
}
/*
* This will keep the buffer and mmaped regions more coherent.
*/
nfs_prot_buf(bp, on, n);
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
@ -681,6 +802,8 @@ nfs_write(ap)
*/
if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
bp->b_proc = p;
if (ioflag & IO_INVAL)
bp->b_flags |= B_INVAL;
error = VOP_BWRITE(bp);
if (error)
return (error);

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
* $Id: nfs_vnops.c,v 1.78 1998/02/06 12:13:58 eivind Exp $
* $Id: nfs_vnops.c,v 1.79 1998/03/06 09:46:48 msmith Exp $
*/
@ -3000,6 +3000,9 @@ nfs_writebp(bp, force)
if(!(bp->b_flags & B_BUSY))
panic("bwrite: buffer is not busy???");
if (bp->b_flags & B_INVAL)
bp->b_flags |= B_INVAL | B_NOCACHE;
if (bp->b_flags & B_DELWRI) {
--numdirtybuffers;
if (needsbuffer)
@ -3045,6 +3048,7 @@ nfs_writebp(bp, force)
if (oldflags & B_DELWRI) {
reassignbuf(bp, bp->b_vp);
}
brelse(bp);
return (rtval);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
* $Id: nfs_bio.c,v 1.50 1998/02/06 12:13:55 eivind Exp $
* $Id: nfs_bio.c,v 1.51 1998/03/06 09:46:43 msmith Exp $
*/
@ -65,6 +65,7 @@
static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
struct proc *p));
static void nfs_prot_buf __P((struct buf *bp, int off, int n));
extern int nfs_numasync;
extern struct nfsstats nfsstats;
@ -76,58 +77,153 @@ int
nfs_getpages(ap)
struct vop_getpages_args *ap;
{
int i, pcount, error;
int i, error, nextoff, size, toff, npages;
struct uio uio;
struct iovec iov;
vm_page_t m;
vm_offset_t kva;
struct buf *bp;
if ((ap->a_vp->v_object) == NULL) {
printf("nfs_getpages: called with non-merged cache vnode??\n");
return EOPNOTSUPP;
}
m = ap->a_m[ap->a_reqpage];
kva = vm_pager_map_page(m);
/*
* We use only the kva address for the buffer, but this is extremely
* convienient and fast.
*/
bp = getpbuf();
npages = btoc(ap->a_count);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = PAGE_SIZE;
iov.iov_len = ap->a_count;
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
uio.uio_offset = IDX_TO_OFF(m->pindex);
uio.uio_resid = PAGE_SIZE;
uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
uio.uio_procp = curproc;
error = nfs_readrpc(ap->a_vp, &uio, curproc->p_ucred);
vm_pager_unmap_page(kva);
pmap_qremove(kva, npages);
pcount = round_page(ap->a_count) / PAGE_SIZE;
for (i = 0; i < pcount; i++) {
relpbuf(bp);
if (error && (uio.uio_resid == ap->a_count))
return VM_PAGER_ERROR;
size = ap->a_count - uio.uio_resid;
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = ap->a_m[i];
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
} else {
int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(m, 0, nvalid);
}
if (i != ap->a_reqpage) {
vnode_pager_freepage(ap->a_m[i]);
/*
* Whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere (it already is in the object). Result:
* It appears that emperical results show that
* deactivating pages is best.
*/
/*
* Just in case someone was asking for this page we
* now tell them that it is ok to use.
*/
if (!error) {
if (m->flags & PG_WANTED)
vm_page_activate(m);
else
vm_page_deactivate(m);
PAGE_WAKEUP(m);
} else {
vnode_pager_freepage(m);
}
}
}
if (error && (uio.uio_resid == PAGE_SIZE))
return VM_PAGER_ERROR;
return 0;
}
/*
* put page routine
*
* XXX By default, wimp out... note that a_offset is ignored (and always
* XXX has been).
* Vnode op for VM putpages.
*/
int
nfs_putpages(ap)
struct vop_putpages_args *ap;
{
return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
ap->a_sync, ap->a_rtvals);
struct uio uio;
struct iovec iov;
vm_page_t m;
vm_offset_t kva;
struct buf *bp;
int iomode, must_commit, i, error, npages;
int *rtvals;
rtvals = ap->a_rtvals;
npages = btoc(ap->a_count);
for (i = 0; i < npages; i++) {
rtvals[i] = VM_PAGER_AGAIN;
}
/*
* We use only the kva address for the buffer, but this is extremely
* convienient and fast.
*/
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = ap->a_count;
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_WRITE;
uio.uio_procp = curproc;
if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
iomode = NFSV3WRITE_UNSTABLE;
else
iomode = NFSV3WRITE_FILESYNC;
error = nfs_writerpc(ap->a_vp, &uio,
curproc->p_ucred, &iomode, &must_commit);
pmap_qremove(kva, npages);
relpbuf(bp);
if (!error) {
int nwritten = round_page(ap->a_count - uio.uio_resid) / PAGE_SIZE;
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
ap->a_m[i]->dirty = 0;
}
if (must_commit)
nfs_clearcommit(ap->a_vp->v_mount);
}
return ap->a_rtvals[0];
}
/*
@ -464,7 +560,7 @@ nfs_bioread(vp, uio, ioflag, cred, getpages)
};
if (n > 0) {
error = uiomove(bp->b_data + on, (int)n, uio);
error = uiomove(bp->b_data + on, (int)n, uio);
}
switch (vp->v_type) {
case VREG:
@ -484,6 +580,24 @@ nfs_bioread(vp, uio, ioflag, cred, getpages)
return (error);
}
static void
nfs_prot_buf(bp, off, n)
struct buf *bp;
int off;
int n;
{
int pindex, boff, end;
if ((bp->b_flags & B_VMIO) == 0)
return;
end = round_page(off + n);
for (boff = trunc_page(off); boff < end; boff += PAGE_SIZE) {
pindex = boff >> PAGE_SHIFT;
vm_page_protect(bp->b_pages[pindex], VM_PROT_NONE);
}
}
/*
* Vnode op for write using bio
*/
@ -648,12 +762,19 @@ nfs_write(ap)
goto again;
}
}
error = uiomove((char *)bp->b_data + on, n, uio);
if (error) {
bp->b_flags |= B_ERROR;
brelse(bp);
return (error);
}
/*
* This will keep the buffer and mmaped regions more coherent.
*/
nfs_prot_buf(bp, on, n);
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
@ -681,6 +802,8 @@ nfs_write(ap)
*/
if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
bp->b_proc = p;
if (ioflag & IO_INVAL)
bp->b_flags |= B_INVAL;
error = VOP_BWRITE(bp);
if (error)
return (error);

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
* $Id: nfs_vnops.c,v 1.78 1998/02/06 12:13:58 eivind Exp $
* $Id: nfs_vnops.c,v 1.79 1998/03/06 09:46:48 msmith Exp $
*/
@ -3000,6 +3000,9 @@ nfs_writebp(bp, force)
if(!(bp->b_flags & B_BUSY))
panic("bwrite: buffer is not busy???");
if (bp->b_flags & B_INVAL)
bp->b_flags |= B_INVAL | B_NOCACHE;
if (bp->b_flags & B_DELWRI) {
--numdirtybuffers;
if (needsbuffer)
@ -3045,6 +3048,7 @@ nfs_writebp(bp, force)
if (oldflags & B_DELWRI) {
reassignbuf(bp, bp->b_vp);
}
brelse(bp);
return (rtval);
}

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.9 (Berkeley) 3/30/95
* $Id: buf.h,v 1.44 1997/12/02 21:07:14 phk Exp $
* $Id: buf.h,v 1.45 1998/01/22 17:30:10 dyson Exp $
*/
#ifndef _SYS_BUF_H_
@ -253,6 +253,7 @@ extern struct buf *swbuf; /* Swap I/O buffer headers. */
extern int nswbuf; /* Number of swap I/O buffer headers. */
extern int needsbuffer, numdirtybuffers;
extern TAILQ_HEAD(swqueue, buf) bswlist;
extern TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
void bufinit __P((void));
void bremfree __P((struct buf *));

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.9 (Berkeley) 3/30/95
* $Id: buf.h,v 1.44 1997/12/02 21:07:14 phk Exp $
* $Id: buf.h,v 1.45 1998/01/22 17:30:10 dyson Exp $
*/
#ifndef _SYS_BUF_H_
@ -253,6 +253,7 @@ extern struct buf *swbuf; /* Swap I/O buffer headers. */
extern int nswbuf; /* Number of swap I/O buffer headers. */
extern int needsbuffer, numdirtybuffers;
extern TAILQ_HEAD(swqueue, buf) bswlist;
extern TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
void bufinit __P((void));
void bremfree __P((struct buf *));

View File

@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* $Id: smp.h,v 1.38 1998/03/03 22:56:26 tegge Exp $
* $Id: smp.h,v 1.39 1998/03/05 21:45:44 tegge Exp $
*
*/
@ -67,11 +67,11 @@ extern u_int mpintr_lock;
/* functions in mplock.s */
void get_mplock __P((void));
void rel_mplock __P((void));
int try_mplock __P((void));
int try_mplock __P((void));
#ifdef RECURSIVE_MPINTRLOCK
void get_mpintrlock __P((void));
void rel_mpintrlock __P((void));
int try_mpintrlock __P((void));
int try_mpintrlock __P((void));
#endif /* RECURSIVE_MPINTRLOCK */
/* global data in apic_vector.s */

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
* $Id: vnode.h,v 1.65 1998/01/17 09:16:39 dyson Exp $
* $Id: vnode.h,v 1.66 1998/01/24 02:01:31 dyson Exp $
*/
#ifndef _SYS_VNODE_H_
@ -197,6 +197,7 @@ struct vattr {
#define IO_NODELOCKED 0x08 /* underlying node already locked */
#define IO_NDELAY 0x10 /* FNDELAY flag set in file table */
#define IO_VMIO 0x20 /* data already in VMIO space */
#define IO_INVAL 0x40 /* invalidate after I/O */
/*
* Modes. Some values same as Ixxx entries from inode.h for now.

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95
* $Id: ffs_inode.c,v 1.33 1998/02/04 22:33:31 eivind Exp $
* $Id: ffs_inode.c,v 1.34 1998/02/06 12:14:14 eivind Exp $
*/
#include "opt_quota.h"
@ -204,6 +204,7 @@ ffs_truncate(vp, length, flags, cred, p)
* value of osize is 0, length will be at least 1.
*/
if (osize < length) {
vnode_pager_setsize(ovp, length);
offset = blkoff(fs, length - 1);
lbn = lblkno(fs, length - 1);
aflags = B_CLRBUF;
@ -214,7 +215,6 @@ ffs_truncate(vp, length, flags, cred, p)
if (error)
return (error);
oip->i_size = length;
vnode_pager_setsize(ovp, length);
if (bp->b_bufsize == fs->fs_bsize)
bp->b_flags |= B_CLUSTEROK;
if (aflags & B_SYNC)
@ -257,7 +257,6 @@ ffs_truncate(vp, length, flags, cred, p)
else
bawrite(bp);
}
vnode_pager_setsize(ovp, length);
/*
* Calculate index into inode's block list of
* last direct and indirect blocks (if any)
@ -298,6 +297,7 @@ ffs_truncate(vp, length, flags, cred, p)
oip->i_size = osize;
vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
allerror = vinvalbuf(ovp, vflags, cred, p, 0, 0);
vnode_pager_setsize(ovp, length);
/*
* Indirect blocks first.

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
* $Id: ffs_vfsops.c,v 1.73 1998/03/01 22:46:46 msmith Exp $
* $Id: ffs_vfsops.c,v 1.74 1998/03/07 14:59:44 bde Exp $
*/
#include "opt_quota.h"
@ -559,10 +559,7 @@ ffs_mountfs(devvp, mp, p, malloctype)
if (error)
return (error);
ncount = vcount(devvp);
/*
if (devvp->v_object)
ncount -= 1;
*/
if (ncount > 1 && devvp != rootvp)
return (EBUSY);
if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))
@ -983,8 +980,9 @@ ffs_vget(mp, ino, vpp)
ump = VFSTOUFS(mp);
dev = ump->um_dev;
restart:
if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
return (0);
}
/*
* Lock out the creation of new entries in the FFS hash table in

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
* $Id: ufs_readwrite.c,v 1.42 1998/02/05 03:32:33 dyson Exp $
* $Id: ufs_readwrite.c,v 1.43 1998/02/26 06:39:50 msmith Exp $
*/
#define BLKSIZE(a, b, c) blksize(a, b, c)
@ -74,6 +74,7 @@ READ(ap)
int error;
u_short mode;
int seqcount;
int ioflag;
vm_object_t object;
vp = ap->a_vp;
@ -81,6 +82,7 @@ READ(ap)
ip = VTOI(vp);
mode = ip->i_mode;
uio = ap->a_uio;
ioflag = ap->a_ioflag;
#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_READ)
@ -106,7 +108,7 @@ READ(ap)
if (object)
vm_object_reference(object);
#if 1
if ((vfs_ioopt > 1) && object) {
if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
int nread, toread;
toread = uio->uio_resid;
if (toread > bytesinfile)
@ -128,7 +130,7 @@ READ(ap)
if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
break;
#if 1
if ((vfs_ioopt > 1) && object) {
if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
int nread, toread;
toread = uio->uio_resid;
if (toread > bytesinfile)
@ -208,10 +210,23 @@ READ(ap)
if (error)
break;
bqrelse(bp);
if (ioflag & IO_VMIO) {
bp->b_flags |= B_RELBUF;
brelse(bp);
} else {
bqrelse(bp);
}
}
if (bp != NULL)
bqrelse(bp);
if (bp != NULL) {
if (ioflag & IO_VMIO) {
bp->b_flags |= B_RELBUF;
brelse(bp);
} else {
bqrelse(bp);
}
}
if (object)
vm_object_vndeallocate(object);
if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
@ -397,8 +412,10 @@ ffs_getpages(ap)
{
off_t foff, physoffset;
int i, size, bsize;
struct vnode *dp;
struct vnode *dp, *vp;
vm_object_t obj;
vm_pindex_t pindex, firstindex;
vm_page_t m, mreq;
int bbackwards, bforwards;
int pbackwards, pforwards;
int firstpage;
@ -411,56 +428,92 @@ ffs_getpages(ap)
pcount = round_page(ap->a_count) / PAGE_SIZE;
mreq = ap->a_m[ap->a_reqpage];
firstindex = ap->a_m[0]->pindex;
/*
* if ANY DEV_BSIZE blocks are valid on a large filesystem block
* then, the entire page is valid --
*/
if (ap->a_m[ap->a_reqpage]->valid) {
ap->a_m[ap->a_reqpage]->valid = VM_PAGE_BITS_ALL;
if (mreq->valid) {
mreq->valid = VM_PAGE_BITS_ALL;
for (i = 0; i < pcount; i++) {
if (i != ap->a_reqpage)
vnode_pager_freepage(ap->a_m[i]);
if (i != ap->a_reqpage) {
vm_page_free(ap->a_m[i]);
}
}
return VM_PAGER_OK;
}
obj = ap->a_m[ap->a_reqpage]->object;
bsize = ap->a_vp->v_mount->mnt_stat.f_iosize;
vp = ap->a_vp;
obj = vp->v_object;
bsize = vp->v_mount->mnt_stat.f_iosize;
pindex = mreq->pindex;
foff = IDX_TO_OFF(pindex) /* + ap->a_offset should be zero */;
if (obj->behavior == OBJ_SEQUENTIAL) {
if (firstindex == 0)
vp->v_lastr = 0;
if ((obj->behavior != OBJ_RANDOM) &&
((firstindex != 0) && (firstindex <= vp->v_lastr) &&
((firstindex + pcount) > vp->v_lastr)) ||
(obj->behavior == OBJ_SEQUENTIAL)) {
struct uio auio;
struct iovec aiov;
int error;
vm_page_t m;
for (i = 0; i < pcount; i++) {
if (i != ap->a_reqpage) {
vnode_pager_freepage(ap->a_m[i]);
}
m = ap->a_m[i];
vm_page_activate(m);
m->busy++;
m->flags &= ~PG_BUSY;
}
m = ap->a_m[ap->a_reqpage];
m->busy++;
m->flags &= ~PG_BUSY;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
aiov.iov_base = 0;
aiov.iov_len = MAXBSIZE;
auio.uio_resid = MAXBSIZE;
auio.uio_offset = IDX_TO_OFF(m->pindex);
auio.uio_offset = foff;
auio.uio_segflg = UIO_NOCOPY;
auio.uio_rw = UIO_READ;
auio.uio_procp = curproc;
error = VOP_READ(ap->a_vp, &auio,
((MAXBSIZE / bsize) << 16), curproc->p_ucred);
error = VOP_READ(vp, &auio,
IO_VMIO | ((MAXBSIZE / bsize) << 16), curproc->p_ucred);
m->flags |= PG_BUSY;
m->busy--;
for (i = 0; i < pcount; i++) {
m = ap->a_m[i];
m->busy--;
if (error && (auio.uio_resid == MAXBSIZE))
if ((m != mreq) && (m->wire_count == 0) && (m->hold_count == 0) &&
(m->valid == 0) && (m->busy == 0) &&
(m->flags & PG_BUSY) == 0) {
m->flags |= PG_BUSY;
vm_page_free(m);
} else if (m == mreq) {
while (m->flags & PG_BUSY) {
vm_page_sleep(m, "ffspwt", NULL);
}
m->flags |= PG_BUSY;
vp->v_lastr = m->pindex + 1;
} else {
if (m->wire_count == 0) {
if (m->busy || (m->flags & PG_MAPPED) ||
(m->flags & (PG_WANTED | PG_BUSY)) == PG_WANTED) {
vm_page_activate(m);
} else {
vm_page_deactivate(m);
}
}
vp->v_lastr = m->pindex + 1;
}
}
if (mreq->valid == 0)
return VM_PAGER_ERROR;
return 0;
mreq->valid = VM_PAGE_BITS_ALL;
return VM_PAGER_OK;
}
/*
@ -468,21 +521,20 @@ ffs_getpages(ap)
* reqlblkno is the logical block that contains the page
* poff is the index of the page into the logical block
*/
foff = IDX_TO_OFF(ap->a_m[ap->a_reqpage]->pindex) + ap->a_offset;
reqlblkno = foff / bsize;
poff = (foff % bsize) / PAGE_SIZE;
if ( VOP_BMAP( ap->a_vp, reqlblkno, &dp, &reqblkno,
if ( VOP_BMAP( vp, reqlblkno, &dp, &reqblkno,
&bforwards, &bbackwards) || (reqblkno == -1)) {
for(i = 0; i < pcount; i++) {
if (i != ap->a_reqpage)
vnode_pager_freepage(ap->a_m[i]);
vm_page_free(ap->a_m[i]);
}
if (reqblkno == -1) {
if ((ap->a_m[ap->a_reqpage]->flags & PG_ZERO) == 0)
vm_page_zero_fill(ap->a_m[ap->a_reqpage]);
ap->a_m[ap->a_reqpage]->dirty = 0;
ap->a_m[ap->a_reqpage]->valid = VM_PAGE_BITS_ALL;
if ((mreq->flags & PG_ZERO) == 0)
vm_page_zero_fill(mreq);
mreq->dirty = 0;
mreq->valid = VM_PAGE_BITS_ALL;
return VM_PAGER_OK;
} else {
return VM_PAGER_ERROR;
@ -502,7 +554,7 @@ ffs_getpages(ap)
if (ap->a_reqpage > pbackwards) {
firstpage = ap->a_reqpage - pbackwards;
for(i=0;i<firstpage;i++)
vnode_pager_freepage(ap->a_m[i]);
vm_page_free(ap->a_m[i]);
}
/*
@ -513,7 +565,7 @@ ffs_getpages(ap)
bforwards * pagesperblock;
if (pforwards < (pcount - (ap->a_reqpage + 1))) {
for( i = ap->a_reqpage + pforwards + 1; i < pcount; i++)
vnode_pager_freepage(ap->a_m[i]);
vm_page_free(ap->a_m[i]);
pcount = ap->a_reqpage + pforwards + 1;
}
@ -529,11 +581,13 @@ ffs_getpages(ap)
*/
size = pcount * PAGE_SIZE;
if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) >
((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size)
size = ((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size - IDX_TO_OFF(ap->a_m[firstpage]->pindex);
vp->v_lastr = mreq->pindex + pcount;
physoffset -= IDX_TO_OFF(ap->a_m[ap->a_reqpage]->pindex);
if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) >
obj->un_pager.vnp.vnp_size)
size = obj->un_pager.vnp.vnp_size - foff;
physoffset -= foff;
rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
(ap->a_reqpage - firstpage), physoffset);

View File

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.90 1998/02/25 03:55:47 dyson Exp $
* $Id: swap_pager.c,v 1.91 1998/03/01 04:18:14 dyson Exp $
*/
/*
@ -1578,9 +1578,7 @@ swap_pager_finish(spc)
printf("swap_pager_finish: I/O error, clean of page %lx failed\n",
(u_long) VM_PAGE_TO_PHYS(ma[i]));
ma[i]->dirty = VM_PAGE_BITS_ALL;
ma[i]->flags |= PG_BUSY;
ma[i]->busy--;
PAGE_WAKEUP(ma[i]);
PAGE_BWAKEUP(ma[i]);
}
object->paging_in_progress -= spc->spc_count;
@ -1651,9 +1649,7 @@ swap_pager_iodone(bp)
/*
* we wakeup any processes that are waiting on these pages.
*/
ma[i]->flags |= PG_BUSY;
ma[i]->busy--;
PAGE_WAKEUP(ma[i]);
PAGE_BWAKEUP(ma[i]);
}
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.116 1998/02/23 08:22:33 dyson Exp $
* $Id: vm_map.c,v 1.117 1998/02/25 03:55:49 dyson Exp $
*/
/*
@ -286,7 +286,7 @@ vm_map_init(map, min, max)
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
/*
@ -1665,12 +1665,15 @@ vm_map_clean(map, start, end, syncio, invalidate)
* idea.
*/
if (current->protection & VM_PROT_WRITE) {
int flags;
if (object->type == OBJT_VNODE)
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
(syncio||invalidate)?1:0);
flags);
if (invalidate)
vm_object_page_remove(object,
OFF_TO_IDX(offset),

View File

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.72 1998/02/04 22:33:48 eivind Exp $
* $Id: vm_mmap.c,v 1.73 1998/02/06 12:14:25 eivind Exp $
*/
/*
@ -642,13 +642,6 @@ mincore(p, uap)
vm_map_lock(map);
/*
* Not needed here
*/
#if 0
VM_MAP_RANGE_CHECK(map, addr, end);
#endif
if (!vm_map_lookup_entry(map, addr, &entry))
entry = entry->next;

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.114 1998/02/25 03:55:50 dyson Exp $
* $Id: vm_object.c,v 1.115 1998/03/01 04:18:22 dyson Exp $
*/
/*
@ -431,7 +431,7 @@ vm_object_terminate(object)
/*
* Clean pages and flush buffers.
*/
vm_object_page_clean(object, 0, 0, TRUE);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
vp = (struct vnode *) object->handle;
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
@ -499,11 +499,11 @@ vm_object_dispose(object)
*/
void
vm_object_page_clean(object, start, end, syncio)
vm_object_page_clean(object, start, end, flags)
vm_object_t object;
vm_pindex_t start;
vm_pindex_t end;
boolean_t syncio;
int flags;
{
register vm_page_t p, np, tp;
register vm_offset_t tstart, tend;
@ -515,6 +515,7 @@ vm_object_page_clean(object, start, end, syncio)
int chkb;
int maxb;
int i;
int pagerflags;
vm_page_t maf[vm_pageout_page_count];
vm_page_t mab[vm_pageout_page_count];
vm_page_t ma[vm_pageout_page_count];
@ -525,6 +526,9 @@ vm_object_page_clean(object, start, end, syncio)
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
vp = object->handle;
object->flags |= OBJ_CLEANING;
@ -628,29 +632,23 @@ vm_object_page_clean(object, start, end, syncio)
for(i=0;i<maxb;i++) {
int index = (maxb - i) - 1;
ma[index] = mab[i];
ma[index]->flags |= PG_BUSY;
ma[index]->flags &= ~PG_CLEANCHK;
vm_page_protect(ma[index], VM_PROT_READ);
}
vm_page_protect(p, VM_PROT_READ);
p->flags |= PG_BUSY;
p->flags &= ~PG_CLEANCHK;
ma[maxb] = p;
for(i=0;i<maxf;i++) {
int index = (maxb + i) + 1;
ma[index] = maf[i];
ma[index]->flags |= PG_BUSY;
ma[index]->flags &= ~PG_CLEANCHK;
vm_page_protect(ma[index], VM_PROT_READ);
}
runlen = maxb + maxf + 1;
splx(s);
vm_pageout_flush(ma, runlen, 0);
vm_pageout_flush(ma, runlen, pagerflags);
if (object->generation != curgeneration)
goto rescan;
}
VOP_FSYNC(vp, NULL, syncio, curproc);
VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?1:0, curproc);
object->flags &= ~OBJ_CLEANING;
return;
@ -1314,7 +1312,7 @@ vm_object_page_remove(object, start, end, clean_only)
if (vm_page_sleep(p, "vmopar", &p->busy))
goto again;
if (clean_only) {
if (clean_only && p->valid) {
vm_page_test_dirty(p);
if (p->valid & p->dirty)
continue;
@ -1342,9 +1340,9 @@ vm_object_page_remove(object, start, end, clean_only)
* interrupt -- minimize the spl transitions
*/
if (vm_page_sleep(p, "vmopar", &p->busy))
goto again;
goto again;
if (clean_only) {
if (clean_only && p->valid) {
vm_page_test_dirty(p);
if (p->valid & p->dirty) {
start += 1;

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.45 1998/02/05 03:32:45 dyson Exp $
* $Id: vm_object.h,v 1.46 1998/02/25 03:55:52 dyson Exp $
*/
/*
@ -142,6 +142,9 @@ struct vm_object {
#ifdef KERNEL
#define OBJPC_SYNC 0x1 /* sync I/O */
#define OBJPC_INVAL 0x2 /* invalidate */
TAILQ_HEAD(object_q, vm_object);
extern struct object_q vm_object_list; /* list of allocated objects */

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.93 1998/02/09 06:11:32 eivind Exp $
* $Id: vm_page.c,v 1.94 1998/03/01 04:18:24 dyson Exp $
*/
/*
@ -88,6 +88,7 @@
static void vm_page_queue_init __P((void));
static vm_page_t vm_page_select_free __P((vm_object_t object,
vm_pindex_t pindex, int prefqueue));
static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t));
/*
* Associated with page of user-allocatable memory is a
@ -684,6 +685,36 @@ vm_page_select(object, pindex, basequeue)
}
/*
* Find a page on the cache queue with color optimization. As pages
* might be found, but not applicable, they are deactivated. This
* keeps us from using potentially busy cached pages.
*/
vm_page_t
vm_page_select_cache(object, pindex)
vm_object_t object;
vm_pindex_t pindex;
{
vm_page_t m;
while (TRUE) {
#if PQ_L2_SIZE > 1
int index;
index = (pindex + object->pg_color) & PQ_L2_MASK;
m = vm_page_list_find(PQ_CACHE, index);
#else
m = TAILQ_FIRST(vm_page_queues[PQ_CACHE].pl);
#endif
if (m && ((m->flags & PG_BUSY) || m->busy ||
m->hold_count || m->wire_count)) {
vm_page_deactivate(m);
continue;
}
return m;
}
}
/*
* Find a free or zero page, with specified preference.
*/
@ -825,7 +856,7 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc(NORMAL): missing page on free queue\n");
#endif
} else {
m = vm_page_select(object, pindex, PQ_CACHE);
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@ -847,7 +878,7 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc(ZERO): missing page on free queue\n");
#endif
} else {
m = vm_page_select(object, pindex, PQ_CACHE);
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@ -871,7 +902,7 @@ vm_page_alloc(object, pindex, page_req)
panic("vm_page_alloc(SYSTEM): missing page on free queue\n");
#endif
} else {
m = vm_page_select(object, pindex, PQ_CACHE);
m = vm_page_select_cache(object, pindex);
if (m == NULL) {
splx(s);
#if defined(DIAGNOSTIC)
@ -986,18 +1017,18 @@ vm_wait()
int
vm_page_sleep(vm_page_t m, char *msg, char *busy) {
vm_object_t object = m->object;
int generation = object->generation;
int slept = 0;
if ((busy && *busy) || (m->flags & PG_BUSY)) {
int s;
s = splvm();
if ((busy && *busy) || (m->flags & PG_BUSY)) {
m->flags |= PG_WANTED;
tsleep(m, PVM, msg, 800);
tsleep(m, PVM, msg, 0);
slept = 1;
}
splx(s);
}
return ((generation != object->generation) || (busy && *busy) ||
(m->flags & PG_BUSY));
return slept;
}
/*
@ -1540,13 +1571,11 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
vm_object_page_clean(m->object, 0, 0, TRUE);
vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
VOP_UNLOCK(m->object->handle, 0, curproc);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
m->flags |= PG_BUSY;
vm_page_protect(m, VM_PROT_NONE);
vm_pageout_flush(&m, 1, 0);
goto again1;
}
@ -1570,13 +1599,11 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
vm_object_page_clean(m->object, 0, 0, TRUE);
vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
VOP_UNLOCK(m->object->handle, 0, curproc);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
m->flags |= PG_BUSY;
vm_page_protect(m, VM_PROT_NONE);
vm_pageout_flush(&m, 1, 0);
goto again1;
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_page.h,v 1.36 1998/02/05 03:32:47 dyson Exp $
* $Id: vm_page.h,v 1.37 1998/03/01 04:18:26 dyson Exp $
*/
/*
@ -276,6 +276,16 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
} \
}
#define PAGE_BWAKEUP(m) { \
(m)->busy--; \
if ((((m)->flags & (PG_WANTED | PG_BUSY)) == PG_WANTED) && \
((m)->busy == 0)) { \
(m)->flags &= ~PG_WANTED; \
wakeup((m)); \
} \
}
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
@ -350,11 +360,11 @@ vm_page_protect(vm_page_t mem, int prot)
{
if (prot == VM_PROT_NONE) {
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
mem->flags &= ~PG_WRITEABLE;
}
}


@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.116 1998/02/24 10:16:23 dyson Exp $
* $Id: vm_pageout.c,v 1.117 1998/03/01 04:18:28 dyson Exp $
*/
/*
@ -100,7 +100,7 @@
/* the kernel process "vm_pageout"*/
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;
@ -218,9 +218,8 @@ void pmap_collect(void);
* move!)
*/
static int
vm_pageout_clean(m, sync)
vm_pageout_clean(m)
vm_page_t m;
int sync;
{
register vm_object_t object;
vm_page_t mc[2*vm_pageout_page_count];
@ -234,22 +233,21 @@ vm_pageout_clean(m, sync)
* If not OBJT_SWAP, additional memory may be needed to do the pageout.
* Try to avoid the deadlock.
*/
if ((sync != VM_PAGEOUT_FORCE) &&
(object->type == OBJT_DEFAULT) &&
if ((object->type == OBJT_DEFAULT) &&
((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
return 0;
/*
* Don't mess with the page if it's busy.
*/
if ((!sync && m->hold_count != 0) ||
if ((m->hold_count != 0) ||
((m->busy != 0) || (m->flags & PG_BUSY)))
return 0;
/*
* Try collapsing before it's too late.
*/
if (!sync && object->backing_object) {
if (object->backing_object) {
vm_object_collapse(object);
}
@ -295,8 +293,7 @@ vm_pageout_clean(m, sync)
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) != 0 &&
((p->queue == PQ_INACTIVE) ||
(sync == VM_PAGEOUT_FORCE)) &&
(p->queue == PQ_INACTIVE) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
mc[vm_pageout_page_count + i] = p;
@ -330,8 +327,7 @@ vm_pageout_clean(m, sync)
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) != 0 &&
((p->queue == PQ_INACTIVE) ||
(sync == VM_PAGEOUT_FORCE)) &&
(p->queue == PQ_INACTIVE) &&
(p->wire_count == 0) &&
(p->hold_count == 0)) {
mc[vm_pageout_page_count - i] = p;
@ -351,30 +347,30 @@ vm_pageout_clean(m, sync)
/*
* we allow reads during pageouts...
*/
for (i = page_base; i < (page_base + pageout_count); i++) {
mc[i]->busy++;
vm_page_protect(mc[i], VM_PROT_READ);
}
return vm_pageout_flush(&mc[page_base], pageout_count, sync);
return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}
int
vm_pageout_flush(mc, count, sync)
vm_pageout_flush(mc, count, flags)
vm_page_t *mc;
int count;
int sync;
int flags;
{
register vm_object_t object;
int pageout_status[count];
int numpagedout = 0;
int i;
for (i = 0; i < count; i++) {
mc[i]->busy++;
vm_page_protect(mc[i], VM_PROT_READ);
}
object = mc[0]->object;
object->paging_in_progress += count;
vm_pager_put_pages(object, mc, count,
((sync || (object == kernel_object)) ? TRUE : FALSE),
(flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
pageout_status);
for (i = 0; i < count; i++) {
@ -417,9 +413,7 @@ vm_pageout_flush(mc, count, sync)
*/
if (pageout_status[i] != VM_PAGER_PEND) {
vm_object_pip_wakeup(object);
mt->flags |= PG_BUSY;
mt->busy--;
PAGE_WAKEUP(mt);
PAGE_BWAKEUP(mt);
}
}
return numpagedout;
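With the changes above, vm_pageout_flush() soft-busies and write-protects the pages itself and takes a flags word in place of the old sync boolean, so a caller only collects the run of dirty pages and picks the pager flags. A minimal sketch; the helper name and the invalidate decision are hypothetical.

    #include <vm/vm.h>
    #include <vm/vm_page.h>
    #include <vm/vm_pager.h>
    #include <vm/vm_pageout.h>

    /* Flush a run of dirty pages, optionally invalidating them synchronously. */
    static int
    example_flush_run(vm_page_t *ma, int count, int invalidate)
    {
        int flags;

        flags = invalidate ? (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL) : 0;
        return (vm_pageout_flush(ma, count, flags));
    }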
@ -840,7 +834,7 @@ vm_pageout_scan()
* laundry. If it is still in the laundry, then we
* start the cleaning operation.
*/
written = vm_pageout_clean(m, 0);
written = vm_pageout_clean(m);
if (vp)
vput(vp);


@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.34 1998/02/06 12:14:29 eivind Exp $
* $Id: vm_pager.c,v 1.35 1998/02/23 08:22:40 dyson Exp $
*/
/*
@ -183,14 +183,14 @@ vm_pager_get_pages(object, m, count, reqpage)
}
int
vm_pager_put_pages(object, m, count, sync, rtvals)
vm_pager_put_pages(object, m, count, flags, rtvals)
vm_object_t object;
vm_page_t *m;
int count;
boolean_t sync;
int flags;
int *rtvals;
{
return ((*pagertab[object->type]->pgo_putpages)(object, m, count, sync, rtvals));
return ((*pagertab[object->type]->pgo_putpages)(object, m, count, flags, rtvals));
}
boolean_t


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
* $Id: vm_pager.h,v 1.14 1997/10/12 20:26:32 phk Exp $
* $Id: vm_pager.h,v 1.15 1998/02/03 22:19:35 bde Exp $
*/
/*
@ -55,7 +55,7 @@ struct pagerops {
vm_object_t (*pgo_alloc) __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t)); /* Allocate pager. */
void (*pgo_dealloc) __P((vm_object_t)); /* Disassociate. */
int (*pgo_getpages) __P((vm_object_t, vm_page_t *, int, int)); /* Get (read) page. */
int (*pgo_putpages) __P((vm_object_t, vm_page_t *, int, boolean_t, int *)); /* Put (write) page. */
int (*pgo_putpages) __P((vm_object_t, vm_page_t *, int, int, int *)); /* Put (write) page. */
boolean_t (*pgo_haspage) __P((vm_object_t, vm_pindex_t, int *, int *)); /* Does pager have page? */
void (*pgo_sync) __P((void));
};
@ -76,6 +76,9 @@ struct pagerops {
#define VM_PAGER_ERROR 4
#define VM_PAGER_AGAIN 5
#define VM_PAGER_PUT_SYNC 0x1
#define VM_PAGER_PUT_INVAL 0x2
#ifdef KERNEL
#ifdef MALLOC_DECLARE
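The pgo_putpages hook and the new VM_PAGER_PUT_SYNC / VM_PAGER_PUT_INVAL bits above replace the old boolean_t sync argument. A minimal sketch of what a pager's putpages implementation now receives and how it might forward the flags to the generic vnode-pager path; the filesystem and function names are hypothetical and not part of this commit.

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <vm/vm.h>
    #include <vm/vm_object.h>
    #include <vm/vm_page.h>
    #include <vm/vm_pager.h>
    #include <vm/vnode_pager.h>

    /* Hypothetical putpages: pass the flags word straight through. */
    static int
    examplefs_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
        int *rtvals)
    {
        struct vnode *vp = object->handle;

        return (vnode_pager_generic_putpages(vp, m, count * PAGE_SIZE,
            flags, rtvals));
    }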


@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.87 1998/02/26 06:39:58 msmith Exp $
* $Id: vnode_pager.c,v 1.88 1998/03/01 04:18:31 dyson Exp $
*/
/*
@ -557,7 +557,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
{
vm_object_t object;
vm_offset_t kva;
off_t foff;
off_t foff, tfoff, nextoff;
int i, size, bsize, first, firstaddr;
struct vnode *dp;
int runpg;
@ -749,11 +749,22 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
relpbuf(bp);
for (i = 0; i < count; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
m[i]->dirty = 0;
m[i]->valid = VM_PAGE_BITS_ALL;
m[i]->flags &= ~PG_ZERO;
for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
vm_page_t mt;
nextoff = tfoff + PAGE_SIZE;
mt = m[i];
if (nextoff <= size) {
mt->valid = VM_PAGE_BITS_ALL;
mt->dirty = 0;
pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
} else {
int nvalid = ((size + DEV_BSIZE - 1) - tfoff) & ~(DEV_BSIZE - 1);
vm_page_set_validclean(mt, 0, nvalid);
}
mt->flags &= ~PG_ZERO;
if (i != reqpage) {
/*
@ -769,13 +780,13 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* now tell them that it is ok to use
*/
if (!error) {
if (m[i]->flags & PG_WANTED)
vm_page_activate(m[i]);
if (mt->flags & PG_WANTED)
vm_page_activate(mt);
else
vm_page_deactivate(m[i]);
PAGE_WAKEUP(m[i]);
vm_page_deactivate(mt);
PAGE_WAKEUP(mt);
} else {
vnode_pager_freepage(m[i]);
vnode_pager_freepage(mt);
}
}
}
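The completion loop above now distinguishes pages lying wholly below the end of the object from the page that straddles it: the former become fully valid, while the latter gets only its resident bytes marked valid and clean, rounded up to a DEV_BSIZE boundary. A worked example of that rounding with assumed sizes (it is just the arithmetic from the hunk above, compilable stand-alone):

    #include <stdio.h>

    #define PAGE_SIZE   4096
    #define DEV_BSIZE   512

    int
    main(void)
    {
        long size = 10000;      /* assumed object (file) size in bytes */
        long tfoff = 8192;      /* offset of the page straddling EOF */
        long nvalid;

        /* 10000 - 8192 = 1808 bytes of real data, rounded up to 2048. */
        nvalid = ((size + DEV_BSIZE - 1) - tfoff) & ~(DEV_BSIZE - 1);
        printf("valid bytes in the EOF page: %ld of %d\n", nvalid, PAGE_SIZE);
        return (0);
    }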
@ -814,11 +825,11 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* own vnodes if they fail to implement VOP_GETPAGES.
*/
int
vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
struct vnode *vp;
vm_page_t *m;
int bytecount;
boolean_t sync;
int flags;
int *rtvals;
{
int i;
@ -830,6 +841,7 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
struct uio auio;
struct iovec aiov;
int error;
int ioflags;
object = vp->v_object;
count = bytecount / PAGE_SIZE;
@ -838,7 +850,8 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
rtvals[i] = VM_PAGER_AGAIN;
if ((int) m[0]->pindex < 0) {
printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n", m[0]->pindex, m[0]->dirty);
printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%x(%x)\n",
m[0]->pindex, m[0]->dirty);
rtvals[0] = VM_PAGER_BAD;
return VM_PAGER_BAD;
}
@ -857,21 +870,12 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
for (i = ncount; i < count; i++) {
rtvals[i] = VM_PAGER_BAD;
}
#ifdef BOGUS
if (ncount == 0) {
printf("vnode_pager_putpages: write past end of file: %d, %lu\n",
poffset,
(unsigned long) object->un_pager.vnp.vnp_size);
return rtvals[0];
}
#endif
}
}
for (i = 0; i < count; i++) {
m[i]->busy++;
m[i]->flags &= ~PG_BUSY;
}
ioflags = IO_VMIO;
ioflags |= (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) ? IO_SYNC: 0;
ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
aiov.iov_base = (caddr_t) 0;
aiov.iov_len = maxsize;
@ -882,7 +886,7 @@ vnode_pager_generic_putpages(vp, m, bytecount, sync, rtvals)
auio.uio_rw = UIO_WRITE;
auio.uio_resid = maxsize;
auio.uio_procp = (struct proc *) 0;
error = VOP_WRITE(vp, &auio, IO_VMIO|(sync?IO_SYNC:0), curproc->p_ucred);
error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
cnt.v_vnodeout++;
cnt.v_vnodepgsout += ncount;
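End to end, the new ioflags computation above is what lets an invalidating msync() reach the filesystem: VM_PAGER_PUT_INVAL arrives here and is turned into IO_SYNC | IO_INVAL on the VOP_WRITE. A minimal userland sketch of the operation presumed to exercise that path, assuming addr/len describe an existing file mapping:

    #include <sys/types.h>
    #include <sys/mman.h>

    /* Write back a mapped file region and invalidate the cached pages. */
    int
    flush_and_invalidate(void *addr, size_t len)
    {
        return (msync(addr, len, MS_SYNC | MS_INVALIDATE));
    }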