freebsd-dev/sys/kern/vfs_cluster.c
Alan Cox 4221e284a3
The VFS/BIO subsystem contained a number of hacks in order to optimize
piecemeal, middle-of-file writes for NFS.  These hacks have caused no
end of trouble, especially when combined with mmap().  I've removed
them.  Instead, NFS will issue a read-before-write to fully
instantiate the struct buf containing the write.  NFS does, however,
optimize piecemeal appends to files.  For most common file operations,
you will not notice the difference.  The sole remaining fragment in
the VFS/BIO system is b_dirtyoff/end, which NFS uses to avoid cache
coherency issues with read-merge-write style operations.  NFS also
optimizes the write-covers-entire-buffer case by avoiding the
read-before-write.  There is quite a bit of room for further
optimization in these areas.
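
As a rough sketch of the read-before-write described above (the helper
name and parameters are hypothetical; the real logic lives in the NFS
write path):

/*
 * Hypothetical sketch: fully instantiate a buffer before accepting
 * a piecemeal, middle-of-file write.  "on" is the offset of the
 * write within the buffer, "n" its length.
 */
static int
nfs_instantiate_buf(struct vnode *vp, struct buf *bp, int on, int n)
{
    int error = 0;

    if (on == 0 && n == bp->b_bcount) {
        /* Write covers the entire buffer: skip the read. */
        bp->b_flags |= B_CACHE;
    } else if ((bp->b_flags & B_CACHE) == 0) {
        /* Partial write into an uncached buffer: read it in first. */
        bp->b_flags |= B_READ;
        vfs_busy_pages(bp, 0);
        error = VOP_STRATEGY(vp, bp);
        if (error == 0)
            error = biowait(bp);
    }
    return (error);
}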

The VM system marks pages fully-valid (AKA vm_page_t->valid =
VM_PAGE_BITS_ALL) in several places, most notably in vm_fault.  This
is not correct operation.  The vm_pager_get_pages() code is now
responsible for marking VM pages all-valid.  A number of VM helper
routines have been added to aid in zeroing-out the invalid portions of
a VM page prior to the page being marked all-valid.  This operation is
necessary to properly support mmap().  The zeroing occurs most often
when dealing with file-EOF situations.  Several bugs have been fixed
in the NFS subsystem, including fixes for handling file and directory
EOF situations, for buf->b_flags consistency issues relating to
clearing B_ERROR and B_INVAL, and for handling B_DONE.
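
A minimal sketch of the EOF zeroing, assuming the invalid region is
reachable through the buffer's KVA (the helper name is hypothetical;
the real work is done by the VM helper routines):

/*
 * Hypothetical sketch: zero the portion of a buffer that extends
 * past end-of-file so mmap() never observes stale data, after
 * which the backing pages may be marked all-valid.
 */
static void
buf_zero_past_eof(struct buf *bp, u_quad_t filesize)
{
    u_quad_t boff = (u_quad_t)bp->b_offset;

    if (boff < filesize && boff + bp->b_bcount > filesize) {
        int valid = filesize - boff;    /* bytes backed by the file */

        bzero(bp->b_data + valid, bp->b_bcount - valid);
    }
}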

getblk() and allocbuf() have been rewritten.  B_CACHE operation is now
formally defined in comments and more straightforward in
implementation.  B_CACHE for VMIO buffers is based on the validity of
the backing store.  B_CACHE for non-VMIO buffers is based simply on
whether the buffer is B_INVAL or not (B_CACHE set if B_INVAL clear,
and vice versa).  biodone() is now responsible for setting B_CACHE
when a successful read completes.  B_CACHE is also set when a bdwrite()
is initiated and when a bwrite() is initiated.  VFS VOP_BWRITE
routines (there are only two - nfs_bwrite() and bwrite()) are now
expected to set B_CACHE.  This means that bowrite() and bawrite() also
set B_CACHE indirectly.
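
The rules reduce to something like the following sketch (a
hypothetical predicate; getblk()/allocbuf() implement the real
policy):

/*
 * Hypothetical sketch of the B_CACHE rules above.
 */
static int
buf_is_cacheable(struct buf *bp)
{
    int i;

    if ((bp->b_flags & B_VMIO) == 0) {
        /* Non-VMIO: B_CACHE simply tracks the inverse of B_INVAL. */
        return ((bp->b_flags & B_INVAL) == 0);
    }
    /* VMIO: B_CACHE requires fully valid backing store. */
    for (i = 0; i < bp->b_npages; i++) {
        if ((bp->b_pages[i]->valid & VM_PAGE_BITS_ALL) !=
            VM_PAGE_BITS_ALL)
            return (0);
    }
    return (1);
}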

There are a number of places in the code which were previously using
buf->b_bufsize (which is DEV_BSIZE aligned) when they should have
been using buf->b_bcount.  These have been fixed.  getblk() now clears
B_DONE on return because the rest of the system is so bad about
dealing with B_DONE.
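
As an illustration of the b_bufsize/b_bcount distinction (hypothetical
numbers, with the standard DEV_BSIZE of 512):

    bp->b_bcount = 700;                       /* bytes the caller asked for */
    bp->b_bufsize = roundup(700, DEV_BSIZE);  /* 1024: allocation is aligned */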

Major fixes to NFS/TCP have been made.  A server-side bug could cause
requests to be lost by the server due to nfs_realign() overwriting
other RPCs in the same TCP mbuf chain.  The server's kernel must be
recompiled to get the benefit of the fixes.

Submitted by:	Matthew Dillon <dillon@apollo.backplane.com>
1999-05-02 23:57:16 +00:00


/*-
* Copyright (c) 1993
* The Regents of the University of California. All rights reserved.
* Modifications/enhancements:
* Copyright (c) 1995 John S. Dyson. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.80 1999/03/12 02:24:56 julian Exp $
*/
#include "opt_debug_cluster.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif
static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");
static struct cluster_save *
cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
static struct buf *
cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
daddr_t blkno, long size, int run, struct buf *fbp));
extern vm_page_t bogus_page;
extern int cluster_pbuf_freecnt;
/*
* Maximum number of blocks for read-ahead.
*/
#define MAXRA 32
/*
* This replaces bread.
*/
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
struct vnode *vp;
u_quad_t filesize;
daddr_t lblkno;
long size;
struct ucred *cred;
long totread;
int seqcount;
struct buf **bpp;
{
struct buf *bp, *rbp, *reqbp;
daddr_t blkno, origblkno;
int error, num_ra;
int i;
int maxra, racluster;
long origtotread;
error = 0;
if (vp->v_maxio == 0)
vp->v_maxio = DFLTPHYS;
/*
* Try to limit the amount of read-ahead by a few
* ad-hoc parameters. This needs work!!!
*/
racluster = vp->v_maxio/size;
maxra = 2 * racluster + (totread / size);
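/*
 * Clamp the window to the compile-time maximum and to a fraction
 * of the buffer pool.
 */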
if (maxra > MAXRA)
maxra = MAXRA;
if (maxra > nbuf/8)
maxra = nbuf/8;
/*
* get the requested block
*/
*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
origblkno = lblkno;
origtotread = totread;
/*
* if it is in the cache, then check to see if the reads have been
* sequential. If they have, then try some read-ahead, otherwise
* back-off on prospective read-aheads.
*/
if (bp->b_flags & B_CACHE) {
if (!seqcount) {
return 0;
} else if ((bp->b_flags & B_RAM) == 0) {
return 0;
} else {
int s;
struct buf *tbp;
bp->b_flags &= ~B_RAM;
/*
* We do the spl here so that there is no window
* between the incore and the b_usecount increment
* below. We opt to keep the spl out of the loop
* for efficiency.
*/
s = splbio();
for(i=1;i<maxra;i++) {
if (!(tbp = incore(vp, lblkno+i))) {
break;
}
/*
* Set another read-ahead mark so we know to check
* again.
*/
if (((i % racluster) == (racluster - 1)) ||
(i == (maxra - 1)))
tbp->b_flags |= B_RAM;
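/*
 * Requeue a read-ahead hit to the tail of the LRU so it is not
 * reclaimed while the sequential read is still in progress.
 */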
if ((tbp->b_usecount < 1) &&
((tbp->b_flags & B_BUSY) == 0) &&
(tbp->b_qindex == QUEUE_LRU)) {
TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist);
TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist);
}
}
splx(s);
if (i >= maxra) {
return 0;
}
lblkno += i;
}
reqbp = bp = NULL;
} else {
off_t firstread = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("cluster_read: no buffer offset"));
if (firstread + totread > filesize)
totread = filesize - firstread;
if (totread > size) {
int nblks = 0;
int ncontigafter;
while (totread > 0) {
nblks++;
totread -= size;
}
if (nblks == 1)
goto single_block_read;
if (nblks > racluster)
nblks = racluster;
error = VOP_BMAP(vp, lblkno, NULL,
&blkno, &ncontigafter, NULL);
if (error)
goto single_block_read;
if (blkno == -1)
goto single_block_read;
if (ncontigafter == 0)
goto single_block_read;
if (ncontigafter + 1 < nblks)
nblks = ncontigafter + 1;
bp = cluster_rbuild(vp, filesize, lblkno,
blkno, size, nblks, bp);
lblkno += (bp->b_bufsize / size);
} else {
single_block_read:
/*
* if it isn't in the cache, then get a chunk from
* disk if sequential, otherwise just get the block.
*/
bp->b_flags |= B_READ | B_RAM;
lblkno += 1;
}
}
/*
* if we have been doing sequential I/O, then do some read-ahead
*/
rbp = NULL;
if (seqcount && (lblkno < (origblkno + seqcount))) {
/*
* we now build the read-ahead buffer if it is desirable.
*/
if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
!(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
blkno != -1) {
int nblksread;
int ntoread = num_ra + 1;
nblksread = (origtotread + size - 1) / size;
if (seqcount < nblksread)
seqcount = nblksread;
if (seqcount < ntoread)
ntoread = seqcount;
if (num_ra) {
rbp = cluster_rbuild(vp, filesize, lblkno,
blkno, size, ntoread, NULL);
} else {
rbp = getblk(vp, lblkno, size, 0, 0);
rbp->b_flags |= B_READ | B_ASYNC | B_RAM;
rbp->b_blkno = blkno;
}
}
}
/*
* handle the synchronous read
*/
if (bp) {
#if defined(CLUSTERDEBUG)
if (rcluster)
printf("S(%ld,%ld,%d) ",
(long)bp->b_lblkno, bp->b_bcount, seqcount);
#endif
if ((bp->b_flags & B_CLUSTER) == 0)
vfs_busy_pages(bp, 0);
bp->b_flags &= ~(B_ERROR|B_INVAL);
error = VOP_STRATEGY(vp, bp);
curproc->p_stats->p_ru.ru_inblock++;
}
/*
* and if we have read-aheads, do them too
*/
if (rbp) {
if (error) {
rbp->b_flags &= ~(B_ASYNC | B_READ);
brelse(rbp);
} else if (rbp->b_flags & B_CACHE) {
rbp->b_flags &= ~(B_ASYNC | B_READ);
bqrelse(rbp);
} else {
#if defined(CLUSTERDEBUG)
if (rcluster) {
if (bp)
printf("A+(%ld,%ld,%ld,%d) ",
(long)rbp->b_lblkno, rbp->b_bcount,
(long)(rbp->b_lblkno - origblkno),
seqcount);
else
printf("A(%ld,%ld,%ld,%d) ",
(long)rbp->b_lblkno, rbp->b_bcount,
(long)(rbp->b_lblkno - origblkno),
seqcount);
}
#endif
if ((rbp->b_flags & B_CLUSTER) == 0)
vfs_busy_pages(rbp, 0);
rbp->b_flags &= ~(B_ERROR|B_INVAL);
(void) VOP_STRATEGY(vp, rbp);
curproc->p_stats->p_ru.ru_inblock++;
}
}
if (reqbp)
return (biowait(reqbp));
else
return (error);
}
/*
* If blocks are contiguous on disk, use this to provide clustered
* read ahead. We will read as many blocks as possible sequentially
* and then parcel them up into logical blocks in the buffer hash table.
*/
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
struct vnode *vp;
u_quad_t filesize;
daddr_t lbn;
daddr_t blkno;
long size;
int run;
struct buf *fbp;
{
struct buf *bp, *tbp;
daddr_t bn;
int i, inc, j;
KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
("cluster_rbuild: size %ld != f_iosize %ld\n",
size, vp->v_mount->mnt_stat.f_iosize));
/*
* avoid a division
*/
while ((u_quad_t) size * (lbn + run) > filesize) {
--run;
}
if (fbp) {
tbp = fbp;
tbp->b_flags |= B_READ;
} else {
tbp = getblk(vp, lbn, size, 0, 0);
if (tbp->b_flags & B_CACHE)
return tbp;
tbp->b_flags |= B_ASYNC | B_READ | B_RAM;
}
tbp->b_blkno = blkno;
if ((tbp->b_flags & B_MALLOC) ||
((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
return tbp;
bp = trypbuf(&cluster_pbuf_freecnt);
if (bp == NULL)
return tbp;
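/*
 * The pbuf's KVA is page aligned; fold in the page offset of the
 * first buffer's data so the cluster mapping lines up with it.
 */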
bp->b_data = (char *)((vm_offset_t)bp->b_data |
((vm_offset_t)tbp->b_data & PAGE_MASK));
bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
bp->b_iodone = cluster_callback;
bp->b_blkno = blkno;
bp->b_lblkno = lbn;
bp->b_offset = tbp->b_offset;
KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
pbgetvp(vp, bp);
TAILQ_INIT(&bp->b_cluster.cluster_head);
bp->b_bcount = 0;
bp->b_bufsize = 0;
bp->b_npages = 0;
if (vp->v_maxio == 0)
vp->v_maxio = DFLTPHYS;
inc = btodb(size);
for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
if (i != 0) {
if ((bp->b_npages * PAGE_SIZE) +
round_page(size) > vp->v_maxio)
break;
if ((tbp = incore(vp, lbn + i)) != NULL) {
if (tbp->b_flags & B_BUSY)
break;
for (j = 0; j < tbp->b_npages; j++)
if (tbp->b_pages[j]->valid)
break;
if (j != tbp->b_npages)
break;
if (tbp->b_bcount != size)
break;
}
tbp = getblk(vp, lbn + i, size, 0, 0);
if ((tbp->b_flags & B_CACHE) ||
(tbp->b_flags & B_VMIO) == 0) {
bqrelse(tbp);
break;
}
for (j = 0;j < tbp->b_npages; j++)
if (tbp->b_pages[j]->valid)
break;
if (j != tbp->b_npages) {
bqrelse(tbp);
break;
}
if ((fbp && (i == 1)) || (i == (run - 1)))
tbp->b_flags |= B_RAM;
tbp->b_flags |= B_READ | B_ASYNC;
if (tbp->b_blkno == tbp->b_lblkno) {
tbp->b_blkno = bn;
} else if (tbp->b_blkno != bn) {
brelse(tbp);
break;
}
}
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
vm_page_io_start(m);
vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages-1] != m)) {
bp->b_pages[bp->b_npages] = m;
bp->b_npages++;
}
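/*
 * A fully valid page must not be overwritten by the read;
 * substitute bogus_page for it in the component buffer.
 */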
if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
bp->b_bcount += tbp->b_bcount;
bp->b_bufsize += tbp->b_bufsize;
}
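/*
 * Likewise for the cluster buffer itself: replace any fully
 * valid page so the device I/O cannot clobber it.
 */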
for(j=0;j<bp->b_npages;j++) {
if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
bp->b_kvasize = bp->b_bufsize;
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *)bp->b_pages, bp->b_npages);
return (bp);
}
/*
* Cleanup after a clustered read or write.
* This is complicated by the fact that any of the buffers might have
* extra memory (if there were no empty buffer headers at allocbuf time)
* that we will need to shift around.
*/
void
cluster_callback(bp)
struct buf *bp;
{
struct buf *nbp, *tbp;
int error = 0;
/*
* Must propagate errors to all the components.
*/
if (bp->b_flags & B_ERROR)
error = bp->b_error;
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
/*
* Move memory from the large cluster buffer into the component
* buffers and mark IO as done on these.
*/
for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
tbp; tbp = nbp) {
nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
if (error) {
tbp->b_flags |= B_ERROR;
tbp->b_error = error;
} else {
tbp->b_dirtyoff = tbp->b_dirtyend = 0;
tbp->b_flags &= ~(B_ERROR|B_INVAL);
}
biodone(tbp);
}
relpbuf(bp, &cluster_pbuf_freecnt);
}
/*
* Do clustered write for FFS.
*
* Four cases:
* 1. Write is not sequential (write asynchronously)
* Write is sequential:
* 2. beginning of cluster - begin cluster
* 3. middle of a cluster - add to cluster
* 4. end of a cluster - asynchronously write cluster
*/
void
cluster_write(bp, filesize)
struct buf *bp;
u_quad_t filesize;
{
struct vnode *vp;
daddr_t lbn;
int maxclen, cursize;
int lblocksize;
int async;
vp = bp->b_vp;
if (vp->v_maxio == 0)
vp->v_maxio = DFLTPHYS;
if (vp->v_type == VREG) {
async = vp->v_mount->mnt_flag & MNT_ASYNC;
lblocksize = vp->v_mount->mnt_stat.f_iosize;
} else {
async = 0;
lblocksize = bp->b_bufsize;
}
lbn = bp->b_lblkno;
KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));
/* Initialize vnode to beginning of file. */
if (lbn == 0)
vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
(bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
maxclen = vp->v_maxio / lblocksize - 1;
if (vp->v_clen != 0) {
/*
* Next block is not sequential.
*
* If we are not writing at end of file, the process
* has sought to another point in the file since its last
* write, or we have reached our maximum cluster size,
* then push the previous cluster. Otherwise try
* reallocating to make it sequential.
*/
cursize = vp->v_lastw - vp->v_cstart + 1;
if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
if (!async)
cluster_wbuild(vp, lblocksize,
vp->v_cstart, cursize);
} else {
struct buf **bpp, **endbp;
struct cluster_save *buflist;
buflist = cluster_collectbufs(vp, bp);
endbp = &buflist->bs_children
[buflist->bs_nchildren - 1];
if (VOP_REALLOCBLKS(vp, buflist)) {
/*
* Failed, push the previous cluster.
*/
for (bpp = buflist->bs_children;
bpp < endbp; bpp++)
brelse(*bpp);
free(buflist, M_SEGMENT);
cluster_wbuild(vp, lblocksize,
vp->v_cstart, cursize);
} else {
/*
* Succeeded, keep building cluster.
*/
for (bpp = buflist->bs_children;
bpp <= endbp; bpp++)
bdwrite(*bpp);
free(buflist, M_SEGMENT);
vp->v_lastw = lbn;
vp->v_lasta = bp->b_blkno;
return;
}
}
}
/*
* Consider beginning a cluster. If at end of file, make
* cluster as large as possible, otherwise find size of
* existing cluster.
*/
if ((vp->v_type == VREG) &&
((u_quad_t) bp->b_offset + lblocksize) != filesize &&
(bp->b_blkno == bp->b_lblkno) &&
(VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
bp->b_blkno == -1)) {
bawrite(bp);
vp->v_clen = 0;
vp->v_lasta = bp->b_blkno;
vp->v_cstart = lbn + 1;
vp->v_lastw = lbn;
return;
}
vp->v_clen = maxclen;
if (!async && maxclen == 0) { /* I/O not contiguous */
vp->v_cstart = lbn + 1;
bawrite(bp);
} else { /* Wait for rest of cluster */
vp->v_cstart = lbn;
bdwrite(bp);
}
} else if (lbn == vp->v_cstart + vp->v_clen) {
/*
* At end of cluster, write it out.
*/
bdwrite(bp);
cluster_wbuild(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
vp->v_clen = 0;
vp->v_cstart = lbn + 1;
} else
/*
* In the middle of a cluster, so just delay the I/O for now.
*/
bdwrite(bp);
vp->v_lastw = lbn;
vp->v_lasta = bp->b_blkno;
}
/*
* This is an awful lot like cluster_rbuild...wish they could be combined.
* The last lbn argument is the current block on which I/O is being
* performed. Check to see that it doesn't fall in the middle of
* the current block (if last_bp == NULL).
*/
int
cluster_wbuild(vp, size, start_lbn, len)
struct vnode *vp;
long size;
daddr_t start_lbn;
int len;
{
struct buf *bp, *tbp;
int i, j, s;
int totalwritten = 0;
int dbsize = btodb(size);
if (vp->v_maxio == 0)
vp->v_maxio = DFLTPHYS;
while (len > 0) {
s = splbio();
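/*
 * Only delayed-write buffers that are neither busy nor invalid
 * are candidates for clustering.
 */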
if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
++start_lbn;
--len;
splx(s);
continue;
}
bremfree(tbp);
tbp->b_flags |= B_BUSY;
tbp->b_flags &= ~B_DONE;
splx(s);
/*
* Extra memory in the buffer, punt on this buffer. XXX we could
* handle this in most cases, but we would have to push the extra
* memory down to after our max possible cluster size and then
* potentially pull it back up if the cluster was terminated
* prematurely--too much hassle.
*/
if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
(tbp->b_bcount != tbp->b_bufsize) ||
(tbp->b_bcount != size) ||
(len == 1) ||
((bp = trypbuf(&cluster_pbuf_freecnt)) == NULL)) {
totalwritten += tbp->b_bufsize;
bawrite(tbp);
++start_lbn;
--len;
continue;
}
/*
* We got a pbuf to make the cluster in, so
* initialise it.
*/
TAILQ_INIT(&bp->b_cluster.cluster_head);
bp->b_bcount = 0;
bp->b_bufsize = 0;
bp->b_npages = 0;
if (tbp->b_wcred != NOCRED) {
bp->b_wcred = tbp->b_wcred;
crhold(bp->b_wcred);
}
bp->b_blkno = tbp->b_blkno;
bp->b_lblkno = tbp->b_lblkno;
bp->b_offset = tbp->b_offset;
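/*
 * As in cluster_rbuild(): fold the first buffer's page offset
 * into the pbuf's page-aligned KVA.
 */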
bp->b_data = (char *)((vm_offset_t)bp->b_data |
((vm_offset_t)tbp->b_data & PAGE_MASK));
bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER |
(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
bp->b_iodone = cluster_callback;
pbgetvp(vp, bp);
/*
* From this location in the file, scan forward to see
* if there are buffers with adjacent data that need to
* be written as well.
*/
for (i = 0; i < len; ++i, ++start_lbn) {
if (i != 0) { /* If not the first buffer */
s = splbio();
/*
* If the adjacent data is not even in core it
* can't need to be written.
*/
if ((tbp = gbincore(vp, start_lbn)) == NULL) {
splx(s);
break;
}
/*
* If it IS in core, but has different
* characteristics, don't cluster with it.
*/
if ((tbp->b_flags &
(B_VMIO | B_CLUSTEROK | B_INVAL | B_BUSY |
B_DELWRI | B_NEEDCOMMIT))
!= (B_DELWRI | B_CLUSTEROK |
(bp->b_flags & (B_VMIO | B_NEEDCOMMIT)))) {
splx(s);
break;
}
if (tbp->b_wcred != bp->b_wcred) {
splx(s);
break;
}
/*
* Check that the combined cluster
* would make sense with regard to pages
* and would not be too large
*/
if ((tbp->b_bcount != size) ||
((bp->b_blkno + (dbsize * i)) !=
tbp->b_blkno) ||
((tbp->b_npages + bp->b_npages) >
(vp->v_maxio / PAGE_SIZE))) {
splx(s);
break;
}
/*
* Ok, it's passed all the tests,
* so remove it from the free list
* and mark it busy. We will use it.
*/
bremfree(tbp);
tbp->b_flags |= B_BUSY;
tbp->b_flags &= ~B_DONE;
splx(s);
} /* end of code for non-first buffers only */
/* check for latent dependencies to be handled */
if ((LIST_FIRST(&tbp->b_dep)) != NULL &&
bioops.io_start)
(*bioops.io_start)(tbp);
/*
* If the IO is via the VM then we do some
* special VM hackery. (yuck)
*/
if (tbp->b_flags & B_VMIO) {
vm_page_t m;
if (i != 0) { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
if (m->flags & PG_BUSY)
goto finishcluster;
}
}
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
vm_page_io_start(m);
vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages - 1] != m)) {
bp->b_pages[bp->b_npages] = m;
bp->b_npages++;
}
}
}
bp->b_bcount += size;
bp->b_bufsize += size;
s = splbio();
bundirty(tbp);
tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
tbp->b_flags |= B_ASYNC;
reassignbuf(tbp, tbp->b_vp); /* put on clean list */
++tbp->b_vp->v_numoutput;
splx(s);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
}
finishcluster:
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *) bp->b_pages, bp->b_npages);
if (bp->b_bufsize > bp->b_kvasize)
panic(
"cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
bp->b_kvasize = bp->b_bufsize;
totalwritten += bp->b_bufsize;
bp->b_dirtyoff = 0;
bp->b_dirtyend = bp->b_bufsize;
bawrite(bp);
len -= i;
}
return totalwritten;
}
/*
* Collect together all the buffers in a cluster.
* Plus add one additional buffer.
*/
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
struct vnode *vp;
struct buf *last_bp;
{
struct cluster_save *buflist;
struct buf *bp;
daddr_t lbn;
int i, len;
len = vp->v_lastw - vp->v_cstart + 1;
buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
M_SEGMENT, M_WAITOK);
buflist->bs_nchildren = 0;
buflist->bs_children = (struct buf **) (buflist + 1);
for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
buflist->bs_children[i] = bp;
if (bp->b_blkno == bp->b_lblkno)
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
NULL, NULL);
}
buflist->bs_children[i] = bp = last_bp;
if (bp->b_blkno == bp->b_lblkno)
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
NULL, NULL);
buflist->bs_nchildren = i + 1;
return (buflist);
}