commit 5f8ebc1b91
parent 0a692c6a09

Hopefully fix the remaining glitches with the BUF_*() changes.  This
should (really this time) fix pageout to swap and a couple of
clustering cases.

This simplifies BUF_KERNPROC() so that it unconditionally reassigns
the lock owner rather than testing B_ASYNC and having the caller
decide when to do the reassign.  At present this is required because
some places use B_CALL/b_iodone to free the buffers without B_ASYNC
being set.  Also, vfs_cluster.c explicitly calls BUF_KERNPROC() when
attaching the buffers rather than the parent walking the cluster_head
tailq.

Reviewed by:	Kirk McKusick <mckusick@mckusick.com>
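The caller-side contract after this change, condensed into a sketch.
(Illustrative only: example_write() is not code from this commit, but
the B_ASYNC test before BUF_KERNPROC() matches the bwrite() hunk
below.)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/*
 * Sketch of an issuing path under the new contract.  BUF_KERNPROC()
 * now always hands the buffer lock to LK_KERNPROC, so the caller,
 * not the inline, decides whether the handoff is needed.
 */
static void
example_write(struct vnode *vp, struct buf *bp, int oldflags)
{
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);	/* completion will run without us */
	VOP_STRATEGY(vp, bp);
	if ((oldflags & B_ASYNC) == 0)
		(void) biowait(bp);	/* sync: we still own the lock */
}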
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -11,7 +11,7 @@
  * 2. Absolutely no warranty of function or purpose is made by the author
  *    John S. Dyson.
  *
- * $Id: vfs_bio.c,v 1.217 1999/06/26 14:46:35 peter Exp $
+ * $Id: vfs_bio.c,v 1.218 1999/06/28 15:32:10 peter Exp $
  */
 
 /*
@@ -517,7 +517,8 @@ bwrite(struct buf * bp)
 	if (curproc != NULL)
 		curproc->p_stats->p_ru.ru_oublock++;
 	splx(s);
-	BUF_KERNPROC(bp);
+	if (oldflags & B_ASYNC)
+		BUF_KERNPROC(bp);
 	VOP_STRATEGY(bp->b_vp, bp);
 
 	/*
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -33,7 +33,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
- * $Id: vfs_cluster.c,v 1.83 1999/06/17 01:25:25 julian Exp $
+ * $Id: vfs_cluster.c,v 1.84 1999/06/26 02:46:08 mckusick Exp $
  */
 
 #include "opt_debug_cluster.h"
@@ -252,7 +252,8 @@ single_block_read:
 		if ((bp->b_flags & B_CLUSTER) == 0)
 			vfs_busy_pages(bp, 0);
 		bp->b_flags &= ~(B_ERROR|B_INVAL);
-		BUF_KERNPROC(bp);
+		if (bp->b_flags & (B_ASYNC|B_CALL))
+			BUF_KERNPROC(bp);
 		error = VOP_STRATEGY(vp, bp);
 		curproc->p_stats->p_ru.ru_inblock++;
 	}
@@ -286,7 +287,8 @@ single_block_read:
 		if ((rbp->b_flags & B_CLUSTER) == 0)
 			vfs_busy_pages(rbp, 0);
 		rbp->b_flags &= ~(B_ERROR|B_INVAL);
-		BUF_KERNPROC(rbp);
+		if (rbp->b_flags & (B_ASYNC|B_CALL))
+			BUF_KERNPROC(rbp);
 		(void) VOP_STRATEGY(vp, rbp);
 		curproc->p_stats->p_ru.ru_inblock++;
 	}
@@ -414,6 +416,11 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
 				break;
 			}
 		}
+		/*
+		 * XXX fbp from caller may not be B_ASYNC, but we are going
+		 * to biodone() it in cluster_callback() anyway
+		 */
+		BUF_KERNPROC(tbp);
 		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
 			tbp, b_cluster.cluster_entry);
 		for (j = 0; j < tbp->b_npages; j += 1) {
@@ -788,6 +795,7 @@ cluster_wbuild(vp, size, start_lbn, len)
 			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
 			++tbp->b_vp->v_numoutput;
 			splx(s);
+			BUF_KERNPROC(tbp);
 			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
 				tbp, b_cluster.cluster_entry);
 		}
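Why each tbp gets BUF_KERNPROC() at attach time is visible on the
completion side: cluster_callback() biodone()s every buffer on
cluster_head, including an fbp that may never have had B_ASYNC set,
as the XXX comment above notes.  Roughly, as a condensed sketch (not
the verbatim function; error propagation and page bookkeeping
omitted):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/queue.h>

static void
cluster_done_sketch(struct buf *bp)
{
	struct buf *tbp, *ntbp;

	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp != NULL; tbp = ntbp) {
		ntbp = TAILQ_NEXT(tbp, b_cluster.cluster_entry);
		/*
		 * biodone() may run tbp's b_iodone hook, which can
		 * brelse() the buffer; that release is only legal
		 * because BUF_KERNPROC(tbp) already ran at attach.
		 */
		biodone(tbp);
	}
}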
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)buf.h	8.9 (Berkeley) 3/30/95
- * $Id: buf.h,v 1.72 1999/06/27 09:13:19 peter Exp $
+ * $Id: buf.h,v 1.73 1999/06/27 11:40:03 peter Exp $
  */
 
 #ifndef _SYS_BUF_H_
@@ -315,17 +315,8 @@ static __inline void BUF_KERNPROC __P((struct buf *));
 static __inline void
 BUF_KERNPROC(struct buf *bp)
 {
-	struct buf *nbp;
-	int s;
-
-	s = splbio();
-	if (bp->b_flags & B_ASYNC)
-		bp->b_lock.lk_lockholder = LK_KERNPROC;
-	for (nbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
-	    nbp; nbp = TAILQ_NEXT(&nbp->b_cluster, cluster_entry))
-		if (nbp->b_flags & B_ASYNC)
-			nbp->b_lock.lk_lockholder = LK_KERNPROC;
-	splx(s);
+
+	bp->b_lock.lk_lockholder = LK_KERNPROC;
 }
 /*
  * Find out the number of references to a lock.
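The unconditional reassignment in the new inline matters for the
B_CALL-without-B_ASYNC case the log message mentions: a b_iodone hook
may free the buffer from biodone() even though B_ASYNC was never set.
A sketch of that shape (example_iodone() and example_issue() are
illustrative names, not code from this commit):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/vnode.h>

/* Illustrative b_iodone hook: releases the buffer at completion. */
static void
example_iodone(struct buf *bp)
{
	/*
	 * Runs from biodone(), possibly in interrupt context.  The
	 * lock holder must already be LK_KERNPROC, or this release
	 * happens on behalf of whatever process was interrupted.
	 */
	bp->b_flags |= B_INVAL;
	brelse(bp);
}

/* Illustrative issue path: B_CALL set, B_ASYNC deliberately not. */
static void
example_issue(struct vnode *vp, struct buf *bp)
{
	bp->b_flags |= B_CALL;
	bp->b_iodone = example_iodone;
	BUF_KERNPROC(bp);	/* would have been a no-op under the */
				/* old B_ASYNC-only test */
	VOP_STRATEGY(vp, bp);
}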