Virtualizes & untangles the bioops operations vector.

Ref: Message-ID: <18317.961014572@critter.freebsd.dk> To: current@
Poul-Henning Kamp 2000-06-16 08:48:51 +00:00
parent 57b102722a
commit a2e7a027a7
15 changed files with 84 additions and 46 deletions
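
In outline: every call site that tested a bioops function pointer by hand now calls a small inline wrapper (buf_start(), buf_complete(), buf_deallocate(), buf_movedeps(), buf_countdeps()) that hides the NULL check, and the io_fsync/io_sync hooks are dropped in favor of direct calls to softdep_fsync() and softdep_process_worklist(). A minimal userland sketch of the pattern, reusing the names from the diffs below; the stand-in types and main() are illustrative only, not kernel code:

    #include <stdio.h>

    struct buf;                     /* opaque stand-in for the kernel's struct buf */

    /* The operations vector: optional hooks that a provider may fill in. */
    struct bio_ops {
            void    (*io_start)(struct buf *);
            int     (*io_countdeps)(struct buf *, int);
    } bioops;                       /* stays all-NULL until a provider registers */

    /*
     * Inline wrappers centralize the "is this hook present?" test, so
     * call sites never dereference bioops directly.
     */
    static inline void
    buf_start(struct buf *bp)
    {
            if (bioops.io_start)
                    (*bioops.io_start)(bp);
    }

    static inline int
    buf_countdeps(struct buf *bp, int i)
    {
            if (bioops.io_countdeps)
                    return ((*bioops.io_countdeps)(bp, i));
            return (0);
    }

    int
    main(void)
    {
            /* With no provider registered, the wrappers are safe no-ops. */
            buf_start(NULL);
            printf("countdeps = %d\n", buf_countdeps(NULL, 0));
            return (0);
    }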

View File

@@ -222,8 +222,6 @@ struct bio_ops bioops = {
         softdep_disk_io_initiation,             /* io_start */
         softdep_disk_write_complete,            /* io_complete */
         softdep_deallocate_dependencies,        /* io_deallocate */
-        softdep_fsync,                          /* io_fsync */
-        softdep_process_worklist,               /* io_sync */
         softdep_move_dependencies,              /* io_movedeps */
         softdep_count_dependencies,             /* io_countdeps */
 };

View File

@@ -417,9 +417,8 @@ spec_strategy(ap)
         struct mount *mp;
 
         bp = ap->a_bp;
-        if ((bp->b_iocmd == BIO_WRITE) &&
-            (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
-                (*bioops.io_start)(bp);
+        if ((bp->b_iocmd == BIO_WRITE) && (LIST_FIRST(&bp->b_dep)) != NULL)
+                buf_start(bp);
 
         /*
          * Collect statistics on synchronous and asynchronous read

View File

@@ -616,8 +616,8 @@ bwrite(struct buf * bp)
         newbp->b_flags &= ~B_INVAL;
 
         /* move over the dependencies */
-        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_movedeps)
-                (*bioops.io_movedeps)(bp, newbp);
+        if (LIST_FIRST(&bp->b_dep) != NULL)
+                buf_movedeps(bp, newbp);
 
         /*
          * Initiate write on the copy, release the original to
@@ -673,10 +673,10 @@ vfs_backgroundwritedone(bp)
         /*
          * Process dependencies then return any unfinished ones.
          */
-        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
-                (*bioops.io_complete)(bp);
-        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_movedeps)
-                (*bioops.io_movedeps)(bp, origbp);
+        if (LIST_FIRST(&bp->b_dep) != NULL)
+                buf_complete(bp);
+        if (LIST_FIRST(&bp->b_dep) != NULL)
+                buf_movedeps(bp, origbp);
         /*
          * Clear the BX_BKGRDINPROG flag in the original buffer
          * and awaken it if it is waiting for the write to complete.
@@ -939,8 +939,8 @@ brelse(struct buf * bp)
          * cache the buffer.
          */
         bp->b_flags |= B_INVAL;
-        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
-                (*bioops.io_deallocate)(bp);
+        if (LIST_FIRST(&bp->b_dep) != NULL)
+                buf_deallocate(bp);
         if (bp->b_flags & B_DELWRI) {
                 --numdirtybuffers;
                 numdirtywakeup();
@@ -1570,8 +1570,8 @@ restart:
                         crfree(bp->b_wcred);
                         bp->b_wcred = NOCRED;
                 }
-                if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
-                        (*bioops.io_deallocate)(bp);
+                if (LIST_FIRST(&bp->b_dep) != NULL)
+                        buf_deallocate(bp);
                 if (bp->b_xflags & BX_BKGRDINPROG)
                         panic("losing buffer 3");
                 LIST_REMOVE(bp, b_hash);
@@ -1848,9 +1848,8 @@ flushbufqueues(void)
                         break;
                 }
                 if (LIST_FIRST(&bp->b_dep) != NULL &&
-                    bioops.io_countdeps &&
                     (bp->b_flags & B_DEFERRED) == 0 &&
-                    (*bioops.io_countdeps)(bp, 0)) {
+                    buf_countdeps(bp, 0)) {
                         TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY],
                             bp, b_freelist);
                         TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY],
@@ -2664,8 +2663,8 @@ bufdone(struct buf *bp)
                 splx(s);
                 return;
         }
-        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
-                (*bioops.io_complete)(bp);
+        if (LIST_FIRST(&bp->b_dep) != NULL)
+                buf_complete(bp);
         if (bp->b_flags & B_VMIO) {
                 int i, resid;

View File

@@ -805,9 +805,8 @@ cluster_wbuild(vp, size, start_lbn, len)
                         splx(s);
                 } /* end of code for non-first buffers only */
                 /* check for latent dependencies to be handled */
-                if ((LIST_FIRST(&tbp->b_dep)) != NULL &&
-                    bioops.io_start)
-                        (*bioops.io_start)(tbp);
+                if ((LIST_FIRST(&tbp->b_dep)) != NULL)
+                        buf_start(tbp);
                 /*
                  * If the IO is via the VM then we do some
                  * special VM hackery. (yuck)

View File

@@ -1029,8 +1029,7 @@ sched_sync(void)
                 /*
                  * Do soft update processing.
                  */
-                if (bioops.io_sync)
-                        (*bioops.io_sync)(NULL);
+                softdep_process_worklist(NULL);
 
                 /*
                  * The variable rushjob allows the kernel to speed up the

View File

@@ -2545,10 +2545,7 @@ fsync(p, uap)
         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
         if (vp->v_object)
                 vm_object_page_clean(vp->v_object, 0, 0, 0);
-        if ((error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p)) == 0 &&
-            vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
-            bioops.io_fsync)
-                error = (*bioops.io_fsync)(vp);
+        error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
         VOP_UNLOCK(vp, 0, p);
         return (error);
 }

View File

@@ -1029,8 +1029,7 @@ sched_sync(void)
                 /*
                  * Do soft update processing.
                  */
-                if (bioops.io_sync)
-                        (*bioops.io_sync)(NULL);
+                softdep_process_worklist(NULL);
 
                 /*
                  * The variable rushjob allows the kernel to speed up the

View File

@@ -2545,10 +2545,7 @@ fsync(p, uap)
         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
         if (vp->v_object)
                 vm_object_page_clean(vp->v_object, 0, 0, 0);
-        if ((error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p)) == 0 &&
-            vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
-            bioops.io_fsync)
-                error = (*bioops.io_fsync)(vp);
+        error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
         VOP_UNLOCK(vp, 0, p);
         return (error);
 }

View File

@@ -1570,9 +1570,8 @@ devfs_strategy(struct vop_strategy_args *ap)
                 return error;
 
-        if ((bp->b_iocmd == BIO_WRITE) &&
-            (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
-                (*bioops.io_start)(bp);
+        if ((bp->b_iocmd == BIO_WRITE) && (LIST_FIRST(&bp->b_dep)) != NULL)
+                buf_start(bp);
 
         switch (vp->v_type) {
         case VCHR:
                 (*vp->v_rdev->si_devsw->d_strategy)(&bp->b_io);

View File

@@ -417,9 +417,8 @@ spec_strategy(ap)
         struct mount *mp;
 
         bp = ap->a_bp;
-        if ((bp->b_iocmd == BIO_WRITE) &&
-            (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
-                (*bioops.io_start)(bp);
+        if ((bp->b_iocmd == BIO_WRITE) && (LIST_FIRST(&bp->b_dep)) != NULL)
+                buf_start(bp);
 
         /*
          * Collect statistics on synchronous and asynchronous read

View File

@@ -64,8 +64,6 @@ extern struct bio_ops {
         void    (*io_start) __P((struct buf *));
         void    (*io_complete) __P((struct buf *));
         void    (*io_deallocate) __P((struct buf *));
-        int     (*io_fsync) __P((struct vnode *));
-        int     (*io_sync) __P((struct mount *));
         void    (*io_movedeps) __P((struct buf *, struct buf *));
         int     (*io_countdeps) __P((struct buf *, int));
 } bioops;
@@ -406,6 +404,43 @@ bufq_first(struct buf_queue_head *head)
 #define BUF_WRITE(bp)           VOP_BWRITE((bp)->b_vp, (bp))
 #define BUF_STRATEGY(bp)        VOP_STRATEGY((bp)->b_vp, (bp))
 
+static __inline void
+buf_start(struct buf *bp)
+{
+        if (bioops.io_start)
+                (*bioops.io_start)(bp);
+}
+
+static __inline void
+buf_complete(struct buf *bp)
+{
+        if (bioops.io_complete)
+                (*bioops.io_complete)(bp);
+}
+
+static __inline void
+buf_deallocate(struct buf *bp)
+{
+        if (bioops.io_deallocate)
+                (*bioops.io_deallocate)(bp);
+}
+
+static __inline void
+buf_movedeps(struct buf *bp, struct buf *bp2)
+{
+        if (bioops.io_movedeps)
+                (*bioops.io_movedeps)(bp, bp2);
+}
+
+static __inline int
+buf_countdeps(struct buf *bp, int i)
+{
+        if (bioops.io_countdeps)
+                return ((*bioops.io_countdeps)(bp, i));
+        else
+                return (0);
+}
+
 #endif /* _KERNEL */
 
 /*
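
The design point of the buf.h hunk above: the cheap LIST_FIRST(&bp->b_dep) test stays in the callers as the fast path, while the test of the bioops pointer itself lives in exactly one place per hook, and buf_countdeps() also supplies the default return value (0) for the case where no hook is registered.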

View File

@@ -447,6 +447,7 @@ int vfs_stduninit __P((struct vfsconf *));
 int     vfs_stdextattrctl __P((struct mount *mp, int cmd, const char *attrname,
             caddr_t arg, struct proc *p));
+void    softdep_process_worklist __P((struct mount *));
 
 #else /* !_KERNEL */
 
 #include <sys/cdefs.h>

View File

@@ -222,8 +222,6 @@ struct bio_ops bioops = {
         softdep_disk_io_initiation,             /* io_start */
         softdep_disk_write_complete,            /* io_complete */
         softdep_deallocate_dependencies,        /* io_deallocate */
-        softdep_fsync,                          /* io_fsync */
-        softdep_process_worklist,               /* io_sync */
         softdep_move_dependencies,              /* io_movedeps */
         softdep_count_dependencies,             /* io_countdeps */
 };

View File

@@ -253,4 +253,20 @@ softdep_sync_metadata(ap)
         return (0);
 }
+
+int
+softdep_fsync(vp)
+        struct vnode *vp;       /* the "in_core" copy of the inode */
+{
+
+        return (0);
+}
+
+int
+softdep_process_worklist(matchmnt)
+        struct mount *matchmnt;
+{
+
+        return (0);
+}
 #endif /* SOFTUPDATES not configured in */
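
These stubs are needed because sched_sync() and ffs_fsync() now call softdep_process_worklist() and softdep_fsync() by name instead of through the (possibly NULL) io_sync and io_fsync pointers; a kernel built without SOFTUPDATES still has to link, so the stubs simply return 0.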

View File

@@ -175,7 +175,7 @@ loop:
                         continue;
                 if (!wait && LIST_FIRST(&bp->b_dep) != NULL &&
                     (bp->b_flags & B_DEFERRED) == 0 &&
-                    bioops.io_countdeps && (*bioops.io_countdeps)(bp, 0)) {
+                    buf_countdeps(bp, 0)) {
                         bp->b_flags |= B_DEFERRED;
                         continue;
                 }
@@ -278,5 +278,8 @@ loop:
                 }
         }
         splx(s);
-        return (UFS_UPDATE(vp, wait));
+        error = UFS_UPDATE(vp, wait);
+        if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))
+                error = softdep_fsync(vp);
+        return (error);
 }
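
Net effect on fsync(2): the MNT_SOFTDEP special case moves out of the generic fsync() syscall (see the fsync(p, uap) hunks above, which reduce it to a plain VOP_FSYNC() call) and into ffs_fsync(), where UFS_UPDATE() and softdep_fsync() are sequenced locally.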