Add BO_* macros parallel to VI_* macros for manipulating the bo_mtx.

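For illustration only (not part of this change): a minimal sketch of how the new macros would be used, mirroring the existing VI_* pattern on the vnode interlock. The helper name bufobj_start_write is hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bufobj.h>

/*
 * Hypothetical helper, not in this commit: bump the writes-in-progress
 * counter on a bufobj under its lock, the bufobj analogue of the old
 * "VI_LOCK(vp); vp->v_numoutput++; VI_UNLOCK(vp);" pattern.
 */
static void
bufobj_start_write(struct bufobj *bo)
{

        BO_LOCK(bo);
        bo->bo_numoutput++;
        BO_UNLOCK(bo);
}
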
Initialize the bo_mtx when we allocate a vnode in getnewvnode().  For
now we point it at the vnode's interlock mutex, which retains the
exact same locking semantics.

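For illustration only (not part of this change): because bo_mtx currently points at the vnode interlock, locking through either interface acquires the same mutex, so a VI_LOCK() also satisfies the bufobj lock assertion. The helper name vnode_bufobj_lock_alias is hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Hypothetical sketch, not in this commit: with bo_mtx aliased to the
 * vnode interlock, taking the interlock via VI_LOCK() leaves the bufobj
 * lock assertion satisfied as well.
 */
static void
vnode_bufobj_lock_alias(struct vnode *vp)
{
        struct bufobj *bo;

        bo = &vp->v_bufobj;
        VI_LOCK(vp);            /* takes &vp->v_interlock == BO_MTX(bo) */
        ASSERT_BO_LOCKED(bo);   /* holds, since bo_mtx points at the interlock */
        VI_UNLOCK(vp);
}
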
Move v_numoutput from the vnode to the bufobj.  Add a renaming macro
to postpone the code sweep.
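
For illustration only (not part of this change): unconverted code that still writes vp->v_numoutput keeps compiling, because the renaming macro in vnode.h expands it to vp->v_bufobj.bo_numoutput at compile time. The helper name legacy_finish_write is hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Hypothetical pre-sweep caller, not in this commit: it still spells the
 * field as vp->v_numoutput; the #define rewrites that to
 * vp->v_bufobj.bo_numoutput.
 */
static void
legacy_finish_write(struct vnode *vp)
{

        VI_LOCK(vp);
        vp->v_numoutput--;
        if (vp->v_numoutput < 0)
                panic("legacy_finish_write: negative v_numoutput");
        VI_UNLOCK(vp);
}
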
commit fdf614c0ba (parent b436dad078)
Author: phk
Date:   2004-10-21 14:42:31 +00:00

3 changed files with 29 additions and 8 deletions

sys/kern/vfs_subr.c

@@ -734,6 +734,7 @@ getnewvnode(tag, mp, vops, vpp)
 {
        struct vnode *vp = NULL;
        struct vpollinfo *pollinfo = NULL;
+       struct bufobj *bo;
        mtx_lock(&vnode_free_list_mtx);
@@ -782,6 +783,7 @@ getnewvnode(tag, mp, vops, vpp)
        }
        if (vp) {
                freevnodes--;
+               bo = &vp->v_bufobj;
                mtx_unlock(&vnode_free_list_mtx);
 #ifdef INVARIANTS
@@ -790,7 +792,7 @@ getnewvnode(tag, mp, vops, vpp)
                        printf("cleaned vnode isn't, "
                            "address %p, inode %p\n",
                            vp, vp->v_data);
-               if (vp->v_numoutput)
+               if (bo->bo_numoutput)
                        panic("Clean vnode has pending I/O's");
                if (vp->v_writecount != 0)
                        panic("Non-zero write count");
@@ -816,10 +818,10 @@ getnewvnode(tag, mp, vops, vpp)
                vp->v_socket = 0;
                lockdestroy(vp->v_vnlock);
                lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
-               KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
-               KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
-               KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
-               KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
+               KASSERT(bo->bo_clean.bv_cnt == 0, ("cleanbufcnt not 0"));
+               KASSERT(bo->bo_clean.bv_root == NULL, ("cleanblkroot not NULL"));
+               KASSERT(bo->bo_dirty.bv_cnt == 0, ("dirtybufcnt not 0"));
+               KASSERT(bo->bo_dirty.bv_root == NULL, ("dirtyblkroot not NULL"));
        } else {
                numvnodes++;
                mtx_unlock(&vnode_free_list_mtx);
@@ -828,6 +830,8 @@ getnewvnode(tag, mp, vops, vpp)
                mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
                VI_LOCK(vp);
                vp->v_dd = vp;
+               bo = &vp->v_bufobj;
+               bo->bo_mtx = &vp->v_interlock;
                vp->v_vnlock = &vp->v_lock;
                lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
                cache_purge(vp);        /* Sets up v_id. */
@@ -835,8 +839,8 @@ getnewvnode(tag, mp, vops, vpp)
                TAILQ_INIT(&vp->v_cache_dst);
        }
-       TAILQ_INIT(&vp->v_cleanblkhd);
-       TAILQ_INIT(&vp->v_dirtyblkhd);
+       TAILQ_INIT(&bo->bo_clean.bv_hd);
+       TAILQ_INIT(&bo->bo_dirty.bv_hd);
        vp->v_type = VNON;
        vp->v_tag = tag;
        vp->v_op = vops;

sys/sys/bufobj.h

@@ -66,8 +66,25 @@ struct bufobj {
        struct mtx      *bo_mtx;        /* Mutex which protects "i" things */
        struct bufv     bo_clean;       /* i Clean buffers */
        struct bufv     bo_dirty;       /* i Dirty buffers */
+       long            bo_numoutput;   /* i Writes in progress */
 };
+#define BO_LOCK(bo) \
+       do { \
+               KASSERT (bo->bo_mtx != NULL, ("No lock in bufobj")); \
+               mtx_lock((bo)->bo_mtx); \
+       } while (0)
+#define BO_UNLOCK(bo) \
+       do { \
+               KASSERT (bo->bo_mtx != NULL, ("No lock in bufobj")); \
+               mtx_unlock((bo)->bo_mtx); \
+       } while (0)
+#define BO_MTX(bo)              ((bo)->bo_mtx)
+#define ASSERT_BO_LOCKED(bo)    mtx_assert(bo->bo_mtx, MA_OWNED)
+#define ASSERT_BO_UNLOCKED(bo)  mtx_assert(bo->bo_mtx, MA_NOTOWNED)
 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
 #endif /* _SYS_BUFOBJ_H_ */

sys/sys/vnode.h

@@ -110,7 +110,6 @@ struct vnode {
        struct mtx v_interlock;         /* lock for "i" things */
        u_long  v_iflag;                /* i vnode flags (see below) */
        int     v_usecount;             /* i ref count of users */
-       long    v_numoutput;            /* i writes in progress */
        struct thread *v_vxthread;      /* i thread owning VXLOCK */
        int     v_holdcnt;              /* i page & buffer references */
        struct bufobj   v_bufobj;       /* * Buffer cache object */
@@ -171,6 +170,7 @@ struct vnode {
 #define v_dirtyblkhd    v_bufobj.bo_dirty.bv_hd
 #define v_dirtyblkroot  v_bufobj.bo_dirty.bv_root
 #define v_dirtybufcnt   v_bufobj.bo_dirty.bv_cnt
+#define v_numoutput     v_bufobj.bo_numoutput
 /*
  * Userland version of struct vnode, for sysctl.