Add dedicated routines to toggle lockmgr flags such as LK_NOSHARE and
LK_CANRECURSE after a lock is created.  Use them to implement macros that
previously manipulated the flags directly.  Assert that the associated
lockmgr lock is exclusively locked by the current thread when manipulating
these flags to ensure the flag updates are safe.  This last change required
some minor shuffling in a few filesystems to exclusively lock a brand new
vnode slightly earlier.

Reviewed by:	kib
MFC after:	3 days
commit 3634d5b241
parent 365ccde0fb
Author:	John Baldwin <jhb@FreeBSD.org>
Date:	2010-08-20 19:46:50 +00:00

14 changed files with 47 additions and 19 deletions
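The common pattern across the filesystem changes below: a brand-new vnode
must now be exclusively locked before VN_LOCK_AREC()/VN_LOCK_ASHARE() are
called, since those macros now assert that the vnode lock is held
exclusively.  A hedged sketch of the post-commit shape of a vnode
allocation path ("examplefs" and its vop vector are illustrative names,
not from this commit):

	struct vnode *vp;
	int error;

	error = getnewvnode("examplefs", mp, &examplefs_vnodeops, &vp);
	if (error != 0)
		return (error);
	/* Exclusively lock the fresh vnode first... */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/* ...then toggle its lock flags while the KA_XLOCKED assert holds. */
	VN_LOCK_AREC(vp);		/* permit recursive locking */
	VN_LOCK_ASHARE(vp);		/* permit shared lockers */
	error = insmntque(vp, mp);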

sys/fs/devfs/devfs_vnops.c

@@ -412,8 +412,8 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
 	} else {
 		vp->v_type = VBAD;
 	}
-	VN_LOCK_ASHARE(vp);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
+	VN_LOCK_ASHARE(vp);
 	mtx_lock(&devfs_de_interlock);
 	vp->v_data = de;
 	de->de_vnode = vp;

sys/fs/nfsclient/nfs_clnode.c

@@ -140,6 +140,7 @@ ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp)
 	/*
 	 * NFS supports recursive and shared locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	VN_LOCK_AREC(vp);
 	VN_LOCK_ASHARE(vp);
 	/*
@@ -157,7 +158,6 @@
 	    M_NFSFH, M_WAITOK);
 	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
 	np->n_fhp->nfh_len = fhsize;
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	error = insmntque(vp, mntp);
 	if (error != 0) {
 		*npp = NULL;

sys/fs/nfsclient/nfs_clport.c

@@ -230,9 +230,9 @@ nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
 	/*
 	 * NFS supports recursive and shared locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	VN_LOCK_AREC(vp);
 	VN_LOCK_ASHARE(vp);
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	error = insmntque(vp, mntp);
 	if (error != 0) {
 		*npp = NULL;

sys/fs/nwfs/nwfs_node.c

@@ -185,7 +185,6 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
 	if (dvp) {
 		np->n_parent = VTONW(dvp)->n_fid;
 	}
-	VN_LOCK_AREC(vp);
 	sx_xlock(&nwhashlock);
 	/*
	 * Another process can create vnode while we blocked in malloc() or
@@ -202,6 +201,7 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
 	nhpp = NWNOHASH(fid);
 	LIST_INSERT_HEAD(nhpp, np, n_hash);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+	VN_LOCK_AREC(vp);
 	sx_xunlock(&nwhashlock);
 	ASSERT_VOP_LOCKED(dvp, "nwfs_allocvp");

sys/fs/pseudofs/pseudofs_vncache.c

@@ -189,8 +189,8 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
 	if ((pn->pn_flags & PFS_PROCDEP) != 0)
 		(*vpp)->v_vflag |= VV_PROCDEP;
 	pvd->pvd_vnode = *vpp;
-	VN_LOCK_AREC(*vpp);
 	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
+	VN_LOCK_AREC(*vpp);
 	error = insmntque(*vpp, mp);
 	if (error != 0) {
 		free(pvd, M_PFSVNCACHE);

sys/fs/smbfs/smbfs_node.c

@@ -253,8 +253,8 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
 	} else if (vp->v_type == VREG)
 		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
-	VN_LOCK_AREC(vp);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+	VN_LOCK_AREC(vp);
 	smbfs_hash_lock(smp);
 	LIST_FOREACH(np2, nhpp, n_hash) {

sys/gnu/fs/xfs/FreeBSD/xfs_vnode.c

@@ -389,8 +389,8 @@ xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
 		return (error);
 	}
-	VN_LOCK_AREC(vp);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+	VN_LOCK_AREC(vp);
 	error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
 	if (error != 0) {
 		kmem_free(vdata, sizeof(*vdata));

sys/kern/kern_lock.c

@@ -396,6 +396,34 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
 	STACK_ZERO(lk);
 }

+/*
+ * XXX: Gross hacks to manipulate external lock flags after
+ * initialization.  Used for certain vnode and buf locks.
+ */
+void
+lockallowshare(struct lock *lk)
+{
+
+	lockmgr_assert(lk, KA_XLOCKED);
+	lk->lock_object.lo_flags &= ~LK_NOSHARE;
+}
+
+void
+lockallowrecurse(struct lock *lk)
+{
+
+	lockmgr_assert(lk, KA_XLOCKED);
+	lk->lock_object.lo_flags |= LO_RECURSABLE;
+}
+
+void
+lockdisablerecurse(struct lock *lk)
+{
+
+	lockmgr_assert(lk, KA_XLOCKED);
+	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
+}
+
 void
 lockdestroy(struct lock *lk)
 {
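A hedged sketch of driving the new routines directly at the lockmgr level
(the lock, its wmesg, and the PVFS priority are illustrative choices, not
taken from this commit):

	struct lock lk;

	lockinit(&lk, PVFS, "examplelk", 0, 0);
	lockmgr(&lk, LK_EXCLUSIVE, NULL);	/* satisfy KA_XLOCKED */
	lockallowrecurse(&lk);			/* sets LO_RECURSABLE */
	lockmgr(&lk, LK_EXCLUSIVE, NULL);	/* recursion now permitted */
	lockmgr(&lk, LK_RELEASE, NULL);		/* drop recursed acquire */
	lockdisablerecurse(&lk);		/* lock still held exclusively */
	lockmgr(&lk, LK_RELEASE, NULL);
	lockdestroy(&lk);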

sys/kern/vfs_lookup.c

@@ -84,14 +84,13 @@ static struct vnode *vp_crossmp;
 static void
 nameiinit(void *dummy __unused)
 {
-	int error;

 	namei_zone = uma_zcreate("NAMEI", MAXPATHLEN, NULL, NULL, NULL, NULL,
 	    UMA_ALIGN_PTR, 0);
-	error = getnewvnode("crossmp", NULL, &dead_vnodeops, &vp_crossmp);
-	if (error != 0)
-		panic("nameiinit: getnewvnode");
+	getnewvnode("crossmp", NULL, &dead_vnodeops, &vp_crossmp);
+	vn_lock(vp_crossmp, LK_EXCLUSIVE);
 	VN_LOCK_ASHARE(vp_crossmp);
+	VOP_UNLOCK(vp_crossmp, 0);
 }
 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nameiinit, NULL);

sys/nfsclient/nfs_node.c

@@ -150,6 +150,7 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
 	/*
 	 * NFS supports recursive and shared locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	VN_LOCK_AREC(vp);
 	VN_LOCK_ASHARE(vp);
 	if (fhsize > NFS_SMALLFH) {
@@ -158,7 +159,6 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
 		np->n_fhp = &np->n_fh;
 	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
 	np->n_fhsize = fhsize;
-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
 	error = insmntque(vp, mntp);
 	if (error != 0) {
 		*npp = NULL;

sys/sys/lockmgr.h

@@ -73,7 +73,10 @@ void _lockmgr_assert(struct lock *lk, int what, const char *file, int line);
 #endif
 void	_lockmgr_disown(struct lock *lk, const char *file, int line);

+void	lockallowrecurse(struct lock *lk);
+void	lockallowshare(struct lock *lk);
 void	lockdestroy(struct lock *lk);
+void	lockdisablerecurse(struct lock *lk);
 void	lockinit(struct lock *lk, int prio, const char *wmesg, int timo,
 	    int flags);
 #ifdef DDB

sys/sys/vnode.h

@@ -419,10 +419,8 @@ extern struct vattr va_null;	/* predefined null vattr structure */
 #define	VI_UNLOCK(vp)	mtx_unlock(&(vp)->v_interlock)
 #define	VI_MTX(vp)	(&(vp)->v_interlock)

-#define	VN_LOCK_AREC(vp)						\
-	((vp)->v_vnlock->lock_object.lo_flags |= LO_RECURSABLE)
-#define	VN_LOCK_ASHARE(vp)						\
-	((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
+#define	VN_LOCK_AREC(vp)	lockallowrecurse((vp)->v_vnlock)
+#define	VN_LOCK_ASHARE(vp)	lockallowshare((vp)->v_vnlock)

 #endif /* _KERNEL */

sys/ufs/ffs/ffs_softdep.c

@@ -904,8 +904,8 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
 #define	ACQUIRE_LOCK(lk)	mtx_lock(lk)
 #define	FREE_LOCK(lk)		mtx_unlock(lk)

-#define	BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
-#define	BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)
+#define	BUF_AREC(bp)	lockallowrecurse(&(bp)->b_lock)
+#define	BUF_NOREC(bp)	lockdisablerecurse(&(bp)->b_lock)

 /*
  * Worklist queue management.
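Softdep toggles recursion on a buffer lock only around windows where the
buffer may be re-locked; a hedged sketch of that usage (bp is assumed to
be a locked struct buf *, and the elided middle is not quoted from
softdep):

	BUF_AREC(bp);		/* bp's lock is held, so the assert passes */
	/* ... a path that may acquire bp->b_lock recursively ... */
	BUF_NOREC(bp);		/* revoke recursion before bp is released */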

sys/ufs/ffs/ffs_vfsops.c

@@ -1501,6 +1501,7 @@ ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
 	/*
 	 * FFS supports recursive locking.
 	 */
+	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
 	VN_LOCK_AREC(vp);
 	vp->v_data = ip;
 	vp->v_bufobj.bo_bsize = fs->fs_bsize;
@@ -1518,7 +1519,6 @@
 	}
 #endif

-	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
 	if (ffs_flags & FFSV_FORCEINSMQ)
 		vp->v_vflag |= VV_FORCEINSMQ;
 	error = insmntque(vp, mp);