Introduce some functions in the vnode locks namespace and in the ffs
namespace in order to handle lockmgr fields in a controlled way,
instead of spreading bogus stubs all around:
- VN_LOCK_AREC() allows lock recursion for a specified vnode
- VN_LOCK_ASHARE() allows lock sharing for a specified vnode
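
For illustration, this is the before/after pattern the hunks below apply
to each converted caller (vp here stands for any struct vnode pointer
whose lock is being tuned):

    /* Before: manipulate the lockmgr fields of the vnode lock by hand. */
    vp->v_vnlock->lk_flags |= LK_CANRECURSE;
    vp->v_vnlock->lk_flags &= ~LK_NOSHARE;

    /* After: the same effect through the new wrappers. */
    VN_LOCK_AREC(vp);
    VN_LOCK_ASHARE(vp);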

In FFS land:
- BUF_AREC() allows lock recursion for a specified buffer lock
- BUF_NOREC() disallows recursion for a specified buffer lock
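
In softdep code the wrappers bracket the window in which the buffer lock
may be re-entered; a sketch of the usage, mirroring the
softdep_sync_metadata() hunks below:

    /* While syncing snapshots, we must allow recursive lookups. */
    BUF_AREC(bp);
    ACQUIRE_LOCK(&lk);
    /* ... dependency processing ... */
    /* Disallow recursion again before handing the buffer to bawrite(). */
    BUF_NOREC(bp);
    bawrite(bp);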

Side note: union_subr.c::unionfs_node_update() is the only other function
that directly handles lockmgr fields. As it is not simple to fix, it has
been left as the sole exception.
Attilio Rao 2008-02-24 16:38:58 +00:00
parent 93b651d88c
commit 628f51d275
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=176519
12 changed files with 27 additions and 21 deletions


@@ -119,8 +119,8 @@ zfs_znode_cache_constructor(void *buf, void *cdrarg, int kmflags)
 ASSERT(error == 0);
 zp->z_vnode = vp;
 vp->v_data = (caddr_t)zp;
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
-vp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_AREC(vp);
+VN_LOCK_ASHARE(vp);
 } else {
 zp->z_vnode = NULL;
 }
@@ -604,8 +604,8 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
 ASSERT(err == 0);
 vp = ZTOV(zp);
 vp->v_data = (caddr_t)zp;
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
-vp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_AREC(vp);
+VN_LOCK_ASHARE(vp);
 vp->v_type = IFTOVT((mode_t)zp->z_phys->zp_mode);
 if (vp->v_type == VDIR)
 zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */


@@ -119,8 +119,8 @@ zfs_znode_cache_constructor(void *buf, void *cdrarg, int kmflags)
 ASSERT(error == 0);
 zp->z_vnode = vp;
 vp->v_data = (caddr_t)zp;
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
-vp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_AREC(vp);
+VN_LOCK_ASHARE(vp);
 } else {
 zp->z_vnode = NULL;
 }
@@ -604,8 +604,8 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
 ASSERT(err == 0);
 vp = ZTOV(zp);
 vp->v_data = (caddr_t)zp;
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
-vp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_AREC(vp);
+VN_LOCK_ASHARE(vp);
 vp->v_type = IFTOVT((mode_t)zp->z_phys->zp_mode);
 if (vp->v_type == VDIR)
 zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */


@@ -188,7 +188,7 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
 if (dvp) {
 np->n_parent = VTONW(dvp)->n_fid;
 }
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
+VN_LOCK_AREC(vp);
 lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL);
 /*
 * Another process can create vnode while we blocked in malloc() or


@@ -196,7 +196,7 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
 if ((pn->pn_flags & PFS_PROCDEP) != 0)
 (*vpp)->v_vflag |= VV_PROCDEP;
 pvd->pvd_vnode = *vpp;
-(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
+VN_LOCK_AREC(*vpp);
 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
 error = insmntque(*vpp, mp);
 if (error != 0) {


@@ -261,7 +261,7 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
 } else if (vp->v_type == VREG)
 SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
+VN_LOCK_AREC(vp);
 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 smbfs_hash_lock(smp, td);


@@ -389,7 +389,7 @@ xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
 return (error);
 }
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
+VN_LOCK_AREC(vp);
 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
 if (error != 0) {


@@ -84,7 +84,7 @@ nameiinit(void *dummy __unused)
 error = getnewvnode("crossmp", NULL, &dead_vnodeops, &vp_crossmp);
 if (error != 0)
 panic("nameiinit: getnewvnode");
-vp_crossmp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_ASHARE(vp_crossmp);
 }
 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nameiinit, NULL)


@@ -193,7 +193,7 @@ ncp_conn_unlock(struct ncp_conn *conn, struct thread *td)
 int
 ncp_conn_assert_locked(struct ncp_conn *conn, const char *checker, struct thread *td)
 {
-if (conn->nc_lock.lk_flags & LK_HAVE_EXCL) return 0;
+if (lockstatus(&conn->nc_lock, curthread) == LK_EXCLUSIVE) return 0;
 printf("%s: connection isn't locked!\n", checker);
 return EIO;
 }


@@ -158,8 +158,8 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
 /*
 * NFS supports recursive and shared locking.
 */
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
-vp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_AREC(vp);
+VN_LOCK_ASHARE(vp);
 if (fhsize > NFS_SMALLFH) {
 MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK);
 } else


@@ -400,6 +400,9 @@ extern void (*lease_updatetime)(int deltat);
 #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
 #define VI_MTX(vp) (&(vp)->v_interlock)
+#define VN_LOCK_AREC(vp) ((vp)->v_vnlock->lk_flags |= LK_CANRECURSE)
+#define VN_LOCK_ASHARE(vp) ((vp)->v_vnlock->lk_flags &= ~LK_NOSHARE)
 #endif /* _KERNEL */
 /*


@@ -564,6 +564,9 @@ MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);
 #define ACQUIRE_LOCK(lk) mtx_lock(lk)
 #define FREE_LOCK(lk) mtx_unlock(lk)
+#define BUF_AREC(bp) ((bp)->b_lock.lk_flags |= LK_CANRECURSE)
+#define BUF_NOREC(bp) ((bp)->b_lock.lk_flags &= ~LK_CANRECURSE)
 /*
 * Worklist queue management.
 * These routines require that the lock be held.
@@ -5251,7 +5254,7 @@ softdep_sync_metadata(struct vnode *vp)
 return (0);
 loop:
 /* While syncing snapshots, we must allow recursive lookups */
-bp->b_lock.lk_flags |= LK_CANRECURSE;
+BUF_AREC(bp);
 ACQUIRE_LOCK(&lk);
 /*
 * As we hold the buffer locked, none of its dependencies
@@ -5393,7 +5396,7 @@ softdep_sync_metadata(struct vnode *vp)
 /* We reach here only in error and unlocked */
 if (error == 0)
 panic("softdep_sync_metadata: zero error");
-bp->b_lock.lk_flags &= ~LK_CANRECURSE;
+BUF_NOREC(bp);
 bawrite(bp);
 return (error);
 }
@@ -5405,7 +5408,7 @@ softdep_sync_metadata(struct vnode *vp)
 break;
 }
 VI_UNLOCK(vp);
-bp->b_lock.lk_flags &= ~LK_CANRECURSE;
+BUF_NOREC(bp);
 bawrite(bp);
 if (nbp != NULL) {
 bp = nbp;


@@ -1363,8 +1363,8 @@ ffs_vget(mp, ino, flags, vpp)
 /*
 * FFS supports recursive and shared locking.
 */
-vp->v_vnlock->lk_flags |= LK_CANRECURSE;
-vp->v_vnlock->lk_flags &= ~LK_NOSHARE;
+VN_LOCK_AREC(vp);
+VN_LOCK_ASHARE(vp);
 vp->v_data = ip;
 vp->v_bufobj.bo_bsize = fs->fs_bsize;
 ip->i_vnode = vp;