Regularize the vop_stdlock'ing protocol across all the filesystems
that use it. Specifically, vop_stdlock uses the lock pointed to by vp->v_vnlock. By default, getnewvnode sets up vp->v_vnlock to reference vp->v_lock. Filesystems that wish to use the default do not need to allocate a lock at the front of their node structure (as some still did) or do a lockinit; they can simply start using vn_lock/VOP_UNLOCK. Filesystems that wish to manage their own locks, but still use the vop_stdlock functions (such as nullfs), can simply replace vp->v_vnlock with a pointer to the lock that they wish to have used for the vnode. Such filesystems are responsible for setting vp->v_vnlock back to the default in their vop_reclaim routine (e.g., vp->v_vnlock = &vp->v_lock).

In theory, this set of changes cleans up the existing filesystem lock interface and should introduce no functional change to the existing locking scheme.

Sponsored by: DARPA & NAI Labs
commit 25230d4c6a
parent 2cf51225e1
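The diff below applies this protocol mechanically to each filesystem. For a filesystem that keeps its own lock but still routes locking through the vop_stdlock functions, the whole contract is two assignments: point vp->v_vnlock at the private lock when the node is set up, and point it back at vp->v_lock in the vop_reclaim routine. A minimal sketch follows; the xxfs identifiers, node layout, and malloc type are illustrative only and are not code from this commit.

/*
 * Sketch of the vop_stdlock protocol for a filesystem that manages its
 * own vnode lock (nullfs-style).  The xxfs names and struct layout are
 * hypothetical; only the v_vnlock handling mirrors the protocol
 * described in the commit message.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

struct xxfsnode {
        struct lock     x_lock;         /* lock actually used for the vnode */
        struct vnode    *x_vnode;       /* back pointer */
};

static int
xxfs_nodeget(struct vnode *vp, struct xxfsnode *xp)
{
        /*
         * getnewvnode() already initialized vp->v_lock and set
         * vp->v_vnlock = &vp->v_lock; a filesystem using the default
         * would stop here and just call vn_lock()/VOP_UNLOCK().
         * A filesystem with its own lock overrides the pointer instead.
         */
        lockinit(&xp->x_lock, PVFS, "xxnode", VLKTIMEOUT, 0);
        vp->v_vnlock = &xp->x_lock;
        vp->v_data = xp;
        xp->x_vnode = vp;
        return (0);
}

static int
xxfs_reclaim(struct vop_reclaim_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct xxfsnode *xp = vp->v_data;

        /* Restore the default lock before the vnode is reused. */
        vp->v_vnlock = &vp->v_lock;
        lockdestroy(&xp->x_lock);
        vp->v_data = NULL;
        FREE(xp, M_TEMP);
        return (0);
}

Filesystems that keep the shared default lock but want recursion (ffs, smbfs, nwfs, and pseudofs in the diff) need none of this; they simply set LK_CANRECURSE in vp->v_vnlock->lk_flags after getnewvnode().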
@@ -143,7 +143,7 @@ cd9660_ihashins(ip)
         *ipp = ip;
         mtx_unlock(&cd9660_ihash_mtx);
 
-        lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, curthread);
+        vn_lock(ITOV(ip), LK_EXCLUSIVE | LK_RETRY, curthread);
 }
 
 /*
@@ -223,7 +223,6 @@ cd9660_reclaim(ap)
                 vrele(ip->i_devvp);
                 ip->i_devvp = 0;
         }
-        lockdestroy(&ip->i_vnode->v_lock);
         FREE(vp->v_data, M_ISOFSNODE);
         vp->v_data = NULL;
         return (0);
@@ -714,11 +714,6 @@ cd9660_vget_internal(mp, ino, flags, vpp, relocated, isodir)
         }
         MALLOC(ip, struct iso_node *, sizeof(struct iso_node), M_ISOFSNODE,
             M_WAITOK | M_ZERO);
-        lockinit(&vp->v_lock, PINOD, "isonode", 0, 0);
-        /*
-         * ISOFS uses stdlock and can share lock structure
-         */
-        vp->v_vnlock = &vp->v_lock;
         vp->v_data = ip;
         ip->i_vnode = vp;
         ip->i_dev = dev;
@@ -333,7 +333,6 @@ struct hpfsmount {
 #define H_PARCHANGE 0x0008 /* parent node date was changed */
 #define H_INVAL 0x0010 /* Invalid node */
 struct hpfsnode {
-        struct lock h_lock; /* Must be first, for std vops */
         struct mtx h_interlock;
 
         LIST_ENTRY(hpfsnode) h_hash;
@@ -512,7 +512,6 @@ hpfs_vget(
 
 
         mtx_init(&hp->h_interlock, "hpfsnode interlock", NULL, MTX_DEF);
-        lockinit(&hp->h_lock, PINOD, "hpnode", VLKTIMEOUT, 0);
 
         hp->h_flag = H_INVAL;
         hp->h_vp = vp;
@@ -643,7 +643,6 @@ hpfs_reclaim(ap)
                 hp->h_devvp = NULL;
         }
 
-        lockdestroy(&hp->h_lock);
         mtx_destroy(&hp->h_interlock);
 
         vp->v_data = NULL;
@@ -260,8 +260,6 @@ deget(pmp, dirclust, diroffset, depp)
                 return error;
         }
         bzero((caddr_t)ldep, sizeof *ldep);
-        lockinit(&nvp->v_lock, PINOD, "denode", VLKTIMEOUT, 0);
-        nvp->v_vnlock = &nvp->v_lock;
         nvp->v_data = ldep;
         ldep->de_vnode = nvp;
         ldep->de_flag = 0;
@@ -670,7 +668,6 @@ msdosfs_reclaim(ap)
 #if 0 /* XXX */
         dep->de_flag = 0;
 #endif
-        lockdestroy(&vp->v_lock);
         FREE(dep, M_MSDOSFSNODE);
         vp->v_data = NULL;
 
@@ -69,8 +69,6 @@ struct ntnode {
 #define FN_VALID 0x0002
 #define FN_AATTRNAME 0x0004 /* space allocated for f_attrname */
 struct fnode {
-        struct lock f_lock; /* fnode lock >Keep this first< */
-
         LIST_ENTRY(fnode) f_fnlist;
         struct vnode *f_vp; /* Associatied vnode */
         struct ntnode *f_ip; /* Associated ntnode */
@@ -778,7 +778,6 @@ ntfs_frele(
                 FREE(fp->f_attrname, M_TEMP);
         if (fp->f_dirblbuf)
                 FREE(fp->f_dirblbuf, M_NTFSDIR);
-        lockdestroy(&fp->f_lock);
         FREE(fp, M_NTFSFNODE);
         ntfs_ntrele(ip);
 }
@@ -741,7 +741,6 @@ ntfs_vgetex(
         }
         dprintf(("ntfs_vget: vnode: %p for ntnode: %d\n", vp,ino));
 
-        lockinit(&fp->f_lock, PINOD, "fnode", VLKTIMEOUT, 0);
         fp->f_vp = vp;
         vp->v_data = fp;
         vp->v_type = f_type;
@@ -226,9 +226,6 @@ null_nodeget(mp, lowervp, vpp)
         vp->v_type = lowervp->v_type;
         vp->v_data = xp;
 
-        /* Though v_lock is inited by getnewvnode(), we want our own wmesg */
-        lockinit(&vp->v_lock, PVFS, "nunode", VLKTIMEOUT, LK_NOPAUSE);
-
         /*
          * From NetBSD:
          * Now lock the new node. We rely on the fact that we were passed
@@ -758,6 +758,7 @@ null_reclaim(ap)
         }
 
         vp->v_data = NULL;
+        vp->v_vnlock = &vp->v_lock;
         FREE(xp, M_NULLFSNODE);
 
         return (0);
@@ -185,7 +185,7 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
         if (dvp) {
                 np->n_parent = VTONW(dvp)->n_fid;
         }
-        lockinit(&vp->v_lock, PINOD, "nwnode", VLKTIMEOUT, LK_CANRECURSE);
+        vp->v_vnlock->lk_flags |= LK_CANRECURSE;
         lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL, td);
         /*
          * Another process can create vnode while we blocked in malloc() or
@@ -179,8 +179,7 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
         pvd->pvd_next->pvd_prev = pvd;
         pfs_vncache = pvd;
         mtx_unlock(&pfs_vncache_mutex);
-        (*vpp)->v_vnlock = &(*vpp)->v_lock;
-        lockinit((*vpp)->v_vnlock, PINOD, "pfsnod", VLKTIMEOUT, LK_CANRECURSE);
+        (*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
         vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
         return (0);
 }
@@ -203,7 +203,7 @@ smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
                 return EFBIG;*/
         td = uiop->uio_td;
         if (vp->v_type == VDIR) {
-                lks = LK_EXCLUSIVE;/*lockstatus(&vp->v_lock, td);*/
+                lks = LK_EXCLUSIVE;/*lockstatus(vp->v_vnlock, td);*/
                 if (lks == LK_SHARED)
                         vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
                 error = smbfs_readvdir(vp, uiop, cred);
@@ -244,7 +244,7 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
         } else if (vp->v_type == VREG)
                 SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
 
-        lockinit(&vp->v_lock, PINOD, "smbnode", VLKTIMEOUT, LK_CANRECURSE);
+        vp->v_vnlock->lk_flags |= LK_CANRECURSE;
         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 
         smbfs_hash_lock(smp, td);
@@ -613,8 +613,6 @@ udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
         unode->i_dev = udfmp->im_dev;
         unode->udfmp = udfmp;
         vp->v_data = unode;
-        lockinit(&vp->v_lock, PINOD, "udfnode", 0, 0);
-        vp->v_vnlock = &vp->v_lock;
         VREF(udfmp->im_devvp);
         udf_hashins(unode);
 
@@ -135,8 +135,7 @@ udf_hashins(struct udf_node *node)
         mtx_lock(&udfmp->hash_mtx);
         TAILQ_INSERT_TAIL(&udfmp->udf_tqh, node, tq);
         mtx_unlock(&udfmp->hash_mtx);
-        lockmgr(&node->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0,
-            curthread);
+        vn_lock(node->i_vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
 
         return (0);
 }
@@ -1028,7 +1027,6 @@ udf_reclaim(struct vop_reclaim_args *a)
 
                 if (unode->fentry != NULL)
                         FREE(unode->fentry, M_UDFFENTRY);
-                lockdestroy(&unode->i_vnode->v_lock);
                 uma_zfree(udf_zone_node, unode);
                 vp->v_data = NULL;
         }
@@ -66,13 +66,8 @@ struct union_mount {
 
 /*
  * A cache of vnode references (hangs off v_data)
- *
- * Placing un_lock as the first elements theoretically allows us to
- * use the vop_stdlock functions. However, we need to make sure of
- * certain side effects so we will still punch in our own code.
  */
 struct union_node {
-        struct lock un_lock;
         LIST_ENTRY(union_node) un_cache; /* Hash chain */
         struct vnode *un_vnode; /* Back pointer */
         struct vnode *un_uppervp; /* overlaying object */
@@ -576,7 +576,6 @@ union_allocvp(vpp, mp, dvp, upperdvp, cnp, uppervp, lowervp, docache)
         un = VTOUNION(*vpp);
         bzero(un, sizeof(*un));
 
-        lockinit(&un->un_lock, PVFS, "unlock", VLKTIMEOUT, 0);
         vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
 
         un->un_vnode = *vpp;
@@ -643,7 +642,6 @@ union_freevp(vp)
                 free(un->un_path, M_TEMP);
                 un->un_path = NULL;
         }
-        lockdestroy(&un->un_lock);
 
         FREE(vp->v_data, M_TEMP);
         vp->v_data = 0;
@@ -80,7 +80,6 @@ static int union_inactive(struct vop_inactive_args *ap);
 static int union_ioctl(struct vop_ioctl_args *ap);
 static int union_lease(struct vop_lease_args *ap);
 static int union_link(struct vop_link_args *ap);
-static int union_lock(struct vop_lock_args *ap);
 static int union_lookup(struct vop_lookup_args *ap);
 static int union_lookup1(struct vnode *udvp, struct vnode **dvp,
                             struct vnode **vpp,
@@ -103,7 +102,6 @@ static int union_poll(struct vop_poll_args *ap);
 static int union_setattr(struct vop_setattr_args *ap);
 static int union_strategy(struct vop_strategy_args *ap);
 static int union_symlink(struct vop_symlink_args *ap);
-static int union_unlock(struct vop_unlock_args *ap);
 static int union_whiteout(struct vop_whiteout_args *ap);
 static int union_write(struct vop_read_args *ap);
 
@@ -1700,86 +1698,6 @@ union_reclaim(ap)
         return (0);
 }
 
-static int
-union_lock(ap)
-        struct vop_lock_args *ap;
-{
-#if 0
-        struct vnode *vp = ap->a_vp;
-        struct thread *td = ap->a_td;
-        int flags = ap->a_flags;
-        struct union_node *un;
-#endif
-        int error;
-
-        error = vop_stdlock(ap);
-#if 0
-        un = VTOUNION(vp);
-
-        if (error == 0) {
-                /*
-                 * Lock the upper if it exists and this is an exclusive lock
-                 * request.
-                 */
-                if (un->un_uppervp != NULLVP &&
-                    (flags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
-                        if ((un->un_flags & UN_ULOCK) == 0 && vrefcnt(vp)) {
-                                error = vn_lock(un->un_uppervp, flags, td);
-                                if (error) {
-                                        struct vop_unlock_args uap = { 0 };
-                                        uap.a_vp = ap->a_vp;
-                                        uap.a_flags = ap->a_flags;
-                                        uap.a_td = ap->a_td;
-                                        vop_stdunlock(&uap);
-                                        return (error);
-                                }
-                                un->un_flags |= UN_ULOCK;
-                        }
-                }
-        }
-#endif
-        return (error);
-}
-
-/*
- * union_unlock:
- *
- *      Unlock our union node. This also unlocks uppervp.
- */
-static int
-union_unlock(ap)
-        struct vop_unlock_args /* {
-                struct vnode *a_vp;
-                int a_flags;
-                struct thread *a_td;
-        } */ *ap;
-{
-#if 0
-        struct union_node *un = VTOUNION(ap->a_vp);
-#endif
-        int error;
-
-#if 0
-        KASSERT((un->un_uppervp == NULL || vrefcnt(un->un_uppervp) > 0), ("uppervp usecount is 0"));
-#endif
-
-        error = vop_stdunlock(ap);
-#if 0
-
-        /*
-         * If no exclusive locks remain and we are holding an uppervp lock,
-         * remove the uppervp lock.
-         */
-
-        if ((un->un_flags & UN_ULOCK) &&
-            lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
-                un->un_flags &= ~UN_ULOCK;
-                VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, td);
-        }
-#endif
-        return(error);
-}
-
 /*
  * unionvp do not hold a VM object and there is no need to create one for
  * upper or lower vp because it is done in the union_open()
@@ -1936,7 +1854,6 @@ static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
         { &vop_islocked_desc, (vop_t *) vop_stdislocked },
         { &vop_lease_desc, (vop_t *) union_lease },
         { &vop_link_desc, (vop_t *) union_link },
-        { &vop_lock_desc, (vop_t *) union_lock },
         { &vop_lookup_desc, (vop_t *) union_lookup },
         { &vop_mkdir_desc, (vop_t *) union_mkdir },
         { &vop_mknod_desc, (vop_t *) union_mknod },
@@ -1956,7 +1873,6 @@ static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
         { &vop_setattr_desc, (vop_t *) union_setattr },
         { &vop_strategy_desc, (vop_t *) union_strategy },
         { &vop_symlink_desc, (vop_t *) union_symlink },
-        { &vop_unlock_desc, (vop_t *) union_unlock },
         { &vop_whiteout_desc, (vop_t *) union_whiteout },
         { &vop_write_desc, (vop_t *) union_write },
         { NULL, NULL }
@@ -148,7 +148,7 @@ ext2_ihashins(ip)
         struct ihashhead *ipp;
 
         /* lock the inode, then put it on the appropriate hash list */
-        lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, td);
+        vn_lock(ITOV(ip), LK_EXCLUSIVE | LK_RETRY, td);
 
         mtx_lock(&ext2_ihash_mtx);
         ipp = INOHASH(ip->i_dev, ip->i_number);
@@ -546,7 +546,6 @@ ext2_reclaim(ap)
                 vrele(ip->i_devvp);
                 ip->i_devvp = 0;
         }
-        lockdestroy(&vp->v_lock);
         FREE(vp->v_data, M_EXT2NODE);
         vp->v_data = 0;
         return (0);
@@ -1017,7 +1017,6 @@ ext2_vget(mp, ino, flags, vpp)
                 return (error);
         }
         bzero((caddr_t)ip, sizeof(struct inode));
-        lockinit(&vp->v_lock, PINOD, "ext2in", 0, 0);
         vp->v_data = ip;
         ip->i_vnode = vp;
         ip->i_e2fs = fs = ump->um_e2fs;
@@ -276,9 +276,9 @@ vop_stdlock(ap)
         struct vnode *vp = ap->a_vp;
 
 #ifndef DEBUG_LOCKS
-        return (lockmgr(&vp->v_lock, ap->a_flags, VI_MTX(vp), ap->a_td));
+        return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td));
 #else
-        return (debuglockmgr(&vp->v_lock, ap->a_flags, VI_MTX(vp),
+        return (debuglockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
             ap->a_td, "vop_stdlock", vp->filename, vp->line));
 #endif
 }
@@ -294,7 +294,7 @@ vop_stdunlock(ap)
 {
         struct vnode *vp = ap->a_vp;
 
-        return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
+        return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
             ap->a_td));
 }
 
@@ -307,7 +307,7 @@ vop_stdislocked(ap)
         } */ *ap;
 {
 
-        return (lockstatus(&ap->a_vp->v_lock, ap->a_td));
+        return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
 }
 
 /* Mark the vnode inactive */
@@ -423,9 +423,9 @@ vop_sharedlock(ap)
         if (flags & LK_INTERLOCK)
                 vnflags |= LK_INTERLOCK;
 #ifndef DEBUG_LOCKS
-        return (lockmgr(&vp->v_lock, vnflags, VI_MTX(vp), ap->a_td));
+        return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
 #else
-        return (debuglockmgr(&vp->v_lock, vnflags, VI_MTX(vp), ap->a_td,
+        return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
             "vop_sharedlock", vp->filename, vp->line));
 #endif
 }
@@ -480,7 +480,7 @@ vop_nolock(ap)
         }
         if (flags & LK_INTERLOCK)
                 vnflags |= LK_INTERLOCK;
-        return(lockmgr(&vp->v_lock, vnflags, VI_MTX(vp), ap->a_td));
+        return(lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
 #else /* for now */
         /*
          * Since we are not using the lock manager, we must clear
@@ -974,6 +974,8 @@ getnewvnode(tag, mp, vops, vpp)
                 vp->v_cstart = 0;
                 vp->v_clen = 0;
                 vp->v_socket = 0;
+                lockdestroy(vp->v_vnlock);
+                lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
                 KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
                 KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
         } else {
@@ -984,6 +986,8 @@ getnewvnode(tag, mp, vops, vpp)
                 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
                 VI_LOCK(vp);
                 vp->v_dd = vp;
+                vp->v_vnlock = &vp->v_lock;
+                lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
                 cache_purge(vp);
                 LIST_INIT(&vp->v_cache_src);
                 TAILQ_INIT(&vp->v_cache_dst);
@@ -994,7 +998,6 @@ getnewvnode(tag, mp, vops, vpp)
         vp->v_type = VNON;
         vp->v_tag = tag;
         vp->v_op = vops;
-        lockinit(&vp->v_lock, PVFS, "vnlock", VLKTIMEOUT, LK_NOPAUSE);
         *vpp = vp;
         vp->v_usecount = 1;
         vp->v_data = 0;
@@ -1994,10 +1997,9 @@ addaliasu(nvp, nvp_rdev)
         ovp->v_data = nvp->v_data;
         ovp->v_tag = nvp->v_tag;
         nvp->v_data = NULL;
-        lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg,
-            nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK);
-        if (nvp->v_vnlock)
-                ovp->v_vnlock = &ovp->v_lock;
+        lockdestroy(ovp->v_vnlock);
+        lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
+            nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
         ops = ovp->v_op;
         ovp->v_op = nvp->v_op;
         if (VOP_ISLOCKED(nvp, curthread)) {
@@ -2538,9 +2540,6 @@ vclean(vp, flags, td)
         }
 
         cache_purge(vp);
-        vp->v_vnlock = NULL;
-        lockdestroy(&vp->v_lock);
-
         VI_LOCK(vp);
         if (VSHOULDFREE(vp))
                 vfree(vp);
@@ -2813,7 +2812,7 @@ vprint(label, vp)
                 strcat(buf, "|VV_OBJBUF");
         if (buf[0] != '\0')
                 printf(" flags (%s),", &buf[1]);
-        lockmgr_printinfo(&vp->v_lock);
+        lockmgr_printinfo(vp->v_vnlock);
         printf("\n");
         if (vp->v_data != NULL) {
                 printf("\t");
@@ -258,7 +258,6 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
         bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
         np->n_fhsize = fhsize;
         lockinit(&np->n_rslock, PVFS | rsflags, "nfrslk", 0, LK_NOPAUSE);
-        lockinit(&vp->v_lock, PVFS, "nfsnlk", 0, LK_NOPAUSE);
         *npp = np;
 
         if (nfs_node_hash_lock < 0)
@@ -205,7 +205,7 @@ lomacfs_lock(
          */
         lvp = VTOLVP(vp);
         if (lvp == NULL || flags & LK_THISLAYER)
-                return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, td));
+                return (lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td));
         if (flags & LK_INTERLOCK) {
                 mtx_unlock(&vp->v_interlock);
                 flags &= ~LK_INTERLOCK;
@@ -218,7 +218,7 @@ lomacfs_lock(
         error = vn_lock(lvp, lflags | LK_CANRECURSE, td);
         if (error)
                 return (error);
-        error = lockmgr(&vp->v_lock, flags, &vp->v_interlock, td);
+        error = lockmgr(vp->v_vnlock, flags, &vp->v_interlock, td);
         if (error)
                 VOP_UNLOCK(lvp, 0, td);
         return (error);
@@ -245,7 +245,7 @@ lomacfs_unlock(
         struct vnode *lvp = VTOLVP(vp);
         int error;
 
-        error = lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, td);
+        error = lockmgr(vp->v_vnlock, flags | LK_RELEASE, &vp->v_interlock, td);
         if (lvp == NULL || flags & LK_THISLAYER || error)
                 return (error);
         /*
@@ -269,7 +269,7 @@ lomacfs_islocked(
         struct vnode *vp = ap->a_vp;
         struct thread *td = ap->a_td;
 
-        return (lockstatus(&vp->v_lock, td));
+        return (lockstatus(vp->v_vnlock, td));
 }
 
 static int
@@ -1218,10 +1218,9 @@ ffs_vget(mp, ino, flags, vpp)
         }
         bzero((caddr_t)ip, sizeof(struct inode));
         /*
-         * FFS supports lock sharing in the stack of vnodes
+         * FFS supports recursive locking.
          */
-        vp->v_vnlock = &vp->v_lock;
-        lockinit(vp->v_vnlock, PINOD, "inode", VLKTIMEOUT, LK_CANRECURSE);
+        vp->v_vnlock->lk_flags |= LK_CANRECURSE;
         vp->v_data = ip;
         ip->i_vnode = vp;
         ip->i_ump = ump;
@@ -179,7 +179,6 @@ ufs_reclaim(ap)
                 }
         }
 #endif
-        lockdestroy(&vp->v_lock);
 #ifdef UFS_DIRHASH
         if (ip->i_dirhash != NULL)
                 ufsdirhash_free(ip);