Axe the 'thread' argument from VOP_ISLOCKED() and lockstatus(), as it is
always curthread.

As KPI gets broken by this patch, manpages and __FreeBSD_version will be
updated by further commits.

Tested by:	Andrea Barberio <insomniac at slackware dot it>
This commit is contained in:
attilio 2008-02-25 18:45:57 +00:00
parent 49cb35343e
commit 4014b55830
24 changed files with 77 additions and 93 deletions

View File

@ -1113,7 +1113,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
int ltype = 0; int ltype = 0;
if (cnp->cn_flags & ISDOTDOT) { if (cnp->cn_flags & ISDOTDOT) {
ltype = VOP_ISLOCKED(dvp, td); ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK(dvp, 0); VOP_UNLOCK(dvp, 0);
} }
error = vn_lock(*vpp, cnp->cn_lkflags); error = vn_lock(*vpp, cnp->cn_lkflags);
@ -1171,7 +1171,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
/* ARGSUSED */ /* ARGSUSED */
static int static int
zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode, zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
vnode_t **vpp, cred_t *cr, kthread_t *td) vnode_t **vpp, cred_t *cr)
{ {
znode_t *zp, *dzp = VTOZ(dvp); znode_t *zp, *dzp = VTOZ(dvp);
zfsvfs_t *zfsvfs = dzp->z_zfsvfs; zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
@ -3245,7 +3245,7 @@ zfs_freebsd_create(ap)
mode = vap->va_mode & ALLPERMS; mode = vap->va_mode & ALLPERMS;
return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode, return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
ap->a_vpp, cnp->cn_cred, cnp->cn_thread)); ap->a_vpp, cnp->cn_cred));
} }
static int static int

View File

@ -1113,7 +1113,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
int ltype = 0; int ltype = 0;
if (cnp->cn_flags & ISDOTDOT) { if (cnp->cn_flags & ISDOTDOT) {
ltype = VOP_ISLOCKED(dvp, td); ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK(dvp, 0); VOP_UNLOCK(dvp, 0);
} }
error = vn_lock(*vpp, cnp->cn_lkflags); error = vn_lock(*vpp, cnp->cn_lkflags);
@ -1171,7 +1171,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
/* ARGSUSED */ /* ARGSUSED */
static int static int
zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode, zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
vnode_t **vpp, cred_t *cr, kthread_t *td) vnode_t **vpp, cred_t *cr)
{ {
znode_t *zp, *dzp = VTOZ(dvp); znode_t *zp, *dzp = VTOZ(dvp);
zfsvfs_t *zfsvfs = dzp->z_zfsvfs; zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
@ -3245,7 +3245,7 @@ zfs_freebsd_create(ap)
mode = vap->va_mode & ALLPERMS; mode = vap->va_mode & ALLPERMS;
return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode, return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
ap->a_vpp, cnp->cn_cred, cnp->cn_thread)); ap->a_vpp, cnp->cn_cred));
} }
static int static int

View File

@ -360,7 +360,7 @@ devfs_close(struct vop_close_args *ap)
} }
vholdl(vp); vholdl(vp);
VI_UNLOCK(vp); VI_UNLOCK(vp);
vp_locked = VOP_ISLOCKED(vp, td); vp_locked = VOP_ISLOCKED(vp);
VOP_UNLOCK(vp, 0); VOP_UNLOCK(vp, 0);
KASSERT(dev->si_refcount > 0, KASSERT(dev->si_refcount > 0,
("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev))); ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));

View File

@ -107,7 +107,7 @@ nullfs_mount(struct mount *mp, struct thread *td)
* (XXX) VOP_ISLOCKED is needed? * (XXX) VOP_ISLOCKED is needed?
*/ */
if ((mp->mnt_vnodecovered->v_op == &null_vnodeops) && if ((mp->mnt_vnodecovered->v_op == &null_vnodeops) &&
VOP_ISLOCKED(mp->mnt_vnodecovered, curthread)) { VOP_ISLOCKED(mp->mnt_vnodecovered)) {
VOP_UNLOCK(mp->mnt_vnodecovered, 0); VOP_UNLOCK(mp->mnt_vnodecovered, 0);
isvnunlocked = 1; isvnunlocked = 1;
} }
@ -120,7 +120,7 @@ nullfs_mount(struct mount *mp, struct thread *td)
/* /*
* Re-lock vnode. * Re-lock vnode.
*/ */
if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered, curthread)) if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered))
vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY); vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);
if (error) if (error)
@ -247,7 +247,7 @@ nullfs_root(mp, flags, vpp, td)
VREF(vp); VREF(vp);
#ifdef NULLFS_DEBUG #ifdef NULLFS_DEBUG
if (VOP_ISLOCKED(vp, curthread)) if (VOP_ISLOCKED(vp))
panic("root vnode is locked.\n"); panic("root vnode is locked.\n");
#endif #endif
vn_lock(vp, flags | LK_RETRY); vn_lock(vp, flags | LK_RETRY);

View File

@ -619,9 +619,8 @@ static int
null_islocked(struct vop_islocked_args *ap) null_islocked(struct vop_islocked_args *ap)
{ {
struct vnode *vp = ap->a_vp; struct vnode *vp = ap->a_vp;
struct thread *td = ap->a_td;
return (lockstatus(vp->v_vnlock, td)); return (lockstatus(vp->v_vnlock));
} }
/* /*

View File

@ -203,7 +203,7 @@ smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
return EFBIG;*/ return EFBIG;*/
td = uiop->uio_td; td = uiop->uio_td;
if (vp->v_type == VDIR) { if (vp->v_type == VDIR) {
lks = LK_EXCLUSIVE;/*lockstatus(vp->v_vnlock, td);*/ lks = LK_EXCLUSIVE; /* lockstatus(vp->v_vnlock); */
if (lks == LK_SHARED) if (lks == LK_SHARED)
vn_lock(vp, LK_UPGRADE | LK_RETRY); vn_lock(vp, LK_UPGRADE | LK_RETRY);
error = smbfs_readvdir(vp, uiop, cred); error = smbfs_readvdir(vp, uiop, cred);

View File

@ -414,7 +414,7 @@ unlock:
out: out:
*vpp = vp; *vpp = vp;
MPASS(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp, td))); MPASS(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp)));
#ifdef INVARIANTS #ifdef INVARIANTS
TMPFS_NODE_LOCK(node); TMPFS_NODE_LOCK(node);
MPASS(*vpp == node->tn_vnode); MPASS(*vpp == node->tn_vnode);
@ -466,7 +466,7 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
struct tmpfs_node *node; struct tmpfs_node *node;
struct tmpfs_node *parent; struct tmpfs_node *parent;
MPASS(VOP_ISLOCKED(dvp, cnp->cn_thread)); MPASS(VOP_ISLOCKED(dvp));
MPASS(cnp->cn_flags & HASBUF); MPASS(cnp->cn_flags & HASBUF);
tmp = VFS_TO_TMPFS(dvp->v_mount); tmp = VFS_TO_TMPFS(dvp->v_mount);
@ -933,7 +933,7 @@ tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct thread *p)
int error; int error;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -975,7 +975,7 @@ tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct thread *p)
} }
node->tn_status |= TMPFS_NODE_CHANGED; node->tn_status |= TMPFS_NODE_CHANGED;
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
return 0; return 0;
} }
@ -993,7 +993,7 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
int error; int error;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -1033,7 +1033,7 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
node->tn_status |= TMPFS_NODE_CHANGED; node->tn_status |= TMPFS_NODE_CHANGED;
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
return 0; return 0;
} }
@ -1056,7 +1056,7 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
uid_t ouid; uid_t ouid;
gid_t ogid; gid_t ogid;
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -1106,7 +1106,7 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
node->tn_mode &= ~(S_ISUID | S_ISGID); node->tn_mode &= ~(S_ISUID | S_ISGID);
} }
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
return 0; return 0;
} }
@ -1125,7 +1125,7 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
int error; int error;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -1163,7 +1163,7 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents /* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
* for us, as will update tn_status; no need to do that here. */ * for us, as will update tn_status; no need to do that here. */
MPASS(VOP_ISLOCKED(vp, p)); MPASS(VOP_ISLOCKED(vp));
return error; return error;
} }
@ -1182,7 +1182,7 @@ tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
int error; int error;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, l)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -1217,7 +1217,7 @@ tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
if (birthtime->tv_nsec != VNOVAL && birthtime->tv_nsec != VNOVAL) if (birthtime->tv_nsec != VNOVAL && birthtime->tv_nsec != VNOVAL)
node->tn_birthtime = *birthtime; node->tn_birthtime = *birthtime;
MPASS(VOP_ISLOCKED(vp, l)); MPASS(VOP_ISLOCKED(vp));
return 0; return 0;
} }

View File

@ -97,7 +97,7 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
if (cnp->cn_flags & ISDOTDOT) { if (cnp->cn_flags & ISDOTDOT) {
int ltype = 0; int ltype = 0;
ltype = VOP_ISLOCKED(dvp, td); ltype = VOP_ISLOCKED(dvp);
vhold(dvp); vhold(dvp);
VOP_UNLOCK(dvp, 0); VOP_UNLOCK(dvp, 0);
/* Allocate a new vnode on the matching entry. */ /* Allocate a new vnode on the matching entry. */
@ -192,7 +192,7 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
out: out:
/* If there were no errors, *vpp cannot be null and it must be /* If there were no errors, *vpp cannot be null and it must be
* locked. */ * locked. */
MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp, td))); MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
return error; return error;
} }
@ -239,7 +239,7 @@ tmpfs_open(struct vop_open_args *v)
int error; int error;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, v->a_td)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -257,7 +257,7 @@ tmpfs_open(struct vop_open_args *v)
vnode_create_vobject(vp, node->tn_size, v->a_td); vnode_create_vobject(vp, node->tn_size, v->a_td);
} }
MPASS(VOP_ISLOCKED(vp, v->a_td)); MPASS(VOP_ISLOCKED(vp));
return error; return error;
} }
@ -270,7 +270,7 @@ tmpfs_close(struct vop_close_args *v)
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, v->a_td)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -295,7 +295,7 @@ tmpfs_access(struct vop_access_args *v)
int error; int error;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, v->a_td)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);
@ -334,7 +334,7 @@ tmpfs_access(struct vop_access_args *v)
node->tn_gid, mode, cred, NULL); node->tn_gid, mode, cred, NULL);
out: out:
MPASS(VOP_ISLOCKED(vp, v->a_td)); MPASS(VOP_ISLOCKED(vp));
return error; return error;
} }
@ -394,7 +394,7 @@ tmpfs_setattr(struct vop_setattr_args *v)
int error; int error;
MPASS(VOP_ISLOCKED(vp, l)); MPASS(VOP_ISLOCKED(vp));
error = 0; error = 0;
@ -436,7 +436,7 @@ tmpfs_setattr(struct vop_setattr_args *v)
* from tmpfs_update. */ * from tmpfs_update. */
tmpfs_update(vp); tmpfs_update(vp);
MPASS(VOP_ISLOCKED(vp, l)); MPASS(VOP_ISLOCKED(vp));
return error; return error;
} }
@ -752,7 +752,7 @@ tmpfs_fsync(struct vop_fsync_args *v)
{ {
struct vnode *vp = v->a_vp; struct vnode *vp = v->a_vp;
MPASS(VOP_ISLOCKED(vp, v->a_td)); MPASS(VOP_ISLOCKED(vp));
tmpfs_update(vp); tmpfs_update(vp);
@ -773,8 +773,8 @@ tmpfs_remove(struct vop_remove_args *v)
struct tmpfs_node *dnode; struct tmpfs_node *dnode;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(dvp, v->a_cnp->cn_thread)); MPASS(VOP_ISLOCKED(dvp));
MPASS(VOP_ISLOCKED(vp, v->a_cnp->cn_thread)); MPASS(VOP_ISLOCKED(vp));
if (vp->v_type == VDIR) { if (vp->v_type == VDIR) {
error = EISDIR; error = EISDIR;
@ -826,7 +826,7 @@ tmpfs_link(struct vop_link_args *v)
struct tmpfs_dirent *de; struct tmpfs_dirent *de;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(dvp, cnp->cn_thread)); MPASS(VOP_ISLOCKED(dvp));
MPASS(cnp->cn_flags & HASBUF); MPASS(cnp->cn_flags & HASBUF);
MPASS(dvp != vp); /* XXX When can this be false? */ MPASS(dvp != vp); /* XXX When can this be false? */
@ -899,8 +899,8 @@ tmpfs_rename(struct vop_rename_args *v)
struct tmpfs_node *tnode; struct tmpfs_node *tnode;
struct tmpfs_node *tdnode; struct tmpfs_node *tdnode;
MPASS(VOP_ISLOCKED(tdvp, tcnp->cn_thread)); MPASS(VOP_ISLOCKED(tdvp));
MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp, tcnp->cn_thread))); MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
MPASS(fcnp->cn_flags & HASBUF); MPASS(fcnp->cn_flags & HASBUF);
MPASS(tcnp->cn_flags & HASBUF); MPASS(tcnp->cn_flags & HASBUF);
@ -1105,8 +1105,8 @@ tmpfs_rmdir(struct vop_rmdir_args *v)
struct tmpfs_node *dnode; struct tmpfs_node *dnode;
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(dvp, v->a_cnp->cn_thread)); MPASS(VOP_ISLOCKED(dvp));
MPASS(VOP_ISLOCKED(vp, v->a_cnp->cn_thread)); MPASS(VOP_ISLOCKED(vp));
tmp = VFS_TO_TMPFS(dvp->v_mount); tmp = VFS_TO_TMPFS(dvp->v_mount);
dnode = VP_TO_TMPFS_DIR(dvp); dnode = VP_TO_TMPFS_DIR(dvp);
@ -1314,7 +1314,7 @@ tmpfs_inactive(struct vop_inactive_args *v)
struct tmpfs_node *node; struct tmpfs_node *node;
MPASS(VOP_ISLOCKED(vp, l)); MPASS(VOP_ISLOCKED(vp));
node = VP_TO_TMPFS_NODE(vp); node = VP_TO_TMPFS_NODE(vp);

View File

@ -425,7 +425,7 @@ unionfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
vp = ump->um_rootvp; vp = ump->um_rootvp;
UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n", UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
vp, VOP_ISLOCKED(vp, td)); vp, VOP_ISLOCKED(vp));
vref(vp); vref(vp);
if (flags & LK_TYPE_MASK) if (flags & LK_TYPE_MASK)

View File

@ -278,7 +278,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
if (LK_SHARED == (cnp->cn_lkflags & LK_TYPE_MASK)) if (LK_SHARED == (cnp->cn_lkflags & LK_TYPE_MASK))
VOP_UNLOCK(vp, 0); VOP_UNLOCK(vp, 0);
if (LK_EXCLUSIVE != VOP_ISLOCKED(vp, td)) { if (LK_EXCLUSIVE != VOP_ISLOCKED(vp)) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
lockflag = 1; lockflag = 1;
} }
@ -528,7 +528,7 @@ unionfs_close(struct vop_close_args *ap)
cred = ap->a_cred; cred = ap->a_cred;
td = ap->a_td; td = ap->a_td;
if (VOP_ISLOCKED(ap->a_vp, td) != LK_EXCLUSIVE) { if (VOP_ISLOCKED(ap->a_vp) != LK_EXCLUSIVE) {
vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY); vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
locked = 1; locked = 1;
} }
@ -1416,7 +1416,7 @@ unionfs_readdir(struct vop_readdir_args *ap)
} }
/* check the open count. unionfs needs to open before readdir. */ /* check the open count. unionfs needs to open before readdir. */
if (VOP_ISLOCKED(ap->a_vp, td) != LK_EXCLUSIVE) { if (VOP_ISLOCKED(ap->a_vp) != LK_EXCLUSIVE) {
vn_lock(ap->a_vp, LK_UPGRADE | LK_RETRY); vn_lock(ap->a_vp, LK_UPGRADE | LK_RETRY);
locked = 1; locked = 1;
} }

View File

@ -617,15 +617,12 @@ _lockmgr_disown(struct lock *lkp, const char *file, int line)
* Determine the status of a lock. * Determine the status of a lock.
*/ */
int int
lockstatus(lkp, td) lockstatus(lkp)
struct lock *lkp; struct lock *lkp;
struct thread *td;
{ {
int lock_type = 0; int lock_type = 0;
int interlocked; int interlocked;
KASSERT(td == curthread,
("%s: thread passed argument (%p) is not valid", __func__, td));
KASSERT((lkp->lk_flags & LK_DESTROYED) == 0, KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
("%s: %p lockmgr is destroyed", __func__, lkp)); ("%s: %p lockmgr is destroyed", __func__, lkp));
@ -635,7 +632,7 @@ lockstatus(lkp, td)
} else } else
interlocked = 0; interlocked = 0;
if (lkp->lk_exclusivecount != 0) { if (lkp->lk_exclusivecount != 0) {
if (lkp->lk_lockholder == td) if (lkp->lk_lockholder == curthread)
lock_type = LK_EXCLUSIVE; lock_type = LK_EXCLUSIVE;
else else
lock_type = LK_EXCLOTHER; lock_type = LK_EXCLOTHER;

View File

@ -314,7 +314,6 @@ cache_lookup(dvp, vpp, cnp)
struct componentname *cnp; struct componentname *cnp;
{ {
struct namecache *ncp; struct namecache *ncp;
struct thread *td;
u_int32_t hash; u_int32_t hash;
int error, ltype; int error, ltype;
@ -322,7 +321,6 @@ cache_lookup(dvp, vpp, cnp)
cnp->cn_flags &= ~MAKEENTRY; cnp->cn_flags &= ~MAKEENTRY;
return (0); return (0);
} }
td = cnp->cn_thread;
retry: retry:
CACHE_LOCK(); CACHE_LOCK();
numcalls++; numcalls++;
@ -426,7 +424,7 @@ success:
* differently... * differently...
*/ */
ltype = cnp->cn_lkflags & (LK_SHARED | LK_EXCLUSIVE); ltype = cnp->cn_lkflags & (LK_SHARED | LK_EXCLUSIVE);
if (ltype == VOP_ISLOCKED(*vpp, td)) if (ltype == VOP_ISLOCKED(*vpp))
return (-1); return (-1);
else if (ltype == LK_EXCLUSIVE) else if (ltype == LK_EXCLUSIVE)
vn_lock(*vpp, LK_UPGRADE | LK_RETRY); vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
@ -434,12 +432,12 @@ success:
} }
ltype = 0; /* silence gcc warning */ ltype = 0; /* silence gcc warning */
if (cnp->cn_flags & ISDOTDOT) { if (cnp->cn_flags & ISDOTDOT) {
ltype = VOP_ISLOCKED(dvp, td); ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK(dvp, 0); VOP_UNLOCK(dvp, 0);
} }
VI_LOCK(*vpp); VI_LOCK(*vpp);
CACHE_UNLOCK(); CACHE_UNLOCK();
error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, td); error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
if (cnp->cn_flags & ISDOTDOT) if (cnp->cn_flags & ISDOTDOT)
vn_lock(dvp, ltype | LK_RETRY); vn_lock(dvp, ltype | LK_RETRY);
if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE)) if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE))

View File

@ -256,7 +256,6 @@ vop_stdlock(ap)
struct vop_lock1_args /* { struct vop_lock1_args /* {
struct vnode *a_vp; struct vnode *a_vp;
int a_flags; int a_flags;
struct thread *a_td;
char *file; char *file;
int line; int line;
} */ *ap; } */ *ap;
@ -274,7 +273,6 @@ vop_stdunlock(ap)
struct vop_unlock_args /* { struct vop_unlock_args /* {
struct vnode *a_vp; struct vnode *a_vp;
int a_flags; int a_flags;
struct thread *a_td;
} */ *ap; } */ *ap;
{ {
struct vnode *vp = ap->a_vp; struct vnode *vp = ap->a_vp;
@ -287,11 +285,10 @@ int
vop_stdislocked(ap) vop_stdislocked(ap)
struct vop_islocked_args /* { struct vop_islocked_args /* {
struct vnode *a_vp; struct vnode *a_vp;
struct thread *a_td;
} */ *ap; } */ *ap;
{ {
return (lockstatus(ap->a_vp->v_vnlock, ap->a_td)); return (lockstatus(ap->a_vp->v_vnlock));
} }
/* /*

View File

@ -573,7 +573,7 @@ unionlookup:
* last operation. * last operation.
*/ */
if (dp != vp_crossmp && if (dp != vp_crossmp &&
VOP_ISLOCKED(dp, td) == LK_SHARED && VOP_ISLOCKED(dp) == LK_SHARED &&
(cnp->cn_flags & ISLASTCN) && (cnp->cn_flags & LOCKPARENT)) (cnp->cn_flags & ISLASTCN) && (cnp->cn_flags & LOCKPARENT))
vn_lock(dp, LK_UPGRADE|LK_RETRY); vn_lock(dp, LK_UPGRADE|LK_RETRY);
/* /*
@ -782,7 +782,7 @@ success:
* the caller may want it to be exclusively locked. * the caller may want it to be exclusively locked.
*/ */
if ((cnp->cn_flags & (ISLASTCN | LOCKSHARED | LOCKLEAF)) == if ((cnp->cn_flags & (ISLASTCN | LOCKSHARED | LOCKLEAF)) ==
(ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp, td) != LK_EXCLUSIVE) { (ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) {
vn_lock(dp, LK_UPGRADE | LK_RETRY); vn_lock(dp, LK_UPGRADE | LK_RETRY);
} }
if (vfslocked && dvfslocked) if (vfslocked && dvfslocked)

View File

@ -995,14 +995,12 @@ delmntque(struct vnode *vp)
static void static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg) insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{ {
struct thread *td;
td = curthread; /* XXX ? */
vp->v_data = NULL; vp->v_data = NULL;
vp->v_op = &dead_vnodeops; vp->v_op = &dead_vnodeops;
/* XXX non mp-safe fs may still call insmntque with vnode /* XXX non mp-safe fs may still call insmntque with vnode
unlocked */ unlocked */
if (!VOP_ISLOCKED(vp, td)) if (!VOP_ISLOCKED(vp))
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vgone(vp); vgone(vp);
vput(vp); vput(vp);
@ -1638,7 +1636,7 @@ restart:
VFS_UNLOCK_GIANT(vfslocked); VFS_UNLOCK_GIANT(vfslocked);
vfslocked = 0; vfslocked = 0;
} }
if (VOP_ISLOCKED(vp, curthread) != 0) { if (VOP_ISLOCKED(vp) != 0) {
VFS_UNLOCK_GIANT(vfslocked); VFS_UNLOCK_GIANT(vfslocked);
return (1); return (1);
} }
@ -2208,7 +2206,7 @@ vput(struct vnode *vp)
*/ */
v_decr_useonly(vp); v_decr_useonly(vp);
vp->v_iflag |= VI_OWEINACT; vp->v_iflag |= VI_OWEINACT;
if (VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE) { if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT); error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
VI_LOCK(vp); VI_LOCK(vp);
if (error) { if (error) {
@ -2685,7 +2683,7 @@ DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
nmp = TAILQ_NEXT(mp, mnt_list); nmp = TAILQ_NEXT(mp, mnt_list);
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
if (vp->v_type != VMARKER && if (vp->v_type != VMARKER &&
VOP_ISLOCKED(vp, curthread)) VOP_ISLOCKED(vp))
vprint("", vp); vprint("", vp);
} }
nmp = TAILQ_NEXT(mp, mnt_list); nmp = TAILQ_NEXT(mp, mnt_list);
@ -2972,7 +2970,7 @@ vfs_msync(struct mount *mp, int flags)
MNT_VNODE_FOREACH(vp, mp, mvp) { MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp); VI_LOCK(vp);
if ((vp->v_iflag & VI_OBJDIRTY) && if ((vp->v_iflag & VI_OBJDIRTY) &&
(flags == MNT_WAIT || VOP_ISLOCKED(vp, curthread) == 0)) { (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
MNT_IUNLOCK(mp); MNT_IUNLOCK(mp);
if (!vget(vp, if (!vget(vp,
LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
@ -3479,7 +3477,7 @@ void
assert_vop_locked(struct vnode *vp, const char *str) assert_vop_locked(struct vnode *vp, const char *str)
{ {
if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, curthread) == 0) if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
vfs_badlock("is not locked but should be", str, vp); vfs_badlock("is not locked but should be", str, vp);
} }
@ -3488,7 +3486,7 @@ assert_vop_unlocked(struct vnode *vp, const char *str)
{ {
if (vp && !IGNORE_LOCK(vp) && if (vp && !IGNORE_LOCK(vp) &&
VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE) VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
vfs_badlock("is locked but should not be", str, vp); vfs_badlock("is locked but should not be", str, vp);
} }
@ -3497,7 +3495,7 @@ assert_vop_elocked(struct vnode *vp, const char *str)
{ {
if (vp && !IGNORE_LOCK(vp) && if (vp && !IGNORE_LOCK(vp) &&
VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE) VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
vfs_badlock("is not exclusive locked but should be", str, vp); vfs_badlock("is not exclusive locked but should be", str, vp);
} }
@ -3507,7 +3505,7 @@ assert_vop_elocked_other(struct vnode *vp, const char *str)
{ {
if (vp && !IGNORE_LOCK(vp) && if (vp && !IGNORE_LOCK(vp) &&
VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER) VOP_ISLOCKED(vp) != LK_EXCLOTHER)
vfs_badlock("is not exclusive locked by another thread", vfs_badlock("is not exclusive locked by another thread",
str, vp); str, vp);
} }
@ -3517,7 +3515,7 @@ assert_vop_slocked(struct vnode *vp, const char *str)
{ {
if (vp && !IGNORE_LOCK(vp) && if (vp && !IGNORE_LOCK(vp) &&
VOP_ISLOCKED(vp, curthread) != LK_SHARED) VOP_ISLOCKED(vp) != LK_SHARED)
vfs_badlock("is not locked shared but should be", str, vp); vfs_badlock("is not locked shared but should be", str, vp);
} }
#endif /* 0 */ #endif /* 0 */
@ -3885,7 +3883,7 @@ vfs_knllocked(void *arg)
{ {
struct vnode *vp = arg; struct vnode *vp = arg;
return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE); return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
} }
int int

View File

@ -61,7 +61,6 @@
vop_islocked { vop_islocked {
IN struct vnode *vp; IN struct vnode *vp;
IN struct thread *td;
}; };
%% lookup dvp L ? ? %% lookup dvp L ? ?

View File

@ -193,7 +193,7 @@ ncp_conn_unlock(struct ncp_conn *conn, struct thread *td)
int int
ncp_conn_assert_locked(struct ncp_conn *conn, const char *checker, struct thread *td) ncp_conn_assert_locked(struct ncp_conn *conn, const char *checker, struct thread *td)
{ {
if (lockstatus(&conn->nc_lock, curthread) == LK_EXCLUSIVE) return 0; if (lockstatus(&conn->nc_lock) == LK_EXCLUSIVE) return 0;
printf("%s: connection isn't locked!\n", checker); printf("%s: connection isn't locked!\n", checker);
return EIO; return EIO;
} }

View File

@ -336,7 +336,7 @@ smb_co_put(struct smb_connobj *cp, struct smb_cred *scred)
int int
smb_co_lockstatus(struct smb_connobj *cp, struct thread *td) smb_co_lockstatus(struct smb_connobj *cp, struct thread *td)
{ {
return lockstatus(&cp->co_lock, td); return lockstatus(&cp->co_lock);
} }
int int

View File

@ -749,8 +749,7 @@ loop:
MNT_VNODE_FOREACH(vp, mp, mvp) { MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp); VI_LOCK(vp);
MNT_IUNLOCK(mp); MNT_IUNLOCK(mp);
if (VOP_ISLOCKED(vp, curthread) || if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
waitfor == MNT_LAZY) { waitfor == MNT_LAZY) {
VI_UNLOCK(vp); VI_UNLOCK(vp);
MNT_ILOCK(mp); MNT_ILOCK(mp);

View File

@ -493,7 +493,7 @@ nfs_upgrade_vnlock(struct vnode *vp)
{ {
int old_lock; int old_lock;
if ((old_lock = VOP_ISLOCKED(vp, curthread)) != LK_EXCLUSIVE) { if ((old_lock = VOP_ISLOCKED(vp)) != LK_EXCLUSIVE) {
if (old_lock == LK_SHARED) { if (old_lock == LK_SHARED) {
/* Upgrade to exclusive lock, this might block */ /* Upgrade to exclusive lock, this might block */
vn_lock(vp, LK_UPGRADE | LK_RETRY); vn_lock(vp, LK_UPGRADE | LK_RETRY);

View File

@ -1047,8 +1047,7 @@ loop:
MNT_VNODE_FOREACH(vp, mp, mvp) { MNT_VNODE_FOREACH(vp, mp, mvp) {
VI_LOCK(vp); VI_LOCK(vp);
MNT_IUNLOCK(mp); MNT_IUNLOCK(mp);
if (VOP_ISLOCKED(vp, curthread) || if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
waitfor == MNT_LAZY) { waitfor == MNT_LAZY) {
VI_UNLOCK(vp); VI_UNLOCK(vp);
MNT_ILOCK(mp); MNT_ILOCK(mp);

View File

@ -303,7 +303,7 @@ BUF_UNLOCK(struct buf *bp)
* Check if a buffer lock is currently held. * Check if a buffer lock is currently held.
*/ */
#define BUF_ISLOCKED(bp) \ #define BUF_ISLOCKED(bp) \
(lockstatus(&(bp)->b_lock, curthread)) (lockstatus(&(bp)->b_lock))
/* /*
* Free a buffer lock. * Free a buffer lock.
*/ */

View File

@ -202,7 +202,7 @@ void _lockmgr_assert(struct lock *, int what, const char *, int);
#endif #endif
void _lockmgr_disown(struct lock *, const char *, int); void _lockmgr_disown(struct lock *, const char *, int);
void lockmgr_printinfo(struct lock *); void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *, struct thread *); int lockstatus(struct lock *);
int lockwaiters(struct lock *); int lockwaiters(struct lock *);
#define lockmgr(lock, flags, mtx) \ #define lockmgr(lock, flags, mtx) \

View File

@ -66,7 +66,7 @@ static int ffs_rawread_readahead(struct vnode *vp,
static int ffs_rawread_main(struct vnode *vp, static int ffs_rawread_main(struct vnode *vp,
struct uio *uio); struct uio *uio);
static int ffs_rawread_sync(struct vnode *vp, struct thread *td); static int ffs_rawread_sync(struct vnode *vp);
int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone); int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
@ -95,7 +95,7 @@ ffs_rawread_setup(void)
static int static int
ffs_rawread_sync(struct vnode *vp, struct thread *td) ffs_rawread_sync(struct vnode *vp)
{ {
int spl; int spl;
int error; int error;
@ -114,14 +114,14 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
VI_UNLOCK(vp); VI_UNLOCK(vp);
if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
upgraded = 1; upgraded = 1;
else else
upgraded = 0; upgraded = 0;
VOP_UNLOCK(vp, 0); VOP_UNLOCK(vp, 0);
(void) vn_start_write(vp, &mp, V_WAIT); (void) vn_start_write(vp, &mp, V_WAIT);
VOP_LOCK(vp, LK_EXCLUSIVE); VOP_LOCK(vp, LK_EXCLUSIVE);
} else if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) { } else if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
upgraded = 1; upgraded = 1;
/* Upgrade to exclusive lock, this might block */ /* Upgrade to exclusive lock, this might block */
VOP_LOCK(vp, LK_UPGRADE); VOP_LOCK(vp, LK_UPGRADE);
@ -466,9 +466,7 @@ ffs_rawread(struct vnode *vp,
(uio->uio_resid & (secsize - 1)) == 0) { (uio->uio_resid & (secsize - 1)) == 0) {
/* Sync dirty pages and buffers if needed */ /* Sync dirty pages and buffers if needed */
error = ffs_rawread_sync(vp, error = ffs_rawread_sync(vp);
(uio->uio_td != NULL) ?
uio->uio_td : curthread);
if (error != 0) if (error != 0)
return error; return error;