From 81c794f9983314f28ba954fb5e8982c61eb43569 Mon Sep 17 00:00:00 2001
From: Attilio Rao
Date: Mon, 25 Feb 2008 18:45:57 +0000
Subject: [PATCH] Axe the 'thread' argument from VOP_ISLOCKED() and
 lockstatus() as it is always curthread.

As KPI gets broken by this patch, manpages and __FreeBSD_version will be
updated by further commits.

Tested by: Andrea Barberio
---
 .../opensolaris/uts/common/fs/zfs/zfs_vnops.c |  6 ++--
 .../opensolaris/uts/common/fs/zfs/zfs_vnops.c |  6 ++--
 sys/fs/devfs/devfs_vnops.c                    |  2 +-
 sys/fs/nullfs/null_vfsops.c                   |  6 ++--
 sys/fs/nullfs/null_vnops.c                    |  3 +-
 sys/fs/smbfs/smbfs_io.c                       |  2 +-
 sys/fs/tmpfs/tmpfs_subr.c                     | 24 ++++++-------
 sys/fs/tmpfs/tmpfs_vnops.c                    | 36 +++++++++----------
 sys/fs/unionfs/union_vfsops.c                 |  2 +-
 sys/fs/unionfs/union_vnops.c                  |  6 ++--
 sys/kern/kern_lock.c                          |  7 ++--
 sys/kern/vfs_cache.c                          |  8 ++---
 sys/kern/vfs_default.c                        |  5 +--
 sys/kern/vfs_lookup.c                         |  4 +--
 sys/kern/vfs_subr.c                           | 24 ++++++-------
 sys/kern/vnode_if.src                         |  1 -
 sys/netncp/ncp_conn.c                         |  2 +-
 sys/netsmb/smb_conn.c                         |  2 +-
 sys/nfs4client/nfs4_vfsops.c                  |  3 +-
 sys/nfsclient/nfs_subs.c                      |  2 +-
 sys/nfsclient/nfs_vfsops.c                    |  3 +-
 sys/sys/buf.h                                 |  2 +-
 sys/sys/lockmgr.h                             |  2 +-
 sys/ufs/ffs/ffs_rawread.c                     | 12 +++----
 24 files changed, 77 insertions(+), 93 deletions(-)

diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 8ba0461d4231..8c67a59eddb5 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -1113,7 +1113,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
 			int ltype = 0;
 
 			if (cnp->cn_flags & ISDOTDOT) {
-				ltype = VOP_ISLOCKED(dvp, td);
+				ltype = VOP_ISLOCKED(dvp);
 				VOP_UNLOCK(dvp, 0);
 			}
 			error = vn_lock(*vpp, cnp->cn_lkflags);
@@ -1171,7 +1171,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
 /* ARGSUSED */
 static int
 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
-    vnode_t **vpp, cred_t *cr, kthread_t *td)
+    vnode_t **vpp, cred_t *cr)
 {
 	znode_t *zp, *dzp = VTOZ(dvp);
 	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
@@ -3245,7 +3245,7 @@ zfs_freebsd_create(ap)
 	mode = vap->va_mode & ALLPERMS;
 
 	return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
-	    ap->a_vpp, cnp->cn_cred, cnp->cn_thread));
+	    ap->a_vpp, cnp->cn_cred));
 }
 
 static int
diff --git a/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 8ba0461d4231..8c67a59eddb5 100644
--- a/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -1113,7 +1113,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
 			int ltype = 0;
 
 			if (cnp->cn_flags & ISDOTDOT) {
-				ltype = VOP_ISLOCKED(dvp, td);
+				ltype = VOP_ISLOCKED(dvp);
 				VOP_UNLOCK(dvp, 0);
 			}
 			error = vn_lock(*vpp, cnp->cn_lkflags);
@@ -1171,7 +1171,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
 /* ARGSUSED */
 static int
 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
-    vnode_t **vpp, cred_t *cr, kthread_t *td)
+    vnode_t **vpp, cred_t *cr)
 {
 	znode_t *zp, *dzp = VTOZ(dvp);
 	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
@@ -3245,7 +3245,7 @@ zfs_freebsd_create(ap)
 	mode = vap->va_mode & ALLPERMS;
 
 	return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
-	    ap->a_vpp, cnp->cn_cred, cnp->cn_thread));
+	    ap->a_vpp, cnp->cn_cred));
 }
 
 static int
diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c
index b08b9fa09bfc..3422986cbfa4 100644
--- a/sys/fs/devfs/devfs_vnops.c
+++ b/sys/fs/devfs/devfs_vnops.c
@@ -360,7 +360,7 @@ devfs_close(struct vop_close_args *ap)
 		}
 		vholdl(vp);
 		VI_UNLOCK(vp);
-		vp_locked = VOP_ISLOCKED(vp, td);
+		vp_locked = VOP_ISLOCKED(vp);
 		VOP_UNLOCK(vp, 0);
 		KASSERT(dev->si_refcount > 0,
 		    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 95914f46caf1..7c93dd9888a2 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -107,7 +107,7 @@ nullfs_mount(struct mount *mp, struct thread *td)
 	 * (XXX) VOP_ISLOCKED is needed?
 	 */
 	if ((mp->mnt_vnodecovered->v_op == &null_vnodeops) &&
-	    VOP_ISLOCKED(mp->mnt_vnodecovered, curthread)) {
+	    VOP_ISLOCKED(mp->mnt_vnodecovered)) {
 		VOP_UNLOCK(mp->mnt_vnodecovered, 0);
 		isvnunlocked = 1;
 	}
@@ -120,7 +120,7 @@ nullfs_mount(struct mount *mp, struct thread *td)
 	/*
 	 * Re-lock vnode.
 	 */
-	if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered, curthread))
+	if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered))
 		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);
 
 	if (error)
@@ -247,7 +247,7 @@ nullfs_root(mp, flags, vpp, td)
 	VREF(vp);
 
 #ifdef NULLFS_DEBUG
-	if (VOP_ISLOCKED(vp, curthread))
+	if (VOP_ISLOCKED(vp))
 		panic("root vnode is locked.\n");
 #endif
 	vn_lock(vp, flags | LK_RETRY);
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index d0193fcb5bd9..ca7ed0cdeeaa 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -619,9 +619,8 @@ static int
 null_islocked(struct vop_islocked_args *ap)
 {
 	struct vnode *vp = ap->a_vp;
-	struct thread *td = ap->a_td;
 
-	return (lockstatus(vp->v_vnlock, td));
+	return (lockstatus(vp->v_vnlock));
 }
 
 /*
diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c
index 14ebdb72607c..163e72289192 100644
--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -203,7 +203,7 @@ smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
 		return EFBIG;*/
 	td = uiop->uio_td;
 	if (vp->v_type == VDIR) {
-		lks = LK_EXCLUSIVE;/*lockstatus(vp->v_vnlock, td);*/
+		lks = LK_EXCLUSIVE; /* lockstatus(vp->v_vnlock); */
 		if (lks == LK_SHARED)
 			vn_lock(vp, LK_UPGRADE | LK_RETRY);
 		error = smbfs_readvdir(vp, uiop, cred);
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 0dadf8ace4a0..cc1b75fc6a0b 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -414,7 +414,7 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
 out:
 	*vpp = vp;
 
-	MPASS(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp, td)));
+	MPASS(IFF(error == 0, *vpp != NULL && VOP_ISLOCKED(*vpp)));
 #ifdef INVARIANTS
 	TMPFS_NODE_LOCK(node);
 	MPASS(*vpp == node->tn_vnode);
@@ -466,7 +466,7 @@ tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
 	struct tmpfs_node *node;
 	struct tmpfs_node *parent;
 
-	MPASS(VOP_ISLOCKED(dvp, cnp->cn_thread));
+	MPASS(VOP_ISLOCKED(dvp));
 	MPASS(cnp->cn_flags & HASBUF);
 
 	tmp = VFS_TO_TMPFS(dvp->v_mount);
@@ -933,7 +933,7 @@ tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct thread *p)
 	int error;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -975,7 +975,7 @@ tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred, struct thread *p)
 	}
 
 	node->tn_status |= TMPFS_NODE_CHANGED;
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return 0;
 }
@@ -993,7 +993,7 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
 	int error;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -1033,7 +1033,7 @@ tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
 
 	node->tn_status |= TMPFS_NODE_CHANGED;
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return 0;
 }
@@ -1056,7 +1056,7 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
 	uid_t ouid;
 	gid_t ogid;
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -1106,7 +1106,7 @@ tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
 		node->tn_mode &= ~(S_ISUID | S_ISGID);
 	}
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return 0;
 }
@@ -1125,7 +1125,7 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
 	int error;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -1163,7 +1163,7 @@ tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
 	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
 	 * for us, as will update tn_status; no need to do that here. */
 
-	MPASS(VOP_ISLOCKED(vp, p));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return error;
 }
@@ -1182,7 +1182,7 @@ tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
 	int error;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, l));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -1217,7 +1217,7 @@ tmpfs_chtimes(struct vnode *vp, struct timespec *atime, struct timespec *mtime,
 	if (birthtime->tv_nsec != VNOVAL && birthtime->tv_nsec != VNOVAL)
 		node->tn_birthtime = *birthtime;
 
-	MPASS(VOP_ISLOCKED(vp, l));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return 0;
 }
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index a0b4b84d5343..ff466ab6ac89 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -97,7 +97,7 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
 		if (cnp->cn_flags & ISDOTDOT) {
 			int ltype = 0;
 
-			ltype = VOP_ISLOCKED(dvp, td);
+			ltype = VOP_ISLOCKED(dvp);
 			vhold(dvp);
 			VOP_UNLOCK(dvp, 0);
 			/* Allocate a new vnode on the matching entry. */
@@ -192,7 +192,7 @@ tmpfs_lookup(struct vop_cachedlookup_args *v)
 out:
 	/* If there were no errors, *vpp cannot be null and it must be
 	 * locked. */
-	MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp, td)));
+	MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
 
 	return error;
 }
@@ -239,7 +239,7 @@ tmpfs_open(struct vop_open_args *v)
 	int error;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, v->a_td));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -257,7 +257,7 @@ tmpfs_open(struct vop_open_args *v)
 		vnode_create_vobject(vp, node->tn_size, v->a_td);
 	}
 
-	MPASS(VOP_ISLOCKED(vp, v->a_td));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return error;
 }
@@ -270,7 +270,7 @@ tmpfs_close(struct vop_close_args *v)
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, v->a_td));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -295,7 +295,7 @@ tmpfs_access(struct vop_access_args *v)
 	int error;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, v->a_td));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
@@ -334,7 +334,7 @@ tmpfs_access(struct vop_access_args *v)
 	    node->tn_gid, mode, cred, NULL);
 
 out:
-	MPASS(VOP_ISLOCKED(vp, v->a_td));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return error;
 }
@@ -394,7 +394,7 @@ tmpfs_setattr(struct vop_setattr_args *v)
 
 	int error;
 
-	MPASS(VOP_ISLOCKED(vp, l));
+	MPASS(VOP_ISLOCKED(vp));
 
 	error = 0;
@@ -436,7 +436,7 @@ tmpfs_setattr(struct vop_setattr_args *v)
 	 * from tmpfs_update. */
 	tmpfs_update(vp);
 
-	MPASS(VOP_ISLOCKED(vp, l));
+	MPASS(VOP_ISLOCKED(vp));
 
 	return error;
 }
@@ -752,7 +752,7 @@ tmpfs_fsync(struct vop_fsync_args *v)
 {
 	struct vnode *vp = v->a_vp;
 
-	MPASS(VOP_ISLOCKED(vp, v->a_td));
+	MPASS(VOP_ISLOCKED(vp));
 
 	tmpfs_update(vp);
@@ -773,8 +773,8 @@ tmpfs_remove(struct vop_remove_args *v)
 	struct tmpfs_node *dnode;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(dvp, v->a_cnp->cn_thread));
-	MPASS(VOP_ISLOCKED(vp, v->a_cnp->cn_thread));
+	MPASS(VOP_ISLOCKED(dvp));
+	MPASS(VOP_ISLOCKED(vp));
 
 	if (vp->v_type == VDIR) {
 		error = EISDIR;
@@ -826,7 +826,7 @@ tmpfs_link(struct vop_link_args *v)
 	struct tmpfs_dirent *de;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(dvp, cnp->cn_thread));
+	MPASS(VOP_ISLOCKED(dvp));
 	MPASS(cnp->cn_flags & HASBUF);
 	MPASS(dvp != vp); /* XXX When can this be false? */
@@ -899,8 +899,8 @@ tmpfs_rename(struct vop_rename_args *v)
 	struct tmpfs_node *tnode;
 	struct tmpfs_node *tdnode;
 
-	MPASS(VOP_ISLOCKED(tdvp, tcnp->cn_thread));
-	MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp, tcnp->cn_thread)));
+	MPASS(VOP_ISLOCKED(tdvp));
+	MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
 	MPASS(fcnp->cn_flags & HASBUF);
 	MPASS(tcnp->cn_flags & HASBUF);
@@ -1105,8 +1105,8 @@ tmpfs_rmdir(struct vop_rmdir_args *v)
 	struct tmpfs_node *dnode;
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(dvp, v->a_cnp->cn_thread));
-	MPASS(VOP_ISLOCKED(vp, v->a_cnp->cn_thread));
+	MPASS(VOP_ISLOCKED(dvp));
+	MPASS(VOP_ISLOCKED(vp));
 
 	tmp = VFS_TO_TMPFS(dvp->v_mount);
 	dnode = VP_TO_TMPFS_DIR(dvp);
@@ -1314,7 +1314,7 @@ tmpfs_inactive(struct vop_inactive_args *v)
 
 	struct tmpfs_node *node;
 
-	MPASS(VOP_ISLOCKED(vp, l));
+	MPASS(VOP_ISLOCKED(vp));
 
 	node = VP_TO_TMPFS_NODE(vp);
diff --git a/sys/fs/unionfs/union_vfsops.c b/sys/fs/unionfs/union_vfsops.c
index 75ab212c87d6..4c566618f5b6 100644
--- a/sys/fs/unionfs/union_vfsops.c
+++ b/sys/fs/unionfs/union_vfsops.c
@@ -425,7 +425,7 @@ unionfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
 	vp = ump->um_rootvp;
 
 	UNIONFSDEBUG("unionfs_root: rootvp=%p locked=%x\n",
-	    vp, VOP_ISLOCKED(vp, td));
+	    vp, VOP_ISLOCKED(vp));
 
 	vref(vp);
 	if (flags & LK_TYPE_MASK)
diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c
index b89808365b04..d0091507c331 100644
--- a/sys/fs/unionfs/union_vnops.c
+++ b/sys/fs/unionfs/union_vnops.c
@@ -278,7 +278,7 @@ unionfs_lookup(struct vop_cachedlookup_args *ap)
 		if (LK_SHARED == (cnp->cn_lkflags & LK_TYPE_MASK))
 			VOP_UNLOCK(vp, 0);
-		if (LK_EXCLUSIVE != VOP_ISLOCKED(vp, td)) {
+		if (LK_EXCLUSIVE != VOP_ISLOCKED(vp)) {
 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 			lockflag = 1;
 		}
@@ -528,7 +528,7 @@ unionfs_close(struct vop_close_args *ap)
 	cred = ap->a_cred;
 	td = ap->a_td;
 
-	if (VOP_ISLOCKED(ap->a_vp, td) != LK_EXCLUSIVE) {
+	if (VOP_ISLOCKED(ap->a_vp) != LK_EXCLUSIVE) {
 		vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
 		locked = 1;
 	}
@@ -1416,7 +1416,7 @@ unionfs_readdir(struct vop_readdir_args *ap)
 	}
 
 	/* check the open count. unionfs needs to open before readdir. */
-	if (VOP_ISLOCKED(ap->a_vp, td) != LK_EXCLUSIVE) {
+	if (VOP_ISLOCKED(ap->a_vp) != LK_EXCLUSIVE) {
 		vn_lock(ap->a_vp, LK_UPGRADE | LK_RETRY);
 		locked = 1;
 	}
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1b14183290bb..d80a85323e63 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -617,15 +617,12 @@ _lockmgr_disown(struct lock *lkp, const char *file, int line)
  * Determine the status of a lock.
  */
 int
-lockstatus(lkp, td)
+lockstatus(lkp)
 	struct lock *lkp;
-	struct thread *td;
 {
 	int lock_type = 0;
 	int interlocked;
 
-	KASSERT(td == curthread,
-	    ("%s: thread passed argument (%p) is not valid", __func__, td));
 	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
 	    ("%s: %p lockmgr is destroyed", __func__, lkp));
 
@@ -635,7 +632,7 @@ lockstatus(lkp, td)
 	} else
 		interlocked = 0;
 	if (lkp->lk_exclusivecount != 0) {
-		if (lkp->lk_lockholder == td)
+		if (lkp->lk_lockholder == curthread)
 			lock_type = LK_EXCLUSIVE;
 		else
 			lock_type = LK_EXCLOTHER;
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index b115b08542b9..dd8b97c02c10 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -314,7 +314,6 @@ cache_lookup(dvp, vpp, cnp)
 	struct componentname *cnp;
 {
 	struct namecache *ncp;
-	struct thread *td;
 	u_int32_t hash;
 	int error, ltype;
 
@@ -322,7 +321,6 @@ cache_lookup(dvp, vpp, cnp)
 		cnp->cn_flags &= ~MAKEENTRY;
 		return (0);
 	}
-	td = cnp->cn_thread;
 retry:
 	CACHE_LOCK();
 	numcalls++;
@@ -426,7 +424,7 @@ cache_lookup(dvp, vpp, cnp)
 		 * differently...
 		 */
 		ltype = cnp->cn_lkflags & (LK_SHARED | LK_EXCLUSIVE);
-		if (ltype == VOP_ISLOCKED(*vpp, td))
+		if (ltype == VOP_ISLOCKED(*vpp))
 			return (-1);
 		else if (ltype == LK_EXCLUSIVE)
 			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
@@ -434,12 +432,12 @@ cache_lookup(dvp, vpp, cnp)
 	}
 	ltype = 0;	/* silence gcc warning */
 	if (cnp->cn_flags & ISDOTDOT) {
-		ltype = VOP_ISLOCKED(dvp, td);
+		ltype = VOP_ISLOCKED(dvp);
 		VOP_UNLOCK(dvp, 0);
 	}
 	VI_LOCK(*vpp);
 	CACHE_UNLOCK();
-	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, td);
+	error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
 	if (cnp->cn_flags & ISDOTDOT)
 		vn_lock(dvp, ltype | LK_RETRY);
 	if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE))
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index fe75ed1a6dc9..542253038117 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -256,7 +256,6 @@ vop_stdlock(ap)
 	struct vop_lock1_args /* {
 		struct vnode *a_vp;
 		int a_flags;
-		struct thread *a_td;
 		char *file;
 		int line;
 	} */ *ap;
@@ -274,7 +273,6 @@ vop_stdunlock(ap)
 	struct vop_unlock_args /* {
 		struct vnode *a_vp;
 		int a_flags;
-		struct thread *a_td;
 	} */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
@@ -287,11 +285,10 @@ int
 vop_stdislocked(ap)
 	struct vop_islocked_args /* {
 		struct vnode *a_vp;
-		struct thread *a_td;
 	} */ *ap;
 {
 
-	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
+	return (lockstatus(ap->a_vp->v_vnlock));
 }
 
 /*
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 8c4c612b7d49..67ee6cbddf2a 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -573,7 +573,7 @@ lookup(struct nameidata *ndp)
 		 * last operation.
 		 */
 		if (dp != vp_crossmp &&
-		    VOP_ISLOCKED(dp, td) == LK_SHARED &&
+		    VOP_ISLOCKED(dp) == LK_SHARED &&
 		    (cnp->cn_flags & ISLASTCN) && (cnp->cn_flags & LOCKPARENT))
 			vn_lock(dp, LK_UPGRADE|LK_RETRY);
 		/*
@@ -782,7 +782,7 @@ lookup(struct nameidata *ndp)
 	 * the caller may want it to be exclusively locked.
 	 */
 	if ((cnp->cn_flags & (ISLASTCN | LOCKSHARED | LOCKLEAF)) ==
-	    (ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp, td) != LK_EXCLUSIVE) {
+	    (ISLASTCN | LOCKLEAF) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) {
 		vn_lock(dp, LK_UPGRADE | LK_RETRY);
 	}
 	if (vfslocked && dvfslocked)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index d44510bcb05e..5bdd380fa609 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -995,14 +995,12 @@ delmntque(struct vnode *vp)
 static void
 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
 {
-	struct thread *td;
 
-	td = curthread;	/* XXX ? */
 	vp->v_data = NULL;
 	vp->v_op = &dead_vnodeops;
 	/* XXX non mp-safe fs may still call insmntque with vnode unlocked */
-	if (!VOP_ISLOCKED(vp, td))
+	if (!VOP_ISLOCKED(vp))
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	vgone(vp);
 	vput(vp);
@@ -1638,7 +1636,7 @@ sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
 		VFS_UNLOCK_GIANT(vfslocked);
 		vfslocked = 0;
 	}
-	if (VOP_ISLOCKED(vp, curthread) != 0) {
+	if (VOP_ISLOCKED(vp) != 0) {
 		VFS_UNLOCK_GIANT(vfslocked);
 		return (1);
 	}
@@ -2208,7 +2206,7 @@ vput(struct vnode *vp)
 	 */
 	v_decr_useonly(vp);
 	vp->v_iflag |= VI_OWEINACT;
-	if (VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE) {
+	if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
 		error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
 		VI_LOCK(vp);
 		if (error) {
@@ -2685,7 +2683,7 @@ DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
 		nmp = TAILQ_NEXT(mp, mnt_list);
 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
 			if (vp->v_type != VMARKER &&
-			    VOP_ISLOCKED(vp, curthread))
+			    VOP_ISLOCKED(vp))
 				vprint("", vp);
 		}
 		nmp = TAILQ_NEXT(mp, mnt_list);
@@ -2972,7 +2970,7 @@ vfs_msync(struct mount *mp, int flags)
 	MNT_VNODE_FOREACH(vp, mp, mvp) {
 		VI_LOCK(vp);
 		if ((vp->v_iflag & VI_OBJDIRTY) &&
-		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, curthread) == 0)) {
+		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
 			MNT_IUNLOCK(mp);
 			if (!vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
@@ -3479,7 +3477,7 @@ void
 assert_vop_locked(struct vnode *vp, const char *str)
 {
 
-	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, curthread) == 0)
+	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
 		vfs_badlock("is not locked but should be", str, vp);
 }
@@ -3488,7 +3486,7 @@ assert_vop_unlocked(struct vnode *vp, const char *str)
 {
 
 	if (vp && !IGNORE_LOCK(vp) &&
-	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
+	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
 		vfs_badlock("is locked but should not be", str, vp);
 }
@@ -3497,7 +3495,7 @@ assert_vop_elocked(struct vnode *vp, const char *str)
 {
 
 	if (vp && !IGNORE_LOCK(vp) &&
-	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
+	    VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
 		vfs_badlock("is not exclusive locked but should be", str, vp);
 }
@@ -3507,7 +3505,7 @@ assert_vop_elocked_other(struct vnode *vp, const char *str)
 {
 
 	if (vp && !IGNORE_LOCK(vp) &&
-	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
+	    VOP_ISLOCKED(vp) != LK_EXCLOTHER)
 		vfs_badlock("is not exclusive locked by another thread", str, vp);
 }
@@ -3517,7 +3515,7 @@ assert_vop_slocked(struct vnode *vp, const char *str)
 {
 
 	if (vp && !IGNORE_LOCK(vp) &&
-	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
+	    VOP_ISLOCKED(vp) != LK_SHARED)
 		vfs_badlock("is not locked shared but should be", str, vp);
 }
 #endif /* 0 */
@@ -3885,7 +3883,7 @@ vfs_knllocked(void *arg)
 {
 	struct vnode *vp = arg;
 
-	return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE);
+	return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
 }
 
 int
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index 5c69255106a1..754cbc33cbb6 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -61,7 +61,6 @@
 
 vop_islocked {
 	IN struct vnode *vp;
-	IN struct thread *td;
 };
 
 %% lookup dvp L ? ?
diff --git a/sys/netncp/ncp_conn.c b/sys/netncp/ncp_conn.c
index bff949844569..8b90db613f79 100644
--- a/sys/netncp/ncp_conn.c
+++ b/sys/netncp/ncp_conn.c
@@ -193,7 +193,7 @@ ncp_conn_unlock(struct ncp_conn *conn, struct thread *td)
 int
 ncp_conn_assert_locked(struct ncp_conn *conn, const char *checker, struct thread *td)
 {
-	if (lockstatus(&conn->nc_lock, curthread) == LK_EXCLUSIVE) return 0;
+	if (lockstatus(&conn->nc_lock) == LK_EXCLUSIVE) return 0;
 	printf("%s: connection isn't locked!\n", checker);
 	return EIO;
 }
diff --git a/sys/netsmb/smb_conn.c b/sys/netsmb/smb_conn.c
index 84ca7c141a3e..82f6382da1f8 100644
--- a/sys/netsmb/smb_conn.c
+++ b/sys/netsmb/smb_conn.c
@@ -336,7 +336,7 @@ smb_co_put(struct smb_connobj *cp, struct smb_cred *scred)
 int
 smb_co_lockstatus(struct smb_connobj *cp, struct thread *td)
 {
-	return lockstatus(&cp->co_lock, td);
+	return lockstatus(&cp->co_lock);
 }
 
 int
diff --git a/sys/nfs4client/nfs4_vfsops.c b/sys/nfs4client/nfs4_vfsops.c
index c83896b16775..e587d53b3e12 100644
--- a/sys/nfs4client/nfs4_vfsops.c
+++ b/sys/nfs4client/nfs4_vfsops.c
@@ -749,8 +749,7 @@ nfs4_sync(struct mount *mp, int waitfor, struct thread *td)
 	MNT_VNODE_FOREACH(vp, mp, mvp) {
 		VI_LOCK(vp);
 		MNT_IUNLOCK(mp);
-		if (VOP_ISLOCKED(vp, curthread) ||
-		    vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
+		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY) {
 			VI_UNLOCK(vp);
 			MNT_ILOCK(mp);
diff --git a/sys/nfsclient/nfs_subs.c b/sys/nfsclient/nfs_subs.c
index d3a5c089ced7..3bd498006111 100644
--- a/sys/nfsclient/nfs_subs.c
+++ b/sys/nfsclient/nfs_subs.c
@@ -493,7 +493,7 @@ nfs_upgrade_vnlock(struct vnode *vp)
 {
 	int old_lock;
 
-	if ((old_lock = VOP_ISLOCKED(vp, curthread)) != LK_EXCLUSIVE) {
+	if ((old_lock = VOP_ISLOCKED(vp)) != LK_EXCLUSIVE) {
 		if (old_lock == LK_SHARED) {
 			/* Upgrade to exclusive lock, this might block */
 			vn_lock(vp, LK_UPGRADE | LK_RETRY);
diff --git a/sys/nfsclient/nfs_vfsops.c b/sys/nfsclient/nfs_vfsops.c
index 2ea289512d0d..135387180cf1 100644
--- a/sys/nfsclient/nfs_vfsops.c
+++ b/sys/nfsclient/nfs_vfsops.c
@@ -1047,8 +1047,7 @@ nfs_sync(struct mount *mp, int waitfor, struct thread *td)
 	MNT_VNODE_FOREACH(vp, mp, mvp) {
 		VI_LOCK(vp);
 		MNT_IUNLOCK(mp);
-		if (VOP_ISLOCKED(vp, curthread) ||
-		    vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
+		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
 		    waitfor == MNT_LAZY) {
 			VI_UNLOCK(vp);
 			MNT_ILOCK(mp);
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index 06608c9fc7c9..4682ae63e6cd 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -303,7 +303,7 @@ BUF_UNLOCK(struct buf *bp)
  * Check if a buffer lock is currently held.
  */
 #define BUF_ISLOCKED(bp) \
-	(lockstatus(&(bp)->b_lock, curthread))
+	(lockstatus(&(bp)->b_lock))
 /*
  * Free a buffer lock.
 */
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 353d965b865e..2307127266f3 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -202,7 +202,7 @@ void _lockmgr_assert(struct lock *, int what, const char *, int);
 #endif
 void _lockmgr_disown(struct lock *, const char *, int);
 void lockmgr_printinfo(struct lock *);
-int lockstatus(struct lock *, struct thread *);
+int lockstatus(struct lock *);
 int lockwaiters(struct lock *);
 
 #define lockmgr(lock, flags, mtx) \
diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c
index 6369f584aaa9..f10e432ae9f8 100644
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -66,7 +66,7 @@ static int ffs_rawread_readahead(struct vnode *vp,
 static int ffs_rawread_main(struct vnode *vp,
 			    struct uio *uio);
 
-static int ffs_rawread_sync(struct vnode *vp, struct thread *td);
+static int ffs_rawread_sync(struct vnode *vp);
 
 int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
@@ -95,7 +95,7 @@ ffs_rawread_setup(void)
 
 
 static int
-ffs_rawread_sync(struct vnode *vp, struct thread *td)
+ffs_rawread_sync(struct vnode *vp)
 {
 	int spl;
 	int error;
@@ -114,14 +114,14 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
 		VI_UNLOCK(vp);
 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
-			if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE)
+			if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
 				upgraded = 1;
 			else
 				upgraded = 0;
 			VOP_UNLOCK(vp, 0);
 			(void) vn_start_write(vp, &mp, V_WAIT);
 			VOP_LOCK(vp, LK_EXCLUSIVE);
-		} else if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) {
+		} else if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
 			upgraded = 1;
 			/* Upgrade to exclusive lock, this might block */
 			VOP_LOCK(vp, LK_UPGRADE);
@@ -466,9 +466,7 @@ ffs_rawread(struct vnode *vp,
 	    (uio->uio_resid & (secsize - 1)) == 0) {
 
 		/* Sync dirty pages and buffers if needed */
-		error = ffs_rawread_sync(vp,
-					 (uio->uio_td != NULL) ?
-					 uio->uio_td : curthread);
+		error = ffs_rawread_sync(vp);
 
 		if (error != 0)
 			return error;
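
For reference, the call-site idiom after this change looks like the sketch
below. This is a minimal illustration, not part of the patch:
example_relock_dotdot() is a hypothetical helper distilled from the ISDOTDOT
hunks above (zfs_lookup(), tmpfs_lookup(), cache_lookup()), and it assumes the
usual in-kernel context and headers (sys/param.h, sys/vnode.h, sys/lockmgr.h).
The point is that VOP_ISLOCKED() and lockstatus() now report lock state for
curthread implicitly, so callers no longer carry a struct thread pointer
around just to ask that question.

/*
 * Minimal sketch (hypothetical helper, not part of this patch) of the
 * post-patch "remember parent lock state, unlock parent, lock child,
 * restore parent" idiom used by the ISDOTDOT hunks above.
 */
static int
example_relock_dotdot(struct vnode *dvp, struct vnode *vp, int lkflags)
{
	int ltype, error;

	/* Remember how the parent is locked; no thread argument needed. */
	ltype = VOP_ISLOCKED(dvp);
	VOP_UNLOCK(dvp, 0);

	/* Lock the child with the caller-requested flags. */
	error = vn_lock(vp, lkflags);

	/* Put the parent back into its previous lock state. */
	vn_lock(dvp, ltype | LK_RETRY);
	return (error);
}

Note that vget() in cache_lookup() still takes an explicit thread argument
(now cnp->cn_thread instead of the removed local td); only VOP_ISLOCKED() and
lockstatus() lose the parameter, and the KASSERT removed from lockstatus()
documents that the argument was always curthread anyway.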