Cleanup lockmgr interface and exported KPI:

- Remove the "thread" argument from the lockmgr() function as it is
  always curthread now
- Axe lockcount() function as it is no longer used
- Axe LOCKMGR_ASSERT() as it is really bogus and not currently used.
  Hopefully it will soon be replaced by something more suitable.
- Remove the prototype for dumplockinfo() as the function is no longer
  present

Additionally:
- Introduce a KASSERT() in lockstatus() so that it only accepts
  curthread or NULL, as these are the only values that should be passed
- Do a little bit of style(9) cleanup on lockmgr.h

The KPI is heavily broken by this change, so manpages and
FreeBSD_version will be updated accordingly by further commits.

Tested by: matteo
This commit is contained in:
Attilio Rao 2008-01-24 12:34:30 +00:00
parent db8665e649
commit 0e9eb108f0
28 changed files with 112 additions and 177 deletions

View File

@ -1560,7 +1560,7 @@ ehci_sync_hc(ehci_softc_t *sc)
}
DPRINTFN(2,("ehci_sync_hc: enter\n"));
/* get doorbell */
lockmgr(&sc->sc_doorbell_lock, LK_EXCLUSIVE, NULL, curthread);
lockmgr(&sc->sc_doorbell_lock, LK_EXCLUSIVE, NULL);
s = splhardusb();
/* ask for doorbell */
EOWRITE4(sc, EHCI_USBCMD, EOREAD4(sc, EHCI_USBCMD) | EHCI_CMD_IAAD);
@ -1571,7 +1571,7 @@ ehci_sync_hc(ehci_softc_t *sc)
EOREAD4(sc, EHCI_USBCMD), EOREAD4(sc, EHCI_USBSTS)));
splx(s);
/* release doorbell */
lockmgr(&sc->sc_doorbell_lock, LK_RELEASE, NULL, curthread);
lockmgr(&sc->sc_doorbell_lock, LK_RELEASE, NULL);
#ifdef DIAGNOSTIC
if (error)
printf("ehci_sync_hc: tsleep() = %d\n", error);

View File

@ -1777,11 +1777,7 @@ udav_lock_mii(struct udav_softc *sc)
__func__));
sc->sc_refcnt++;
#if defined(__NetBSD__)
lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL);
#elif defined(__FreeBSD__)
lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL, curthread);
#endif
}
static void
@ -1790,11 +1786,7 @@ udav_unlock_mii(struct udav_softc *sc)
DPRINTFN(0xff, ("%s: %s: enter\n", device_get_nameunit(sc->sc_dev),
__func__));
#if defined(__NetBSD__)
lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL);
#elif defined(__FreeBSD__)
lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, curthread);
#endif
if (--sc->sc_refcnt < 0)
usb_detach_wakeup(sc->sc_dev);
}

View File

@ -684,7 +684,7 @@ cd9660_vget_internal(mp, ino, flags, vpp, relocated, isodir)
ip->i_vnode = vp;
ip->i_number = ino;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_ISOFSNODE);

View File

@ -79,9 +79,9 @@ int hpfs_breadstruct (struct hpfsmount *, lsn_t, u_int, u_int32_t,
#if 0
#define hpfs_hplock(hp, p) \
lockmgr(&(hp)->h_intlock, LK_EXCLUSIVE, (p), curthread)
lockmgr(&(hp)->h_intlock, LK_EXCLUSIVE, (p))
#define hpfs_hpunlock(hp, p) \
lockmgr(&(hp)->h_intlock, LK_RELEASE, (p), curthread)
lockmgr(&(hp)->h_intlock, LK_RELEASE, (p))
#endif
int hpfs_hpbmap (struct hpfsnode *, daddr_t, daddr_t *, int *);

View File

@ -453,7 +453,6 @@ hpfs_vget(
struct hpfsnode *hp;
struct buf *bp;
int error;
struct thread *td;
dprintf(("hpfs_vget(0x%x): ",ino));
@ -507,14 +506,13 @@ hpfs_vget(
hp->h_mode = hpmp->hpm_mode;
hp->h_devvp = hpmp->hpm_devvp;
td = curthread;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
free(hp, M_HPFSNO);
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);

View File

@ -105,7 +105,6 @@ deget(pmp, dirclust, diroffset, depp)
struct denode *ldep;
struct vnode *nvp, *xvp;
struct buf *bp;
struct thread *td;
#ifdef MSDOSFS_DEBUG
printf("deget(pmp %p, dirclust %lu, diroffset %lx, depp %p)\n",
@ -171,15 +170,14 @@ deget(pmp, dirclust, diroffset, depp)
ldep->de_inode = inode;
fc_purge(ldep, 0); /* init the fat cache for this denode */
td = curthread;
lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(nvp, mntp);
if (error != 0) {
FREE(ldep, M_MSDOSFSNODE);
*depp = NULL;
return (error);
}
error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, td, &xvp,
error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, curthread, &xvp,
de_vncmpf, &inode);
if (error) {
*depp = NULL;

View File

@ -358,8 +358,7 @@ ntfs_ntget(ip)
mtx_lock(&ip->i_interlock);
ip->i_usecount++;
lockmgr(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock,
curthread);
lockmgr(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock);
return 0;
}
@ -390,8 +389,7 @@ ntfs_ntlookup(
*ipp = ip;
return (0);
}
} while (lockmgr(&ntfs_hashlock, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL,
curthread));
} while (lockmgr(&ntfs_hashlock, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL));
MALLOC(ip, struct ntnode *, sizeof(struct ntnode), M_NTFSNTNODE,
M_WAITOK | M_ZERO);
@ -413,7 +411,7 @@ ntfs_ntlookup(
ntfs_nthashins(ip);
lockmgr(&ntfs_hashlock, LK_RELEASE, NULL, curthread);
lockmgr(&ntfs_hashlock, LK_RELEASE, NULL);
*ipp = ip;
@ -449,8 +447,7 @@ ntfs_ntput(ip)
#endif
if (ip->i_usecount > 0) {
lockmgr(&ip->i_lock, LK_RELEASE|LK_INTERLOCK, &ip->i_interlock,
curthread);
lockmgr(&ip->i_lock, LK_RELEASE|LK_INTERLOCK, &ip->i_interlock);
return;
}
@ -1982,7 +1979,7 @@ ntfs_toupper_use(mp, ntmp)
struct vnode *vp;
/* get exclusive access */
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, curthread);
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL);
/* only read the translation data from a file if it hasn't been
* read already */
@ -2005,7 +2002,7 @@ ntfs_toupper_use(mp, ntmp)
out:
ntfs_toupper_usecount++;
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, curthread);
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL);
return (error);
}
@ -2017,7 +2014,7 @@ void
ntfs_toupper_unuse()
{
/* get exclusive access */
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, curthread);
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL);
ntfs_toupper_usecount--;
if (ntfs_toupper_usecount == 0) {
@ -2032,7 +2029,7 @@ ntfs_toupper_unuse()
#endif
/* release the lock */
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, curthread);
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL);
}
int

View File

@ -674,8 +674,7 @@ null_reclaim(struct vop_reclaim_args *ap)
vnlock = vp->v_vnlock;
vp->v_vnlock = &vp->v_lock;
if (lowervp) {
lockmgr(vp->v_vnlock,
LK_EXCLUSIVE|LK_INTERLOCK, VI_MTX(vp), curthread);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp));
vput(lowervp);
} else
panic("null_reclaim: reclaiming an node with now lowervp");

View File

@ -137,7 +137,6 @@ static int
nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
struct vnode *dvp, struct vnode **vpp)
{
struct thread *td = curthread; /* XXX */
struct nwnode *np;
struct nwnode_hash_head *nhpp;
struct nwmount *nmp = VFSTONWFS(mp);
@ -145,20 +144,20 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
int error;
loop:
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL, td);
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL);
rescan:
if (nwfs_hashlookup(nmp, fid, &np) == 0) {
vp = NWTOV(np);
mtx_lock(&vp->v_interlock);
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
lockmgr(&nwhashlock, LK_RELEASE, NULL);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread))
goto loop;
if (fap)
np->n_attr = fap->attributes;
*vpp = vp;
return(0);
}
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
lockmgr(&nwhashlock, LK_RELEASE, NULL);
if (fap == NULL || ((fap->attributes & aDIR) == 0 && dvp == NULL))
panic("nwfs_allocvp: fap = %p, dvp = %p\n", fap, dvp);
@ -190,7 +189,7 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
np->n_parent = VTONW(dvp)->n_fid;
}
vp->v_vnlock->lk_flags |= LK_CANRECURSE;
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL, td);
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL);
/*
* Another process can create vnode while we blocked in malloc() or
* getnewvnode(). Rescan list again.
@ -206,7 +205,7 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
nhpp = NWNOHASH(fid);
LIST_INSERT_HEAD(nhpp, np, n_hash);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
lockmgr(&nwhashlock, LK_RELEASE, NULL);
ASSERT_VOP_LOCKED(dvp, "nwfs_allocvp");
if (vp->v_type == VDIR && dvp && (dvp->v_vflag & VV_ROOT) == 0) {
@ -239,9 +238,9 @@ nwfs_lookupnp(struct nwmount *nmp, ncpfid fid, struct thread *td,
{
int error;
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL, td);
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL);
error = nwfs_hashlookup(nmp, fid, npp);
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
lockmgr(&nwhashlock, LK_RELEASE, NULL);
return error;
}
@ -274,9 +273,9 @@ nwfs_reclaim(ap)
NCPVNDEBUG("%s: has no parent ?\n",np->n_name);
}
}
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL, td);
lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL);
LIST_REMOVE(np, n_hash);
lockmgr(&nwhashlock, LK_RELEASE, NULL, td);
lockmgr(&nwhashlock, LK_RELEASE, NULL);
if (nmp->n_root == np) {
nmp->n_root = NULL;
}

View File

@ -58,8 +58,10 @@
#include <fs/smbfs/smbfs_subr.h>
#define SMBFS_NOHASH(smp, hval) (&(smp)->sm_hash[(hval) & (smp)->sm_hashlen])
#define smbfs_hash_lock(smp, td) lockmgr(&smp->sm_hashlock, LK_EXCLUSIVE, NULL, td)
#define smbfs_hash_unlock(smp, td) lockmgr(&smp->sm_hashlock, LK_RELEASE, NULL, td)
#define smbfs_hash_lock(smp, td) \
lockmgr(&smp->sm_hashlock, LK_EXCLUSIVE, NULL)
#define smbfs_hash_unlock(smp, td) \
lockmgr(&smp->sm_hashlock, LK_RELEASE, NULL)
extern struct vop_vector smbfs_vnodeops; /* XXX -> .h file */
@ -308,7 +310,6 @@ smbfs_reclaim(ap)
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct thread *td = ap->a_td;
struct vnode *dvp;
struct smbnode *np = VTOSMB(vp);
struct smbmount *smp = VTOSMBFS(vp);

View File

@ -613,7 +613,7 @@ udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
unode->udfmp = udfmp;
vp->v_data = unode;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
uma_zfree(udf_zone_node, unode);

View File

@ -198,7 +198,7 @@ unionfs_noderem(struct vnode *vp, struct thread *td)
vp->v_vnlock = &(vp->v_lock);
vp->v_data = NULL;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp), td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp));
if (lvp != NULLVP)
VOP_UNLOCK(lvp, 0);
if (uvp != NULLVP)

View File

@ -984,7 +984,7 @@ ext2_vget(mp, ino, flags, vpp)
ip->i_e2fs = fs = ump->um_e2fs;
ip->i_number = ino;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_EXT2NODE);

View File

@ -812,10 +812,10 @@ reiserfs_iget(
* must not release nor downgrade the lock (despite flags argument
* says) till it is fully initialized.
*/
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, (struct mtx *)0, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, (struct mtx *)0);
#endif
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_REISERFSNODE);

View File

@ -189,23 +189,18 @@ acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *
* accepted shared locks and shared-to-exclusive upgrades to go away.
*/
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
struct thread *td, char *file, int line)
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
int line)
{
struct thread *td;
int error;
int extflags, lockflags;
int contested = 0;
uint64_t waitstart = 0;
/*
* Lock owner can only be curthread in order to have a deadlock
* free implementation of the primitive.
*/
KASSERT(td == curthread,
("lockmgr: owner thread (%p) cannot differ from curthread", td));
error = 0;
td = curthread;
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
@ -576,6 +571,9 @@ lockstatus(lkp, td)
int lock_type = 0;
int interlocked;
KASSERT(td == NULL || td == curthread,
("%s: thread passed argument (%p) is not valid", __func__, td));
if (!kdb_active) {
interlocked = 1;
mtx_lock(lkp->lk_interlock);
@ -593,21 +591,6 @@ lockstatus(lkp, td)
return (lock_type);
}
/*
* Determine the number of holders of a lock.
*/
int
lockcount(lkp)
struct lock *lkp;
{
int count;
mtx_lock(lkp->lk_interlock);
count = lkp->lk_exclusivecount + lkp->lk_sharecount;
mtx_unlock(lkp->lk_interlock);
return (count);
}
/*
* Determine the number of waiters on a lock.
*/

View File

@ -263,8 +263,8 @@ vop_stdlock(ap)
{
struct vnode *vp = ap->a_vp;
return (_lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), curthread,
ap->a_file, ap->a_line));
return (_lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_file,
ap->a_line));
}
/* See above. */
@ -278,8 +278,7 @@ vop_stdunlock(ap)
{
struct vnode *vp = ap->a_vp;
return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
curthread));
return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}
/* See above. */

View File

@ -1239,7 +1239,7 @@ dounmount(mp, flags, td)
if (flags & MNT_FORCE)
mp->mnt_kern_flag |= MNTK_UNMOUNTF;
error = lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK |
((flags & MNT_FORCE) ? 0 : LK_NOWAIT), MNT_MTX(mp), td);
((flags & MNT_FORCE) ? 0 : LK_NOWAIT), MNT_MTX(mp));
if (error) {
MNT_ILOCK(mp);
mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_NOINSMNTQ |
@ -1314,7 +1314,7 @@ dounmount(mp, flags, td)
mp->mnt_flag |= async_flag;
if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
mp->mnt_kern_flag |= MNTK_ASYNC;
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
if (mp->mnt_kern_flag & MNTK_MWAIT)
wakeup(mp);
MNT_IUNLOCK(mp);
@ -1330,7 +1330,7 @@ dounmount(mp, flags, td)
vput(coveredvp);
}
vfs_event_signal(NULL, VQ_UNMOUNT, 0);
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
vfs_mount_destroy(mp);
return (0);
}

View File

@ -361,7 +361,7 @@ vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
if (interlkp)
mtx_unlock(interlkp);
lkflags = LK_SHARED | LK_INTERLOCK;
if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp)))
panic("vfs_busy: unexpected lock failure");
return (0);
}
@ -373,7 +373,7 @@ void
vfs_unbusy(struct mount *mp, struct thread *td)
{
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
vfs_rel(mp);
}

View File

@ -99,13 +99,13 @@ ncp_conn_destroy(void)
int
ncp_conn_locklist(int flags, struct thread *td)
{
return lockmgr(&listlock, flags | LK_CANRECURSE, 0, td);
return lockmgr(&listlock, flags | LK_CANRECURSE, 0);
}
void
ncp_conn_unlocklist(struct thread *td)
{
lockmgr(&listlock, LK_RELEASE, 0, td);
lockmgr(&listlock, LK_RELEASE, 0);
}
int
@ -129,17 +129,17 @@ ncp_conn_lock_any(struct ncp_conn *conn, struct thread *td, struct ucred *cred)
int error;
if (conn->nc_id == 0) return EACCES;
error = lockmgr(&conn->nc_lock, LK_EXCLUSIVE | LK_CANRECURSE, 0, td);
error = lockmgr(&conn->nc_lock, LK_EXCLUSIVE | LK_CANRECURSE, 0);
if (error == ERESTART)
return EINTR;
error = ncp_chkintr(conn, td);
if (error) {
lockmgr(&conn->nc_lock, LK_RELEASE, 0, td);
lockmgr(&conn->nc_lock, LK_RELEASE, 0);
return error;
}
if (conn->nc_id == 0) {
lockmgr(&conn->nc_lock, LK_RELEASE, 0, td);
lockmgr(&conn->nc_lock, LK_RELEASE, 0);
return EACCES;
}
conn->td = td; /* who currently operates */
@ -187,7 +187,7 @@ ncp_conn_unlock(struct ncp_conn *conn, struct thread *td)
* note, that LK_RELASE will do wakeup() instead of wakeup_one().
* this will do a little overhead
*/
lockmgr(&conn->nc_lock, LK_RELEASE, 0, td);
lockmgr(&conn->nc_lock, LK_RELEASE, 0);
}
int
@ -301,7 +301,7 @@ ncp_conn_free(struct ncp_conn *ncp)
/*
* if signal is raised - how I do react ?
*/
lockmgr(&ncp->nc_lock, LK_DRAIN, 0, td);
lockmgr(&ncp->nc_lock, LK_DRAIN, 0);
lockdestroy(&ncp->nc_lock);
while (ncp->nc_lwant) {
printf("lwant = %d\n", ncp->nc_lwant);
@ -525,14 +525,14 @@ ncp_conn_gethandle(struct ncp_conn *conn, struct thread *td, struct ncp_handle *
{
struct ncp_handle *refp;
lockmgr(&lhlock, LK_EXCLUSIVE, 0, td);
lockmgr(&lhlock, LK_EXCLUSIVE, 0);
SLIST_FOREACH(refp, &lhlist, nh_next)
if (refp->nh_conn == conn && td == refp->nh_td) break;
if (refp) {
conn->ref_cnt++;
refp->nh_ref++;
*handle = refp;
lockmgr(&lhlock, LK_RELEASE, 0, td);
lockmgr(&lhlock, LK_RELEASE, 0);
return 0;
}
MALLOC(refp,struct ncp_handle *,sizeof(struct ncp_handle),M_NCPDATA,
@ -544,7 +544,7 @@ ncp_conn_gethandle(struct ncp_conn *conn, struct thread *td, struct ncp_handle *
refp->nh_id = ncp_next_handle++;
*handle = refp;
conn->ref_cnt++;
lockmgr(&lhlock, LK_RELEASE, 0, td);
lockmgr(&lhlock, LK_RELEASE, 0);
return 0;
}
/*
@ -555,7 +555,7 @@ ncp_conn_puthandle(struct ncp_handle *handle, struct thread *td, int force)
{
struct ncp_handle *refp = handle;
lockmgr(&lhlock, LK_EXCLUSIVE, 0, td);
lockmgr(&lhlock, LK_EXCLUSIVE, 0);
refp->nh_ref--;
refp->nh_conn->ref_cnt--;
if (force) {
@ -566,7 +566,7 @@ ncp_conn_puthandle(struct ncp_handle *handle, struct thread *td, int force)
SLIST_REMOVE(&lhlist, refp, ncp_handle, nh_next);
FREE(refp, M_NCPDATA);
}
lockmgr(&lhlock, LK_RELEASE, 0, td);
lockmgr(&lhlock, LK_RELEASE, 0);
return 0;
}
/*
@ -576,10 +576,10 @@ int
ncp_conn_findhandle(int connHandle, struct thread *td, struct ncp_handle **handle) {
struct ncp_handle *refp;
lockmgr(&lhlock, LK_SHARED, 0, td);
lockmgr(&lhlock, LK_SHARED, 0);
SLIST_FOREACH(refp, &lhlist, nh_next)
if (refp->nh_td == td && refp->nh_id == connHandle) break;
lockmgr(&lhlock, LK_RELEASE, 0, td);
lockmgr(&lhlock, LK_RELEASE, 0);
if (refp == NULL) {
return EBADF;
}
@ -595,7 +595,7 @@ ncp_conn_putprochandles(struct thread *td)
struct ncp_handle *hp, *nhp;
int haveone = 0;
lockmgr(&lhlock, LK_EXCLUSIVE, 0, td);
lockmgr(&lhlock, LK_EXCLUSIVE, 0);
for (hp = SLIST_FIRST(&lhlist); hp; hp = nhp) {
nhp = SLIST_NEXT(hp, nh_next);
if (hp->nh_td != td) continue;
@ -604,7 +604,7 @@ ncp_conn_putprochandles(struct thread *td)
SLIST_REMOVE(&lhlist, hp, ncp_handle, nh_next);
FREE(hp, M_NCPDATA);
}
lockmgr(&lhlock, LK_RELEASE, 0, td);
lockmgr(&lhlock, LK_RELEASE, 0);
return haveone;
}
/*

View File

@ -96,7 +96,7 @@ smb_sm_done(void)
SMBERROR("%d connections still active\n", smb_vclist.co_usecount - 1);
return EBUSY;
}
lockmgr(&smb_vclist.co_lock, LK_DRAIN, 0, curthread);
lockmgr(&smb_vclist.co_lock, LK_DRAIN, 0);
smb_co_done(&smb_vclist);
return 0;
}
@ -242,7 +242,7 @@ static void
smb_co_done(struct smb_connobj *cp)
{
smb_sl_destroy(&cp->co_interlock);
lockmgr(&cp->co_lock, LK_RELEASE, 0, curthread);
lockmgr(&cp->co_lock, LK_RELEASE, 0);
lockdestroy(&cp->co_lock);
}
@ -275,7 +275,6 @@ smb_co_ref(struct smb_connobj *cp)
void
smb_co_rele(struct smb_connobj *cp, struct smb_cred *scred)
{
struct thread *td = scred->scr_td;
SMB_CO_LOCK(cp);
if (cp->co_usecount > 1) {
@ -291,7 +290,7 @@ smb_co_rele(struct smb_connobj *cp, struct smb_cred *scred)
cp->co_usecount--;
cp->co_flags |= SMBO_GONE;
lockmgr(&cp->co_lock, LK_DRAIN | LK_INTERLOCK, &cp->co_interlock, td);
lockmgr(&cp->co_lock, LK_DRAIN | LK_INTERLOCK, &cp->co_interlock);
smb_co_gone(cp, scred);
}
@ -316,7 +315,6 @@ smb_co_get(struct smb_connobj *cp, int flags, struct smb_cred *scred)
void
smb_co_put(struct smb_connobj *cp, struct smb_cred *scred)
{
struct thread *td = scred->scr_td;
SMB_CO_LOCK(cp);
if (cp->co_usecount > 1) {
@ -327,10 +325,10 @@ smb_co_put(struct smb_connobj *cp, struct smb_cred *scred)
} else {
SMBERROR("negative usecount");
}
lockmgr(&cp->co_lock, LK_RELEASE | LK_INTERLOCK, &cp->co_interlock, td);
lockmgr(&cp->co_lock, LK_RELEASE | LK_INTERLOCK, &cp->co_interlock);
if ((cp->co_flags & SMBO_GONE) == 0)
return;
lockmgr(&cp->co_lock, LK_DRAIN, NULL, td);
lockmgr(&cp->co_lock, LK_DRAIN, NULL);
smb_co_gone(cp, scred);
}
@ -353,13 +351,13 @@ smb_co_lock(struct smb_connobj *cp, int flags, struct thread *td)
SMBERROR("recursive lock for object %d\n", cp->co_level);
return 0;
}
return lockmgr(&cp->co_lock, flags, &cp->co_interlock, td);
return lockmgr(&cp->co_lock, flags, &cp->co_interlock);
}
void
smb_co_unlock(struct smb_connobj *cp, int flags, struct thread *td)
{
(void)lockmgr(&cp->co_lock, flags | LK_RELEASE, &cp->co_interlock, td);
(void)lockmgr(&cp->co_lock, flags | LK_RELEASE, &cp->co_interlock);
}
static void

View File

@ -74,9 +74,9 @@ struct idmap_hash {
struct lock hash_lock;
};
#define IDMAP_RLOCK(lock) lockmgr(lock, LK_SHARED, NULL, curthread)
#define IDMAP_WLOCK(lock) lockmgr(lock, LK_EXCLUSIVE, NULL, curthread)
#define IDMAP_UNLOCK(lock) lockmgr(lock, LK_RELEASE, NULL, curthread)
#define IDMAP_RLOCK(lock) lockmgr(lock, LK_SHARED, NULL)
#define IDMAP_WLOCK(lock) lockmgr(lock, LK_EXCLUSIVE, NULL)
#define IDMAP_UNLOCK(lock) lockmgr(lock, LK_RELEASE, NULL)
static struct idmap_hash idmap_uid_hash;

View File

@ -166,7 +166,7 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
np->n_fhp = &np->n_fh;
bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
np->n_fhsize = fhsize;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mntp);
if (error != 0) {
*npp = NULL;

View File

@ -277,7 +277,7 @@ BUF_LOCK(struct buf *bp, int locktype, struct mtx *interlock)
locktype |= LK_INTERNAL;
bp->b_lock.lk_wmesg = buf_wmesg;
bp->b_lock.lk_prio = PRIBIO + 4;
ret = lockmgr(&(bp)->b_lock, locktype, interlock, curthread);
ret = lockmgr(&(bp)->b_lock, locktype, interlock);
splx(s);
return ret;
}
@ -298,7 +298,7 @@ BUF_TIMELOCK(struct buf *bp, int locktype, struct mtx *interlock,
bp->b_lock.lk_wmesg = wmesg;
bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
bp->b_lock.lk_timo = timo;
ret = lockmgr(&(bp)->b_lock, (locktype), interlock, curthread);
ret = lockmgr(&(bp)->b_lock, (locktype), interlock);
splx(s);
return ret;
}
@ -315,7 +315,7 @@ BUF_UNLOCK(struct buf *bp)
s = splbio();
KASSERT((bp->b_flags & B_REMFREE) == 0,
("BUF_UNLOCK %p while B_REMFREE is still set.", bp));
lockmgr(&(bp)->b_lock, LK_RELEASE, NULL, curthread);
lockmgr(&(bp)->b_lock, LK_RELEASE, NULL);
splx(s);
}

View File

@ -166,44 +166,21 @@ struct lock {
#define LK_KERNPROC ((struct thread *)-2)
#define LK_NOPROC ((struct thread *) -1)
#ifdef INVARIANTS
#define LOCKMGR_ASSERT(lkp, what, p) do { \
switch ((what)) { \
case LK_SHARED: \
if (lockstatus((lkp), (p)) == LK_SHARED) \
break; \
/* fall into exclusive */ \
case LK_EXCLUSIVE: \
if (lockstatus((lkp), (p)) != LK_EXCLUSIVE) \
panic("lock %s %s not held at %s:%d", \
(lkp)->lk_wmesg, #what, __FILE__, \
__LINE__); \
break; \
default: \
panic("unknown LOCKMGR_ASSERT at %s:%d", __FILE__, \
__LINE__); \
} \
} while (0)
#else /* INVARIANTS */
#define LOCKMGR_ASSERT(lkp, p, what)
#endif /* INVARIANTS */
void dumplockinfo(struct lock *lkp);
struct thread;
void lockinit(struct lock *, int prio, const char *wmesg,
int timo, int flags);
void lockdestroy(struct lock *);
int _lockmgr(struct lock *, u_int flags,
struct mtx *, struct thread *p, char *file, int line);
int _lockmgr(struct lock *, u_int flags, struct mtx *, char *file,
int line);
void lockmgr_disown(struct lock *);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *, struct thread *);
int lockcount(struct lock *);
int lockwaiters(struct lock *);
#define lockmgr(lock, flags, mtx, td) _lockmgr((lock), (flags), (mtx), (td), __FILE__, __LINE__)
#define lockmgr(lock, flags, mtx) \
_lockmgr((lock), (flags), (mtx), __FILE__, __LINE__)
#define lockmgr_recursed(lkp) \
((lkp)->lk_exclusivecount > 1)
#ifdef DDB

View File

@ -631,8 +631,8 @@ ffs_snapshot(mp, snapfile)
xp = NULL;
}
lockmgr(vp->v_vnlock, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY,
VI_MTX(vp), td);
lockmgr(&vp->v_lock, LK_RELEASE, NULL, td);
VI_MTX(vp));
lockmgr(&vp->v_lock, LK_RELEASE, NULL);
/*
* If this is the first snapshot on this filesystem, then we need
* to allocate the space for the list of preallocated snapshot blocks.
@ -1591,14 +1591,14 @@ ffs_snapremove(vp)
TAILQ_REMOVE(&sn->sn_head, ip, i_nextsnap);
ip->i_nextsnap.tqe_prev = 0;
VI_UNLOCK(devvp);
lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL, td);
lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
VI_LOCK(vp);
KASSERT(vp->v_vnlock == &sn->sn_lock,
("ffs_snapremove: lost lock mutation"));
vp->v_vnlock = &vp->v_lock;
VI_UNLOCK(vp);
VI_LOCK(devvp);
lockmgr(&sn->sn_lock, LK_RELEASE, NULL, td);
lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
try_free_snapdata(devvp, td);
} else
VI_UNLOCK(devvp);
@ -1718,9 +1718,8 @@ ffs_snapblkfree(fs, devvp, bno, size, inum)
VI_UNLOCK(devvp);
return (0);
}
if (lockmgr(&sn->sn_lock,
LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
VI_MTX(devvp), td) != 0)
if (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
VI_MTX(devvp)) != 0)
goto retry;
TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
vp = ITOV(ip);
@ -1807,7 +1806,7 @@ ffs_snapblkfree(fs, devvp, bno, size, inum)
}
DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + btodb(size));
ip->i_flag |= IN_CHANGE | IN_UPDATE;
lockmgr(vp->v_vnlock, LK_RELEASE, NULL, td);
lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
return (1);
}
if (lbn >= NDADDR)
@ -1873,7 +1872,7 @@ ffs_snapblkfree(fs, devvp, bno, size, inum)
* not be freed. Although space will be lost, the snapshot
* will stay consistent.
*/
lockmgr(vp->v_vnlock, LK_RELEASE, NULL, td);
lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
return (error);
}
@ -1965,8 +1964,8 @@ ffs_snapshot_mount(mp)
devvp->v_rdev->si_snapdata = sn;
}
lockmgr(vp->v_vnlock, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY,
VI_MTX(vp), td);
lockmgr(&vp->v_lock, LK_RELEASE, NULL, td);
VI_MTX(vp));
lockmgr(&vp->v_lock, LK_RELEASE, NULL);
/*
* Link it onto the active snapshot list.
*/
@ -2048,21 +2047,17 @@ ffs_snapshot_unmount(mp)
vp = ITOV(xp);
TAILQ_REMOVE(&sn->sn_head, xp, i_nextsnap);
xp->i_nextsnap.tqe_prev = 0;
lockmgr(&sn->sn_lock,
LK_INTERLOCK | LK_EXCLUSIVE,
VI_MTX(devvp),
td);
lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE,
VI_MTX(devvp));
VI_LOCK(vp);
lockmgr(&vp->v_lock,
LK_INTERLOCK | LK_EXCLUSIVE,
VI_MTX(vp), td);
lockmgr(&vp->v_lock, LK_INTERLOCK | LK_EXCLUSIVE, VI_MTX(vp));
VI_LOCK(vp);
KASSERT(vp->v_vnlock == &sn->sn_lock,
("ffs_snapshot_unmount: lost lock mutation"));
vp->v_vnlock = &vp->v_lock;
VI_UNLOCK(vp);
lockmgr(&vp->v_lock, LK_RELEASE, NULL, td);
lockmgr(&sn->sn_lock, LK_RELEASE, NULL, td);
lockmgr(&vp->v_lock, LK_RELEASE, NULL);
lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
if (xp->i_effnlink > 0)
vrele(vp);
VI_LOCK(devvp);
@ -2252,9 +2247,8 @@ ffs_copyonwrite(devvp, bp)
/*
* Not in the precomputed list, so check the snapshots.
*/
while (lockmgr(&sn->sn_lock,
LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
VI_MTX(devvp), td) != 0) {
while (lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE | LK_SLEEPFAIL,
VI_MTX(devvp)) != 0) {
VI_LOCK(devvp);
sn = devvp->v_rdev->si_snapdata;
if (sn == NULL ||
@ -2377,7 +2371,7 @@ ffs_copyonwrite(devvp, bp)
else
launched_async_io = 1;
}
lockmgr(vp->v_vnlock, LK_RELEASE, NULL, td);
lockmgr(vp->v_vnlock, LK_RELEASE, NULL);
td->td_pflags = (td->td_pflags & ~TDP_NORUNNINGBUF) |
prev_norunningbuf;
if (launched_async_io && (td->td_pflags & TDP_NORUNNINGBUF) == 0)
@ -2517,8 +2511,8 @@ try_free_snapdata(struct vnode *devvp,
snapblklist = sn->sn_blklist;
sn->sn_blklist = NULL;
sn->sn_listsize = 0;
lockmgr(&sn->sn_lock, LK_DRAIN|LK_INTERLOCK, VI_MTX(devvp), td);
lockmgr(&sn->sn_lock, LK_RELEASE, NULL, td);
lockmgr(&sn->sn_lock, LK_DRAIN|LK_INTERLOCK, VI_MTX(devvp));
lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
lockdestroy(&sn->sn_lock);
free(sn, M_UFSMNT);
if (snapblklist != NULL)

View File

@ -1381,7 +1381,7 @@ ffs_vget(mp, ino, flags, vpp)
#endif
td = curthread;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
uma_zfree(uma_inode, ip);

View File

@ -370,8 +370,8 @@ ffs_lock(ap)
flags |= LK_INTERLOCK;
}
lkp = vp->v_vnlock;
result = _lockmgr(lkp, flags, VI_MTX(vp), curthread,
ap->a_file, ap->a_line);
result = _lockmgr(lkp, flags, VI_MTX(vp), ap->a_file,
ap->a_line);
if (lkp == vp->v_vnlock || result != 0)
break;
/*
@ -382,7 +382,7 @@ ffs_lock(ap)
* right lock. Release it, and try to get the
* new lock.
*/
(void) _lockmgr(lkp, LK_RELEASE, VI_MTX(vp), curthread,
(void) _lockmgr(lkp, LK_RELEASE, VI_MTX(vp),
ap->a_file, ap->a_line);
if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;

View File

@ -104,14 +104,14 @@ ufs_extattr_uepm_lock(struct ufsmount *ump, struct thread *td)
/* Ideally, LK_CANRECURSE would not be used, here. */
lockmgr(&ump->um_extattr.uepm_lock, LK_EXCLUSIVE | LK_RETRY |
LK_CANRECURSE, 0, td);
LK_CANRECURSE, 0);
}
static void
ufs_extattr_uepm_unlock(struct ufsmount *ump, struct thread *td)
{
lockmgr(&ump->um_extattr.uepm_lock, LK_RELEASE, 0, td);
lockmgr(&ump->um_extattr.uepm_lock, LK_RELEASE, 0);
}
/*-