Remove explicit calls to lockmgr() with a NULL owner argument.

The lockmgr() function can now only be called with curthread as the
owner argument, and the KASSERT() has been strengthened accordingly.

In order to support on-the-fly owner switching, the new function
lockmgr_disown() has been introduced; it is now used in BUF_KERNPROC().
This changes the KPI, so the FreeBSD version will be bumped soon.
Unlike the previous code, we assume an idle thread cannot try to
acquire a lockmgr lock because it cannot sleep, so the corresponding
check[1] is dropped from BUF_KERNPROC().

Tested by: kris

[1] kib asked for a KASSERT in lockmgr_disown() covering this
condition, but after thinking about it, since this is a well-known
general rule, I did not find it really necessary.
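As a rough sketch of the API change described above (the softc and lock
field names here are illustrative only, not taken from any specific file
in this commit):

	/* Old calling convention: the owner argument could be left NULL. */
	lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL, NULL);

	/* New calling convention: curthread must be passed explicitly. */
	lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL, curthread);
	lockmgr(&sc->sc_lock, LK_RELEASE, NULL, curthread);

	/*
	 * Handing a locked buffer off to another context: instead of
	 * writing b_lock.lk_lockholder by hand, BUF_KERNPROC() now calls
	 * lockmgr_disown(), which transfers ownership to LK_KERNPROC.
	 */
	BUF_KERNPROC(bp);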
Attilio Rao 2008-01-08 23:48:31 +00:00
parent 4ad6d200d6
commit d7a7e17968
7 changed files with 60 additions and 41 deletions

View File

@@ -1560,7 +1560,7 @@ ehci_sync_hc(ehci_softc_t *sc)
}
DPRINTFN(2,("ehci_sync_hc: enter\n"));
/* get doorbell */
lockmgr(&sc->sc_doorbell_lock, LK_EXCLUSIVE, NULL, NULL);
lockmgr(&sc->sc_doorbell_lock, LK_EXCLUSIVE, NULL, curthread);
s = splhardusb();
/* ask for doorbell */
EOWRITE4(sc, EHCI_USBCMD, EOREAD4(sc, EHCI_USBCMD) | EHCI_CMD_IAAD);
@@ -1571,7 +1571,7 @@ ehci_sync_hc(ehci_softc_t *sc)
EOREAD4(sc, EHCI_USBCMD), EOREAD4(sc, EHCI_USBSTS)));
splx(s);
/* release doorbell */
lockmgr(&sc->sc_doorbell_lock, LK_RELEASE, NULL, NULL);
lockmgr(&sc->sc_doorbell_lock, LK_RELEASE, NULL, curthread);
#ifdef DIAGNOSTIC
if (error)
printf("ehci_sync_hc: tsleep() = %d\n", error);

View File

@@ -1780,7 +1780,7 @@ udav_lock_mii(struct udav_softc *sc)
#if defined(__NetBSD__)
lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL);
#elif defined(__FreeBSD__)
lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL, NULL);
lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL, curthread);
#endif
}
@@ -1793,7 +1793,7 @@ udav_unlock_mii(struct udav_softc *sc)
#if defined(__NetBSD__)
lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL);
#elif defined(__FreeBSD__)
lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, NULL);
lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, curthread);
#endif
if (--sc->sc_refcnt < 0)
usb_detach_wakeup(sc->sc_dev);

View File

@@ -78,8 +78,10 @@ int hpfs_breadstruct (struct hpfsmount *, lsn_t, u_int, u_int32_t,
hpfs_breadstruct(hpmp, lsn, D_BSIZE, D_MAGIC, bpp)
#if 0
#define hpfs_hplock(hp, p) lockmgr(&(hp)->h_intlock, LK_EXCLUSIVE, (p), NULL)
#define hpfs_hpunlock(hp, p) lockmgr(&(hp)->h_intlock, LK_RELEASE, (p), NULL)
#define hpfs_hplock(hp, p) \
lockmgr(&(hp)->h_intlock, LK_EXCLUSIVE, (p), curthread)
#define hpfs_hpunlock(hp, p) \
lockmgr(&(hp)->h_intlock, LK_RELEASE, (p), curthread)
#endif
int hpfs_hpbmap (struct hpfsnode *, daddr_t, daddr_t *, int *);

View File

@@ -359,7 +359,7 @@ ntfs_ntget(ip)
mtx_lock(&ip->i_interlock);
ip->i_usecount++;
lockmgr(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock,
NULL);
curthread);
return 0;
}
@@ -391,7 +391,7 @@ ntfs_ntlookup(
return (0);
}
} while (lockmgr(&ntfs_hashlock, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL,
NULL));
curthread));
MALLOC(ip, struct ntnode *, sizeof(struct ntnode), M_NTFSNTNODE,
M_WAITOK | M_ZERO);
@@ -413,7 +413,7 @@ ntfs_ntlookup(
ntfs_nthashins(ip);
lockmgr(&ntfs_hashlock, LK_RELEASE, NULL, NULL);
lockmgr(&ntfs_hashlock, LK_RELEASE, NULL, curthread);
*ipp = ip;
@@ -450,7 +450,7 @@ ntfs_ntput(ip)
if (ip->i_usecount > 0) {
lockmgr(&ip->i_lock, LK_RELEASE|LK_INTERLOCK, &ip->i_interlock,
NULL);
curthread);
return;
}
@@ -1982,7 +1982,7 @@ ntfs_toupper_use(mp, ntmp)
struct vnode *vp;
/* get exclusive access */
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, NULL);
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, curthread);
/* only read the translation data from a file if it hasn't been
* read already */
@@ -2005,7 +2005,7 @@ ntfs_toupper_use(mp, ntmp)
out:
ntfs_toupper_usecount++;
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, NULL);
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, curthread);
return (error);
}
@@ -2017,7 +2017,7 @@ void
ntfs_toupper_unuse()
{
/* get exclusive access */
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, NULL);
lockmgr(&ntfs_toupper_lock, LK_EXCLUSIVE, NULL, curthread);
ntfs_toupper_usecount--;
if (ntfs_toupper_usecount == 0) {
@@ -2032,7 +2032,7 @@ ntfs_toupper_unuse()
#endif
/* release the lock */
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, NULL);
lockmgr(&ntfs_toupper_lock, LK_RELEASE, NULL, curthread);
}
int

View File

@@ -105,7 +105,7 @@ unlock_lockmgr(struct lock_object *lock)
panic("lockmgr locks do not support sleep interlocking");
}
#define COUNT(td, x) if ((td)) (td)->td_locks += (x)
#define COUNT(td, x) ((td)->td_locks += (x))
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
LK_SHARE_NONZERO | LK_WAIT_NONZERO)
@@ -194,24 +194,18 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
{
int error;
struct thread *thr;
int extflags, lockflags;
int contested = 0;
uint64_t waitstart = 0;
/*
* Lock owner can only be curthread or, at least, NULL in order to
* have a deadlock free implementation of the primitive.
* Lock owner can only be curthread in order to have a deadlock
* free implementation of the primitive.
*/
KASSERT(td == NULL || td == curthread,
("lockmgr: owner thread (%p) cannot differ from curthread or NULL",
td));
KASSERT(td == curthread,
("lockmgr: owner thread (%p) cannot differ from curthread", td));
error = 0;
if (td == NULL)
thr = LK_KERNPROC;
else
thr = td;
if ((flags & LK_INTERNAL) == 0)
mtx_lock(lkp->lk_interlock);
@@ -260,7 +254,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* lock requests or upgrade requests ( but not the exclusive
* lock itself ).
*/
if (lkp->lk_lockholder != thr) {
if (lkp->lk_lockholder != td) {
lockflags = LK_HAVE_EXCL;
if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
@@ -286,10 +280,10 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
("lockmgr: not holding exclusive lock "
"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
lkp->lk_lockholder, td, lkp->lk_exclusivecount));
sharelock(td, lkp, lkp->lk_exclusivecount);
COUNT(td, -lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
@@ -308,7 +302,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
if (lkp->lk_lockholder == thr)
if (lkp->lk_lockholder == td)
panic("lockmgr: upgrade exclusive lock");
if (lkp->lk_sharecount <= 0)
panic("lockmgr: upgrade without shared");
@@ -342,7 +336,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_flags |= LK_HAVE_EXCL;
lkp->lk_lockholder = thr;
lkp->lk_lockholder = td;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
@@ -362,7 +356,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
/* FALLTHROUGH exclusive request */
case LK_EXCLUSIVE:
if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
if (lkp->lk_lockholder == td) {
/*
* Recursive lock.
*/
@@ -400,7 +394,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
break;
}
lkp->lk_flags |= LK_HAVE_EXCL;
lkp->lk_lockholder = thr;
lkp->lk_lockholder = td;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@@ -413,10 +407,10 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
if (lkp->lk_lockholder != thr &&
if (lkp->lk_lockholder != td &&
lkp->lk_lockholder != LK_KERNPROC) {
panic("lockmgr: thread %p, not %s %p unlocking",
thr, "exclusive lock holder",
td, "exclusive lock holder",
lkp->lk_lockholder);
}
if (lkp->lk_lockholder != LK_KERNPROC)
@@ -433,7 +427,7 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
shareunlock(td, lkp, 1);
else {
printf("lockmgr: thread %p unlocking unheld lock\n",
thr);
td);
kdb_backtrace();
}
@@ -448,14 +442,14 @@ _lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
if (lkp->lk_lockholder == thr)
if (lkp->lk_lockholder == td)
panic("lockmgr: draining against myself");
error = acquiredrain(lkp, extflags);
if (error)
break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
lkp->lk_lockholder = thr;
lkp->lk_lockholder = td;
lkp->lk_exclusivecount = 1;
COUNT(td, 1);
#if defined(DEBUG_LOCKS)
@@ -543,6 +537,31 @@ lockdestroy(lkp)
lock_destroy(&lkp->lk_object);
}
/*
* Disown the lockmgr.
*/
void
lockmgr_disown(struct lock *lkp)
{
struct thread *td;
td = curthread;
KASSERT(lkp->lk_exclusivecount || lkp->lk_lockholder == LK_KERNPROC,
("%s: %p lockmgr must be exclusively locked", __func__, lkp));
KASSERT(lkp->lk_lockholder == td,
("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
td));
/*
* Drop the lock reference and switch the owner. This is effectively
* atomic, as td_locks is only accessed by curthread and lk_lockholder
* needs only a single write.
*/
if (lkp->lk_lockholder == td)
td->td_locks--;
lkp->lk_lockholder = LK_KERNPROC;
}
/*
* Determine the status of a lock.
*/

View File

@@ -340,11 +340,8 @@ static __inline void BUF_KERNPROC(struct buf *);
static __inline void
BUF_KERNPROC(struct buf *bp)
{
struct thread *td = curthread;
if (!TD_IS_IDLETHREAD(td) && bp->b_lock.lk_lockholder == td)
td->td_locks--;
bp->b_lock.lk_lockholder = LK_KERNPROC;
lockmgr_disown(&bp->b_lock);
}
#endif
/*

View File

@@ -197,6 +197,7 @@ void lockdestroy(struct lock *);
int _lockmgr(struct lock *, u_int flags,
struct mtx *, struct thread *p, char *file, int line);
void lockmgr_disown(struct lock *);
void lockmgr_printinfo(struct lock *);
int lockstatus(struct lock *, struct thread *);
int lockcount(struct lock *);