- Add an sx_descr description member to the sx lock structure.

- Add an sx_xholder member to the sx struct, used for INVARIANTS-enabled
  assertions. It indicates the thread that presently owns the xlock.

- Add assertions to the sx lock code that detect fatal API abuse
  (sketched below):
     xlock --> xlock
     xlock --> slock
  These now work thanks to sx_xholder.
  Note that the remaining two problematic cases:
     slock --> xlock
     slock --> slock (a little less problematic, but still recursion)
  will eventually need to be handled by witness, as they are more
  involved.
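For illustration only (not part of this commit; "example_lock" is a made-up
name), this is the kind of abuse the new KASSERTs catch once sx_xholder
records the exclusive owner:

    struct sx example_lock;

    sx_init(&example_lock, "example");
    sx_xlock(&example_lock);
    /*
     * xlock --> xlock: with INVARIANTS the new KASSERT in sx_xlock()
     * fires here; without it, the second xlock would deadlock waiting
     * for itself to release the lock.
     */
    sx_xlock(&example_lock);
    /* xlock --> slock: sx_slock() would likewise trip its new KASSERT. */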

Reviewed by: jhb, jake, jasone
bmilekic 2001-03-06 06:17:05 +00:00
parent a710dd7194
commit c3da764666
2 changed files with 37 additions and 13 deletions

sys/kern/kern_sx.c

@@ -34,12 +34,12 @@
* Priority propagation will not generally raise the priority of lock holders,
* so should not be relied upon in combination with sx locks.
*
* The witness code can not detect lock cycles.
* The witness code can not detect lock cycles (yet).
*
* XXX: When witness is made to function with sx locks, it will need to
* XXX: be taught to deal with these situations, as they are more involved:
* slock --> xlock (deadlock)
* slock --> slock (slock recursion, not fatal)
* xlock --> xlock (deadlock)
* xlock --> slock (deadlock)
*/
#include <sys/param.h>
@@ -58,7 +58,9 @@ sx_init(struct sx *sx, const char *description)
cv_init(&sx->sx_shrd_cv, description);
sx->sx_shrd_wcnt = 0;
cv_init(&sx->sx_excl_cv, description);
sx->sx_descr = description;
sx->sx_excl_wcnt = 0;
sx->sx_xholder = NULL;
}
void
@@ -66,7 +68,7 @@ sx_destroy(struct sx *sx)
{
KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
0), ("%s: holders or waiters\n", __FUNCTION__));
0), ("%s (%s): holders or waiters\n", __FUNCTION__, sx->sx_descr));
mtx_destroy(&sx->sx_lock);
cv_destroy(&sx->sx_shrd_cv);
@@ -78,6 +80,9 @@ sx_slock(struct sx *sx)
{
mtx_lock(&sx->sx_lock);
KASSERT(sx->sx_xholder != curproc,
("%s (%s): trying to get slock while xlock is held\n", __FUNCTION__,
sx->sx_descr));
/*
* Loop in case we lose the race for lock acquisition.
@@ -100,6 +105,16 @@ sx_xlock(struct sx *sx)
mtx_lock(&sx->sx_lock);
/*
* With sx locks, we're absolutely not permitted to recurse on
* xlocks, as it is fatal (deadlock). Normally, recursion is handled
* by WITNESS, but as it is not semantically correct to hold the
* xlock while in here, we consider it API abuse and put it under
* INVARIANTS.
*/
KASSERT(sx->sx_xholder != curproc,
("%s (%s): xlock already held", __FUNCTION__, sx->sx_descr));
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
sx->sx_excl_wcnt++;
@@ -107,8 +122,11 @@ sx_xlock(struct sx *sx)
sx->sx_excl_wcnt--;
}
MPASS(sx->sx_cnt == 0);
/* Acquire an exclusive lock. */
sx->sx_cnt--;
sx->sx_xholder = curproc;
mtx_unlock(&sx->sx_lock);
}
@@ -118,7 +136,7 @@ sx_sunlock(struct sx *sx)
{
mtx_lock(&sx->sx_lock);
KASSERT((sx->sx_cnt > 0), ("%s: lacking slock\n", __FUNCTION__));
SX_ASSERT_SLOCKED(sx);
/* Release. */
sx->sx_cnt--;
@@ -143,10 +161,12 @@ sx_xunlock(struct sx *sx)
{
mtx_lock(&sx->sx_lock);
KASSERT((sx->sx_cnt == -1), ("%s: lacking xlock\n", __FUNCTION__));
SX_ASSERT_XLOCKED(sx);
MPASS(sx->sx_cnt == -1);
/* Release. */
sx->sx_cnt++;
sx->sx_xholder = NULL;
/*
* Wake up waiters if there are any. Give precedence to slock waiters.

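The hunk above is cut off before the wakeup code itself; as a rough sketch
(editorial, not taken from this diff), the "slock waiters first" policy the
comment describes can be expressed with the condition variables already in
struct sx:

    if (sx->sx_shrd_wcnt > 0)
        cv_broadcast(&sx->sx_shrd_cv);  /* wake all waiting slock requests */
    else if (sx->sx_excl_wcnt > 0)
        cv_signal(&sx->sx_excl_cv);     /* otherwise wake one xlock waiter */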
sys/sys/sx.h

@@ -35,12 +35,14 @@
#include <sys/condvar.h>
struct sx {
struct mtx sx_lock; /* General protection lock and xlock. */
struct mtx sx_lock; /* General protection lock. */
const char *sx_descr; /* sx lock description. */
int sx_cnt; /* -1: xlock, > 0: slock count. */
struct cv sx_shrd_cv; /* slock waiters. */
int sx_shrd_wcnt; /* Number of slock waiters. */
struct cv sx_excl_cv; /* xlock waiters. */
int sx_excl_wcnt; /* Number of xlock waiters. */
struct proc *sx_xholder; /* Thread presently holding xlock. */
};
#ifdef _KERNEL
@@ -58,20 +60,22 @@ void sx_xunlock(struct sx *sx);
*/
#define SX_ASSERT_SLOCKED(sx) do { \
mtx_lock(&(sx)->sx_lock); \
KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock\n", \
__FUNCTION__)); \
KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n", \
__FUNCTION__, (sx)->sx_descr)); \
mtx_unlock(&(sx)->sx_lock); \
} while (0)
/*
* SX_ASSERT_XLOCKED() can only detect that at least *some* thread owns an
* xlock, but it cannot guarantee that *this* thread owns an xlock.
* SX_ASSERT_XLOCKED() detects and guarantees that *we* own the xlock.
*/
#define SX_ASSERT_XLOCKED(sx) do { \
mtx_lock(&(sx)->sx_lock); \
KASSERT(((sx)->sx_cnt == -1), ("%s: lacking xlock\n", \
__FUNCTION__)); \
KASSERT(((sx)->sx_xholder == curproc), \
("%s: thread %p lacking xlock %s\n", __FUNCTION__, \
curproc, (sx)->sx_descr)); \
mtx_unlock(&(sx)->sx_lock); \
} while (0)
#else /* INVARIANTS */
#define SX_ASSERT_SLOCKED(sx)
#define SX_ASSERT_XLOCKED(sx)
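Finally, a hypothetical consumer of the assertion macros (not part of this
diff; "foo_lock" and foo_modify() are invented names):

    static struct sx foo_lock;

    static void
    foo_modify(void)
    {
        /*
         * We must be the exclusive owner here.  With INVARIANTS, this
         * now checks sx_xholder == curproc rather than merely that
         * *some* thread holds the xlock.
         */
        SX_ASSERT_XLOCKED(&foo_lock);
        /* ... modify data protected by foo_lock ... */
    }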