- Add trylock variants of shared and exclusive locks.

- The sx assertions don't actually need the internal sx mutex lock, so
  don't bother acquiring it.
- Add a new assertion SX_ASSERT_LOCKED() that asserts that either a
  shared or exclusive lock should be held.  This assertion should be used
  instead of SX_ASSERT_SLOCKED() in almost all cases.
- Adjust some KASSERT()s to include file and line information.
- Use the new witness_assert() function in the WITNESS case for sx slock
  asserts to verify that the current thread actually owns a slock.
This commit is contained in:
John Baldwin 2001-06-27 06:39:37 +00:00
parent 04297fe609
commit 5f36700a32
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=78872
2 changed files with 75 additions and 27 deletions

View File

@ -95,8 +95,8 @@ _sx_slock(struct sx *sx, const char *file, int line)
mtx_lock(&sx->sx_lock);
KASSERT(sx->sx_xholder != curproc,
("%s (%s): trying to get slock while xlock is held\n", __FUNCTION__,
sx->sx_object.lo_name));
("%s (%s): slock while xlock is held @ %s:%d\n", __FUNCTION__,
sx->sx_object.lo_name, file, line));
/*
* Loop in case we lose the race for lock acquisition.
@ -116,6 +116,24 @@ _sx_slock(struct sx *sx, const char *file, int line)
mtx_unlock(&sx->sx_lock);
}
int
_sx_try_slock(struct sx *sx, const char *file, int line)
{
	int granted;

	mtx_lock(&sx->sx_lock);
	/* A shared lock can be granted unless an exclusive holder exists. */
	granted = (sx->sx_cnt >= 0);
	if (granted) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
	} else
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
	mtx_unlock(&sx->sx_lock);
	return (granted);
}
void
_sx_xlock(struct sx *sx, const char *file, int line)
{
@ -152,12 +170,32 @@ _sx_xlock(struct sx *sx, const char *file, int line)
mtx_unlock(&sx->sx_lock);
}
int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{
	int granted;

	mtx_lock(&sx->sx_lock);
	/* An exclusive lock can only be granted when no one holds the lock. */
	granted = (sx->sx_cnt == 0);
	if (granted) {
		sx->sx_cnt--;
		sx->sx_xholder = curproc;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
	} else
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
	mtx_unlock(&sx->sx_lock);
	return (granted);
}
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
_SX_ASSERT_SLOCKED(sx);
_SX_ASSERT_SLOCKED(sx, file, line);
WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
@ -186,7 +224,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
_SX_ASSERT_XLOCKED(sx);
_SX_ASSERT_XLOCKED(sx, file, line);
MPASS(sx->sx_cnt == -1);
WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

View File

@ -50,48 +50,58 @@ void sx_init(struct sx *sx, const char *description);
void sx_destroy(struct sx *sx);
void _sx_slock(struct sx *sx, const char *file, int line);
void _sx_xlock(struct sx *sx, const char *file, int line);
int _sx_try_slock(struct sx *sx, const char *file, int line);
int _sx_try_xlock(struct sx *sx, const char *file, int line);
void _sx_sunlock(struct sx *sx, const char *file, int line);
void _sx_xunlock(struct sx *sx, const char *file, int line);
#define sx_slock(sx) _sx_slock((sx), __FILE__, __LINE__)
#define sx_xlock(sx) _sx_xlock((sx), __FILE__, __LINE__)
#define sx_sunlock(sx) _sx_sunlock((sx), __FILE__, __LINE__)
#define sx_xunlock(sx) _sx_xunlock((sx), __FILE__, __LINE__)
#define sx_slock(sx) _sx_slock((sx), __FILE__, __LINE__)
#define sx_xlock(sx) _sx_xlock((sx), __FILE__, __LINE__)
#define sx_try_slock(sx) _sx_try_slock((sx), __FILE__, __LINE__)
#define sx_try_xlock(sx) _sx_try_xlock((sx), __FILE__, __LINE__)
#define sx_sunlock(sx) _sx_sunlock((sx), __FILE__, __LINE__)
#define sx_xunlock(sx) _sx_xunlock((sx), __FILE__, __LINE__)
#ifdef INVARIANTS
/*
* SX_ASSERT_SLOCKED() can only detect that at least *some* thread owns an
* slock, but it cannot guarantee that *this* thread owns an slock.
* In the non-WITNESS case, SX_ASSERT_LOCKED() and SX_ASSERT_SLOCKED()
* can only detect that at least *some* thread owns an slock, but it cannot
* guarantee that *this* thread owns an slock.
*/
#define SX_ASSERT_SLOCKED(sx) do { \
mtx_lock(&(sx)->sx_lock); \
_SX_ASSERT_SLOCKED((sx)); \
mtx_unlock(&(sx)->sx_lock); \
#ifdef WITNESS
#define _SX_ASSERT_LOCKED(sx, file, line) \
witness_assert(&(sx)->sx_object, LA_LOCKED, file, line)
#define _SX_ASSERT_SLOCKED(sx, file, line) \
witness_assert(&(sx)->sx_object, LA_SLOCKED, file, line)
#else
#define _SX_ASSERT_LOCKED(sx, file, line) do { \
KASSERT(((sx)->sx_cnt > 0 || (sx)->sx_xholder == curproc), \
("Lock %s not locked @ %s:%d", (sx)->sx_object.lo_name, \
file, line)); \
} while (0)
#define _SX_ASSERT_SLOCKED(sx) do { \
KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n", \
__FUNCTION__, (sx)->sx_object.lo_name)); \
#define _SX_ASSERT_SLOCKED(sx, file, line) do { \
KASSERT(((sx)->sx_cnt > 0), ("Lock %s not share locked @ %s:%d",\
(sx)->sx_object.lo_name, file, line)); \
} while (0)
#endif
#define SX_ASSERT_LOCKED(sx) _SX_ASSERT_LOCKED((sx), __FILE__, __LINE__)
#define SX_ASSERT_SLOCKED(sx) _SX_ASSERT_SLOCKED((sx), __FILE__, __LINE__)
/*
* SX_ASSERT_XLOCKED() detects and guarantees that *we* own the xlock.
*/
#define SX_ASSERT_XLOCKED(sx) do { \
mtx_lock(&(sx)->sx_lock); \
_SX_ASSERT_XLOCKED((sx)); \
mtx_unlock(&(sx)->sx_lock); \
} while (0)
#define _SX_ASSERT_XLOCKED(sx) do { \
#define _SX_ASSERT_XLOCKED(sx, file, line) do { \
KASSERT(((sx)->sx_xholder == curproc), \
("%s: thread %p lacking xlock %s\n", __FUNCTION__, \
curproc, (sx)->sx_object.lo_name)); \
("Lock %s not exclusively locked @ %s:%d", \
(sx)->sx_object.lo_name, file, line)); \
} while (0)
#define SX_ASSERT_XLOCKED(sx) _SX_ASSERT_XLOCKED((sx), __FILE__, __LINE__)
#else /* INVARIANTS */
#define SX_ASSERT_SLOCKED(sx)
#define SX_ASSERT_XLOCKED(sx)
#define _SX_ASSERT_SLOCKED(sx)
#define _SX_ASSERT_XLOCKED(sx)
#define _SX_ASSERT_SLOCKED(sx, file, line)
#define _SX_ASSERT_XLOCKED(sx, file, line)
#endif /* INVARIANTS */
#endif /* _KERNEL */