locks: remove the file + line argument from internal primitives when not used

The pair is of use only in debug or LOCKPROF kernels, but was passed (zeroed)
for many locks even in production kernels.

While here, whack the tid argument from __rw_wlock_hard() and _sx_xlock_hard(); both now derive it from curthread themselves.

There is no KBI change of any sort: "external" primitives still accept the pair.
Mateusz Guzik 2017-11-22 21:51:17 +00:00
parent 4e96bf3a37
commit 013c0b493f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=326106
7 changed files with 191 additions and 76 deletions
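
For illustration, here is a minimal userland sketch of the pattern being applied (hypothetical lock type and function names, not the kernel code): the trailing-comma macros make the file/line pair part of the internal primitive's signature only when LOCK_DEBUG > 0, while the external entry point keeps its old signature.

#if LOCK_DEBUG > 0
#define LOCK_FILE_LINE_ARG_DEF  , const char *file, int line
#define LOCK_FILE_LINE_ARG      , file, line
#else
#define LOCK_FILE_LINE_ARG_DEF
#define LOCK_FILE_LINE_ARG
#endif

/* Sketch only: hypothetical lock type, not sys/lock.h. */
struct lock { int state; };

/* Internal primitive: the pair exists only in debug kernels. */
static int
lock_try_int(struct lock *lk, int opts LOCK_FILE_LINE_ARG_DEF)
{
        /* file and line are only in scope when LOCK_DEBUG > 0. */
        if (lk->state != 0)
                return (0);
        lk->state = 1;
        return (1);
}

/* External primitive: signature unchanged, so no KBI change. */
int
lock_try(struct lock *lk, int opts, const char *file, int line)
{
        /* In production kernels the macro drops file and line here. */
        return (lock_try_int(lk, opts LOCK_FILE_LINE_ARG));
}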

View File

@@ -380,9 +380,8 @@ __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
* is already owned, it will recursively acquire the lock.
*/
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
struct mtx *m;
struct thread *td;
uintptr_t tid, v;
#ifdef LOCK_PROFILING
@@ -397,8 +396,6 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
if (SCHEDULER_STOPPED_TD(td))
return (1);
m = mtxlock2mtx(c);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
curthread, m->lock_object.lo_name, file, line));
@@ -443,6 +440,15 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
return (rval);
}
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
struct mtx *m;
m = mtxlock2mtx(c);
return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}
/*
* __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
*

View File

@@ -273,7 +273,7 @@ _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
tid = (uintptr_t)curthread;
v = RW_UNLOCKED;
if (!_rw_write_lock_fetch(rw, &v, tid))
_rw_wlock_hard(rw, v, tid, file, line);
_rw_wlock_hard(rw, v, file, line);
else
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
0, 0, file, line, LOCKSTAT_WRITER);
@@ -369,8 +369,8 @@ _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
RW_LOCK_READ || ((td)->td_rw_rlocks && (_rw) & RW_LOCK_READ))
static bool __always_inline
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp,
const char *file, int line)
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp
LOCK_FILE_LINE_ARG_DEF)
{
/*
@@ -399,10 +399,9 @@ __rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp,
}
static void __noinline
__rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
const char *file, int line)
__rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
volatile struct thread *owner;
@@ -434,7 +433,6 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
#elif defined(KDTRACE_HOOKS)
lock_delay_arg_init(&lda, NULL);
#endif
rw = rwlock2rw(c);
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
@@ -454,7 +452,7 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
#endif
for (;;) {
if (__rw_rlock_try(rw, td, &v, file, line))
if (__rw_rlock_try(rw, td, &v LOCK_FILE_LINE_ARG))
break;
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
@@ -612,14 +610,12 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
}
void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
struct thread *td;
uintptr_t v;
td = curthread;
rw = rwlock2rw(c);
KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
!TD_IS_IDLETHREAD(td),
@@ -634,14 +630,23 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
v = RW_READ_VALUE(rw);
if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) ||
!__rw_rlock_try(rw, td, &v, file, line)))
__rw_rlock_hard(c, td, v, file, line);
!__rw_rlock_try(rw, td, &v LOCK_FILE_LINE_ARG)))
__rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rw->lock_object, 0, file, line);
TD_LOCKS_INC(curthread);
}
void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
struct rwlock *rw;
rw = rwlock2rw(c);
__rw_rlock_int(rw LOCK_FILE_LINE_ARG);
}
int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
@@ -724,18 +729,15 @@ __rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
}
static void __noinline
__rw_runlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
const char *file, int line)
__rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
struct turnstile *ts;
uintptr_t x, queue;
if (SCHEDULER_STOPPED())
return;
rw = rwlock2rw(c);
for (;;) {
if (__rw_runlock_try(rw, td, &v))
break;
@@ -799,17 +801,14 @@ __rw_runlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
}
void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
struct thread *td;
uintptr_t v;
rw = rwlock2rw(c);
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
__rw_assert(c, RA_RLOCKED, file, line);
__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
@@ -818,20 +817,29 @@ _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__release) ||
!__rw_runlock_try(rw, td, &v)))
__rw_runlock_hard(c, td, v, file, line);
__rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
TD_LOCKS_DEC(curthread);
}
void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
struct rwlock *rw;
rw = rwlock2rw(c);
_rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG);
}
/*
* This function is called when we are unable to obtain a write lock on the
* first try. This means that at least one other thread holds either a
* read or write lock.
*/
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
const char *file, int line)
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t tid;
struct rwlock *rw;
struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
@@ -857,6 +865,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
int doing_lockprof;
#endif
tid = (uintptr_t)curthread;
if (SCHEDULER_STOPPED())
return;
@@ -1069,8 +1078,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
* on this lock.
*/
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
int line)
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
struct turnstile *ts;
@@ -1145,9 +1153,8 @@ __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
* lock. Returns true if the upgrade succeeded and false otherwise.
*/
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
uintptr_t v, x, tid;
struct turnstile *ts;
int success;
@@ -1155,11 +1162,9 @@ __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
if (SCHEDULER_STOPPED())
return (1);
rw = rwlock2rw(c);
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
__rw_assert(c, RA_RLOCKED, file, line);
__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
/*
* Attempt to switch from one reader to a writer. If there
@@ -1217,13 +1222,21 @@ __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
return (success);
}
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
struct rwlock *rw;
rw = rwlock2rw(c);
return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG));
}
/*
* Downgrade a write lock into a single read lock.
*/
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
struct rwlock *rw;
struct turnstile *ts;
uintptr_t tid, v;
int rwait, wwait;
@@ -1231,11 +1244,9 @@ __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
rw = rwlock2rw(c);
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
__rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
if (rw_recursed(rw))
panic("downgrade of a recursed lock");
@@ -1287,6 +1298,15 @@ __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
LOCKSTAT_RECORD0(rw__downgrade, rw);
}
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
struct rwlock *rw;
rw = rwlock2rw(c);
__rw_downgrade_int(rw LOCK_FILE_LINE_ARG);
}
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert

View File

@@ -259,7 +259,7 @@ sx_destroy(struct sx *sx)
}
int
sx_try_slock_(struct sx *sx, const char *file, int line)
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t x;
@@ -290,6 +290,13 @@ sx_try_slock_(struct sx *sx, const char *file, int line)
return (0);
}
int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
@@ -307,7 +314,7 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
tid = (uintptr_t)curthread;
x = SX_LOCK_UNLOCKED;
if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
error = _sx_xlock_hard(sx, x, tid, opts, file, line);
error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
else
LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
0, 0, file, line, LOCKSTAT_WRITER);
@@ -322,7 +329,7 @@ _sx_xlock(struct sx *sx, int opts, const char *file, int line)
}
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
struct thread *td;
uintptr_t tid, x;
@@ -370,6 +377,13 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
return (rval);
}
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
@@ -394,7 +408,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
* Return 1 if the upgrade succeeded, 0 otherwise.
*/
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t x;
int success;
@@ -423,11 +437,18 @@ sx_try_upgrade_(struct sx *sx, const char *file, int line)
return (success);
}
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}
/*
* Downgrade an unrecursed exclusive lock into a single shared lock.
*/
void
sx_downgrade_(struct sx *sx, const char *file, int line)
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t x;
int wakeup_swapper;
@@ -490,6 +511,13 @@ sx_downgrade_(struct sx *sx, const char *file, int line)
kick_proc0();
}
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}
/*
* This function represents the so-called 'hard case' for sx_xlock
* operation. All 'easy case' failures are redirected to this. Note
@@ -497,10 +525,10 @@ sx_downgrade_(struct sx *sx, const char *file, int line)
* accessible from at least sx.h.
*/
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
const char *file, int line)
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
GIANT_DECLARE;
uintptr_t tid;
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
u_int i, n, spintries = 0;
@@ -523,6 +551,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
#endif
int extra_work = 0;
tid = (uintptr_t)curthread;
if (SCHEDULER_STOPPED())
return (0);
@@ -770,7 +799,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
* accessible from at least sx.h.
*/
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
_sx_xunlock_hard(struct sx *sx, uintptr_t tid LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t x, setx;
int queue, wakeup_swapper;
@@ -835,7 +864,7 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
}
static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp, const char *file, int line)
__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
{
/*
@@ -859,7 +888,7 @@ __sx_slock_try(struct sx *sx, uintptr_t *xp, const char *file, int line)
}
static int __noinline
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
GIANT_DECLARE;
#ifdef ADAPTIVE_SX
@@ -914,7 +943,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
* shared locks once there is an exclusive waiter.
*/
for (;;) {
if (__sx_slock_try(sx, &x, file, line))
if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
break;
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
@@ -1058,7 +1087,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
}
int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t x;
int error;
@@ -1074,8 +1103,8 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
error = 0;
x = SX_READ_VALUE(sx);
if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
!__sx_slock_try(sx, &x, file, line)))
error = _sx_slock_hard(sx, opts, file, line, x);
!__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
if (error == 0) {
LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
WITNESS_LOCK(&sx->lock_object, 0, file, line);
@@ -1084,6 +1113,13 @@ _sx_slock(struct sx *sx, int opts, const char *file, int line)
return (error);
}
int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}
static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{
@@ -1135,7 +1171,7 @@ _sx_sunlock_try(struct sx *sx, uintptr_t *xp)
}
static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x, const char *file, int line)
_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
int wakeup_swapper;
@@ -1182,7 +1218,7 @@ _sx_sunlock_hard(struct sx *sx, uintptr_t x, const char *file, int line)
}
void
_sx_sunlock(struct sx *sx, const char *file, int line)
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
uintptr_t x;
@@ -1195,11 +1231,18 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
x = SX_READ_VALUE(sx);
if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
!_sx_sunlock_try(sx, &x)))
_sx_sunlock_hard(sx, x, file, line);
_sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);
TD_LOCKS_DEC(curthread);
}
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _sx_assert

View File

@@ -137,9 +137,13 @@ struct lock_class {
* operations. Otherwise, use default values to avoid the unneeded bloat.
*/
#if LOCK_DEBUG > 0
#define LOCK_FILE_LINE_ARG_DEF , const char *file, int line
#define LOCK_FILE_LINE_ARG , file, line
#define LOCK_FILE __FILE__
#define LOCK_LINE __LINE__
#else
#define LOCK_FILE_LINE_ARG_DEF
#define LOCK_FILE_LINE_ARG
#define LOCK_FILE NULL
#define LOCK_LINE 0
#endif
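
Given these definitions, a single source line compiles two different ways; e.g. the new mutex trylock pair preprocesses roughly as follows (sketch of the two configurations):

/* LOCK_DEBUG > 0: the pair survives into the internal primitive. */
int _mtx_trylock_flags_int(struct mtx *m, int opts, const char *file, int line);
rval = _mtx_trylock_flags_int(m, opts, file, line);

/* LOCK_DEBUG == 0: the pair vanishes from definition and call alike. */
int _mtx_trylock_flags_int(struct mtx *m, int opts);
rval = _mtx_trylock_flags_int(m, opts);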

View File

@@ -91,6 +91,7 @@ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type,
int opts);
void _mtx_destroy(volatile uintptr_t *c);
void mtx_sysinit(void *arg);
int _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
int line);
void mutex_init(void);

View File

@@ -105,7 +105,7 @@
\
if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) || \
!_rw_write_lock_fetch((rw), &_v, _tid))) \
_rw_wlock_hard((rw), _v, _tid, (file), (line)); \
_rw_wlock_hard((rw), _v, (file), (line)); \
} while (0)
/* Release a write lock. */
@@ -128,16 +128,22 @@ void rw_sysinit(void *arg);
void rw_sysinit_flags(void *arg);
int _rw_wowned(const volatile uintptr_t *c);
void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line);
int __rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line);
void _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line);
void __rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void __rw_rlock(volatile uintptr_t *c, const char *file, int line);
int __rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int __rw_try_rlock(volatile uintptr_t *c, const char *file, int line);
void _rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line);
void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
const char *file, int line);
void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid,
const char *file, int line);
void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v
LOCK_FILE_LINE_ARG_DEF);
void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v
LOCK_FILE_LINE_ARG_DEF);
int __rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
int __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line);
void __rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF);
void __rw_downgrade(volatile uintptr_t *c, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void __rw_assert(const volatile uintptr_t *c, int what, const char *file,
@@ -163,20 +169,38 @@ void __rw_assert(const volatile uintptr_t *c, int what, const char *file,
__rw_try_wlock(&(rw)->rw_lock, f, l)
#define _rw_wunlock(rw, f, l) \
_rw_wunlock_cookie(&(rw)->rw_lock, f, l)
#define _rw_rlock(rw, f, l) \
__rw_rlock(&(rw)->rw_lock, f, l)
#define _rw_try_rlock(rw, f, l) \
__rw_try_rlock(&(rw)->rw_lock, f, l)
#if LOCK_DEBUG > 0
#define _rw_rlock(rw, f, l) \
__rw_rlock(&(rw)->rw_lock, f, l)
#define _rw_runlock(rw, f, l) \
_rw_runlock_cookie(&(rw)->rw_lock, f, l)
#define _rw_wlock_hard(rw, v, t, f, l) \
__rw_wlock_hard(&(rw)->rw_lock, v, t, f, l)
#define _rw_wunlock_hard(rw, t, f, l) \
__rw_wunlock_hard(&(rw)->rw_lock, t, f, l)
#else
#define _rw_rlock(rw, f, l) \
__rw_rlock_int((struct rwlock *)rw)
#define _rw_runlock(rw, f, l) \
_rw_runlock_cookie_int((struct rwlock *)rw)
#endif
#if LOCK_DEBUG > 0
#define _rw_wlock_hard(rw, v, f, l) \
__rw_wlock_hard(&(rw)->rw_lock, v, f, l)
#define _rw_wunlock_hard(rw, v, f, l) \
__rw_wunlock_hard(&(rw)->rw_lock, v, f, l)
#define _rw_try_upgrade(rw, f, l) \
__rw_try_upgrade(&(rw)->rw_lock, f, l)
#define _rw_downgrade(rw, f, l) \
__rw_downgrade(&(rw)->rw_lock, f, l)
#else
#define _rw_wlock_hard(rw, v, f, l) \
__rw_wlock_hard(&(rw)->rw_lock, v)
#define _rw_wunlock_hard(rw, v, f, l) \
__rw_wunlock_hard(&(rw)->rw_lock, v)
#define _rw_try_upgrade(rw, f, l) \
__rw_try_upgrade_int(rw)
#define _rw_downgrade(rw, f, l) \
__rw_downgrade_int(rw)
#endif
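
With the LOCK_DEBUG-conditional macros above, a production kernel no longer materializes file/line anywhere on the rwlock fast paths. Assuming the usual rw_rlock() convenience macro (which supplies LOCK_FILE and LOCK_LINE), the expansion is roughly:

rw_rlock(rw);
/* LOCK_DEBUG == 0:                                           */
/*   -> _rw_rlock((rw), NULL, 0)                              */
/*   -> __rw_rlock_int((struct rwlock *)(rw))    pair dropped */
/* LOCK_DEBUG > 0:                                            */
/*   -> _rw_rlock((rw), __FILE__, __LINE__)                   */
/*   -> __rw_rlock(&(rw)->rw_lock, __FILE__, __LINE__)        */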
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define _rw_assert(rw, w, f, l) \
__rw_assert(&(rw)->rw_lock, w, f, l)

View File

@@ -101,18 +101,22 @@ void sx_sysinit(void *arg);
#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0)
void sx_init_flags(struct sx *sx, const char *description, int opts);
void sx_destroy(struct sx *sx);
int sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
int sx_try_slock_(struct sx *sx, const char *file, int line);
int sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
int sx_try_xlock_(struct sx *sx, const char *file, int line);
int sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
int sx_try_upgrade_(struct sx *sx, const char *file, int line);
void sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
void sx_downgrade_(struct sx *sx, const char *file, int line);
int _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF);
int _sx_slock(struct sx *sx, int opts, const char *file, int line);
int _sx_xlock(struct sx *sx, int opts, const char *file, int line);
void _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF);
void _sx_sunlock(struct sx *sx, const char *file, int line);
void _sx_xunlock(struct sx *sx, const char *file, int line);
int _sx_xlock_hard(struct sx *sx, uintptr_t v, uintptr_t tid, int opts,
const char *file, int line);
void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
line);
int _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF);
void _sx_xunlock_hard(struct sx *sx, uintptr_t tid LOCK_FILE_LINE_ARG_DEF);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _sx_assert(const struct sx *sx, int what, const char *file, int line);
#endif
@@ -157,7 +161,7 @@ __sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, tid)))
error = _sx_xlock_hard(sx, v, tid, opts, file, line);
error = _sx_xlock_hard(sx, v, opts);
return (error);
}
@@ -170,7 +174,7 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)))
_sx_xunlock_hard(sx, tid, file, line);
_sx_xunlock_hard(sx, tid);
}
#endif
@@ -195,6 +199,7 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
#define sx_xunlock_(sx, file, line) \
__sx_xunlock((sx), curthread, (file), (line))
#endif /* LOCK_DEBUG > 0 || SX_NOINLINE */
#if (LOCK_DEBUG > 0)
#define sx_slock_(sx, file, line) \
(void)_sx_slock((sx), 0, (file), (line))
#define sx_slock_sig_(sx, file, line) \
@@ -205,6 +210,18 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
#define sx_try_xlock(sx) sx_try_xlock_((sx), LOCK_FILE, LOCK_LINE)
#define sx_try_upgrade(sx) sx_try_upgrade_((sx), LOCK_FILE, LOCK_LINE)
#define sx_downgrade(sx) sx_downgrade_((sx), LOCK_FILE, LOCK_LINE)
#else
#define sx_slock_(sx, file, line) \
(void)_sx_slock_int((sx), 0)
#define sx_slock_sig_(sx, file, line) \
_sx_slock_int((sx), SX_INTERRUPTIBLE)
#define sx_sunlock_(sx, file, line) \
_sx_sunlock_int((sx))
#define sx_try_slock(sx) sx_try_slock_int((sx))
#define sx_try_xlock(sx) sx_try_xlock_int((sx))
#define sx_try_upgrade(sx) sx_try_upgrade_int((sx))
#define sx_downgrade(sx) sx_downgrade_int((sx))
#endif
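
The sx side mirrors this; assuming the standard sx_slock() wrapper macro, a production-kernel shared lock expands roughly as:

sx_slock(sx);
/* LOCK_DEBUG == 0:                                             */
/*   -> sx_slock_((sx), NULL, 0)       (LOCK_FILE, LOCK_LINE)   */
/*   -> (void)_sx_slock_int((sx), 0)   pair never computed      */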
#ifdef INVARIANTS
#define sx_assert_(sx, what, file, line) \
_sx_assert((sx), (what), (file), (line))