Adaptive spinning in the write path while readers hold the lock, and writer starvation avoidance in the read path.

- Move recursion checking into the rwlock inlines to free a lock-word bit
  for use by adaptive spinners (see the layout sketch below).
- Clear the RW_LOCK_WRITE_SPINNER flag whenever the lock state changes,
  causing write spinners to restart their loop.
- Limit write spinners by a count while readers hold the lock, as there
  is no way to know for certain whether the readers are still running.
- In the read path, block if there are write waiters or spinners, to
  avoid starving writers.  Use a new per-thread count, td_rw_rlocks, to
  skip the starvation avoidance when it might cause a deadlock.
- Remove or change assertions in turnstiles that are no longer valid.

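Because the recursion flag moves off the lock word, bit 0x08 is reused for
RW_LOCK_WRITE_SPINNER while the reader count stays in the upper bits. A
minimal sketch of the resulting lock-word layout; RW_ONE_READER is assumed
to keep its usual definition (1 << RW_READERS_SHIFT), which this diff does
not show:

    #include <stdint.h>
    #include <stdio.h>

    /* Flag values from the flag-definition hunk at the end of this commit. */
    #define RW_LOCK_READ           0x01
    #define RW_LOCK_READ_WAITERS   0x02
    #define RW_LOCK_WRITE_WAITERS  0x04
    #define RW_LOCK_WRITE_SPINNER  0x08   /* reuses the old RW_LOCK_RECURSED bit */
    #define RW_READERS_SHIFT       4
    #define RW_ONE_READER          ((uintptr_t)1 << RW_READERS_SHIFT)  /* assumed */

    int
    main(void)
    {
            /* Two readers hold the lock; a writer spins and another waits. */
            uintptr_t v = RW_LOCK_READ | RW_LOCK_WRITE_WAITERS |
                RW_LOCK_WRITE_SPINNER | (2 * RW_ONE_READER);

            printf("readers=%lu write_waiters=%d write_spinner=%d\n",
                (unsigned long)(v >> RW_READERS_SHIFT),
                (v & RW_LOCK_WRITE_WAITERS) != 0,
                (v & RW_LOCK_WRITE_SPINNER) != 0);
            return (0);
    }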
Reviewed by:    attilio (developed parts of the patch as well)
Sponsored by:   Nokia
commit 5dff04c31f (parent abd94b3d59)
Jeff Roberson, 2008-02-06 01:02:13 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00; svn path=/head/; revision=176017
4 changed files with 181 additions and 166 deletions


@ -219,6 +219,17 @@ _rw_wunlock(struct rwlock *rw, const char *file, int line)
lock_profile_release_lock(&rw->lock_object);
__rw_wunlock(rw, curthread, file, line);
}
/*
* Determines whether a new reader can acquire a lock. Succeeds if the
* reader already owns a read lock and the lock is locked for read to
* prevent deadlock from reader recursion. Also succeeds if the lock
* is unlocked and has no writer waiters or spinners. Failing otherwise
* prioritizes writers before readers.
*/
#define RW_CAN_READ(_rw) \
((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) & \
(RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) == \
RW_LOCK_READ)
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
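To make the new admission policy concrete, here is a standalone restatement
of RW_CAN_READ() with curthread->td_rw_rlocks passed as a parameter; flag
values are taken from the last hunk of this commit. Note that an unlocked
rwlock is encoded as a read lock with no waiters, so the combined test also
admits readers to an uncontested lock:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RW_LOCK_READ           0x01
    #define RW_LOCK_WRITE_WAITERS  0x04
    #define RW_LOCK_WRITE_SPINNER  0x08
    #define RW_READERS_SHIFT       4

    static bool
    rw_can_read(uintptr_t v, int td_rw_rlocks)
    {
            /* A recursing reader may not block: that could deadlock. */
            if (td_rw_rlocks && (v & RW_LOCK_READ))
                    return (true);
            /* Otherwise admit readers only if no writer waits or spins. */
            return ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS |
                RW_LOCK_WRITE_SPINNER)) == RW_LOCK_READ);
    }

    int
    main(void)
    {
            uintptr_t one = (uintptr_t)1 << RW_READERS_SHIFT;

            /* Read-locked, nothing pending: a new reader may join. */
            printf("%d\n", rw_can_read(RW_LOCK_READ | one, 0));        /* 1 */
            /* A writer waits: a fresh reader must block... */
            printf("%d\n", rw_can_read(RW_LOCK_READ | one |
                RW_LOCK_WRITE_WAITERS, 0));                            /* 0 */
            /* ...but a thread already holding read locks may recurse. */
            printf("%d\n", rw_can_read(RW_LOCK_READ | one |
                RW_LOCK_WRITE_WAITERS, 1));                            /* 1 */
            return (0);
    }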
@ -229,7 +240,7 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
#endif
uint64_t waittime = 0;
int contested = 0;
uintptr_t x;
uintptr_t v;
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
@ -238,17 +249,6 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
rw->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);
/*
* Note that we don't make any attempt to try to block read
* locks once a writer has blocked on the lock. The reason is
* that we currently allow for read locks to recurse and we
* don't keep track of all the holders of read locks. Thus, if
* we were to block readers once a writer blocked and a reader
* tried to recurse on their reader lock after a writer had
* blocked we would end up in a deadlock since the reader would
* be blocked on the writer, and the writer would be blocked
* waiting for the reader to release its original read lock.
*/
for (;;) {
/*
* Handle the easy case. If no other thread has a write
@ -260,22 +260,20 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* completely unlocked rwlock since such a lock is encoded
* as a read lock with no waiters.
*/
x = rw->rw_lock;
if (x & RW_LOCK_READ) {
v = rw->rw_lock;
if (RW_CAN_READ(v)) {
/*
* The RW_LOCK_READ_WAITERS flag should only be set
* if another thread currently holds a write lock,
* and in that case RW_LOCK_READ should be clear.
* if the lock has been unlocked and write waiters
* were present.
*/
MPASS((x & RW_LOCK_READ_WAITERS) == 0);
if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
x + RW_ONE_READER)) {
if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
v + RW_ONE_READER)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR4(KTR_LOCK,
"%s: %p succeed %p -> %p", __func__,
rw, (void *)x,
(void *)(x + RW_ONE_READER));
rw, (void *)v,
(void *)(v + RW_ONE_READER));
break;
}
cpu_spinwait();
@ -290,32 +288,35 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* the owner stops running or the state of the lock
* changes.
*/
owner = (struct thread *)RW_OWNER(x);
if (TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
__func__, rw, owner);
while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
TD_IS_RUNNING(owner))
cpu_spinwait();
continue;
if ((v & RW_LOCK_READ) == 0) {
owner = (struct thread *)RW_OWNER(v);
if (TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK,
"%s: spinning on %p held by %p",
__func__, rw, owner);
while ((struct thread*)RW_OWNER(rw->rw_lock) ==
owner && TD_IS_RUNNING(owner))
cpu_spinwait();
continue;
}
}
#endif
/*
* Okay, now it's the hard case. Some other thread already
* has a write lock, so acquire the turnstile lock so we can
* begin the process of blocking.
* has a write lock or there are write waiters present,
* acquire the turnstile lock so we can begin the process
* of blocking.
*/
ts = turnstile_trywait(&rw->lock_object);
/*
* The lock might have been released while we spun, so
* recheck its state and restart the loop if there is no
* longer a write lock.
* recheck its state and restart the loop if needed.
*/
x = rw->rw_lock;
if (x & RW_LOCK_READ) {
v = rw->rw_lock;
if (RW_CAN_READ(v)) {
turnstile_cancel(ts);
cpu_spinwait();
continue;
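Both adaptive branches above follow the same pattern: spin only while the
thread that owns the lock is executing on a CPU, and fall back to the
turnstile once it sleeps. A hedged userspace analogue in C11, where an
atomic per-owner flag stands in for TD_IS_RUNNING() and an empty loop body
stands in for cpu_spinwait():

    #include <stdatomic.h>
    #include <stdint.h>

    struct owner {
            _Atomic int running;        /* stand-in for TD_IS_RUNNING() */
    };

    static void
    spin_while_owner_runs(_Atomic(struct owner *) *lock_owner,
        struct owner *owner)
    {
            /* Re-check both conditions each iteration: stop as soon as
             * ownership changes hands or the owner goes off CPU. */
            while (atomic_load_explicit(lock_owner,
                memory_order_relaxed) == owner &&
                atomic_load_explicit(&owner->running, memory_order_relaxed))
                    ;                   /* cpu_spinwait() in the kernel */
    }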
@ -326,23 +327,30 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
* If the current owner of the lock is executing on another
* CPU quit the hard path and try to spin.
*/
owner = (struct thread *)RW_OWNER(x);
if (TD_IS_RUNNING(owner)) {
turnstile_cancel(ts);
cpu_spinwait();
continue;
if ((v & RW_LOCK_READ) == 0) {
owner = (struct thread *)RW_OWNER(v);
if (TD_IS_RUNNING(owner)) {
turnstile_cancel(ts);
cpu_spinwait();
continue;
}
}
#endif
/*
* Ok, it's still a write lock. If the RW_LOCK_READ_WAITERS
* flag is already set, then we can go ahead and block. If
* it is not set then try to set it. If we fail to set it
* drop the turnstile lock and restart the loop.
* The lock is held in write mode or it already has waiters.
*/
if (!(x & RW_LOCK_READ_WAITERS)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, x,
x | RW_LOCK_READ_WAITERS)) {
MPASS(!RW_CAN_READ(v));
/*
* If the RW_LOCK_READ_WAITERS flag is already set, then
* we can go ahead and block. If it is not set then try
* to set it. If we fail to set it drop the turnstile
* lock and restart the loop.
*/
if (!(v & RW_LOCK_READ_WAITERS)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_READ_WAITERS)) {
turnstile_cancel(ts);
cpu_spinwait();
continue;
@ -375,18 +383,20 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
WITNESS_LOCK(&rw->lock_object, 0, file, line);
curthread->td_locks++;
curthread->td_rw_rlocks++;
}
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
struct turnstile *ts;
uintptr_t x;
uintptr_t x, v, queue;
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
_rw_assert(rw, RA_RLOCKED, file, line);
curthread->td_locks--;
curthread->td_rw_rlocks--;
WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
@ -410,31 +420,14 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
}
continue;
}
/*
* We should never have read waiters while at least one
* thread holds a read lock. (See note above)
*/
KASSERT(!(x & RW_LOCK_READ_WAITERS),
("%s: waiting readers", __func__));
/*
* If there aren't any waiters for a write lock, then try
* to drop it quickly.
*/
if (!(x & RW_LOCK_WRITE_WAITERS)) {
/*
* There shouldn't be any flags set and we should
* be the only read lock. If we fail to release
* the single read lock, then another thread might
* have just acquired a read lock, so go back up
* to the multiple read locks case.
*/
MPASS(x == RW_READERS_LOCK(1));
if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
RW_UNLOCKED)) {
if (!(x & RW_LOCK_WAITERS)) {
MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
RW_READERS_LOCK(1));
if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded",
__func__, rw);
@ -442,18 +435,13 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
}
continue;
}
/*
* There should just be one reader with one or more
* writers waiting.
*/
MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));
/*
* Ok, we know we have a waiting writer and we think we
* are the last reader, so grab the turnstile lock.
* Ok, we know we have waiters and we think we are the
* last reader, so grab the turnstile lock.
*/
turnstile_chain_lock(&rw->lock_object);
v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
MPASS(v & RW_LOCK_WAITERS);
/*
* Try to drop our lock leaving the lock in an unlocked
@ -471,8 +459,14 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
* acquired a read lock, so drop the turnstile lock and
* restart.
*/
if (!atomic_cmpset_ptr(&rw->rw_lock,
RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
x = RW_UNLOCKED;
if (v & RW_LOCK_WRITE_WAITERS) {
queue = TS_EXCLUSIVE_QUEUE;
x |= (v & RW_LOCK_READ_WAITERS);
} else
queue = TS_SHARED_QUEUE;
if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
x)) {
turnstile_chain_unlock(&rw->lock_object);
continue;
}
@ -489,7 +483,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
*/
ts = turnstile_lookup(&rw->lock_object);
MPASS(ts != NULL);
turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
turnstile_broadcast(ts, queue);
turnstile_unpend(ts, TS_SHARED_LOCK);
turnstile_chain_unlock(&rw->lock_object);
break;
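The wakeup decision made by the last reader above can be isolated as a pure
function. A sketch, with the turnstile queue constants reduced to an
illustrative enum and RW_UNLOCKED assumed to be the "read lock, zero
readers" encoding:

    #include <stdint.h>

    #define RW_LOCK_READ          0x01
    #define RW_LOCK_READ_WAITERS  0x02
    #define RW_LOCK_WRITE_WAITERS 0x04
    #define RW_UNLOCKED           RW_LOCK_READ              /* assumed */

    enum ts_queue { TS_SHARED_QUEUE, TS_EXCLUSIVE_QUEUE };  /* illustrative */

    /* Prefer writers; when one is woken, carry the read-waiters bit into
     * the new lock word so queued readers stay blocked behind it. */
    static uintptr_t
    last_reader_release(uintptr_t v, enum ts_queue *queue)
    {
            uintptr_t x = RW_UNLOCKED;

            if (v & RW_LOCK_WRITE_WAITERS) {
                    *queue = TS_EXCLUSIVE_QUEUE;
                    x |= (v & RW_LOCK_READ_WAITERS);
            } else
                    *queue = TS_SHARED_QUEUE;
            return (x);
    }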
@ -508,9 +502,11 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
volatile struct thread *owner;
int spintries = 0;
int i;
#endif
uint64_t waittime = 0;
uintptr_t v;
uintptr_t v, x;
int contested = 0;
if (rw_wlocked(rw)) {
@ -518,7 +514,6 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
("%s: recursing but non-recursive rw %s @ %s:%d\n",
__func__, rw->lock_object.lo_name, file, line));
rw->rw_recurse++;
atomic_set_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
return;
@ -548,21 +543,27 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
cpu_spinwait();
continue;
}
if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < 100) {
if (!(v & RW_LOCK_WRITE_SPINNER)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_WRITE_SPINNER)) {
cpu_spinwait();
continue;
}
}
spintries++;
for (i = 100000; i > 0; i--) {
if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
break;
cpu_spinwait();
}
if (i)
continue;
}
#endif
ts = turnstile_trywait(&rw->lock_object);
v = rw->rw_lock;
/*
* If the lock was released while spinning on the
* turnstile chain lock, try again.
*/
if (v == RW_UNLOCKED) {
turnstile_cancel(ts);
cpu_spinwait();
continue;
}
#ifdef ADAPTIVE_RWLOCKS
/*
* If the current owner of the lock is executing on another
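The bounded spin above (at most 100 rounds, each of up to 100000 pauses)
keeps writers from spinning forever against readers that may themselves be
preempted. A hedged C11 sketch of one round; the caller is assumed to stop
calling once its spintries counter reaches the cap and block on the
turnstile instead:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RW_LOCK_WRITE_SPINNER 0x08

    /* Advertise a write spinner by setting the bit, then spin a fixed
     * budget waiting for some state change to clear it. Returns true to
     * retry the outer acquisition loop, false when the budget is spent
     * and the thread should block instead. */
    static bool
    write_spin_on_readers(_Atomic uintptr_t *lockp, int *spintries)
    {
            uintptr_t v = atomic_load(lockp);

            if (!(v & RW_LOCK_WRITE_SPINNER) &&
                !atomic_compare_exchange_strong(lockp, &v,
                v | RW_LOCK_WRITE_SPINNER))
                    return (true);      /* lost a race; re-read and retry */
            (*spintries)++;
            for (int i = 100000; i > 0; i--) {
                    if (!(atomic_load(lockp) & RW_LOCK_WRITE_SPINNER))
                            return (true);  /* bit cleared; retry */
                    ;                       /* cpu_spinwait() */
            }
            return (false);             /* readers remain; go to turnstile */
    }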
@ -577,31 +578,24 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
}
}
#endif
/*
* If the lock was released by a writer with both readers
* and writers waiting and a reader hasn't woken up and
* acquired the lock yet, rw_lock will be set to the
* value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS. If we see
* that value, try to acquire it once. Note that we have
* to preserve the RW_LOCK_WRITE_WAITERS flag as there are
* other writers waiting still. If we fail, restart the
* loop.
* If the lock was released while waiting for the turnstile
* chain lock retry.
*/
if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
if (atomic_cmpset_acq_ptr(&rw->rw_lock,
RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
tid | RW_LOCK_WRITE_WAITERS)) {
turnstile_claim(ts);
CTR2(KTR_LOCK, "%s: %p claimed by new writer",
__func__, rw);
x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
if ((v & ~x) == RW_UNLOCKED) {
x &= ~RW_LOCK_WRITE_SPINNER;
if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
if (x)
turnstile_claim(ts);
else
turnstile_cancel(ts);
break;
}
turnstile_cancel(ts);
cpu_spinwait();
continue;
}
/*
* If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
* set it. If we fail to set it, then loop back and try
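The acquisition step above replaces the old single special case
(RW_UNLOCKED | RW_LOCK_WRITE_WAITERS) with a general one: any "unlocked
plus bookkeeping bits" word can be claimed in one CAS. A sketch of just
that step, with flag values from the last hunk and RW_UNLOCKED assumed:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RW_LOCK_READ          0x01
    #define RW_LOCK_READ_WAITERS  0x02
    #define RW_LOCK_WRITE_WAITERS 0x04
    #define RW_LOCK_WRITE_SPINNER 0x08
    #define RW_LOCK_WAITERS       (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
    #define RW_UNLOCKED           RW_LOCK_READ  /* assumed encoding */

    /* Take ownership in one CAS, preserving the waiter flags (threads
     * may still be blocked on the turnstile) but clearing the spinner
     * bit, since any state change restarts spinners. *claim_ts reports
     * whether the winner must also claim the turnstile. */
    static bool
    wlock_claim(_Atomic uintptr_t *lockp, uintptr_t tid, bool *claim_ts)
    {
            uintptr_t v = atomic_load(lockp);
            uintptr_t x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);

            if ((v & ~x) != RW_UNLOCKED)
                    return (false);     /* still owned; not claimable yet */
            x &= ~RW_LOCK_WRITE_SPINNER;
            *claim_ts = (x != 0);
            return (atomic_compare_exchange_strong(lockp, &v, tid | x));
    }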
@ -618,7 +612,6 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
CTR2(KTR_LOCK, "%s: %p set write waiters flag",
__func__, rw);
}
/*
* We were unable to acquire the lock and the write waiters
* flag is set, so we must block on the turnstile.
@ -630,6 +623,9 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
__func__, rw);
#ifdef ADAPTIVE_RWLOCKS
spintries = 0;
#endif
}
lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
file, line);
@ -648,12 +644,12 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
int queue;
if (rw_wlocked(rw) && rw_recursed(rw)) {
if ((--rw->rw_recurse) == 0)
atomic_clear_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
rw->rw_recurse--;
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
return;
}
v = rw->rw_lock;
KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
("%s: neither of the waiter flags are set", __func__));
@ -707,7 +703,7 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
uintptr_t v, tid;
uintptr_t v, x, tid;
struct turnstile *ts;
int success;
@ -723,36 +719,50 @@ _rw_try_upgrade(struct rwlock *rw, const char *file, int line)
* turnstile. So, do the simple case of no waiters first.
*/
tid = (uintptr_t)curthread;
if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
tid);
goto out;
}
success = 0;
for (;;) {
v = rw->rw_lock;
if (RW_READERS(v) > 1)
break;
if (!(v & RW_LOCK_WAITERS)) {
success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
if (!success)
continue;
break;
}
/*
* Ok, we think we have write waiters, so lock the
* turnstile.
*/
ts = turnstile_trywait(&rw->lock_object);
/*
* Try to switch from one reader to a writer again. This time
* we honor the current state of the RW_LOCK_WRITE_WAITERS
* flag. If we obtain the lock with the flag set, then claim
* ownership of the turnstile.
*/
v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
tid | v);
if (success && v)
turnstile_claim(ts);
else
/*
* Ok, we think we have waiters, so lock the turnstile.
*/
ts = turnstile_trywait(&rw->lock_object);
v = rw->rw_lock;
if (RW_READERS(v) > 1) {
turnstile_cancel(ts);
break;
}
/*
* Try to switch from one reader to a writer again. This time
* we honor the current state of the waiters flags.
* If we obtain the lock with the flags set, then claim
* ownership of the turnstile.
*/
x = rw->rw_lock & RW_LOCK_WAITERS;
success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
if (success) {
if (x)
turnstile_claim(ts);
else
turnstile_cancel(ts);
break;
}
turnstile_cancel(ts);
out:
}
LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
if (success)
if (success) {
curthread->td_rw_rlocks--;
WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
}
return (success);
}
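Stripped of the turnstile bookkeeping, the reworked upgrade loop reduces to
a CAS that can only succeed while the caller is the sole reader. A minimal
analogue (RW_READERS() is assumed to be a plain shift, and the waiter flags
are omitted); on success the real code also decrements td_rw_rlocks, since
one read lock just became a write lock:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RW_READERS_SHIFT 4
    #define RW_READERS(x)    ((uintptr_t)(x) >> RW_READERS_SHIFT)  /* assumed */

    static bool
    try_upgrade(_Atomic uintptr_t *lockp, uintptr_t tid)
    {
            for (;;) {
                    uintptr_t v = atomic_load(lockp);

                    if (RW_READERS(v) > 1)
                            return (false); /* other readers; give up */
                    if (atomic_compare_exchange_strong(lockp, &v, tid))
                            return (true);  /* now write-locked by tid */
            }
    }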
@ -764,6 +774,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
{
struct turnstile *ts;
uintptr_t tid, v;
int rwait, wwait;
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
@ -778,8 +789,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
/*
* Convert from a writer to a single reader. First we handle
* the easy case with no waiters. If there are any waiters, we
* lock the turnstile, "disown" the lock, and awaken any read
* waiters.
* lock the turnstile and "disown" the lock.
*/
tid = (uintptr_t)curthread;
if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
@ -790,26 +800,32 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
* read the waiter flags without any races.
*/
turnstile_chain_lock(&rw->lock_object);
v = rw->rw_lock;
MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));
v = rw->rw_lock & RW_LOCK_WAITERS;
rwait = v & RW_LOCK_READ_WAITERS;
wwait = v & RW_LOCK_WRITE_WAITERS;
MPASS(rwait | wwait);
/*
* Downgrade from a write lock while preserving
* RW_LOCK_WRITE_WAITERS and give up ownership of the
* turnstile. If there are any read waiters, wake them up.
* Downgrade from a write lock while preserving waiters flag
* and give up ownership of the turnstile.
*/
ts = turnstile_lookup(&rw->lock_object);
MPASS(ts != NULL);
if (v & RW_LOCK_READ_WAITERS)
if (!wwait)
v &= ~RW_LOCK_READ_WAITERS;
atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
/*
* Wake other readers if there are no writers pending. Otherwise they
* won't be able to acquire the lock anyway.
*/
if (rwait && !wwait) {
turnstile_broadcast(ts, TS_SHARED_QUEUE);
atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
(v & RW_LOCK_WRITE_WAITERS));
if (v & RW_LOCK_READ_WAITERS)
turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
else
} else
turnstile_disown(ts);
turnstile_chain_unlock(&rw->lock_object);
out:
curthread->td_rw_rlocks++;
LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
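The downgrade policy likewise factors into a pure function: the waiter bits
are preserved in the new lock word, except that the read-waiters bit is
dropped when those readers are about to be broadcast. A sketch, with
RW_READERS_LOCK() assumed to place the count above the flag bits:

    #include <stdint.h>

    #define RW_LOCK_READ          0x01
    #define RW_LOCK_READ_WAITERS  0x02
    #define RW_LOCK_WRITE_WAITERS 0x04
    #define RW_READERS_SHIFT      4
    /* assumed: n readers encoded above the flags, plus the READ flag */
    #define RW_READERS_LOCK(n) \
        (((uintptr_t)(n) << RW_READERS_SHIFT) | RW_LOCK_READ)

    /* Compute the post-downgrade lock word and whether to wake readers.
     * Readers are woken only when no writer is pending, since
     * RW_CAN_READ() would refuse them anyway while a write waiter exists. */
    static uintptr_t
    downgrade_word(uintptr_t waiters, int *wake_readers)
    {
            int rwait = (waiters & RW_LOCK_READ_WAITERS) != 0;
            int wwait = (waiters & RW_LOCK_WRITE_WAITERS) != 0;

            if (!wwait)
                    waiters &= ~RW_LOCK_READ_WAITERS;  /* about to run */
            *wake_readers = (rwait && !wwait);
            return (RW_READERS_LOCK(1) | waiters);
    }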


@ -674,8 +674,6 @@ turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
td = curthread;
mtx_assert(&ts->ts_lock, MA_OWNED);
if (queue == TS_SHARED_QUEUE)
MPASS(owner != NULL);
if (owner)
MPASS(owner->td_proc->p_magic == P_MAGIC);
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
@ -766,8 +764,7 @@ turnstile_signal(struct turnstile *ts, int queue)
MPASS(ts != NULL);
mtx_assert(&ts->ts_lock, MA_OWNED);
MPASS(curthread->td_proc->p_magic == P_MAGIC);
MPASS(ts->ts_owner == curthread ||
(queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
/*
@ -818,8 +815,7 @@ turnstile_broadcast(struct turnstile *ts, int queue)
MPASS(ts != NULL);
mtx_assert(&ts->ts_lock, MA_OWNED);
MPASS(curthread->td_proc->p_magic == P_MAGIC);
MPASS(ts->ts_owner == curthread ||
(queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
/*
* We must have the chain locked so that we can remove the empty
* turnstile from the hash queue.
@ -869,8 +865,7 @@ turnstile_unpend(struct turnstile *ts, int owner_type)
MPASS(ts != NULL);
mtx_assert(&ts->ts_lock, MA_OWNED);
MPASS(ts->ts_owner == curthread ||
(owner_type == TS_SHARED_LOCK && ts->ts_owner == NULL));
MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
MPASS(!TAILQ_EMPTY(&ts->ts_pending));
/*


@ -229,8 +229,9 @@ struct thread {
u_char td_lastcpu; /* (t) Last cpu we were on. */
u_char td_oncpu; /* (t) Which cpu we are on. */
volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
short td_locks; /* (k) Count of non-spin locks. */
u_char td_tsqueue; /* (t) Turnstile queue blocked on. */
short td_locks; /* (k) Count of non-spin locks. */
short td_rw_rlocks; /* (k) count of rwlock read locks. */
struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */
const char *td_lockname; /* (t) Name of lock blocked on. */
LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */


@ -66,10 +66,11 @@
#define RW_LOCK_READ 0x01
#define RW_LOCK_READ_WAITERS 0x02
#define RW_LOCK_WRITE_WAITERS 0x04
#define RW_LOCK_RECURSED 0x08
#define RW_LOCK_WRITE_SPINNER 0x08
#define RW_LOCK_FLAGMASK \
(RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS | \
RW_LOCK_RECURSED)
RW_LOCK_WRITE_SPINNER)
#define RW_LOCK_WAITERS (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
#define RW_OWNER(x) ((x) & ~RW_LOCK_FLAGMASK)
#define RW_READERS_SHIFT 4
@ -113,7 +114,9 @@
#define __rw_wunlock(rw, tid, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
\
if (!_rw_write_unlock((rw), _tid)) \
if ((rw)->rw_recurse) \
(rw)->rw_recurse--; \
else if (!_rw_write_unlock((rw), _tid)) \
_rw_wunlock_hard((rw), _tid, (file), (line)); \
} while (0)
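For comparison, the reworked inline now resolves write recursion against
rw_recurse before touching the lock word at all, which is what frees the
0x08 bit. A hedged C11 analogue with the turnstile slow path stubbed out:

    #include <stdatomic.h>
    #include <stdint.h>

    #define RW_LOCK_READ 0x01
    #define RW_UNLOCKED  RW_LOCK_READ   /* assumed encoding */

    struct rwlock_sketch {
            _Atomic uintptr_t rw_lock;
            int               rw_recurse;  /* protected by write ownership */
    };

    static void wunlock_hard(struct rwlock_sketch *rw, uintptr_t tid);

    static void
    wunlock_sketch(struct rwlock_sketch *rw, uintptr_t tid)
    {
            uintptr_t v = tid;

            if (rw->rw_recurse != 0) {
                    rw->rw_recurse--;   /* still write-locked by us */
                    return;
            }
            /* Fast path: no waiters, so tid -> RW_UNLOCKED succeeds. */
            if (!atomic_compare_exchange_strong_explicit(&rw->rw_lock, &v,
                RW_UNLOCKED, memory_order_release, memory_order_relaxed))
                    wunlock_hard(rw, tid);  /* waiters present; slow path */
    }

    static void
    wunlock_hard(struct rwlock_sketch *rw, uintptr_t tid)
    {
            (void)rw; (void)tid;        /* turnstile handling omitted */
    }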