locks: re-check the reason to go to sleep after locking sleepq/turnstile
In both rw and sx locks we always go to sleep if the lock owner is not running, but we do spin for some time if the lock is merely read-locked. However, if we decide to go to sleep because the lock owner is off CPU, and by the time the sleepq/turnstile lock is acquired the lock has become read-locked, we should fall back to the aforementioned bounded spin instead of sleeping.
parent 518e4554be
commit 28f1a9e3ff
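Before the diff, the pattern both changes implement can be summarized in a short userspace sketch. Everything below is illustrative: lock_word, owner_running(), the pthread mutex, and the fixed spin budget are hypothetical stand-ins for the kernel's lock word, TD_IS_RUNNING(), the sleepq/turnstile chain lock, and rowner_loops/asx_loops; this is not the kernel API. A sleep_reason value records why the thread chose to block, and once the queue lock is held the lock word is re-read: if the lock became read-locked while the recorded reason was "owner off CPU", the sleep is cancelled and the thread falls back to the bounded spin.

/*
 * Minimal sketch of the re-check pattern; all identifiers are
 * hypothetical stand-ins, not the kernel primitives.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

#define LOCK_READ       0x1u    /* lock word flag: read-locked */
#define LOCK_WRITE      0x2u    /* lock word flag: write-locked */

static atomic_uint lock_word;   /* stand-in for the rw/sx lock word */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for TD_IS_RUNNING(): is the lock owner on a CPU right now? */
static bool
owner_running(unsigned v)
{
        (void)v;
        return (false);
}

void
wlock_hard(void)
{
        unsigned v;
        int sleep_reason;

        for (;;) {
                sleep_reason = 0;
                v = 0;
                /* Fast path: take the lock if it is free. */
                if (atomic_compare_exchange_weak(&lock_word, &v, LOCK_WRITE))
                        return;
                if (!(v & LOCK_READ) && owner_running(v))
                        continue;       /* owner on CPU: keep spinning */
                sleep_reason = 1;       /* owner is off CPU */
                if (v & LOCK_READ) {
                        /* Read-locked: spin for a bounded number of rounds. */
                        for (int i = 0; i < 1000; i++)
                                sched_yield();
                        sleep_reason = 2;       /* spin budget exhausted */
                }
                /* Stand-in for turnstile_trywait()/the sleepq chain lock. */
                pthread_mutex_lock(&queue_lock);
                v = atomic_load(&lock_word);
                /*
                 * The re-check this commit adds: we chose to sleep because
                 * the owner was off CPU (reason 1), but the lock is now
                 * read-locked, so cancel and fall back to the bounded spin.
                 */
                if ((v & LOCK_READ) && sleep_reason == 1) {
                        pthread_mutex_unlock(&queue_lock);  /* turnstile_cancel() */
                        continue;
                }
                /* Otherwise commit to sleeping (turnstile_wait() stand-in). */
                pthread_mutex_unlock(&queue_lock);
                sched_yield();
        }
}

Recording the reason matters because only the owner-off-CPU decision is invalidated by readers appearing between the check and the queue-lock acquisition; if the spin budget was already exhausted (reason 2), sleeping remains the right choice, which is why only reason 1 retries.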
sys/kern/kern_rwlock.c
@@ -872,6 +872,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 #ifdef ADAPTIVE_RWLOCKS
         int spintries = 0;
         int i, n;
+        int sleep_reason = 0;
 #endif
         uintptr_t x;
 #ifdef LOCK_PROFILING
@@ -952,6 +953,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
                  * running on another CPU, spin until the owner stops
                  * running or the state of the lock changes.
                  */
+                sleep_reason = 1;
                 owner = lv_rw_wowner(v);
                 if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -995,6 +997,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 #endif
                         if (i != rowner_loops)
                                 continue;
+                        sleep_reason = 2;
                 }
 #endif
                 ts = turnstile_trywait(&rw->lock_object);
@@ -1015,6 +1018,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
                                 turnstile_cancel(ts);
                                 continue;
                         }
+                } else if (RW_READERS(v) > 0 && sleep_reason == 1) {
+                        turnstile_cancel(ts);
+                        continue;
                 }
 #endif
                 /*
sys/kern/kern_sx.c
@@ -534,6 +534,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
         volatile struct thread *owner;
         u_int i, n, spintries = 0;
         bool adaptive;
+        int sleep_reason = 0;
 #endif
 #ifdef LOCK_PROFILING
         uint64_t waittime = 0;
@@ -647,6 +648,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
                                     sched_tdname(curthread), "running");
                                 continue;
                         }
+                        sleep_reason = 1;
                 } else if (SX_SHARERS(x) && spintries < asx_retries) {
                         KTR_STATE1(KTR_SCHED, "thread",
                             sched_tdname(curthread), "spinning",
@@ -671,6 +673,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
                             sched_tdname(curthread), "running");
                         if (i != asx_loops)
                                 continue;
+                        sleep_reason = 2;
                 }
 #endif
 sleepq:
@@ -695,9 +698,14 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
                  * chain lock. If so, drop the sleep queue lock and try
                  * again.
                  */
-                if (!(x & SX_LOCK_SHARED) && adaptive) {
-                        owner = (struct thread *)SX_OWNER(x);
-                        if (TD_IS_RUNNING(owner)) {
-                                sleepq_release(&sx->lock_object);
-                                continue;
+                if (adaptive) {
+                        if (!(x & SX_LOCK_SHARED)) {
+                                owner = (struct thread *)SX_OWNER(x);
+                                if (TD_IS_RUNNING(owner)) {
+                                        sleepq_release(&sx->lock_object);
+                                        continue;
+                                }
+                        } else if (SX_SHARERS(x) > 0 && sleep_reason == 1) {
+                                sleepq_release(&sx->lock_object);
+                                continue;
                         }