locks: fix a corner case in r327399

If there were exactly rowner_retries/asx_retries (by default: 10) transitions
between read and write state and the waiters still did not get the lock, the
next owner -> reader transition would make the code correctly fall back to
turnstile/sleepq, where it would incorrectly conclude it was waiting for a
writer and leave the turnstile/sleepq to loop back. From that point on it
would keep making ts/sq round trips until the lock was released.

The bug sometimes manifested itself in stalls during -j 128 package builds.

Refactor the code to fix the bug; while here, remove some of the gratuitous
differences between rw and sx locks.
Author:  Mateusz Guzik
Date:    2018-03-04 21:38:30 +00:00
Parent:  2a8e8f7892
Commit:  d94df98c5c
Notes:   svn2git (2020-12-20 02:59:44 +00:00): svn path=/head/; revision=330414

2 changed files with 43 additions and 45 deletions
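In code form, the corner case comes down to where sleep_reason is assigned.
The sketch below is an editorial illustration only, not kernel code: struct
snap, the plain rowner_retries constant, and both helper functions are
made-up stand-ins for the real lock word and turnstile logic, using the rw
naming.

/*
 * Standalone sketch of the corner case; compile with any C99 compiler.
 */
#include <stdbool.h>
#include <stdio.h>

enum { READERS, WRITER };

struct snap {
	bool write_owned;	/* lock held exclusive */
	int readers;		/* shared holders */
};

static const int rowner_retries = 10;

/*
 * Old shape: sleep_reason defaulted to 1 ("waiting for a writer") and was
 * only changed while spin retries remained.  Once spintries reached
 * rowner_retries on a reader-held lock, the thread arrived at the
 * turnstile with the stale value and immediately left again.
 */
static bool
old_leaves_turnstile(struct snap s, int spintries)
{
	int sleep_reason = 1;

	if (!s.write_owned && s.readers > 0 && spintries < rowner_retries)
		sleep_reason = 2;
	return (s.readers > 0 && sleep_reason == 1);
}

/*
 * New shape: the reason is recorded by whichever branch observed the
 * state, independently of whether any spin budget is left.
 */
static bool
new_leaves_turnstile(struct snap s, int spintries)
{
	int sleep_reason = s.write_owned ? WRITER : READERS;

	(void)spintries;
	return (s.readers > 0 && sleep_reason == WRITER);
}

int
main(void)
{
	/* Reader-held lock after the retry budget is exhausted. */
	struct snap s = { .write_owned = false, .readers = 3 };

	printf("old: leaves turnstile: %d (spurious loop)\n",
	    (int)old_leaves_turnstile(s, rowner_retries));
	printf("new: leaves turnstile: %d (blocks until wakeup)\n",
	    (int)new_leaves_turnstile(s, rowner_retries));
	return (0);
}

Run standalone, the old shape reports that the thread leaves the turnstile
even though only readers hold the lock, which is exactly the loop described
in the message above; the new shape keeps it blocked.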

diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c

@@ -875,7 +875,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 #ifdef ADAPTIVE_RWLOCKS
 	int spintries = 0;
 	int i, n;
-	int sleep_reason = 0;
+	enum { READERS, WRITER } sleep_reason;
 #endif
 	uintptr_t x;
 #ifdef LOCK_PROFILING
@@ -956,9 +956,11 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 		 * running on another CPU, spin until the owner stops
 		 * running or the state of the lock changes.
 		 */
-		sleep_reason = 1;
-		owner = lv_rw_wowner(v);
-		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
+		if (!(v & RW_LOCK_READ)) {
+			sleep_reason = WRITER;
+			owner = lv_rw_wowner(v);
+			if (!TD_IS_RUNNING(owner))
+				goto ts;
 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 				    __func__, rw, owner);
@@ -973,9 +975,10 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
 			    "running");
 			continue;
-		}
-		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
-		    spintries < rowner_retries) {
+		} else if (RW_READERS(v) > 0) {
+			sleep_reason = READERS;
+			if (spintries == rowner_retries)
+				goto ts;
 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
 				if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
 				    v | RW_LOCK_WRITE_SPINNER)) {
@@ -993,15 +996,15 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 				if ((v & RW_LOCK_WRITE_SPINNER) == 0)
 					break;
 			}
+#ifdef KDTRACE_HOOKS
+			lda.spin_cnt += i;
+#endif
 			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
 			    "running");
-#ifdef KDTRACE_HOOKS
-			lda.spin_cnt += rowner_loops - i;
-#endif
 			if (i < rowner_loops)
 				continue;
-			sleep_reason = 2;
 		}
+ts:
 #endif
 		ts = turnstile_trywait(&rw->lock_object);
 		v = RW_READ_VALUE(rw);
@@ -1021,7 +1024,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 				turnstile_cancel(ts);
 				continue;
 			}
-		} else if (RW_READERS(v) > 0 && sleep_reason == 1) {
+		} else if (RW_READERS(v) > 0 && sleep_reason == WRITER) {
 			turnstile_cancel(ts);
 			continue;
 		}
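A detail worth calling out in the rwlock hunks above: the KDTRACE spin
accounting moved above the KTR_STATE0 call and now adds i, the progress
counter of the bounded spin loop, where the old code added
rowner_loops - i, which appears to have measured the unused budget rather
than the spins performed. A small standalone illustration follows; the
early-exit round count is invented for the example.

#include <stdio.h>

int
main(void)
{
	int rowner_loops = 10000, n = 4, i, rounds = 0;

	/*
	 * Budget-style loop as in the reader-wait path; pretend the
	 * WRITE_SPINNER condition clears after 12 rounds.
	 */
	for (i = 0; i < rowner_loops; i += n) {
		if (++rounds == 12)
			break;
	}
	printf("spins performed (new '+= i'):        ~%d\n", i);
	printf("old '+= rowner_loops - i' would add:  %d\n",
	    rowner_loops - i);
	return (0);
}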

diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c

@@ -533,8 +533,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
+	enum { READERS, WRITER } sleep_reason;
 	bool adaptive;
-	int sleep_reason = 0;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -628,37 +628,33 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 		 * running or the state of the lock changes.
 		 */
 		if ((x & SX_LOCK_SHARED) == 0) {
+			sleep_reason = WRITER;
 			owner = lv_sx_owner(x);
-			if (TD_IS_RUNNING(owner)) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-					    __func__, sx, owner);
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"",
-				    sx->lock_object.lo_name);
-				do {
-					lock_delay(&lda);
-					x = SX_READ_VALUE(sx);
-					owner = lv_sx_owner(x);
-				} while (owner != NULL &&
-				    TD_IS_RUNNING(owner));
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "running");
-				continue;
-			}
-			sleep_reason = 1;
-		} else if (SX_SHARERS(x) && spintries < asx_retries) {
-			KTR_STATE1(KTR_SCHED, "thread",
-			    sched_tdname(curthread), "spinning",
-			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			if (!TD_IS_RUNNING(owner))
+				goto sleepq;
+			if (LOCK_LOG_TEST(&sx->lock_object, 0))
+				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
+				    __func__, sx, owner);
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    sx->lock_object.lo_name);
+			do {
+				lock_delay(&lda);
+				x = SX_READ_VALUE(sx);
+				owner = lv_sx_owner(x);
+			} while (owner != NULL && TD_IS_RUNNING(owner));
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
+			continue;
+		} else if (SX_SHARERS(x) > 0) {
+			sleep_reason = READERS;
+			if (spintries == asx_retries)
+				goto sleepq;
 			spintries++;
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    sx->lock_object.lo_name);
 			for (i = 0; i < asx_loops; i += n) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR4(KTR_LOCK,
-					    "%s: shared spinning on %p with %u and %u",
-					    __func__, sx, spintries, i);
 				n = SX_SHARERS(x);
 				lock_delay_spin(n);
 				x = SX_READ_VALUE(sx);
@@ -669,11 +665,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 #ifdef KDTRACE_HOOKS
 			lda.spin_cnt += i;
 #endif
-			KTR_STATE0(KTR_SCHED, "thread",
-			    sched_tdname(curthread), "running");
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 			if (i < asx_loops)
 				continue;
-			sleep_reason = 2;
 		}
+sleepq:
 #endif
@@ -705,7 +700,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 				sleepq_release(&sx->lock_object);
 				continue;
 			}
-		} else if (SX_SHARERS(x) > 0 && sleep_reason == 1) {
+		} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
 			sleepq_release(&sx->lock_object);
 			continue;
 		}
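One last note on the declaration both files now share:
enum { READERS, WRITER } sleep_reason; defines an anonymous enumeration
type and a single variable of that type in one declaration, so the
constants get names without the type itself needing one. A trivial
standalone example of the idiom:

#include <stdio.h>

int
main(void)
{
	/* Anonymous enum: only the variable is named, not the type. */
	enum { READERS, WRITER } sleep_reason = READERS;

	sleep_reason = WRITER;
	printf("sleep_reason = %d\n", (int)sleep_reason);	/* prints 1 */
	return (0);
}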