locks: partially tidy up waiting on readers

spin first instead of instantly re-reading, and don't re-read after
spinning is finished - the state is already known.

Note the code is subject to significant changes later.

MFC after:	1 week
This commit is contained in:
Mateusz Guzik 2017-10-05 13:01:18 +00:00
parent 78d58cb6bc
commit 20a15d1752
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=324314
2 changed files with 9 additions and 11 deletions

View File

@ -489,12 +489,11 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
for (i = 0; i < rowner_loops; i++) {
cpu_spinwait();
v = RW_READ_VALUE(rw);
if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(td, v))
break;
cpu_spinwait();
}
v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
lda.spin_cnt += rowner_loops - i;
#endif
@ -930,13 +929,13 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
for (i = 0; i < rowner_loops; i++) {
if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
break;
cpu_spinwait();
v = RW_READ_VALUE(rw);
if ((v & RW_LOCK_WRITE_SPINNER) == 0)
break;
}
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
lda.spin_cnt += rowner_loops - i;
#endif

View File

@ -605,18 +605,17 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
CTR4(KTR_LOCK,
"%s: shared spinning on %p with %u and %u",
__func__, sx, spintries, i);
x = sx->sx_lock;
cpu_spinwait();
x = SX_READ_VALUE(sx);
if ((x & SX_LOCK_SHARED) == 0 ||
SX_SHARERS(x) == 0)
break;
cpu_spinwait();
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
}
#ifdef KDTRACE_HOOKS
lda.spin_cnt += i;
#endif
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
x = SX_READ_VALUE(sx);
if (i != asx_loops)
continue;
}