sx: read the SX_NOADAPTIVE flag and Giant ownership only once

These used to be read multiple times while waiting for the lock to become
free, which had the potential to generate completely avoidable cache traffic.
Mateusz Guzik 2017-12-31 00:37:50 +00:00
parent 03cd714fc5
commit fb10612355
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=327397
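
The pattern behind the change, as a minimal standalone C sketch (illustrative only, not the kernel code; `toy_lock`, `TOY_NOSPIN`, and `try_acquire()` are hypothetical stand-ins for the sx lock word, the SX_NOADAPTIVE flag, and the cmpset fast path): a field that cannot change while we wait is read once before the retry loop instead of on every iteration, so the loop no longer touches it while the lock word is bouncing between CPUs.

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical lock: an atomic owner word plus a flags field that is
     * set at initialization and never changes afterwards (much like
     * lo_flags & SX_NOADAPTIVE). */
    struct toy_lock {
    	_Atomic(unsigned long) word;	/* 0 == unlocked, else owner id */
    	unsigned int flags;		/* constant after init */
    };
    #define TOY_NOSPIN	0x1

    static bool
    try_acquire(struct toy_lock *l, unsigned long self)
    {
    	unsigned long expected = 0;

    	return (atomic_compare_exchange_strong(&l->word, &expected, self));
    }

    /* Before: the flag is re-read on every iteration, pulling the lock's
     * cache line back in each time even though the answer never changes. */
    void
    acquire_slow_reread(struct toy_lock *l, unsigned long self)
    {
    	while (!try_acquire(l, self)) {
    		if (l->flags & TOY_NOSPIN)	/* avoidable re-read */
    			sched_yield();
    	}
    }

    /* After: read the flag once before waiting; the loop then only
     * touches what it has to (the lock word itself). */
    void
    acquire_slow_cached(struct toy_lock *l, unsigned long self)
    {
    	bool nospin = (l->flags & TOY_NOSPIN) != 0;	/* read once */

    	while (!try_acquire(l, self)) {
    		if (nospin)
    			sched_yield();
    	}
    }

The Giant ownership check follows the same reasoning: mtx_owned(&Giant) for the current thread cannot change while it sits in the loop, so the commit hoists GIANT_SAVE() out of the per-iteration path (ahead of the for (;;) in the !INVARIANTS case).
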


@@ -91,7 +91,7 @@ PMC_SOFT_DECLARE( , , lock, failed);
 	WITNESS_SAVE_DECL(Giant)					\
 
 #define	GIANT_SAVE(work) do {						\
-	if (mtx_owned(&Giant)) {					\
+	if (__predict_false(mtx_owned(&Giant))) {			\
 		work++;							\
 		WITNESS_SAVE(&Giant.lock_object, Giant);		\
 		while (mtx_owned(&Giant)) {				\
@@ -533,6 +533,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
+	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -581,6 +582,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
 
+#ifdef ADAPTIVE_SX
+	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) != 0);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -597,6 +602,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 		state = x;
 	}
 #endif
+#ifndef INVARIANTS
+	GIANT_SAVE(extra_work);
+#endif
 
 	for (;;) {
 		if (x == SX_LOCK_UNLOCKED) {
@@ -604,67 +612,68 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 				break;
 			continue;
 		}
+#ifdef INVARIANTS
+		GIANT_SAVE(extra_work);
+#endif
 #ifdef KDTRACE_HOOKS
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
+		if (__predict_false(!adaptive))
+			goto sleepq;
 		/*
 		 * If the lock is write locked and the owner is
 		 * running on another CPU, spin until the owner stops
 		 * running or the state of the lock changes.
 		 */
-		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-			if ((x & SX_LOCK_SHARED) == 0) {
-				owner = lv_sx_owner(x);
-				if (TD_IS_RUNNING(owner)) {
-					if (LOCK_LOG_TEST(&sx->lock_object, 0))
-						CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-						    __func__, sx, owner);
-					KTR_STATE1(KTR_SCHED, "thread",
-					    sched_tdname(curthread), "spinning",
-					    "lockname:\"%s\"",
-					    sx->lock_object.lo_name);
-					GIANT_SAVE(extra_work);
-					do {
-						lock_delay(&lda);
-						x = SX_READ_VALUE(sx);
-						owner = lv_sx_owner(x);
-					} while (owner != NULL &&
-					    TD_IS_RUNNING(owner));
-					KTR_STATE0(KTR_SCHED, "thread",
-					    sched_tdname(curthread), "running");
-					continue;
-				}
-			} else if (SX_SHARERS(x) && spintries < asx_retries) {
+		if ((x & SX_LOCK_SHARED) == 0) {
+			owner = lv_sx_owner(x);
+			if (TD_IS_RUNNING(owner)) {
+				if (LOCK_LOG_TEST(&sx->lock_object, 0))
+					CTR3(KTR_LOCK,
+				    "%s: spinning on %p held by %p",
+					    __func__, sx, owner);
 				KTR_STATE1(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"", sx->lock_object.lo_name);
-				GIANT_SAVE(extra_work);
-				spintries++;
-				for (i = 0; i < asx_loops; i += n) {
-					if (LOCK_LOG_TEST(&sx->lock_object, 0))
-						CTR4(KTR_LOCK,
-				    "%s: shared spinning on %p with %u and %u",
-						    __func__, sx, spintries, i);
-					n = SX_SHARERS(x);
-					lock_delay_spin(n);
+				    "lockname:\"%s\"",
+				    sx->lock_object.lo_name);
+				do {
+					lock_delay(&lda);
 					x = SX_READ_VALUE(sx);
-					if ((x & SX_LOCK_SHARED) == 0 ||
-					    SX_SHARERS(x) == 0)
-						break;
-				}
-#ifdef KDTRACE_HOOKS
-				lda.spin_cnt += i;
-#endif
+					owner = lv_sx_owner(x);
+				} while (owner != NULL &&
+				    TD_IS_RUNNING(owner));
 				KTR_STATE0(KTR_SCHED, "thread",
 				    sched_tdname(curthread), "running");
-				if (i != asx_loops)
-					continue;
+				continue;
 			}
+		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			spintries++;
+			for (i = 0; i < asx_loops; i += n) {
+				if (LOCK_LOG_TEST(&sx->lock_object, 0))
+					CTR4(KTR_LOCK,
+			    "%s: shared spinning on %p with %u and %u",
+					    __func__, sx, spintries, i);
+				n = SX_SHARERS(x);
+				lock_delay_spin(n);
+				x = SX_READ_VALUE(sx);
+				if ((x & SX_LOCK_SHARED) == 0 ||
+				    SX_SHARERS(x) == 0)
+					break;
+			}
+#ifdef KDTRACE_HOOKS
+			lda.spin_cnt += i;
+#endif
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "running");
+			if (i != asx_loops)
+				continue;
 		}
 #endif
-
+sleepq:
 		sleepq_lock(&sx->lock_object);
 		x = SX_READ_VALUE(sx);
 retry_sleepq:
@@ -686,8 +695,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 		 * chain lock. If so, drop the sleep queue lock and try
 		 * again.
 		 */
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+		if (!(x & SX_LOCK_SHARED) && adaptive) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -742,7 +750,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 #ifdef KDTRACE_HOOKS
 		sleep_time -= lockstat_nsecs(&sx->lock_object);
 #endif
-		GIANT_SAVE(extra_work);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
@@ -892,6 +899,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
+	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -920,6 +928,10 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 	lock_delay_arg_init(&lda, NULL);
 #endif
 
+#ifdef ADAPTIVE_SX
+	adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) != 0);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -936,6 +948,9 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 		state = x;
 	}
 #endif
+#ifndef INVARIANTS
+	GIANT_SAVE(extra_work);
+#endif
 
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
@@ -944,39 +959,42 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 	for (;;) {
 		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
 			break;
+#ifdef INVARIANTS
+		GIANT_SAVE(extra_work);
+#endif
 #ifdef KDTRACE_HOOKS
 		lda.spin_cnt++;
 #endif
 #ifdef ADAPTIVE_SX
+		if (__predict_false(!adaptive))
+			goto sleepq;
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-			owner = lv_sx_owner(x);
-			if (TD_IS_RUNNING(owner)) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-					    __func__, sx, owner);
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "spinning",
-				    "lockname:\"%s\"", sx->lock_object.lo_name);
-				GIANT_SAVE(extra_work);
-				do {
-					lock_delay(&lda);
-					x = SX_READ_VALUE(sx);
-					owner = lv_sx_owner(x);
-				} while (owner != NULL && TD_IS_RUNNING(owner));
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(curthread), "running");
-				continue;
-			}
+		owner = lv_sx_owner(x);
+		if (TD_IS_RUNNING(owner)) {
+			if (LOCK_LOG_TEST(&sx->lock_object, 0))
+				CTR3(KTR_LOCK,
+				    "%s: spinning on %p held by %p",
+				    __func__, sx, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "spinning",
+			    "lockname:\"%s\"", sx->lock_object.lo_name);
+			do {
+				lock_delay(&lda);
+				x = SX_READ_VALUE(sx);
+				owner = lv_sx_owner(x);
+			} while (owner != NULL && TD_IS_RUNNING(owner));
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(curthread), "running");
+			continue;
 		}
 #endif
+sleepq:
 		/*
 		 * Some other thread already has an exclusive lock, so
 		 * start the process of blocking.
@@ -999,8 +1017,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+		if (!(x & SX_LOCK_SHARED) && adaptive) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -1035,7 +1052,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 #ifdef KDTRACE_HOOKS
 		sleep_time -= lockstat_nsecs(&sx->lock_object);
 #endif
-		GIANT_SAVE(extra_work);
 		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
 		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
 		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);