diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index ca9c1613210a..4adcbc8f56cf 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -414,7 +414,7 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 	int spintries = 0;
-	int i;
+	int i, n;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -488,8 +488,9 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
 			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
 			    "spinning", "lockname:\"%s\"",
 			    rw->lock_object.lo_name);
-			for (i = 0; i < rowner_loops; i++) {
-				cpu_spinwait();
+			for (i = 0; i < rowner_loops; i += n) {
+				n = RW_READERS(v);
+				lock_delay_spin(n);
 				v = RW_READ_VALUE(rw);
 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(td, v))
 					break;
@@ -830,7 +831,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 	int spintries = 0;
-	int i;
+	int i, n;
 #endif
 	uintptr_t x;
 #ifdef LOCK_PROFILING
@@ -928,8 +929,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
 		    "spinning", "lockname:\"%s\"",
 		    rw->lock_object.lo_name);
-		for (i = 0; i < rowner_loops; i++) {
-			cpu_spinwait();
+		for (i = 0; i < rowner_loops; i += n) {
+			n = RW_READERS(v);
+			lock_delay_spin(n);
 			v = RW_READ_VALUE(rw);
 			if ((v & RW_LOCK_WRITE_SPINNER) == 0)
 				break;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 486a0b361d61..bc3d22b21f37 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -502,7 +502,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
-	u_int i, spintries = 0;
+	u_int i, n, spintries = 0;
 #endif
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
@@ -600,12 +600,13 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
 			    "lockname:\"%s\"", sx->lock_object.lo_name);
 			GIANT_SAVE();
 			spintries++;
-			for (i = 0; i < asx_loops; i++) {
+			for (i = 0; i < asx_loops; i += n) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
 				    "%s: shared spinning on %p with %u and %u",
					    __func__, sx, spintries, i);
-				cpu_spinwait();
+				n = SX_SHARERS(x);
+				lock_delay_spin(n);
 				x = SX_READ_VALUE(sx);
 				if ((x & SX_LOCK_SHARED) == 0 ||
 				    SX_SHARERS(x) == 0)
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index a5beba116fca..fb3ad3101e88 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -226,6 +226,13 @@ lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc)
 	la->spin_cnt = 0;
 }
 
+#define	lock_delay_spin(n)	do {		\
+	u_int _i;				\
+						\
+	for (_i = (n); _i > 0; _i--)		\
+		cpu_spinwait();			\
+} while (0)
+
 #define	LOCK_DELAY_SYSINIT(func) \
	SYSINIT(func##_ld, SI_SUB_LOCK, SI_ORDER_ANY, func, NULL)
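
For context: the change above replaces the fixed one-cpu_spinwait()-per-iteration spin in the adaptive read/shared paths with bursts sized by the observed reader (or sharer) count, charging the whole burst against the spin budget, so a waiter pauses longer between polls of the lock word when more readers hold the lock. Below is a minimal userspace sketch of that pattern, not the kernel code itself: cpu_spinwait() is approximated with an x86 "pause" hint, and the lock word, the READERS() macro and the rowner_loops budget are invented stand-ins used purely for illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the kernel's cpu_spinwait(); assumes x86 ("pause" hint). */
#define	cpu_spinwait()	__asm__ __volatile__("pause")

/* Same shape as the lock_delay_spin() macro added to sys/sys/lock.h above. */
#define	lock_delay_spin(n) do {			\
	unsigned int _i;			\
						\
	for (_i = (n); _i > 0; _i--)		\
		cpu_spinwait();			\
} while (0)

/* Fake lock word for the sketch: the low byte holds the reader count. */
static _Atomic unsigned long lock_word = 3;	/* pretend 3 readers hold it */

#define	READERS(v)	((unsigned int)((v) & 0xff))

static const unsigned int rowner_loops = 10000;	/* arbitrary spin budget */

int
main(void)
{
	unsigned long v;
	unsigned int i, n;

	v = atomic_load_explicit(&lock_word, memory_order_relaxed);
	/*
	 * Spin in bursts sized by the observed reader count and charge the
	 * whole burst against the budget, mirroring the patched loops.
	 */
	for (i = 0; i < rowner_loops; i += n) {
		n = READERS(v);
		if (n == 0)
			n = 1;		/* avoid a zero-length burst */
		lock_delay_spin(n);
		v = atomic_load_explicit(&lock_word, memory_order_relaxed);
		if (READERS(v) == 0)
			break;		/* readers drained; stop spinning */
	}
	printf("spent a budget of %u, %u readers still held the lock\n",
	    i, READERS(v));
	return (0);
}

One difference from the kernel loops: the sketch clamps the burst length to at least 1, because its toy loop has no other exit condition, whereas the patched rwlock/sx paths break out as soon as the lock state allows them to proceed.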