From d6dabc3153c96a0f27189a972d5839554702caa0 Mon Sep 17 00:00:00 2001
From: jeff
Date: Fri, 4 Apr 2008 10:00:46 +0000
Subject: [PATCH] - Add sysctls at debug.rwlock to control the behavior of the
 speculative spinning when readers hold a lock.  This spinning is speculative
 because, unlike the write case, we cannot test whether the owners are
 running.
 - Add speculative read spinning for readers who are blocked by pending
 writers while a read lock is still held.  This allows the thread to spin
 until the write lock succeeds, after which it may spin until the writer has
 released the lock.  This prevents excessive context switches when readers
 and writers both hold the lock for brief periods.

Sponsored by:	Nokia
---
 sys/kern/kern_rwlock.c | 29 ++++++++++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index b2469edde1ea..6a0aab924cae 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -39,10 +39,12 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/ktr.h>
+#include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
 
@@ -54,6 +56,14 @@ CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);
 #define ADAPTIVE_RWLOCKS
 #endif
 
+#ifdef ADAPTIVE_RWLOCKS
+static int rowner_retries = 10;
+static int rowner_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+#endif
+
 #ifdef DDB
 #include <ddb/ddb.h>
 
@@ -261,6 +271,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
 	struct turnstile *ts;
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
+	int spintries = 0;
+	int i;
 #endif
 	uint64_t waittime = 0;
 	int contested = 0;
@@ -324,6 +336,16 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
 					cpu_spinwait();
 				continue;
 			}
+		} else if (spintries < rowner_retries) {
+			spintries++;
+			for (i = 0; i < rowner_loops; i++) {
+				v = rw->rw_lock;
+				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
+					break;
+				cpu_spinwait();
+			}
+			if (i != rowner_loops)
+				continue;
 		}
 #endif
 
@@ -592,7 +614,8 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 				cpu_spinwait();
 			continue;
 		}
-		if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < 100) {
+		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
+		    spintries < rowner_retries) {
 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
 				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
 				    v | RW_LOCK_WRITE_SPINNER)) {
@@ -601,12 +624,12 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 				}
 			}
 			spintries++;
-			for (i = 100000; i > 0; i--) {
+			for (i = 0; i < rowner_loops; i++) {
 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
 					break;
 				cpu_spinwait();
 			}
-			if (i)
+			if (i != rowner_loops)
 				continue;
 		}
 #endif
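
The read-side change above amounts to a bounded spin before falling back to
the turnstile: a reader that cannot take the lock because writers are pending
retries up to rowner_retries times, spinning up to rowner_loops iterations per
retry, and only goes to sleep if the lock never becomes readable while it
spins.  The standalone C program below is a minimal userspace sketch of that
retry/loop pattern, not the kernel code itself: lock_word, can_read() and
speculative_read_spin() are names invented here for illustration (they loosely
play the roles of rw->rw_lock, RW_CAN_READ() and the new loop in _rw_rlock()),
and sched_yield() stands in for cpu_spinwait().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <sched.h>

/* Tunables mirroring the defaults of debug.rwlock.retry and .loops. */
static int spin_retries = 10;
static int spin_loops = 10000;

/* Toy lock word: bit 0 set means "write-locked or writer pending". */
static _Atomic unsigned long lock_word;

static bool
can_read(unsigned long v)
{

	/* Stand-in for the kernel's RW_CAN_READ() test. */
	return ((v & 1UL) == 0);
}

static void
cpu_spinwait(void)
{

	/* Placeholder for the CPU pause hint used by the kernel. */
	sched_yield();
}

/*
 * Bounded speculative spin: retry up to spin_retries times, spinning
 * spin_loops iterations each time, before giving up.  Returns true if a
 * readable lock state was observed; false means the caller would block
 * on the turnstile instead of spinning forever.
 */
static bool
speculative_read_spin(void)
{
	int spintries, i;

	for (spintries = 0; spintries < spin_retries; spintries++) {
		for (i = 0; i < spin_loops; i++) {
			if (can_read(atomic_load(&lock_word)))
				return (true);
			cpu_spinwait();
		}
	}
	return (false);
}

int
main(void)
{

	atomic_store(&lock_word, 1);	/* writer holds or is pending */
	printf("spin while write-locked: %d\n", speculative_read_spin());
	atomic_store(&lock_word, 0);	/* writer released */
	printf("spin when readable: %d\n", speculative_read_spin());
	return (0);
}

With the patch applied, the in-kernel equivalents of the two constants are
runtime-tunable because both sysctls are CTLFLAG_RW; for example,
"sysctl debug.rwlock.retry=20" or "sysctl debug.rwlock.loops=20000" makes
waiters spin longer before blocking, and both default to the values used
above (10 and 10000).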