Add code to make default mutexes adaptive if the ADAPTIVE_MUTEXES kernel
option is used (not on by default).
- In the case of trying to lock a mutex, if the MTX_CONTESTED flag is
  set, then we can safely read the thread pointer from the mtx_lock
  member while holding sched_lock.  We then examine the thread to see
  if it is currently executing on another CPU.  If it is, then we keep
  looping instead of blocking.  (A simplified sketch of this lock path
  follows the commit metadata below.)
- In the case of trying to unlock a mutex, it is now possible for a
  mutex to have MTX_CONTESTED set in mtx_lock but not to have any
  threads actually blocked on it, so we need to handle that case.  In
  that case, we just release the lock as if MTX_CONTESTED was not set
  and return.  (See the matching sketch after the final hunk below.)
- We do not adaptively spin on Giant, as Giant is held for long times
  and spinning on it slows SMP systems down to a crawl (it was taking
  several minutes, 5-10 or so, for my test alpha and sparc64 SMP boxes
  to boot when they adaptively spun on Giant).
- We only compile in the code to do this for SMP kernels; it doesn't
  make sense for UP kernels.

Tested on:	i386, alpha, sparc64
parent	a05068c0d6
commit	2498cf8c42
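The first bullet amounts to the decision loop below.  This is a minimal
userspace sketch in C11 atomics: every name in it (struct athread,
struct amutex, block_on, on_cpu, is_giant) is invented for illustration,
while the real change in the diff operates on struct thread, mtx_lock,
and sched_lock instead.

/*
 * Illustrative adaptive lock path: spin while the owner is on a CPU,
 * block once it is not.  Not kernel code.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct athread {
	_Atomic bool on_cpu;	/* stands in for ke_oncpu != NOCPU */
};

struct amutex {
	_Atomic(struct athread *) owner;	/* NULL when unlocked */
	bool is_giant;		/* Giant is never spun on adaptively */
};

/* Stand-in for queueing on the mutex and sleeping; body elided. */
static void
block_on(struct amutex *m)
{
	(void)m;
}

static void
amutex_lock(struct amutex *m, struct athread *self)
{
	struct athread *owner;

	for (;;) {
		owner = NULL;
		/* Uncontested case: take the lock with one CAS. */
		if (atomic_compare_exchange_strong(&m->owner, &owner, self))
			return;
		/*
		 * Adaptive case: if the owner is running on another
		 * CPU it is likely to release the lock soon, so keep
		 * looping instead of paying for a block/wakeup pair.
		 * Giant is excluded since it is held for long times.
		 */
		if (!m->is_giant && atomic_load(&owner->on_cpu)) {
			sched_yield();	/* cpu_spinwait() in a kernel */
			continue;
		}
		/* Owner is off CPU: blocking is the right choice. */
		block_on(m);
	}
}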
@@ -120,6 +120,11 @@ options 	ROOTDEVNAME=\"ufs:da0s2e\"
 # Mandatory:
 options 	SMP			# Symmetric MultiProcessor Kernel
 
+# ADAPTIVE_MUTEXES changes the behavior of blocking mutexes to spin
+# if the thread that currently owns the mutex is executing on another
+# CPU.
+options 	ADAPTIVE_MUTEXES
+
 # SMP Debugging Options:
 #
 # MUTEX_DEBUG enables various extra assertions in the mutex code.
@@ -57,6 +57,7 @@ ADW_ALLOW_MEMIO	opt_adw.h	# Allow PCI devices to use memory
 
 # Miscellaneous options.
 GEOM		opt_geom.h
+ADAPTIVE_MUTEXES	opt_adaptive_mutexes.h
 COMPAT_43	opt_compat.h
 COMPAT_SUNOS	opt_compat.h
 COMPILING_LINT	opt_global.h
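For reference, turning this on in a kernel configuration only takes the
lines already shown in the NOTES hunk above; a hypothetical config
fragment (any config name works), followed by the usual config(8) and
make rebuild cycle:

options 	SMP			# Symmetric MultiProcessor Kernel
options 	ADAPTIVE_MUTEXES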
@@ -34,6 +34,7 @@
  * Machine independent bits of mutex implementation.
  */
 
+#include "opt_adaptive_mutexes.h"
 #include "opt_ddb.h"
 
 #include <sys/param.h>
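A note on the new include: config(8) generates a small opt_*.h header
for each such options entry, which is how the ADAPTIVE_MUTEXES define
reaches the code below.  With the option enabled, the generated header
should look roughly like this (illustrative, not copied from a build):

/* opt_adaptive_mutexes.h, as emitted by config(8) when the option is set */
#define ADAPTIVE_MUTEXES 1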
@@ -459,6 +460,9 @@ void
 _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 {
 	struct thread *td = curthread;
+#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
+	struct thread *owner;
+#endif
 
 	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
 		m->mtx_recurse++;
@@ -514,6 +518,19 @@ _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
 			continue;
 		}
 
+#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
+		/*
+		 * If the current owner of the lock is executing on another
+		 * CPU, spin instead of blocking.
+		 */
+		owner = (struct thread *)(v & MTX_FLAGMASK);
+		if (m != &Giant && owner->td_kse != NULL &&
+		    owner->td_kse->ke_oncpu != NOCPU) {
+			mtx_unlock_spin(&sched_lock);
+			continue;
+		}
+#endif	/* SMP && ADAPTIVE_MUTEXES */
+
 		/*
 		 * We definitely must sleep for this lock.
 		 */
@@ -651,6 +668,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
 
 	td1 = TAILQ_FIRST(&m->mtx_blocked);
+#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
+	if (td1 == NULL) {
+		_release_lock_quick(m);
+		if (LOCK_LOG_TEST(&m->mtx_object, opts))
+			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
+		mtx_unlock_spin(&sched_lock);
+		return;
+	}
+#endif
 	MPASS(td->td_proc->p_magic == P_MAGIC);
 	MPASS(td1->td_proc->p_magic == P_MAGIC);
 
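The second bullet of the commit message is the case handled by the hunk
above.  A matching sketch, again with invented types rather than the
kernel's: with adaptive spinning, a contender can publish contention
and then spin without ever enqueueing itself, so the unlocking thread
may find the contested state with an empty blocked queue, and the fix
is to release as if the flag had never been set.

/* Illustrative unlock path; not kernel code. */
#include <stdatomic.h>
#include <stddef.h>

struct waiter;				/* blocked-thread record */

struct amutex {
	_Atomic(void *) owner;		/* lock word; NULL when free */
	struct waiter *blocked;		/* head of the blocked list */
};

static void
amutex_unlock(struct amutex *m)
{
	if (m->blocked == NULL) {
		/* Contested flag but no sleepers: plain release. */
		atomic_store(&m->owner, NULL);
		return;
	}
	/* Otherwise hand the lock to the first blocked thread (elided). */
}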