Create a mutex pool API for short-term leaf mutexes.

Replace the manual mutex pool in kern_lock.c (lockmgr locks) with the new API.
Replace the mutexes embedded in sxlocks with the new API.
Matthew Dillon 2001-11-13 21:55:13 +00:00
parent 6ba3aeb390
commit f286003909
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=86333
8 changed files with 162 additions and 74 deletions
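
For orientation, a minimal sketch of how a consumer of the new API might
protect a structure without embedding a mutex in it (struct foo and
foo_ref() are hypothetical, not part of this commit):

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    struct foo {
            int foo_refs;           /* protected by a pool mutex */
    };

    static void
    foo_ref(struct foo *fp)
    {
            /* Lock/unlock the shared pool mutex hashed from fp's address. */
            mtx_pool_lock(fp);
            fp->foo_refs++;
            mtx_pool_unlock(fp);
    }

Longer-lived consumers can instead fetch a pool entry once with
mtx_pool_alloc(), which is what lockinit() below now does for lockmgr
interlocks.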

sys/conf/files

@@ -781,6 +781,7 @@ kern/kern_malloc.c standard
kern/kern_mib.c standard
kern/kern_module.c standard
kern/kern_mutex.c standard
kern/kern_mtxpool.c standard
kern/kern_ntptime.c standard
kern/kern_physio.c standard
kern/kern_proc.c standard

sys/kern/kern_lock.c

@@ -72,9 +72,7 @@
* share a fixed (at boot time) number of mutexes across all lockmgr locks in
* order to keep sizeof(struct lock) down.
*/
extern int lock_nmtx;
int lock_mtx_selector;
struct mtx *lock_mtx_array;
int lock_mtx_valid;
static struct mtx lock_mtx;
static int acquire(struct lock *lkp, int extflags, int wanted);
@@ -84,28 +82,16 @@ static int acquiredrain(struct lock *lkp, int extflags) ;
static void
lockmgr_init(void *dummy __unused)
{
int i;
/*
* Initialize the lockmgr protection mutex if it hasn't already been
* done. Unless something changes about kernel startup order, VM
* initialization will always cause this mutex to already be
* initialized in a call to lockinit().
*/
if (lock_mtx_selector == 0)
if (lock_mtx_valid == 0) {
mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
else {
/*
* This is necessary if (lock_nmtx == 1) and doesn't hurt
* otherwise.
*/
lock_mtx_selector = 0;
lock_mtx_valid = 1;
}
lock_mtx_array = (struct mtx *)malloc(sizeof(struct mtx) * lock_nmtx,
M_CACHE, M_WAITOK | M_ZERO);
for (i = 0; i < lock_nmtx; i++)
mtx_init(&lock_mtx_array[i], "lockmgr interlock", MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)
@@ -507,29 +493,19 @@ lockinit(lkp, prio, wmesg, timo, flags)
CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
"timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
if (lock_mtx_array != NULL) {
if (lock_mtx_valid == 0) {
mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
lock_mtx_valid = 1;
}
/*
* XXX cleanup - make sure mtxpool is always initialized before
* this is ever called.
*/
if (mtx_pool_valid) {
mtx_lock(&lock_mtx);
lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
lock_mtx_selector++;
if (lock_mtx_selector == lock_nmtx)
lock_mtx_selector = 0;
lkp->lk_interlock = mtx_pool_alloc();
mtx_unlock(&lock_mtx);
} else {
/*
* Giving lockmgr locks that are initialized during boot a
* pointer to the internal lockmgr mutex is safe, since the
* lockmgr code itself doesn't call lockinit() (which could
* cause mutex recursion).
*/
if (lock_mtx_selector == 0) {
/*
* This case only happens during kernel bootstrapping,
* so there's no reason to protect modification of
* lock_mtx_selector or lock_mtx.
*/
mtx_init(&lock_mtx, "lockmgr", MTX_DEF);
lock_mtx_selector = 1;
}
lkp->lk_interlock = &lock_mtx;
}
lkp->lk_flags = (flags & LK_EXTFLG_MASK);

sys/kern/kern_mtxpool.c Normal file

@@ -0,0 +1,112 @@
/*-
* Copyright (c) 2001 Matthew Dillon. All Rights Reserved. Copyright
* terms are as specified in the COPYRIGHT file at the base of the source
* tree.
*
* Mutex pool routines. These routines are designed to be used as short
* term leaf mutexes (e.g. the last mutex you might acquire other than
* calling msleep()). They operate using a shared pool. A mutex is chosen
* from the pool based on the supplied pointer (which may or may not be
* valid).
*
* Advantages:
* - no structural overhead. Mutexes can be associated with structures
* without adding bloat to the structures.
* - mutexes can be obtained for invalid pointers, useful when using
* mutexes to interlock destructor ops.
* - no initialization/destructor overhead
* - can be used with msleep.
*
* Disadvantages:
* - should generally only be used as leaf mutexes
* - pool/pool dependency ordering cannot be depended on.
* - possible L1 cache mastership contention between CPUs
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#ifndef MTX_POOL_SIZE
#define MTX_POOL_SIZE 128
#endif
#define MTX_POOL_MASK (MTX_POOL_SIZE-1)
static struct mtx mtx_pool_ary[MTX_POOL_SIZE];
int mtx_pool_valid = 0;
/*
* Inline version of mtx_pool_find(), used to streamline our main API
* function calls.
*/
static __inline
struct mtx *
_mtx_pool_find(void *ptr)
{
return(&mtx_pool_ary[((int)ptr ^ ((int)ptr >> 6)) & MTX_POOL_MASK]);
}
static void
mtx_pool_setup(void *dummy __unused)
{
int i;
for (i = 0; i < MTX_POOL_SIZE; ++i)
mtx_init(&mtx_pool_ary[i], "pool mutex", MTX_DEF);
mtx_pool_valid = 1;
}
/*
* Obtain a (shared) mutex from the pool. The returned mutex is a leaf
* level mutex, meaning that if you obtain it you cannot obtain any other
* mutexes until you release it. You can legally msleep() on the mutex.
*/
struct mtx *
mtx_pool_alloc(void)
{
static int si;
return(&mtx_pool_ary[si++ & MTX_POOL_MASK]);
}
/*
* Return the (shared) pool mutex associated with the specified address.
* The returned mutex is a leaf level mutex, meaning that if you obtain it
* you cannot obtain any other mutexes until you release it. You can
* legally msleep() on the mutex.
*/
struct mtx *
mtx_pool_find(void *ptr)
{
return(_mtx_pool_find(ptr));
}
/*
* Combined find/lock operation. Lock the pool mutex associated with
* the specified address.
*/
void
mtx_pool_lock(void *ptr)
{
mtx_lock(_mtx_pool_find(ptr));
}
/*
* Combined find/unlock operation. Unlock the pool mutex associated with
* the specified address.
*/
void
mtx_pool_unlock(void *ptr)
{
mtx_unlock(_mtx_pool_find(ptr));
}
SYSINIT(mtxpooli, SI_SUB_MUTEX, SI_ORDER_FIRST, mtx_pool_setup, NULL)
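
_mtx_pool_find() above selects a slot by hashing the supplied pointer.
The following stand-alone user-space sketch replicates that slot
computation (widened through uintptr_t here for portability; the kernel
code casts through int) to show how addresses map onto the MTX_POOL_SIZE
slots:

    #include <stdio.h>
    #include <stdint.h>

    #define MTX_POOL_SIZE   128
    #define MTX_POOL_MASK   (MTX_POOL_SIZE - 1)

    /* Same slot computation as _mtx_pool_find(). */
    static int
    pool_slot(void *ptr)
    {
            uintptr_t p = (uintptr_t)ptr;

            return ((int)((p ^ (p >> 6)) & MTX_POOL_MASK));
    }

    int
    main(void)
    {
            int a, b;

            /* Unrelated objects can land on the same slot, which is why
             * lock ordering between pool mutexes cannot be depended on. */
            printf("&a -> slot %d, &b -> slot %d\n",
                pool_slot(&a), pool_slot(&b));
            return (0);
    }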

sys/kern/kern_sx.c

@@ -65,8 +65,7 @@ sx_init(struct sx *sx, const char *description)
lock->lo_name = description;
lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
LO_UPGRADABLE;
mtx_init(&sx->sx_lock, "sx backing lock",
MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
sx->sx_lock = mtx_pool_find(sx);
sx->sx_cnt = 0;
cv_init(&sx->sx_shrd_cv, description);
sx->sx_shrd_wcnt = 0;
@@ -89,7 +88,7 @@ sx_destroy(struct sx *sx)
0), ("%s (%s): holders or waiters\n", __FUNCTION__,
sx->sx_object.lo_name));
mtx_destroy(&sx->sx_lock);
sx->sx_lock = NULL;
cv_destroy(&sx->sx_shrd_cv);
cv_destroy(&sx->sx_excl_cv);
@@ -100,7 +99,7 @@ void
_sx_slock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
KASSERT(sx->sx_xholder != curthread,
("%s (%s): slock while xlock is held @ %s:%d\n", __FUNCTION__,
sx->sx_object.lo_name, file, line));
@@ -110,7 +109,7 @@ _sx_slock(struct sx *sx, const char *file, int line)
*/
while (sx->sx_cnt < 0) {
sx->sx_shrd_wcnt++;
cv_wait(&sx->sx_shrd_cv, &sx->sx_lock);
cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
sx->sx_shrd_wcnt--;
}
@@ -120,23 +119,23 @@ _sx_slock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
}
int
_sx_try_slock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
if (sx->sx_cnt >= 0) {
sx->sx_cnt++;
LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
return (1);
} else {
LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
return (0);
}
}
@@ -145,7 +144,7 @@ void
_sx_xlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
/*
* With sx locks, we're absolutely not permitted to recurse on
@@ -161,7 +160,7 @@ _sx_xlock(struct sx *sx, const char *file, int line)
/* Loop in case we lose the race for lock acquisition. */
while (sx->sx_cnt != 0) {
sx->sx_excl_wcnt++;
cv_wait(&sx->sx_excl_cv, &sx->sx_lock);
cv_wait(&sx->sx_excl_cv, sx->sx_lock);
sx->sx_excl_wcnt--;
}
@@ -174,25 +173,25 @@ _sx_xlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
}
int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
if (sx->sx_cnt == 0) {
sx->sx_cnt--;
sx->sx_xholder = curthread;
LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
return (1);
} else {
LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
return (0);
}
}
@@ -202,7 +201,7 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
{
_sx_assert(sx, SX_SLOCKED, file, line);
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
@@ -223,7 +222,7 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
}
void
@@ -231,7 +230,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
{
_sx_assert(sx, SX_XLOCKED, file, line);
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
MPASS(sx->sx_cnt == -1);
WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
@@ -250,7 +249,7 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
}
int
@@ -258,7 +257,7 @@ _sx_try_upgrade(struct sx *sx, const char *file, int line)
{
_sx_assert(sx, SX_SLOCKED, file, line);
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
if (sx->sx_cnt == 1) {
sx->sx_cnt = -1;
@@ -268,11 +267,11 @@ _sx_try_upgrade(struct sx *sx, const char *file, int line)
WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
return (1);
} else {
LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
return (0);
}
}
@@ -282,7 +281,7 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
{
_sx_assert(sx, SX_XLOCKED, file, line);
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
MPASS(sx->sx_cnt == -1);
WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
@@ -294,7 +293,7 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
}
#ifdef INVARIANT_SUPPORT
@@ -317,21 +316,21 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
#ifdef WITNESS
witness_assert(&sx->sx_object, what, file, line);
#else
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
if (sx->sx_cnt <= 0 &&
(what == SX_SLOCKED || sx->sx_xholder == curthread))
printf("Lock %s not %slocked @ %s:%d",
sx->sx_object.lo_name, (what == SX_SLOCKED) ?
"share " : "", file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
#endif
break;
case SX_XLOCKED:
mtx_lock(&sx->sx_lock);
mtx_lock(sx->sx_lock);
if (sx->sx_xholder != curthread)
printf("Lock %s not exclusively locked @ %s:%d",
sx->sx_object.lo_name, file, line);
mtx_unlock(&sx->sx_lock);
mtx_unlock(sx->sx_lock);
break;
default:
panic("Unknown sx lock assertion: %d @ %s:%d", what, file,

sys/kern/subr_param.c

@@ -90,14 +90,6 @@ u_quad_t sgrowsiz; /* amount to grow stack */
*/
struct buf *swbuf;
/*
* Total number of shared mutexes to protect all lockmgr locks.
*/
#ifndef LOCKMUTEX
#define LOCKMUTEX 10
#endif
int lock_nmtx = LOCKMUTEX;
/*
* Boot time overrides
*/

sys/sys/kernel.h

@@ -117,6 +117,7 @@ enum sysinit_sub_id {
SI_SUB_KMEM = 0x1800000, /* kernel memory*/
SI_SUB_KVM_RSRC = 0x1A00000, /* kvm operational limits*/
SI_SUB_WITNESS = 0x1A80000, /* witness initialization */
SI_SUB_MUTEX = 0x1AC0000, /* mutex pool */
SI_SUB_LOCK = 0x1B00000, /* lockmgr locks */
SI_SUB_EVENTHANDLER = 0x1C00000, /* eventhandler init */
SI_SUB_KLD = 0x2000000, /* KLD and module setup */

sys/sys/mutex.h

@@ -239,6 +239,13 @@ void mtx_unlock_giant(int s);
#define mtx_unlock(m) mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0)
struct mtx *mtx_pool_find(void *ptr);
struct mtx *mtx_pool_alloc(void);
void mtx_pool_lock(void *ptr);
void mtx_pool_unlock(void *ptr);
extern int mtx_pool_valid;
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif

sys/sys/sx.h

@@ -37,7 +37,7 @@
struct sx {
struct lock_object sx_object; /* Common lock properties. */
struct mtx sx_lock; /* General protection lock. */
struct mtx *sx_lock; /* General protection lock. */
int sx_cnt; /* -1: xlock, > 0: slock count. */
struct cv sx_shrd_cv; /* slock waiters. */
int sx_shrd_wcnt; /* Number of slock waiters. */