Extend the mutex pool implementation to permit the creation and use of
multiple mutex pools with different options and sizes.  Mutex pools can
be created with either the default sleep mutexes or with spin mutexes.
A dynamically created mutex pool can now be destroyed if it is no longer
needed.  Create two pools by default, one that matches the existing pool
that uses the MTX_NOWITNESS option and should be used for building
higher level locks, and a new pool with witness checking enabled.
Modify the users of the existing mutex pool to use the appropriate pool
in the new implementation.

Reviewed by:	jhb
commit 857d9c60d0
parent 3c63a8b4fc
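For orientation, a minimal sketch of how a subsystem might use the reworked API introduced below (mtx_pool_create(), mtx_pool_alloc(), mtx_pool_destroy()), assuming the usual <sys/param.h>, <sys/lock.h> and <sys/mutex.h> includes; the "foo" names are hypothetical and not part of this commit:

struct foo {
	struct mtx	*foo_mtx;	/* leaf mutex borrowed from a pool */
	int		 foo_refs;
};

static struct mtx_pool *foo_pool;	/* hypothetical private pool */

static void
foo_init(void)
{
	/* 64 sleep mutexes; the pool size must be a power of two. */
	foo_pool = mtx_pool_create("foo mtxpool", 64, MTX_DEF);
}

static void
foo_setup(struct foo *fp)
{
	/* Hand the object a shared leaf mutex, as falloc() does below. */
	fp->foo_mtx = mtx_pool_alloc(foo_pool);
}

static void
foo_ref(struct foo *fp)
{
	mtx_lock(fp->foo_mtx);		/* msleep() on fp->foo_mtx is legal */
	fp->foo_refs++;
	mtx_unlock(fp->foo_mtx);
}

static void
foo_uninit(void)
{
	/* A dynamically created pool can now be torn down when unused. */
	mtx_pool_destroy(&foo_pool);	/* also sets foo_pool to NULL */
}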
@@ -1209,7 +1209,7 @@ falloc(td, resultfp, resultfd)
 	 * descriptor to the list of open files at that point, otherwise
 	 * put it at the front of the list of open files.
 	 */
-	fp->f_mtxp = mtx_pool_alloc();
+	fp->f_mtxp = mtx_pool_alloc(mtxpool_sleep);
 	fp->f_count = 1;
 	fp->f_cred = crhold(td->td_ucred);
 	fp->f_ops = &badfileops;
@@ -547,9 +547,9 @@ lockinit(lkp, prio, wmesg, timo, flags)
 	 * XXX cleanup - make sure mtxpool is always initialized before
 	 * this is ever called.
 	 */
-	if (mtx_pool_valid) {
+	if (mtxpool_lockbuilder != NULL) {
 		mtx_lock(&lock_mtx);
-		lkp->lk_interlock = mtx_pool_alloc();
+		lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
 		mtx_unlock(&lock_mtx);
 	} else {
 		lkp->lk_interlock = &lock_mtx;
@@ -35,51 +35,49 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/systm.h>
 
-#ifndef MTX_POOL_SIZE
-#define MTX_POOL_SIZE	128
-#endif
-#define MTX_POOL_MASK	(MTX_POOL_SIZE - 1)
-
-static struct mtx mtx_pool_ary[MTX_POOL_SIZE];
-
-int mtx_pool_valid = 0;
-
-/*
- * Inline version of mtx_pool_find(), used to streamline our main API
- * function calls.
- */
-static __inline struct mtx *
-_mtx_pool_find(void *ptr)
-{
-	int p;
-
-	p = (int)(uintptr_t)ptr;
-	return (&mtx_pool_ary[(p ^ (p >> 6)) & MTX_POOL_MASK]);
-}
-
-static void
-mtx_pool_setup(void *dummy __unused)
-{
-	int i;
-
-	for (i = 0; i < MTX_POOL_SIZE; ++i)
-		mtx_init(&mtx_pool_ary[i], "pool mutex", NULL,
-		    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
-	mtx_pool_valid = 1;
-}
-
-/*
- * Obtain a (shared) mutex from the pool.  The returned mutex is a leaf
- * level mutex, meaning that if you obtain it you cannot obtain any other
- * mutexes until you release it.  You can legally msleep() on the mutex.
- */
-struct mtx *
-mtx_pool_alloc(void)
-{
-	static int si;
-
-	return (&mtx_pool_ary[si++ & MTX_POOL_MASK]);
-}
+MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");
+
+/* Pool sizes must be a power of two */
+#ifndef MTX_POOL_LOCKBUILDER_SIZE
+#define MTX_POOL_LOCKBUILDER_SIZE	128
+#endif
+#ifndef MTX_POOL_SLEEP_SIZE
+#define MTX_POOL_SLEEP_SIZE	128
+#endif
+
+struct mtxpool_header {
+	int mtxpool_size;
+	int mtxpool_mask;
+	int mtxpool_shift;
+	int mtxpool_next;
+};
+
+struct mtx_pool {
+	struct mtxpool_header mtx_pool_header;
+	struct mtx	mtx_pool_ary[1];
+};
+
+static struct mtx_pool_lockbuilder {
+	struct mtxpool_header mtx_pool_header;
+	struct mtx	mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
+} lockbuilder_pool;
+
+#define mtx_pool_size	mtx_pool_header.mtxpool_size
+#define mtx_pool_mask	mtx_pool_header.mtxpool_mask
+#define mtx_pool_shift	mtx_pool_header.mtxpool_shift
+#define mtx_pool_next	mtx_pool_header.mtxpool_next
+
+struct mtx_pool *mtxpool_sleep;
+struct mtx_pool *mtxpool_lockbuilder;
+
+#if UINTPTR_MAX == UINT64_MAX	/* 64 bits */
+# define POINTER_BITS		64
+# define HASH_MULTIPLIER	11400714819323198485u /* (2^64)*(sqrt(5)-1)/2 */
+#else	/* assume 32 bits */
+# define POINTER_BITS		32
+# define HASH_MULTIPLIER	2654435769u /* (2^32)*(sqrt(5)-1)/2 */
+#endif
 
 /*
  * Return the (shared) pool mutex associated with the specified address.
@@ -88,32 +86,113 @@ mtx_pool_alloc(void)
  * legally msleep() on the mutex.
  */
 struct mtx *
-mtx_pool_find(void *ptr)
+mtx_pool_find(struct mtx_pool *pool, void *ptr)
 {
-	return (_mtx_pool_find(ptr));
+	int p;
+
+	KASSERT(pool != NULL, ("_mtx_pool_find(): null pool"));
+	/*
+	 * Fibonacci hash, see Knuth's
+	 * _Art of Computer Programming, Volume 3 / Sorting and Searching_
+	 */
+	p = ((HASH_MULTIPLIER * (uintptr_t)ptr) >> pool->mtx_pool_shift) &
+	    pool->mtx_pool_mask;
+	return (&pool->mtx_pool_ary[p]);
 }
 
+static void
+mtx_pool_initialize(struct mtx_pool *pool, const char *mtx_name, int pool_size,
+    int opts)
+{
+	int i, maskbits;
+
+	pool->mtx_pool_size = pool_size;
+	pool->mtx_pool_mask = pool_size - 1;
+	for (i = 1, maskbits = 0; (i & pool_size) == 0; i = i << 1)
+		maskbits++;
+	pool->mtx_pool_shift = POINTER_BITS - maskbits;
+	pool->mtx_pool_next = 0;
+	for (i = 0; i < pool_size; ++i)
+		mtx_init(&pool->mtx_pool_ary[i], mtx_name, NULL, opts);
+}
+
+struct mtx_pool *
+mtx_pool_create(const char *mtx_name, int pool_size, int opts)
+{
+	struct mtx_pool *pool;
+
+	if (pool_size <= 0 || !powerof2(pool_size)) {
+		printf("WARNING: %s pool size is not a power of 2.\n",
+		    mtx_name);
+		pool_size = 128;
+	}
+	MALLOC(pool, struct mtx_pool *,
+	    sizeof (struct mtx_pool) + ((pool_size - 1) * sizeof (struct mtx)),
+	    M_MTXPOOL, M_WAITOK | M_ZERO);
+	mtx_pool_initialize(pool, mtx_name, pool_size, opts);
+	return pool;
+}
+
+void
+mtx_pool_destroy(struct mtx_pool **poolp)
+{
+	int i;
+	struct mtx_pool *pool = *poolp;
+
+	for (i = pool->mtx_pool_size - 1; i >= 0; --i)
+		mtx_destroy(&pool->mtx_pool_ary[i]);
+	FREE(pool, M_MTXPOOL);
+	*poolp = NULL;
+}
+
+static void
+mtx_pool_setup_static(void *dummy __unused)
+{
+	mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
+	    "lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
+	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
+	mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
+}
+
+static void
+mtx_pool_setup_dynamic(void *dummy __unused)
+{
+	mtxpool_sleep = mtx_pool_create("sleep mtxpool",
+	    MTX_POOL_SLEEP_SIZE, MTX_DEF);
+}
+
 /*
- * Combined find/lock operation.  Lock the pool mutex associated with
- * the specified address.
+ * Obtain a (shared) mutex from the pool.  The returned mutex is a leaf
+ * level mutex, meaning that if you obtain it you cannot obtain any other
+ * mutexes until you release it.  You can legally msleep() on the mutex.
  */
-void
-mtx_pool_lock(void *ptr)
+struct mtx *
+mtx_pool_alloc(struct mtx_pool *pool)
 {
+	int i;
 
-	mtx_lock(_mtx_pool_find(ptr));
+	KASSERT(pool != NULL, ("mtx_pool_alloc(): null pool"));
+	/*
+	 * mtx_pool_next is unprotected against multiple accesses,
+	 * but simultaneous access by two CPUs should not be very
+	 * harmful.
+	 */
+	i = pool->mtx_pool_next;
+	pool->mtx_pool_next = (i + 1) & pool->mtx_pool_mask;
+	return (&pool->mtx_pool_ary[i]);
 }
 
 /*
- * Combined find/unlock operation.  Unlock the pool mutex associated with
- * the specified address.
+ * The lockbuilder pool must be initialized early because the lockmgr
+ * and sx locks depend on it.  The sx locks are used in the kernel
+ * memory allocator.  The lockmgr subsystem is initialized by
+ * SYSINIT(..., SI_SUB_LOCK, ...).
+ *
+ * We can't call MALLOC() to dynamically allocate the sleep pool
+ * until after kmeminit() has been called, which is done by
+ * SYSINIT(..., SI_SUB_KMEM, ...).
  */
-void
-mtx_pool_unlock(void *ptr)
-{
-
-	mtx_unlock(_mtx_pool_find(ptr));
-}
-
-SYSINIT(mtxpooli, SI_SUB_MTX_POOL, SI_ORDER_FIRST, mtx_pool_setup, NULL);
+SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
+    mtx_pool_setup_static, NULL);
+SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
+    mtx_pool_setup_dynamic, NULL);
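The Fibonacci hashing used by the new mtx_pool_find() can be exercised in isolation. A userland sketch of the same address-to-slot mapping for the 64-bit case (the multiplier, shift and mask scheme are taken from the diff above; the pooldemo names are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define POOLDEMO_SIZE		128			/* power of two */
#define POOLDEMO_MASK		(POOLDEMO_SIZE - 1)
#define POOLDEMO_SHIFT		(64 - 7)		/* POINTER_BITS - log2(size) */
#define HASH_MULTIPLIER		11400714819323198485u	/* (2^64)*(sqrt(5)-1)/2 */

static int
pooldemo_index(void *ptr)
{
	/* Multiply by the golden-ratio constant, keep the top log2(size) bits. */
	return (int)(((HASH_MULTIPLIER * (uintptr_t)ptr) >> POOLDEMO_SHIFT) &
	    POOLDEMO_MASK);
}

int
main(void)
{
	int x, y;

	/* Nearby addresses tend to spread across different slots. */
	printf("%d %d\n", pooldemo_index(&x), pooldemo_index(&y));
	return (0);
}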
@@ -1652,7 +1652,7 @@ crget(void)
 
 	MALLOC(cr, struct ucred *, sizeof(*cr), M_CRED, M_WAITOK | M_ZERO);
 	cr->cr_ref = 1;
-	cr->cr_mtxp = mtx_pool_find(cr);
+	cr->cr_mtxp = mtx_pool_find(mtxpool_sleep, cr);
 #ifdef MAC
 	mac_init_cred(cr);
 #endif
@@ -893,7 +893,7 @@ uifind(uid)
 		free(uip, M_UIDINFO);
 		uip = old_uip;
 	} else {
-		uip->ui_mtxp = mtx_pool_alloc();
+		uip->ui_mtxp = mtx_pool_alloc(mtxpool_sleep);
 		uip->ui_uid = uid;
 		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
 	}
@@ -74,7 +74,7 @@ sx_init(struct sx *sx, const char *description)
 	lock->lo_type = lock->lo_name = description;
 	lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
 	    LO_UPGRADABLE;
-	sx->sx_lock = mtx_pool_find(sx);
+	sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
 	sx->sx_cnt = 0;
 	cv_init(&sx->sx_shrd_cv, description);
 	sx->sx_shrd_wcnt = 0;
@@ -112,11 +112,12 @@ enum sysinit_sub_id {
 	SI_SUB_TUNABLES		= 0x0700000,	/* establish tunable values */
 	SI_SUB_CONSOLE		= 0x0800000,	/* console*/
 	SI_SUB_COPYRIGHT	= 0x0800001,	/* first use of console*/
-	SI_SUB_MTX_POOL		= 0x0900000,	/* mutex pool */
+	SI_SUB_MTX_POOL_STATIC	= 0x0900000,	/* static mutex pool */
 	SI_SUB_VM		= 0x1000000,	/* virtual memory system init*/
 	SI_SUB_KMEM		= 0x1800000,	/* kernel memory*/
 	SI_SUB_KVM_RSRC		= 0x1A00000,	/* kvm operational limits*/
 	SI_SUB_WITNESS		= 0x1A80000,	/* witness initialization */
+	SI_SUB_MTX_POOL_DYNAMIC	= 0x1AC0000,	/* dynamic mutex pool */
 	SI_SUB_LOCK		= 0x1B00000,	/* lockmgr locks */
 	SI_SUB_EVENTHANDLER	= 0x1C00000,	/* eventhandler init */
 	SI_SUB_KLD		= 0x2000000,	/* KLD and module setup */
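Because SI_SUB_MTX_POOL_DYNAMIC now precedes SI_SUB_LOCK, a SYSINIT that runs at SI_SUB_LOCK or later can rely on mtxpool_sleep having been created. A small sketch of such a consumer, mirroring the SYSINIT usage in the diff above (the baz names are hypothetical):

static struct mtx *baz_mtx;

static void
baz_init(void *dummy __unused)
{
	/* Runs after mtx_pool_setup_dynamic(), so mtxpool_sleep is ready. */
	baz_mtx = mtx_pool_alloc(mtxpool_sleep);
}
SYSINIT(bazinit, SI_SUB_LOCK, SI_ORDER_FIRST, baz_init, NULL);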
@@ -236,12 +236,30 @@ void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
 #define mtx_unlock(m)		mtx_unlock_flags((m), 0)
 #define mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
 
-struct mtx *mtx_pool_find(void *ptr);
-struct mtx *mtx_pool_alloc(void);
-void mtx_pool_lock(void *ptr);
-void mtx_pool_unlock(void *ptr);
+struct mtx_pool;
 
-extern int mtx_pool_valid;
+struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
+void mtx_pool_destroy(struct mtx_pool **poolp);
+struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
+struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
+struct mtx *mtx_pool_alloc_spin(struct mtx_pool *pool);
+#define mtx_pool_lock(pool, ptr)					\
+	mtx_lock(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_lock_spin(pool, ptr)					\
+	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_unlock(pool, ptr)					\
+	mtx_unlock(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_unlock_spin(pool, ptr)					\
+	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))
+
+/*
+ * mtxpool_lockbuilder is a pool of sleep locks that is not witness
+ * checked and should only be used for building higher level locks.
+ *
+ * mtxpool_sleep is a general purpose pool of sleep mutexes.
+ */
+extern struct mtx_pool *mtxpool_lockbuilder;
+extern struct mtx_pool *mtxpool_sleep;
 
 #ifndef LOCK_DEBUG
 #error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
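A short sketch of the address-keyed convenience macros declared above, which combine the find and lock steps on the general purpose sleep pool; struct bar and its field are hypothetical:

struct bar {
	int	bar_count;
};

static void
bar_bump(struct bar *bp)
{
	/*
	 * mtx_pool_lock()/mtx_pool_unlock() expand to mtx_lock()/mtx_unlock()
	 * on mtx_pool_find(mtxpool_sleep, bp), so both calls must name the
	 * same pool and the same address to hit the same pool mutex.
	 */
	mtx_pool_lock(mtxpool_sleep, bp);
	bp->bar_count++;
	mtx_pool_unlock(mtxpool_sleep, bp);
}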