freebsd-dev/sys/kern/kern_mtxpool.c
Brian Feldman 0e0af8ecda Rename SI_SUB_MUTEX to SI_SUB_MTX_POOL to make the name at all accurate.
While doing this, move it earlier in the sysinit boot process so that the
VM system can use it.

After that, the system is now able to use sx locks instead of lockmgr
locks in the VM system.  To accomplish this, some of the more
questionable uses of the locks (such as testing whether they are
owned or not, as well as allowing shared+exclusive recursion) are
removed, and simpler logic throughout is used so locks should also be
easier to understand.

This has been tested on my laptop for months, and has not shown any
problems on SMP systems, either, so appears quite safe.  One more
user of lockmgr down, many more to go :)
2002-03-13 23:48:08 +00:00


/*-
 * Copyright (c) 2001 Matthew Dillon.  All Rights Reserved.  Copyright
 * terms are as specified in the COPYRIGHT file at the base of the source
 * tree.
 *
 * Mutex pool routines.  These routines are designed to be used as short
 * term leaf mutexes (e.g. the last mutex you might acquire other than
 * calling msleep()).  They operate using a shared pool.  A mutex is chosen
 * from the pool based on the supplied pointer (which may or may not be
 * valid).
 *
 * Advantages:
 *    - no structural overhead.  Mutexes can be associated with structures
 *      without adding bloat to the structures.
 *    - mutexes can be obtained for invalid pointers, useful when using
 *      mutexes to interlock destructor operations (a sketch appears near
 *      the end of this file).
 *    - no initialization/destructor overhead.
 *    - can be used with msleep().
 *
 * Disadvantages:
 *    - should generally only be used as leaf mutexes.
 *    - pool/pool dependency ordering cannot be depended on.
 *    - possible L1 cache mastership contention between CPUs.
 *
 * $FreeBSD$
 */
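
/*
 * Illustrative usage sketch ("foo" and its "busy" field are hypothetical
 * caller-owned names).  The common pattern is the combined find/lock
 * helpers:
 *
 *    mtx_pool_lock(foo);
 *    ... short critical section touching *foo ...
 *    mtx_pool_unlock(foo);
 *
 * When the mutex must be passed to msleep(), look it up explicitly:
 *
 *    struct mtx *mtxp = mtx_pool_find(foo);
 *    mtx_lock(mtxp);
 *    while (foo->busy)
 *        msleep(foo, mtxp, 0, "foowt", 0);
 *    mtx_unlock(mtxp);
 */
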
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#ifndef MTX_POOL_SIZE
#define MTX_POOL_SIZE   128
#endif
#define MTX_POOL_MASK   (MTX_POOL_SIZE - 1)

static struct mtx mtx_pool_ary[MTX_POOL_SIZE];

int mtx_pool_valid = 0;

/*
 * Inline version of mtx_pool_find(), used to streamline our main API
 * function calls.
 */
static __inline
struct mtx *
_mtx_pool_find(void *ptr)
{
        int p;

        p = (int)(uintptr_t)ptr;
        return(&mtx_pool_ary[(p ^ (p >> 6)) & MTX_POOL_MASK]);
}
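
/*
 * The (p >> 6) term in the hash above presumably folds higher-order
 * pointer bits into the index so that pointers at cache-line- or
 * page-aligned addresses do not all collapse onto the same few pool
 * mutexes; the choice of shift is otherwise undocumented here.
 */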

static void
mtx_pool_setup(void *dummy __unused)
{
        int i;

        for (i = 0; i < MTX_POOL_SIZE; ++i)
                mtx_init(&mtx_pool_ary[i], "pool mutex",
                    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
        mtx_pool_valid = 1;
}

/*
 * Obtain a (shared) mutex from the pool.  The returned mutex is a leaf
 * level mutex, meaning that if you obtain it you cannot obtain any other
 * mutexes until you release it.  You can legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_alloc(void)
{
        static int si;

        return(&mtx_pool_ary[si++ & MTX_POOL_MASK]);
}

/*
 * Return the (shared) pool mutex associated with the specified address.
 * The returned mutex is a leaf level mutex, meaning that if you obtain it
 * you cannot obtain any other mutexes until you release it.  You can
 * legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_find(void *ptr)
{
        return(_mtx_pool_find(ptr));
}

/*
 * Combined find/lock operation.  Lock the pool mutex associated with
 * the specified address.
 */
void
mtx_pool_lock(void *ptr)
{
        mtx_lock(_mtx_pool_find(ptr));
}

/*
 * Combined find/unlock operation.  Unlock the pool mutex associated with
 * the specified address.
 */
void
mtx_pool_unlock(void *ptr)
{
        mtx_unlock(_mtx_pool_find(ptr));
}
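
/*
 * Destructor-interlock sketch ("obj" and "refs" are hypothetical; this
 * illustrates the "invalid pointers" advantage noted at the top of the
 * file).  Because the pool mutex is selected purely from the pointer
 * value and is never embedded in the object, it remains valid even as
 * the object is torn down:
 *
 *    int last;
 *
 *    mtx_pool_lock(obj);
 *    last = (--obj->refs == 0);
 *    mtx_pool_unlock(obj);
 *    if (last)
 *        free(obj, M_TEMP);
 */
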
SYSINIT(mtxpooli, SI_SUB_MTX_POOL, SI_ORDER_FIRST, mtx_pool_setup, NULL)