Garbage collect mtxpool_lockbuilder, the mutex pool historically used
for lockmgr and sx interlocks, but unused since optimised versions of those sleep locks were introduced. This will save a (quite) small amount of memory in all kernel configurations. The sleep mutex pool is retained as it is used for 'struct bio' and several other consumers.

Discussed with:	jhb
MFC after:	3 days
This commit is contained in:
parent
382c3dae47
commit
7ca3cc4389
@@ -59,9 +59,6 @@ __FBSDID("$FreeBSD$");
|
||||
static MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");
|
||||
|
||||
/* Pool sizes must be a power of two */
|
||||
#ifndef MTX_POOL_LOCKBUILDER_SIZE
|
||||
#define MTX_POOL_LOCKBUILDER_SIZE 128
|
||||
#endif
|
||||
#ifndef MTX_POOL_SLEEP_SIZE
|
||||
#define MTX_POOL_SLEEP_SIZE 128
|
||||
#endif
|
||||
@@ -78,18 +75,12 @@ struct mtx_pool {
|
||||
struct mtx mtx_pool_ary[1];
|
||||
};
|
||||
|
||||
/*
 * Statically-allocated mutex pool used to build higher-level sleep
 * locks (historically the lockmgr and sx interlocks) before malloc()
 * is available.  The layout deliberately mirrors struct mtx_pool —
 * a mtxpool_header followed by the mutex array — so the whole object
 * can be cast to (struct mtx_pool *) and passed to the common pool
 * routines (see mtx_pool_setup_static()).
 */
static struct mtx_pool_lockbuilder {
	struct mtxpool_header mtx_pool_header;
	struct mtx mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
} lockbuilder_pool;
|
||||
|
||||
#define mtx_pool_size mtx_pool_header.mtxpool_size
|
||||
#define mtx_pool_mask mtx_pool_header.mtxpool_mask
|
||||
#define mtx_pool_shift mtx_pool_header.mtxpool_shift
|
||||
#define mtx_pool_next mtx_pool_header.mtxpool_next
|
||||
|
||||
struct mtx_pool *mtxpool_sleep;
|
||||
struct mtx_pool *mtxpool_lockbuilder;
|
||||
|
||||
#if UINTPTR_MAX == UINT64_MAX /* 64 bits */
|
||||
# define POINTER_BITS 64
|
||||
@@ -165,15 +156,6 @@ mtx_pool_destroy(struct mtx_pool **poolp)
|
||||
*poolp = NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
mtx_pool_setup_static(void *dummy __unused)
|
||||
{
|
||||
mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
|
||||
"lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
|
||||
MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
|
||||
mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
|
||||
}
|
||||
|
||||
static void
|
||||
mtx_pool_setup_dynamic(void *dummy __unused)
|
||||
{
|
||||
@@ -202,17 +184,5 @@ mtx_pool_alloc(struct mtx_pool *pool)
|
||||
return (&pool->mtx_pool_ary[i]);
|
||||
}
|
||||
|
||||
/*
 * The lockbuilder pool must be initialized early because the lockmgr
 * and sx locks depend on it.  The sx locks are used in the kernel
 * memory allocator.  The lockmgr subsystem is initialized by
 * SYSINIT(..., SI_SUB_LOCKMGR, ...).
 *
 * We can't call malloc() to dynamically allocate the sleep pool
 * until after kmeminit() has been called, which is done by
 * SYSINIT(..., SI_SUB_KMEM, ...).
 */
/* Static (lockbuilder) pool: ready before the lockmgr/sx consumers. */
SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
    mtx_pool_setup_static, NULL);
/* Dynamic (sleep) pool: allocated once kernel malloc() works. */
SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
    mtx_pool_setup_dynamic, NULL);
|
||||
|
@@ -92,7 +92,6 @@ enum sysinit_sub_id {
|
||||
SI_SUB_COPYRIGHT = 0x0800001, /* first use of console*/
|
||||
SI_SUB_SETTINGS = 0x0880000, /* check and recheck settings */
|
||||
SI_SUB_MTX_POOL_STATIC = 0x0900000, /* static mutex pool */
|
||||
SI_SUB_LOCKMGR = 0x0980000, /* lockmgr locks */
|
||||
SI_SUB_VM = 0x1000000, /* virtual memory system init*/
|
||||
SI_SUB_KMEM = 0x1800000, /* kernel memory*/
|
||||
SI_SUB_KVM_RSRC = 0x1A00000, /* kvm operational limits*/
|
||||
|
@@ -323,12 +323,8 @@ struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
|
||||
mtx_unlock_spin(mtx_pool_find((pool), (ptr)))
|
||||
|
||||
/*
|
||||
* mtxpool_lockbuilder is a pool of sleep locks that is not witness
|
||||
* checked and should only be used for building higher level locks.
|
||||
*
|
||||
* mtxpool_sleep is a general purpose pool of sleep mutexes.
|
||||
*/
|
||||
extern struct mtx_pool *mtxpool_lockbuilder;
|
||||
extern struct mtx_pool *mtxpool_sleep;
|
||||
|
||||
#ifndef LOCK_DEBUG
|
||||
|
Loading…
Reference in New Issue
Block a user