Rearrange the SYSINIT order to call lockmgr_init() earlier so that
the runtime lockmgr initialization code in lockinit() can be eliminated.
Reviewed by:	jhb
truckman 2003-07-16 01:00:39 +00:00
parent e611c41699
commit 68ed1d12ac
3 changed files with 6 additions and 29 deletions
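
For context: a SYSINIT() hook runs once at boot, and hooks fire in ascending
order of their subsystem constant. A minimal sketch of the registration
pattern this commit rearranges (example_init and exampleinit are hypothetical
names; the macro and the SI_SUB_LOCKMGR constant are the ones touched below):

#include <sys/param.h>
#include <sys/kernel.h>

static void
example_init(void *dummy __unused)
{
	/* Runs once at boot, after the static mutex pool is set up. */
}
SYSINIT(exampleinit, SI_SUB_LOCKMGR, SI_ORDER_FIRST, example_init, NULL)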


@@ -74,7 +74,6 @@ __FBSDID("$FreeBSD$");
  * share a fixed (at boot time) number of mutexes across all lockmgr locks in
  * order to keep sizeof(struct lock) down.
  */
-int lock_mtx_valid;
 static struct mtx lock_mtx;
 
 static int acquire(struct lock **lkpp, int extflags, int wanted);
@@ -84,18 +83,9 @@ static int acquiredrain(struct lock *lkp, int extflags);
 static void
 lockmgr_init(void *dummy __unused)
 {
-	/*
-	 * Initialize the lockmgr protection mutex if it hasn't already been
-	 * done. Unless something changes about kernel startup order, VM
-	 * initialization will always cause this mutex to already be
-	 * initialized in a call to lockinit().
-	 */
-	if (lock_mtx_valid == 0) {
-		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
-		lock_mtx_valid = 1;
-	}
+	mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
 }
-SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)
+SYSINIT(lmgrinit, SI_SUB_LOCKMGR, SI_ORDER_FIRST, lockmgr_init, NULL)
 
 static LOCK_INLINE void
 sharelock(struct lock *lkp, int incr) {
@@ -539,21 +529,7 @@ lockinit(lkp, prio, wmesg, timo, flags)
 	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
 	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
 
-	if (lock_mtx_valid == 0) {
-		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
-		lock_mtx_valid = 1;
-	}
-	/*
-	 * XXX cleanup - make sure mtxpool is always initialized before
-	 * this is ever called.
-	 */
-	if (mtxpool_lockbuilder != NULL) {
-		mtx_lock(&lock_mtx);
-		lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
-		mtx_unlock(&lock_mtx);
-	} else {
-		lkp->lk_interlock = &lock_mtx;
-	}
+	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
 	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
 	lkp->lk_sharecount = 0;
 	lkp->lk_waitcount = 0;

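With the fallback paths gone, every lockmgr lock's interlock now comes from
the lockbuilder pool unconditionally. A hedged usage sketch under this era's
signatures (examplelk, example_use, and the PVFS priority choice are
hypothetical; lockinit() and lockmgr() are the real routines):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/proc.h>

static struct lock examplelk;	/* hypothetical lock */

static void
example_use(void)
{
	/* Safe any time after SI_SUB_LOCKMGR; no runtime init checks left. */
	lockinit(&examplelk, PVFS, "example", 0, 0);

	lockmgr(&examplelk, LK_EXCLUSIVE, NULL, curthread);
	/* ... critical section ... */
	lockmgr(&examplelk, LK_RELEASE, NULL, curthread);
}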

@@ -186,7 +186,7 @@ mtx_pool_alloc(struct mtx_pool *pool)
  * The lockbuilder pool must be initialized early because the lockmgr
  * and sx locks depend on it. The sx locks are used in the kernel
  * memory allocator. The lockmgr subsystem is initialized by
- * SYSINIT(..., SI_SUB_LOCK, ...).
+ * SYSINIT(..., SI_SUB_LOCKMGR, ...).
  *
  * We can't call MALLOC() to dynamically allocate the sleep pool
  * until after kmeminit() has been called, which is done by

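A sketch of what the lockbuilder pool provides to consumers like lockinit()
(example_pool_use is a hypothetical caller; mtx_pool_alloc() and
mtxpool_lockbuilder are the real symbols from this file):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static void
example_pool_use(void)
{
	struct mtx *m;

	/*
	 * The pool hands out one of a fixed set of pre-initialized
	 * mutexes, so allocation cannot sleep or fail; the mutexes
	 * are shared across locks, so hold them only briefly.
	 */
	m = mtx_pool_alloc(mtxpool_lockbuilder);
	mtx_lock(m);
	/* ... short critical section ... */
	mtx_unlock(m);
}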

@@ -113,12 +113,13 @@ enum sysinit_sub_id {
 	SI_SUB_CONSOLE		= 0x0800000,	/* console*/
 	SI_SUB_COPYRIGHT	= 0x0800001,	/* first use of console*/
 	SI_SUB_MTX_POOL_STATIC	= 0x0900000,	/* static mutex pool */
+	SI_SUB_LOCKMGR		= 0x0980000,	/* lockmgr locks */
 	SI_SUB_VM		= 0x1000000,	/* virtual memory system init*/
 	SI_SUB_KMEM		= 0x1800000,	/* kernel memory*/
 	SI_SUB_KVM_RSRC		= 0x1A00000,	/* kvm operational limits*/
 	SI_SUB_WITNESS		= 0x1A80000,	/* witness initialization */
 	SI_SUB_MTX_POOL_DYNAMIC	= 0x1AC0000,	/* dynamic mutex pool */
-	SI_SUB_LOCK		= 0x1B00000,	/* lockmgr locks */
+	SI_SUB_LOCK		= 0x1B00000,	/* various locks */
 	SI_SUB_EVENTHANDLER	= 0x1C00000,	/* eventhandler init */
 	SI_SUB_KLD		= 0x2000000,	/* KLD and module setup */
 	SI_SUB_CPU		= 0x2100000,	/* CPU resource(s)*/
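
Since SYSINITs execute in ascending numeric order, the new constant doubles
as a statement of the boot-time dependency chain; a comment-style sketch of
the ordering this commit relies on:

/*
 * 0x0900000  SI_SUB_MTX_POOL_STATIC  lockbuilder pool becomes usable
 * 0x0980000  SI_SUB_LOCKMGR          lockmgr_init() initializes lock_mtx
 * 0x1000000  SI_SUB_VM               first lockinit() callers arrive
 *
 * so by the time VM calls lockinit(), the pool it draws from already
 * exists, and the old lazy-initialization fallback is dead code.
 */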