 - Convert turnstiles and sleepqueues to use UMA.  This provides a modest
   speedup and will be more useful after each gains a spinlock in the
   impending thread_lock() commit.
 - Move initialization and asserts into init/fini routines.  fini routines
   are only needed in the INVARIANTS case for now.

Submitted by:	Attilio Rao <attilio@FreeBSD.org>
Tested by:	kris, jeff
This commit is contained in:
jeff 2007-05-18 06:32:24 +00:00
parent 9f6d75987a
commit beb495eff1
2 changed files with 106 additions and 38 deletions

View File

@ -70,7 +70,6 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
@ -78,6 +77,8 @@ __FBSDID("$FreeBSD$");
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
@ -142,8 +143,7 @@ SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
0, "maxmimum depth achieved of a single chain");
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
/* Zone backing per-thread sleepqueue allocations (replaces M_SLEEPQUEUE). */
static uma_zone_t sleepq_zone;
/*
* Prototypes for non-exported routines.
@ -151,9 +151,14 @@ static MALLOC_DEFINE(M_SLEEPQUEUE, "sleepqueue", "sleep queues");
static int	sleepq_catch_signals(void *wchan);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
/*
* Early initialization of sleep queues that is called from the sleepinit()
@ -184,23 +189,24 @@ init_sleepqueues(void)
NULL);
#endif
}
sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif
thread0.td_sleepqueue = sleepq_alloc();
}
/*
 * Get a sleep queue for a new thread.  The item comes from sleepq_zone
 * already initialized by sleepq_init(); M_WAITOK means this cannot fail.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}
/*
@ -209,12 +215,8 @@ sleepq_alloc(void)
/*
 * Return a sleep queue to the zone.  In the INVARIANTS case the zone's
 * dtor (sleepq_dtor) asserts that all blocked-thread queues are empty.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	MPASS(sq != NULL);
	uma_zfree(sleepq_zone, sq);
}
/*
@ -666,6 +668,39 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
setrunnable(td);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.  Asserts that no threads are still queued
 * on the sleepqueue being released back to the zone.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq = mem;
	int queue;

	for (queue = 0; queue < NR_SLEEPQS; queue++)
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[queue]));
}
#endif
/*
 * UMA zone item initializer.  Zeroes the item and sets up the empty
 * blocked-thread queues and free list used by the sleepqueue code.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq = mem;
	int queue;

	bzero(mem, size);
	for (queue = 0; queue < NR_SLEEPQS; queue++)
		TAILQ_INIT(&sq->sq_blocked[queue]);
	LIST_INIT(&sq->sq_free);
	return (0);
}
/*
* Find the highest priority thread sleeping on a wait channel and resume it.
*/

View File

@ -46,14 +46,14 @@
* chain. Each chain contains a spin mutex that protects all of the
* turnstiles in the chain.
*
 * Each time a thread is created, a turnstile is allocated from a UMA zone
 * and attached to that thread.  When a thread blocks on a lock, if it is the
 * first thread to block, it lends its turnstile to the lock.  If the lock
 * already has a turnstile, then it gives its turnstile to the lock's
 * turnstile's free list.  When a thread is woken up, it takes a turnstile from
 * the free list if there are any other waiters.  If it is the only thread
 * blocked on the lock, then it reclaims the turnstile associated with the lock
 * and removes it from the hash table.
*/
#include <sys/cdefs.h>
@ -67,7 +67,6 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
@ -75,6 +74,8 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/uma.h>
#ifdef DDB
#include <sys/kdb.h>
#include <ddb/ddb.h>
@ -143,8 +144,7 @@ SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
/* Zone backing per-thread turnstile allocations (replaces M_TURNSTILE). */
static uma_zone_t turnstile_zone;
/*
* Prototypes for non-exported routines.
@ -158,6 +158,10 @@ static int turnstile_adjust_thread(struct turnstile *ts,
struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void turnstile_setowner(struct turnstile *ts, struct thread *owner);
#ifdef INVARIANTS
static void turnstile_dtor(void *mem, int size, void *arg);
#endif
static int turnstile_init(void *mem, int size, int flags);
/*
* Walks the chain of turnstiles and their owners to propagate the priority
@ -379,6 +383,12 @@ static void
/*
 * Create the turnstile zone and give thread0 its turnstile; run as a
 * SYSINIT after the lock subsystem is up.  dtor is only hooked up under
 * INVARIANTS, where it asserts the queues are empty on free.
 */
static void
init_turnstile0(void *dummy)
{

	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
#ifdef INVARIANTS
	    NULL, turnstile_dtor, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, turnstile_init, NULL, UMA_ALIGN_CACHE, 0);
#endif
	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
@ -447,20 +457,47 @@ turnstile_setowner(struct turnstile *ts, struct thread *owner)
LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.  Asserts that no threads remain queued on
 * the turnstile being released back to the zone.
 */
static void
turnstile_dtor(void *mem, int size, void *arg)
{
	struct turnstile *ts;

	ts = mem;
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
#endif
/*
 * UMA zone item initializer.  Zeroes the item and sets up the empty
 * blocked, pending, and free queues.
 */
static int
turnstile_init(void *mem, int size, int flags)
{
	struct turnstile *ts;

	bzero(mem, size);
	ts = mem;
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (0);
}
/*
 * Get a turnstile for a new thread; the zone initializer has already set
 * up the queues, and M_WAITOK means the allocation cannot fail.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = uma_zalloc(turnstile_zone, M_WAITOK);
	return (ts);
}
/*
@ -470,11 +507,7 @@ void
/*
 * Return a turnstile to the zone.  In the INVARIANTS case the zone's
 * dtor (turnstile_dtor) asserts that all waiter queues are empty.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	uma_zfree(turnstile_zone, ts);
}
/*