- Protect the callout wheel with a separate spin mutex, callout_lock.

- Use the mutex in hardclock to ensure no races between it and
  softclock.
- Make softclock INTR_MPSAFE and provide a flag,
  CALLOUT_MPSAFE, which specifies that a callout handler does not
  need Giant.  There is still no way to set this flag when
  registering a callout.
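
For illustration only (not part of this commit), a minimal sketch of the
dispatch policy this introduces; the helper name is hypothetical, but the
lock calls match those in the kern_timeout.c hunks below:

    /*
     * softclock() never holds the callout_lock spin mutex across a
     * handler, and wraps only non-MPSAFE handlers in Giant.
     */
    static void
    callout_dispatch(void (*c_func)(void *), void *c_arg, int c_flags)
    {
            mtx_exit(&callout_lock, MTX_SPIN);
            if (!(c_flags & CALLOUT_MPSAFE))
                    mtx_enter(&Giant, MTX_DEF);
            c_func(c_arg);
            if (!(c_flags & CALLOUT_MPSAFE))
                    mtx_exit(&Giant, MTX_DEF);
            mtx_enter(&callout_lock, MTX_SPIN);
    }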

Reviewed by:	-smp@, jlemon
Jake Burkholder 2000-11-19 06:02:32 +00:00
parent 46ee285632
commit fa2fbc3dac
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=68889
11 changed files with 50 additions and 20 deletions

View File

@@ -387,6 +387,8 @@ cpu_startup(dummy)
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN);
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
if (1)

View File

@@ -409,6 +409,8 @@ cpu_startup(dummy)
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN);
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */

View File

@@ -409,6 +409,8 @@ cpu_startup(dummy)
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN);
#if defined(USERCONFIG)
userconfig();
cninit(); /* the preferred console may have changed */

View File

@@ -319,6 +319,8 @@ cpu_startup(dummy)
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", MTX_SPIN);
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
if (1)

View File

@@ -154,6 +154,7 @@ hardclock(frame)
register struct clockframe *frame;
{
register struct proc *p;
int need_softclock = 0;
p = curproc;
if (p != idleproc) {
@@ -187,16 +188,25 @@ hardclock(frame)
statclock(frame);
tc_windup();
ticks++;
/*
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
mtx_enter(&callout_lock, MTX_SPIN);
ticks++;
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
sched_swi(softclock_ih, SWI_NOSWITCH);
need_softclock = 1;
} else if (softticks + 1 == ticks)
++softticks;
mtx_exit(&callout_lock, MTX_SPIN);
/*
* sched_swi acquires sched_lock, so we don't want to call it with
* callout_lock held; incorrect locking order.
*/
if (need_softclock)
sched_swi(softclock_ih, SWI_NOSWITCH);
}
/*

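The need_softclock dance in the hardclock hunk above is purely about lock
order: sched_swi() acquires sched_lock, which must not be taken while
callout_lock is held.  Condensed, the idiom is to record the decision
under the spin lock and act on it after release:

    int need_softclock = 0;

    mtx_enter(&callout_lock, MTX_SPIN);
    if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL)
            need_softclock = 1;             /* just record the fact... */
    mtx_exit(&callout_lock, MTX_SPIN);
    if (need_softclock)                     /* ...and act on it unlocked */
            sched_swi(softclock_ih, SWI_NOSWITCH);
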
View File

@@ -258,7 +258,7 @@ start_softintr(dummy)
{
net_ih = sinthand_add("net", NULL, swi_net, NULL, SWI_NET, 0);
softclock_ih =
sinthand_add("clock", &clk_ithd, softclock, NULL, SWI_CLOCK, 0);
sinthand_add("clock", &clk_ithd, softclock, NULL, SWI_CLOCK, INTR_MPSAFE);
vm_ih = sinthand_add("vm", NULL, swi_vm, NULL, SWI_VM, 0);
}

View File

@@ -703,6 +703,7 @@ static char *spin_order_list[] = {
#ifdef __i386__
"clk",
#endif
"callout",
/*
* leaf locks
*/
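
Adding "callout" to the witness spin-lock order list above the leaf locks
documents the permitted acquisition order.  A hedged sketch of the check
such a list drives, assuming a NULL-terminated list; the function name is
hypothetical and the real witness code differs:

    /*
     * A lock earlier in spin_order_list may be held while taking a
     * later one, but not the other way around.
     */
    static int
    spin_order_ok(const char *held, const char *wanted)
    {
            int i, h = -1, w = -1;

            for (i = 0; spin_order_list[i] != NULL; i++) {
                    if (strcmp(spin_order_list[i], held) == 0)
                            h = i;
                    if (strcmp(spin_order_list[i], wanted) == 0)
                            w = i;
            }
            return (h != -1 && w != -1 && h < w);
    }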

View File

@@ -56,6 +56,7 @@ struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks; /* Like ticks, but for softclock(). */
struct mtx callout_lock;
static struct callout *nextsoftcheck; /* Next callout to be checked. */
@@ -90,7 +91,7 @@ softclock(void *dummy)
steps = 0;
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
while (softticks != ticks) {
softticks++;
/*
@@ -107,21 +108,23 @@ softclock(void *dummy)
if (steps >= MAX_SOFTCLOCK_STEPS) {
nextsoftcheck = c;
/* Give interrupts a chance. */
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
c = nextsoftcheck;
steps = 0;
}
} else {
void (*c_func)(void *);
void *c_arg;
int c_flags;
nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(bucket, c, c_links.tqe);
c_func = c->c_func;
c_arg = c->c_arg;
c_flags = c->c_flags;
c->c_func = NULL;
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
c->c_flags = CALLOUT_LOCAL_ALLOC;
@@ -131,18 +134,22 @@ softclock(void *dummy)
c->c_flags =
(c->c_flags & ~CALLOUT_PENDING);
}
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
if (!(c_flags & CALLOUT_MPSAFE))
mtx_enter(&Giant, MTX_DEF);
splx(s);
c_func(c_arg);
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
if (!(c_flags & CALLOUT_MPSAFE))
mtx_exit(&Giant, MTX_DEF);
mtx_enter(&callout_lock, MTX_SPIN);
steps = 0;
c = nextsoftcheck;
}
}
}
nextsoftcheck = NULL;
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
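
One subtlety in the softclock() hunk above: c_func, c_arg and c_flags are
snapshotted into locals because once callout_lock is dropped the callout
itself may be freed or reused (CALLOUT_LOCAL_ALLOC entries go back on
callfree), so the handler must be invoked from the copies:

    c_func = c->c_func;
    c_arg = c->c_arg;
    c_flags = c->c_flags;                   /* snapshot before unlocking */
    mtx_exit(&callout_lock, MTX_SPIN);
    c_func(c_arg);                          /* `c' may be gone by now */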
@@ -173,7 +180,7 @@ timeout(ftn, arg, to_ticks)
struct callout_handle handle;
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
/* Fill in the next free callout structure. */
new = SLIST_FIRST(&callfree);
@@ -185,7 +192,7 @@ timeout(ftn, arg, to_ticks)
callout_reset(new, to_ticks, ftn, arg);
handle.callout = new;
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
return (handle);
}
@@ -207,10 +214,10 @@ untimeout(ftn, arg, handle)
return;
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
callout_stop(handle.callout);
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
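
For context, the consumer-facing pattern that timeout() and untimeout()
serve, with hypothetical driver names; absent CALLOUT_MPSAFE (and per the
commit message there is not yet a way to request it here), xx_tick still
runs under Giant:

    static struct callout_handle xx_ch;     /* hypothetical driver state */
    static void xx_tick(void *sc);          /* handler, runs from softclock */

    static void
    xx_start(void *sc)
    {
            xx_ch = timeout(xx_tick, sc, hz);       /* fire in ~1 second */
    }

    static void
    xx_stop(void *sc)
    {
            untimeout(xx_tick, sc, xx_ch);
    }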
@@ -244,7 +251,7 @@ callout_reset(c, to_ticks, ftn, arg)
int s;
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
if (c->c_flags & CALLOUT_PENDING)
callout_stop(c);
@@ -262,7 +269,7 @@ callout_reset(c, to_ticks, ftn, arg)
c->c_time = ticks + to_ticks;
TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
c, c_links.tqe);
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
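
The struct callout interface above is driven in the usual way (names
hypothetical; note that callout_init() at this point takes no MP-safety
argument, consistent with the commit message):

    static struct callout xx_co;            /* hypothetical, e.g. in a softc */

    callout_init(&xx_co);                   /* once, at attach time */
    callout_reset(&xx_co, hz / 10, xx_tick, sc);    /* (re)arm: ~100 ms */
    callout_stop(&xx_co);                   /* cancel if still pending */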
@@ -273,13 +280,13 @@ callout_stop(c)
int s;
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
/*
* Don't attempt to delete a callout that's not on the queue.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
return;
}
@@ -294,7 +301,7 @@ callout_stop(c)
if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
}
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
}
@@ -356,7 +363,7 @@ adjust_timeout_calltodo(time_change)
/* don't collide with softclock() */
s = splhigh();
mtx_enter(&sched_lock, MTX_SPIN);
mtx_enter(&callout_lock, MTX_SPIN);
for (p = calltodo.c_next; p != NULL; p = p->c_next) {
p->c_time -= delta_ticks;
@@ -367,7 +374,7 @@ adjust_timeout_calltodo(time_change)
/* take back the ticks the timer didn't use (p->c_time <= 0) */
delta_ticks = -p->c_time;
}
mtx_exit(&sched_lock, MTX_SPIN);
mtx_exit(&callout_lock, MTX_SPIN);
splx(s);
return;

View File

@@ -703,6 +703,7 @@ static char *spin_order_list[] = {
#ifdef __i386__
"clk",
#endif
"callout",
/*
* leaf locks
*/

View File

@@ -703,6 +703,7 @@ static char *spin_order_list[] = {
#ifdef __i386__
"clk",
#endif
"callout",
/*
* leaf locks
*/

View File

@@ -61,6 +61,7 @@ struct callout {
#define CALLOUT_LOCAL_ALLOC 0x0001 /* was allocated from callfree */
#define CALLOUT_ACTIVE 0x0002 /* callout is currently active */
#define CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */
#define CALLOUT_MPSAFE 0x0008 /* callout handler is mp safe */
struct callout_handle {
struct callout *callout;
@@ -72,6 +73,7 @@ extern struct callout *callout;
extern int ncallout;
extern struct callout_tailq *callwheel;
extern int callwheelsize, callwheelbits, callwheelmask, softticks;
extern struct mtx callout_lock;
#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
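
A hedged example of how a handler might consult the flag macros above;
the softc layout is hypothetical:

    static void
    xx_tick(void *arg)
    {
            struct xx_softc *sc = arg;      /* hypothetical softc */

            if (!callout_active(&sc->sc_co))
                    return;                 /* raced with callout_stop() */
            callout_deactivate(&sc->sc_co);
            /* ...periodic work; callout_reset() to re-arm... */
    }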