Use the recently added msleep_spin() function to simplify the
callout_drain() logic.  We no longer need a separate non-spin mutex
for sleep/wakeup; instead we can now just use the one spin mutex to
manage all of the callout functionality.
John Baldwin 2006-02-23 19:13:12 +00:00
parent 9fc9d84b9b
commit b36f458861
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=155957

sys/kern/kern_timeout.c

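For context, the sketch below (hypothetical names, not the committed code) shows the sleep/wakeup pattern this change adopts: msleep_spin() can sleep while holding a spin mutex, so the one spin mutex that protects the callout state can also serialize the drain handshake, replacing the separate default mutex and condition variable.  The actual change, shown in the diff below, applies this pattern to softclock() and callout_drain().

/*
 * Minimal sketch of the msleep_spin() drain pattern, assuming a single
 * spin mutex guards both the "currently running" pointer and the wait
 * flag.  All identifiers here are illustrative only.
 * Initialized elsewhere: mtx_init(&example_lock, "example", NULL, MTX_SPIN);
 */
#include <sys/param.h>
#include <sys/systm.h>		/* msleep_spin(), wakeup() */
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_lock;		/* hypothetical MTX_SPIN mutex */
static void *example_running;		/* item currently executing, if any */
static int example_wait;		/* nonzero while a drainer sleeps */

/* Drain side: block until the given item is no longer running. */
static void
example_drain(void *item)
{

	mtx_lock_spin(&example_lock);
	while (item == example_running) {
		example_wait = 1;
		/* Sleeps on &example_wait; drops and reacquires the spin mutex. */
		msleep_spin(&example_wait, &example_lock, "exdrain", 0);
	}
	mtx_unlock_spin(&example_lock);
}

/* Completion side: clear the running item and wake any drainer. */
static void
example_finish(void)
{

	mtx_lock_spin(&example_lock);
	example_running = NULL;
	if (example_wait) {
		example_wait = 0;
		wakeup(&example_wait);
	}
	mtx_unlock_spin(&example_lock);
}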
@@ -78,37 +78,22 @@ static struct callout *nextsoftcheck; /* Next callout to be checked. */
/**
* Locked by callout_lock:
* curr_callout - If a callout is in progress, it is curr_callout.
* If curr_callout is non-NULL, threads waiting on
* callout_wait will be woken up as soon as the
* If curr_callout is non-NULL, threads waiting in
* callout_drain() will be woken up as soon as the
* relevant callout completes.
* curr_cancelled - Changing to 1 with both callout_lock and c_mtx held
* guarantees that the current callout will not run.
* The softclock() function sets this to 0 before it
* drops callout_lock to acquire c_mtx, and it calls
* the handler only if curr_cancelled still 0 when
* the handler only if curr_cancelled is still 0 after
* c_mtx is successfully acquired.
* wakeup_ctr - Incremented every time a thread wants to wait
* for a callout to complete. Modified only when
* callout_wait - If a thread is waiting in callout_drain(), then
* callout_wait is nonzero. Set only when
* curr_callout is non-NULL.
* wakeup_needed - If a thread is waiting on callout_wait, then
* wakeup_needed is nonzero. Increased only when
curr_callout is non-NULL.
*/
static struct callout *curr_callout;
static int curr_cancelled;
static int wakeup_ctr;
static int wakeup_needed;
/**
* Locked by callout_wait_lock:
* callout_wait - If wakeup_needed is set, callout_wait will be
* triggered after the current callout finishes.
* wakeup_done_ctr - Set to the current value of wakeup_ctr after
* callout_wait is triggered.
*/
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;
static int callout_wait;
/*
* kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
@@ -157,8 +142,6 @@ kern_timeout_callwheel_init(void)
TAILQ_INIT(&callwheel[i]);
}
mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
cv_init(&callout_wait, "callout_wait");
}
/*
@@ -188,7 +171,6 @@ softclock(void *dummy)
int mpcalls;
int mtxcalls;
int gcalls;
int wakeup_cookie;
#ifdef DIAGNOSTIC
struct bintime bt1, bt2;
struct timespec ts2;
@@ -262,8 +244,7 @@ softclock(void *dummy)
*/
if (curr_cancelled) {
mtx_unlock(c_mtx);
mtx_lock_spin(&callout_lock);
goto done_locked;
goto skip;
}
/* The callout cannot be stopped now. */
curr_cancelled = 1;
@@ -308,22 +289,16 @@ softclock(void *dummy)
#endif
if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
mtx_unlock(c_mtx);
skip:
mtx_lock_spin(&callout_lock);
done_locked:
curr_callout = NULL;
if (wakeup_needed) {
if (callout_wait) {
/*
* There might be someone waiting
* There is someone waiting
* for the callout to complete.
*/
wakeup_cookie = wakeup_ctr;
mtx_unlock_spin(&callout_lock);
mtx_lock(&callout_wait_lock);
cv_broadcast(&callout_wait);
wakeup_done_ctr = wakeup_cookie;
mtx_unlock(&callout_wait_lock);
mtx_lock_spin(&callout_lock);
wakeup_needed = 0;
wakeup(&callout_wait);
callout_wait = 0;
}
steps = 0;
c = nextsoftcheck;
@@ -445,7 +420,7 @@ callout_reset(c, to_ticks, ftn, arg)
*/
if (c->c_mtx != NULL && !curr_cancelled)
cancelled = curr_cancelled = 1;
if (wakeup_needed) {
if (callout_wait) {
/*
* Someone has called callout_drain to kill this
* callout. Don't reschedule.
@@ -497,7 +472,7 @@ _callout_stop_safe(c, safe)
struct callout *c;
int safe;
{
int use_mtx, wakeup_cookie;
int use_mtx;
if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
@@ -512,37 +487,47 @@ _callout_stop_safe(c, safe)
mtx_lock_spin(&callout_lock);
/*
* Don't attempt to delete a callout that's not on the queue.
* If the callout isn't pending, it's not on the queue, so
* don't attempt to remove it from the queue. We can try to
* stop it by other means however.
*/
if (!(c->c_flags & CALLOUT_PENDING)) {
c->c_flags &= ~CALLOUT_ACTIVE;
/*
* If it wasn't on the queue and it isn't the current
* callout, then we can't stop it, so just bail.
*/
if (c != curr_callout) {
mtx_unlock_spin(&callout_lock);
return (0);
}
if (safe) {
/* We need to wait until the callout is finished. */
wakeup_needed = 1;
wakeup_cookie = wakeup_ctr++;
mtx_unlock_spin(&callout_lock);
mtx_lock(&callout_wait_lock);
/*
* Check to make sure that softclock() didn't
* do the wakeup in between our dropping
* callout_lock and picking up callout_wait_lock
* The current callout is running (or just
* about to run) and blocking is allowed, so
* just wait for the current invocation to
* finish.
*/
if (wakeup_cookie - wakeup_done_ctr > 0)
cv_wait(&callout_wait, &callout_wait_lock);
mtx_unlock(&callout_wait_lock);
while (c == curr_callout) {
callout_wait = 1;
msleep_spin(&callout_wait, &callout_lock,
"codrain", 0);
}
} else if (use_mtx && !curr_cancelled) {
/* We can stop the callout before it runs. */
/*
* The current callout is waiting for its
* mutex which we hold. Cancel the callout
* and return. After our caller drops the
* mutex, the callout will be skipped in
* softclock().
*/
curr_cancelled = 1;
mtx_unlock_spin(&callout_lock);
return (1);
} else
mtx_unlock_spin(&callout_lock);
}
mtx_unlock_spin(&callout_lock);
return (0);
}
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);