softclock_call_cc() executes with the callout already removed from the
callwheel.  Calculate cc->cc_next before removing the callout, otherwise
the code follows invalid tailq links.  After this, make
softclock_call_cc() return void, since it always returned cc->cc_next,
which is immediately available to softclock() anyway.  This also allows
a label under #ifdef SMP to be eliminated.
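
For illustration, a minimal userland sketch of the iteration pattern
being fixed (hypothetical names; only the TAILQ usage and the shared
"next" pointer mirror cc->cc_next in kern_timeout.c):

	#include <stdlib.h>
	#include <sys/queue.h>

	struct elem {
		TAILQ_ENTRY(elem) links;
	};
	TAILQ_HEAD(elemhead, elem);

	/*
	 * Shared successor pointer, analogous to cc->cc_next: code
	 * that unlinks an element elsewhere can advance it past the
	 * victim.
	 */
	static struct elem *next_elem;

	/* Stand-in for running one callout; may free 'e'. */
	static void
	process(struct elem *e)
	{
		free(e);
	}

	static void
	drain(struct elemhead *head)
	{
		struct elem *e;

		e = TAILQ_FIRST(head);
		while (e != NULL) {
			/* Cache the successor while 'e' is linked. */
			next_elem = TAILQ_NEXT(e, links);
			TAILQ_REMOVE(head, e, links);
			/* 'e' may be freed; its links are now stale. */
			process(e);
			/* Re-read: a concurrent cancel may move it. */
			e = next_elem;
		}
	}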

Remove the assignment of cc->cc_next from callout_cc_del(), since the
function is called with the callout already removed from the callwheel.

If cancelling the migration, also clear the CALLOUT_DFRMIGRATION flag.

Postpone freeing the timeout(9)-allocated callouts until after the
migration checks are done.
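
In miniature, the ordering this enforces (hypothetical names; a sketch
only, not the kernel code): snapshot the flags while the object is
certainly valid, run the handler, perform all remaining checks against
the snapshot (for the kernel code, the migration checks), and free last
of all:

	#include <stdlib.h>

	#define	F_LOCAL	0x0001	/* object owned by this subsystem */

	struct job {
		int	flags;
		void	(*func)(void *);
		void	*arg;
	};

	static void
	run_job(struct job *j)
	{
		/* Snapshot while 'j' is certainly still valid. */
		int flags = j->flags;
		void (*func)(void *) = j->func;
		void *arg = j->arg;

		func(arg);	/* owner of a non-local job may free it */

		/*
		 * Trust only the snapshot here; 'j' itself may be
		 * dereferenced again only if the snapshot proves the
		 * object is still ours (the F_LOCAL case).
		 */
		if (flags & F_LOCAL)
			free(j);	/* the free happens last */
	}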

Add some stricter asserts about the state of the callout in
softclock_call_cc().

Reviewed by:	attilio
Reported and tested by:	pho (previous version)
MFC after:	2 weeks
Author:	Konstantin Belousov
Date:	2012-12-05 19:02:22 +00:00
Commit:	eb8a718686
Parent:	1c7d98d0df
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=243901

sys/kern/kern_timeout.c

@@ -439,15 +439,13 @@ static void
 callout_cc_del(struct callout *c, struct callout_cpu *cc)
 {
 
-	if (cc->cc_next == c)
-		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
-	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
-		c->c_func = NULL;
-		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
-	}
+	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
+		return;
+	c->c_func = NULL;
+	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
 }
 
-static struct callout *
+static void
 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
     int *lockcalls, int *gcalls)
 {
@@ -469,7 +467,9 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 	static timeout_t *lastfunc;
 #endif
 
-	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
+	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
+	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
 	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
 	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
 	c_lock = c->c_lock;
@@ -537,20 +537,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 			class->lc_unlock(c_lock);
 skip:
 	CC_LOCK(cc);
-	/*
-	 * If the current callout is locally allocated (from
-	 * timeout(9)) then put it on the freelist.
-	 *
-	 * Note: we need to check the cached copy of c_flags because
-	 * if it was not local, then it's not safe to deref the
-	 * callout pointer.
-	 */
-	if (c_flags & CALLOUT_LOCAL_ALLOC) {
-		KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
-		    ("corrupted callout"));
-		c->c_func = NULL;
-		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
-	}
+	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
 	cc->cc_curr = NULL;
 	if (cc->cc_waiting) {
 		/*
@@ -559,13 +546,17 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 		 * If the callout was scheduled for
 		 * migration just cancel it.
 		 */
-		if (cc_cme_migrating(cc))
+		if (cc_cme_migrating(cc)) {
 			cc_cme_cleanup(cc);
+			c->c_flags &= ~CALLOUT_DFRMIGRATION;
+		}
 		cc->cc_waiting = 0;
 		CC_UNLOCK(cc);
 		wakeup(&cc->cc_waiting);
 		CC_LOCK(cc);
 	} else if (cc_cme_migrating(cc)) {
+		KASSERT((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0,
+		    ("Migrating legacy callout %p", c));
 #ifdef SMP
 		/*
 		 * If the callout was scheduled for
@@ -585,7 +576,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 			    "deferred cancelled %p func %p arg %p",
 			    c, new_func, new_arg);
 			callout_cc_del(c, cc);
-			goto nextc;
+			return;
 		}
 
 		c->c_flags &= ~CALLOUT_DFRMIGRATION;
@@ -604,10 +595,18 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
 		panic("migration should not happen");
 #endif
 	}
-#ifdef SMP
-nextc:
-#endif
-	return (cc->cc_next);
+	/*
+	 * If the current callout is locally allocated (from
+	 * timeout(9)) then put it on the freelist.
+	 *
+	 * Note: we need to check the cached copy of c_flags because
+	 * if it was not local, then it's not safe to deref the
+	 * callout pointer.
+	 */
+	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
+	    c->c_flags == CALLOUT_LOCAL_ALLOC,
+	    ("corrupted callout"));
+	callout_cc_del(c, cc);
 }
 
 /*
@@ -674,10 +673,12 @@ softclock(void *arg)
 					steps = 0;
 				}
 			} else {
+				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
 				TAILQ_REMOVE(bucket, c, c_links.tqe);
-				c = softclock_call_cc(c, cc, &mpcalls,
+				softclock_call_cc(c, cc, &mpcalls,
 				    &lockcalls, &gcalls);
 				steps = 0;
+				c = cc->cc_next;
 			}
 		}
 	}
@@ -1022,6 +1023,8 @@ _callout_stop_safe(c, safe)
 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 	    c, c->c_func, c->c_arg);
+	if (cc->cc_next == c)
+		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
 	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
 	    c_links.tqe);
 	callout_cc_del(c, cc);