When processing a timeout() callout and returning it to the free list, set `curr_callout' to NULL. This ensures that we won't attempt to cancel the current callout if the original callout structure gets recycled while we wait to acquire Giant.

This is reported to fix an intermittent syscons problem that was introduced by revision 1.96.
parent f7167d3e91
commit 57c037be1c
@@ -251,11 +251,12 @@ softclock(void *dummy)
 				c->c_flags = CALLOUT_LOCAL_ALLOC;
 				SLIST_INSERT_HEAD(&callfree, c,
 				    c_links.sle);
+				curr_callout = NULL;
 			} else {
 				c->c_flags =
 				    (c->c_flags & ~CALLOUT_PENDING);
+				curr_callout = c;
 			}
-			curr_callout = c;
 			curr_cancelled = 0;
 			mtx_unlock_spin(&callout_lock);
 			if (c_mtx != NULL) {
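Below is a minimal sketch of the race this diff closes, written as a pared-down user-space model rather than the kernel code. The names curr_callout, curr_cancelled, and CALLOUT_LOCAL_ALLOC come from the diff above, but softclock_prepare() and callout_stop_model() are hypothetical stand-ins for the relevant fragments of softclock() and callout_stop(), and the locking, the callfree list, and Giant are all elided.

/*
 * Minimal user-space model of the race described above; NOT the kernel code.
 * curr_callout, curr_cancelled and CALLOUT_LOCAL_ALLOC follow the diff;
 * softclock_prepare() and callout_stop_model() are invented simplifications.
 */
#include <stdio.h>

struct callout {
	int	c_flags;
};
#define	CALLOUT_LOCAL_ALLOC	0x0001	/* timeout()-style, returned to callfree */

static struct callout *curr_callout;	/* callout softclock() is about to run */
static int curr_cancelled;		/* set by a racing cancel request */

/*
 * What softclock() does just before dropping callout_lock and (for a
 * non-MPSAFE handler) waiting to acquire Giant.  A timeout()-style callout
 * is already back on the free list at this point, so it must not remain
 * visible as the "currently running" callout.
 */
static void
softclock_prepare(struct callout *c)
{
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		curr_callout = NULL;	/* structure may be recycled any moment */
	else
		curr_callout = c;
	curr_cancelled = 0;
}

/*
 * The check a canceller makes for a callout that is no longer pending:
 * if it appears to be executing right now, ask softclock() to skip it.
 */
static int
callout_stop_model(struct callout *c)
{
	if (curr_callout == c) {
		curr_cancelled = 1;
		return (1);
	}
	return (0);
}

int
main(void)
{
	struct callout co = { CALLOUT_LOCAL_ALLOC };

	softclock_prepare(&co);
	/*
	 * While softclock() waits for Giant, timeout() could hand the recycled
	 * structure to an unrelated consumer, who then cancels it.
	 */
	callout_stop_model(&co);
	printf("curr_cancelled = %d (0: original handler still runs)\n",
	    curr_cancelled);
	return (0);
}

With the old unconditional `curr_callout = c', the cancel in this model would match the recycled structure, set curr_cancelled, and the handler softclock() was about to run would be skipped; that kind of silently dropped callout is consistent with the intermittent syscons problem the commit message reports.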