Make sure callouts are not processed one tick late.
The problem was introduced in SVN 180608 / rev 1.114 and affects all users of callout_reset() (including select, usleep, setitimer).

A better fix probably involves replicating 'ticks' in the struct callout_cpu; this commit is just a temporary thing so that we can MFC it after a suitable test time and RE approval.

MFC after:	3 days
parent fa2eebfce6
commit 446e861708
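A minimal sketch, separate from the patch itself, of the one-tick-late behaviour the message describes: with the old strict comparison the bucket belonging to the current value of 'ticks' is not scanned until the next hardclock, while the inclusive comparison drains it in the same tick. The function and variable names below are illustrative only, not kernel code.

/*
 * Illustrative only: count how many callwheel buckets a catch-up loop scans
 * when the soft tick counter has just reached the hardclock counter.
 */
#include <stdio.h>

static int
buckets_scanned(int softticks, int ticks, int inclusive)
{
	int n = 0;

	/* Mirror the kernel's subtract-and-compare style for the tick test. */
	while (inclusive ? (softticks - ticks) <= 0 : (softticks - ticks) < 0) {
		n++;		/* one callwheel bucket scanned */
		softticks++;
	}
	return (n);
}

int
main(void)
{
	/*
	 * With softticks == ticks the old test scans nothing, so the bucket
	 * for the current tick waits for the next hardclock; the new test
	 * scans it immediately.
	 */
	printf("old (<):  %d bucket(s)\n", buckets_scanned(100, 100, 0));
	printf("new (<=): %d bucket(s)\n", buckets_scanned(100, 100, 1));
	return (0);
}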
@@ -244,7 +244,7 @@ callout_tick(void)
 	need_softclock = 0;
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	for (; (cc->cc_softticks - ticks) < 0; cc->cc_softticks++) {
+	for (; (cc->cc_softticks - ticks) <= 0; cc->cc_softticks++) {
 		bucket = cc->cc_softticks & callwheelmask;
 		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
 			need_softclock = 1;
@@ -323,7 +323,7 @@ softclock(void *arg)
 	steps = 0;
 	cc = (struct callout_cpu *)arg;
 	CC_LOCK(cc);
-	while (cc->cc_softticks != ticks) {
+	while (cc->cc_softticks - 1 != ticks) {
 		/*
 		 * cc_softticks may be modified by hard clock, so cache
 		 * it while we work on a given bucket.
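Read together, the two hunks keep the counters consistent: once the wheel has caught up, cc_softticks sits one past 'ticks', so softclock()'s termination test moves from 'cc_softticks != ticks' to 'cc_softticks - 1 != ticks'. A compilable sketch of just the loop bounds follows; the real functions also walk the wheel buckets, and the names here are illustrative.

/*
 * Illustrative only: after a catch-up loop with the new inclusive bound,
 * the soft tick counter ends one past 'ticks', which is exactly where the
 * new softclock() condition stops.
 */
#include <assert.h>

int
main(void)
{
	int ticks = 1000;	/* stand-in for the global hardclock counter */
	int softticks = 998;	/* stand-in for cc->cc_softticks, lagging behind */

	/* Loop bound from the callout_tick() hunk. */
	for (; (softticks - ticks) <= 0; softticks++)
		;		/* scan bucket 'softticks & callwheelmask' */

	/* Loop bound from the softclock() hunk: nothing left to drain. */
	assert(softticks - 1 == ticks);
	return (0);
}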