- Fix a bug in the previous workaround for the tsleep/endtsleep race.

  callout_stop() would fail in two cases:
    1) The timeout was currently executing, and
    2) The timeout had already executed.
  We only needed to work around the race for 1).  We caught some instances
  of 2) via the PS_TIMEOUT flag; however, if endtsleep() fired after the
  process had been woken up but before it had resumed execution,
  PS_TIMEOUT would not be set, yet callout_stop() would still fail, so we
  would block the process waiting for endtsleep() to resume it, even
  though endtsleep() had already run and could not resume it.  This adds
  a new flag, PS_TIMOFAIL, to indicate case 2) when PS_TIMEOUT isn't set.
- Implement this race fix for condition variables as well.

Tested by:	sos
jhb 2001-08-21 18:42:45 +00:00
parent fbda0f8d3b
commit 4f6c3a9342
3 changed files with 34 additions and 8 deletions
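
The fix is a three-way handshake between the sleeping process, whatever wakes it, and the timeout callout, all serialized by sched_lock. As a rough illustration only, here is a userspace sketch of that handshake built on POSIX threads rather than the kernel primitives: lock, asleep, flag_timeout, flag_timofail, timer_cancel(), timer_main(), and timed_sleep() are hypothetical stand-ins for sched_lock, p_wchan, PS_TIMEOUT, PS_TIMOFAIL, callout_stop(), endtsleep()/cv_timedwait_end(), and the wakeup path of msleep()/cv_timedwait(); they are not the FreeBSD APIs. The interleavings that hit cases 1) and 2) depend on scheduling, so a plain run of this program normally exercises only the ordinary-timeout path.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ sched_lock */
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static bool asleep;                 /* ~ "p->p_wchan != NULL"              */
static bool flag_timeout;           /* ~ PS_TIMEOUT                        */
static bool flag_timofail;          /* ~ PS_TIMOFAIL                       */
static bool timer_cancelled;        /* protected by lock                   */
static _Atomic bool timer_running;  /* the "callout" has started executing */

/* ~ callout_stop(): fails once the timer has started running (or has run). */
static bool
timer_cancel(void)                  /* called with lock held */
{
        if (atomic_load(&timer_running))
                return (false);
        timer_cancelled = true;
        return (true);
}

/* The timeout side of the handshake, ~ endtsleep()/cv_timedwait_end(). */
static void *
timer_main(void *arg)
{
        usleep(*(useconds_t *)arg);
        atomic_store(&timer_running, true);
        pthread_mutex_lock(&lock);
        if (!timer_cancelled) {
                if (flag_timeout) {
                        /* Case 1): the sleeper saw us mid-flight and parked
                         * itself; put it back on the "run queue". */
                        flag_timeout = false;
                        pthread_cond_broadcast(&cv);
                } else if (asleep) {
                        /* Plain timeout: wake the sleeper and tell it why. */
                        asleep = false;
                        flag_timeout = true;
                        pthread_cond_broadcast(&cv);
                } else {
                        /* Case 2): the sleeper was woken but has not run its
                         * cancel yet; record that we fired so its failed
                         * cancel is harmless. */
                        flag_timofail = true;
                }
        }
        pthread_mutex_unlock(&lock);
        return (NULL);
}

/* The sleeper's wakeup path, ~ the tail of msleep()/cv_timedwait(). */
static int
timed_sleep(void)
{
        int rval = 0;

        pthread_mutex_lock(&lock);
        asleep = true;
        while (asleep)
                pthread_cond_wait(&cv, &lock);
        if (flag_timeout) {
                flag_timeout = false;           /* the timer woke us */
                rval = EWOULDBLOCK;
        } else if (flag_timofail) {
                flag_timofail = false;          /* case 2): it already ran */
        } else if (!timer_cancel()) {
                /* Case 1): the timer is running right now; park until it
                 * resumes us (the stand-in for mi_switch()). */
                flag_timeout = true;
                while (flag_timeout)
                        pthread_cond_wait(&cv, &lock);
        }
        pthread_mutex_unlock(&lock);
        return (rval);
}

int
main(void)
{
        pthread_t timer;
        useconds_t delay = 100000;      /* fire after 100 ms */

        pthread_create(&timer, NULL, timer_main, &delay);
        /* Nobody else wakes us, so we expect the plain-timeout path. */
        printf("timed_sleep() returned %d\n", timed_sleep());
        pthread_join(timer, NULL);
        return (0);
}

The point mirrored from the commit: a failed cancel is ambiguous on its own. flag_timofail (PS_TIMOFAIL) records that the timer already ran, so the sleeper parks and waits to be resumed only when the timer is genuinely still running, which is the one case that needs the workaround.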

sys/kern/kern_condvar.c

@@ -345,8 +345,17 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
         if (p->p_sflag & PS_TIMEOUT) {
                 p->p_sflag &= ~PS_TIMEOUT;
                 rval = EWOULDBLOCK;
-        } else
-                callout_stop(&p->p_slpcallout);
+        } else if (p->p_sflag & PS_TIMOFAIL)
+                p->p_sflag &= ~PS_TIMOFAIL;
+        else if (callout_stop(&p->p_slpcallout) == 0) {
+                /*
+                 * Work around race with cv_timedwait_end similar to that
+                 * between msleep and endtsleep.
+                 */
+                p->p_sflag |= PS_TIMEOUT;
+                p->p_stats->p_ru.ru_nivcsw++;
+                mi_switch();
+        }
         mtx_unlock_spin(&sched_lock);
 #ifdef KTRACE
@@ -407,8 +416,17 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
         if (p->p_sflag & PS_TIMEOUT) {
                 p->p_sflag &= ~PS_TIMEOUT;
                 rval = EWOULDBLOCK;
-        } else
-                callout_stop(&p->p_slpcallout);
+        } else if (p->p_sflag & PS_TIMOFAIL)
+                p->p_sflag &= ~PS_TIMOFAIL;
+        else if (callout_stop(&p->p_slpcallout) == 0) {
+                /*
+                 * Work around race with cv_timedwait_end similar to that
+                 * between msleep and endtsleep.
+                 */
+                p->p_sflag |= PS_TIMEOUT;
+                p->p_stats->p_ru.ru_nivcsw++;
+                mi_switch();
+        }
         mtx_unlock_spin(&sched_lock);
         PICKUP_GIANT();
@@ -538,12 +556,16 @@ cv_timedwait_end(void *arg)
         CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid,
             p->p_comm);
         mtx_lock_spin(&sched_lock);
-        if (p->p_wchan != NULL) {
+        if (p->p_sflag & PS_TIMEOUT) {
+                p->p_sflag &= ~PS_TIMEOUT;
+                setrunqueue(p);
+        } else if (p->p_wchan != NULL) {
                 if (p->p_stat == SSLEEP)
                         setrunnable(p);
                 else
                         cv_waitq_remove(p);
                 p->p_sflag |= PS_TIMEOUT;
-        }
+        } else
+                p->p_sflag |= PS_TIMOFAIL;
         mtx_unlock_spin(&sched_lock);
 }

sys/kern/kern_synch.c

@@ -451,7 +451,9 @@ msleep(ident, mtx, priority, wmesg, timo)
                 p->p_sflag &= ~PS_TIMEOUT;
                 if (sig == 0)
                         rval = EWOULDBLOCK;
-        } else if (timo && callout_stop(&p->p_slpcallout) == 0) {
+        } else if (p->p_sflag & PS_TIMOFAIL)
+                p->p_sflag &= ~PS_TIMOFAIL;
+        else if (timo && callout_stop(&p->p_slpcallout) == 0) {
                 /*
                  * This isn't supposed to be pretty.  If we are here, then
                  * the endtsleep() callout is currently executing on another
@@ -524,7 +526,8 @@ endtsleep(arg)
                 else
                         unsleep(p);
                 p->p_sflag |= PS_TIMEOUT;
-        }
+        } else
+                p->p_sflag |= PS_TIMOFAIL;
         mtx_unlock_spin(&sched_lock);
 }

sys/sys/proc.h

@@ -321,6 +321,7 @@ struct proc {
 #define PS_SWAPPING 0x00200 /* Process is being swapped. */
 #define PS_ASTPENDING 0x00400 /* Process has a pending ast. */
 #define PS_NEEDRESCHED 0x00800 /* Process needs to yield. */
+#define PS_TIMOFAIL 0x01000 /* Timeout from sleep after we were awake. */
 #define P_MAGIC 0xbeefface