Now that the return value semantics of cv's for multithreaded processes
have been unified with that of msleep(9), further refine the sleepq interface and consolidate some duplicated code: - Move the pre-sleep checks for threaded processes into a thread_sleep_check() function in kern_thread.c. - Move all handling of TDF_SINTR to be internal to subr_sleepqueue.c. Specifically, if a thread is awakened by something other than a signal while checking for signals before going to sleep, clear TDF_SINTR in sleepq_catch_signals(). This removes a sched_lock lock/unlock combo in that edge case during an interruptible sleep. Also, fix sleepq_check_signals() to properly handle the condition if TDF_SINTR is clear rather than requiring the callers of the sleepq API to notice this edge case and call a non-_sig variant of sleepq_wait(). - Clarify the flags arguments to sleepq_add(), sleepq_signal() and sleepq_broadcast() by creating an explicit submask for sleepq types. Also, add an explicit SLEEPQ_MSLEEP type rather than a magic number of 0. Also, add a SLEEPQ_INTERRUPTIBLE flag for use with sleepq_add() and move the setting of TDF_SINTR to sleepq_add() if this flag is set rather than sleepq_catch_signals(). Note that it is the caller's responsibility to ensure that sleepq_catch_signals() is called if and only if this flag is passed to the preceding sleepq_add(). Note that this also removes a sched_lock lock/unlock pair from sleepq_catch_signals(). It also ensures that for an interruptible sleep, TDF_SINTR is always set when TD_ON_SLEEPQ() is true.
This commit is contained in:
parent
3648c3517a
commit
007ddf7e7a
@ -153,7 +153,6 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
|
||||
|
||||
td = curthread;
|
||||
p = td->td_proc;
|
||||
rval = 0;
|
||||
#ifdef KTRACE
|
||||
if (KTRPOINT(td, KTR_CSW))
|
||||
ktrcsw(1, 0);
|
||||
@ -180,32 +179,21 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
|
||||
* thread or if our thread is marked as interrupted.
|
||||
*/
|
||||
mtx_lock_spin(&sched_lock);
|
||||
if (p->p_flag & P_SA || p->p_numthreads > 1) {
|
||||
if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
|
||||
rval = EINTR;
|
||||
else if (td->td_flags & TDF_INTERRUPT)
|
||||
rval = td->td_intrval;
|
||||
if (rval != 0) {
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_release(cvp);
|
||||
return (rval);
|
||||
}
|
||||
}
|
||||
rval = thread_sleep_check(td);
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
if (rval != 0) {
|
||||
sleepq_release(cvp);
|
||||
return (rval);
|
||||
}
|
||||
|
||||
cvp->cv_waiters++;
|
||||
DROP_GIANT();
|
||||
mtx_unlock(mp);
|
||||
|
||||
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
|
||||
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
|
||||
SLEEPQ_INTERRUPTIBLE);
|
||||
sig = sleepq_catch_signals(cvp);
|
||||
if (sig == 0 && !TD_ON_SLEEPQ(td)) {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
td->td_flags &= ~TDF_SINTR;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_wait(cvp);
|
||||
} else
|
||||
rval = sleepq_wait_sig(cvp);
|
||||
rval = sleepq_wait_sig(cvp);
|
||||
if (rval == 0)
|
||||
rval = sleepq_calc_signal_retval(sig);
|
||||
|
||||
@ -320,33 +308,22 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
|
||||
* thread or if our thread is marked as interrupted.
|
||||
*/
|
||||
mtx_lock_spin(&sched_lock);
|
||||
if (p->p_flag & P_SA || p->p_numthreads > 1) {
|
||||
if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
|
||||
rval = EINTR;
|
||||
else if (td->td_flags & TDF_INTERRUPT)
|
||||
rval = td->td_intrval;
|
||||
if (rval != 0) {
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_release(cvp);
|
||||
return (rval);
|
||||
}
|
||||
}
|
||||
rval = thread_sleep_check(td);
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
if (rval != 0) {
|
||||
sleepq_release(cvp);
|
||||
return (rval);
|
||||
}
|
||||
|
||||
cvp->cv_waiters++;
|
||||
DROP_GIANT();
|
||||
mtx_unlock(mp);
|
||||
|
||||
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
|
||||
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR |
|
||||
SLEEPQ_INTERRUPTIBLE);
|
||||
sleepq_set_timeout(cvp, timo);
|
||||
sig = sleepq_catch_signals(cvp);
|
||||
if (sig == 0 && !TD_ON_SLEEPQ(td)) {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
td->td_flags &= ~TDF_SINTR;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
rval = sleepq_timedwait(cvp);
|
||||
} else
|
||||
rval = sleepq_timedwait_sig(cvp, sig != 0);
|
||||
rval = sleepq_timedwait_sig(cvp, sig != 0);
|
||||
if (rval == 0)
|
||||
rval = sleepq_calc_signal_retval(sig);
|
||||
|
||||
|
@ -125,7 +125,7 @@ msleep(ident, mtx, priority, wmesg, timo)
|
||||
struct sleepqueue *sq;
|
||||
struct thread *td;
|
||||
struct proc *p;
|
||||
int catch, rval, sig;
|
||||
int catch, rval, sig, flags;
|
||||
WITNESS_SAVE_DECL(mtx);
|
||||
|
||||
td = curthread;
|
||||
@ -166,28 +166,19 @@ msleep(ident, mtx, priority, wmesg, timo)
|
||||
sleepq_remove(td, td->td_wchan);
|
||||
|
||||
sq = sleepq_lookup(ident);
|
||||
mtx_lock_spin(&sched_lock);
|
||||
|
||||
if (p->p_flag & P_SA || p->p_numthreads > 1) {
|
||||
if (catch) {
|
||||
/*
|
||||
* Just don't bother if we are exiting
|
||||
* and not the exiting thread or thread was marked as
|
||||
* interrupted.
|
||||
* Don't bother sleeping if we are exiting and not the exiting
|
||||
* thread or if our thread is marked as interrupted.
|
||||
*/
|
||||
if (catch) {
|
||||
if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td) {
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_release(ident);
|
||||
return (EINTR);
|
||||
}
|
||||
if (td->td_flags & TDF_INTERRUPT) {
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_release(ident);
|
||||
return (td->td_intrval);
|
||||
}
|
||||
mtx_lock_spin(&sched_lock);
|
||||
rval = thread_sleep_check(td);
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
if (rval != 0) {
|
||||
sleepq_release(ident);
|
||||
return (rval);
|
||||
}
|
||||
}
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
CTR5(KTR_PROC, "msleep: thread %p (pid %ld, %s) on %s (%p)",
|
||||
(void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);
|
||||
|
||||
@ -207,17 +198,14 @@ msleep(ident, mtx, priority, wmesg, timo)
|
||||
* stopped, then td will no longer be on a sleep queue upon
|
||||
* return from cursig().
|
||||
*/
|
||||
sleepq_add(sq, ident, mtx, wmesg, 0);
|
||||
flags = SLEEPQ_MSLEEP;
|
||||
if (catch)
|
||||
flags |= SLEEPQ_INTERRUPTIBLE;
|
||||
sleepq_add(sq, ident, mtx, wmesg, flags);
|
||||
if (timo)
|
||||
sleepq_set_timeout(ident, timo);
|
||||
if (catch) {
|
||||
sig = sleepq_catch_signals(ident);
|
||||
if (sig == 0 && !TD_ON_SLEEPQ(td)) {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
td->td_flags &= ~TDF_SINTR;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
catch = 0;
|
||||
}
|
||||
} else
|
||||
sig = 0;
|
||||
|
||||
@ -262,7 +250,7 @@ wakeup(ident)
|
||||
register void *ident;
|
||||
{
|
||||
|
||||
sleepq_broadcast(ident, 0, -1);
|
||||
sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -275,7 +263,7 @@ wakeup_one(ident)
|
||||
register void *ident;
|
||||
{
|
||||
|
||||
sleepq_signal(ident, 0, -1);
|
||||
sleepq_signal(ident, SLEEPQ_MSLEEP, -1);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1110,3 +1110,22 @@ thread_single_end(void)
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called before going into an interruptible sleep to see if we have been
|
||||
* interrupted or requested to exit.
|
||||
*/
|
||||
int
|
||||
thread_sleep_check(struct thread *td)
|
||||
{
|
||||
struct proc *p;
|
||||
|
||||
p = td->td_proc;
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
if (p->p_flag & P_SA || p->p_numthreads > 1) {
|
||||
if ((p->p_flag & P_SINGLE_EXIT) && p->p_singlethread != td)
|
||||
return (EINTR);
|
||||
if (td->td_flags & TDF_INTERRUPT)
|
||||
return (td->td_intrval);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ struct sleepqueue {
|
||||
LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
|
||||
LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
|
||||
void *sq_wchan; /* (c) Wait channel. */
|
||||
int sq_flags; /* (c) Flags. */
|
||||
int sq_type; /* (c) Queue type. */
|
||||
#ifdef INVARIANTS
|
||||
struct mtx *sq_lock; /* (c) Associated lock. */
|
||||
#endif
|
||||
@ -279,7 +279,7 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
|
||||
#ifdef INVARIANTS
|
||||
sq->sq_lock = lock;
|
||||
#endif
|
||||
sq->sq_flags = flags;
|
||||
sq->sq_type = flags & SLEEPQ_TYPE;
|
||||
TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
|
||||
} else {
|
||||
MPASS(wchan == sq->sq_wchan);
|
||||
@ -297,6 +297,8 @@ sleepq_add(struct sleepqueue *sq, void *wchan, struct mtx *lock,
|
||||
mtx_lock_spin(&sched_lock);
|
||||
td->td_wchan = wchan;
|
||||
td->td_wmesg = wmesg;
|
||||
if (flags & SLEEPQ_INTERRUPTIBLE)
|
||||
td->td_flags |= TDF_SINTR;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
|
||||
@ -345,10 +347,8 @@ sleepq_catch_signals(void *wchan)
|
||||
(void *)td, (long)p->p_pid, p->p_comm);
|
||||
|
||||
/* Mark thread as being in an interruptible sleep. */
|
||||
mtx_lock_spin(&sched_lock);
|
||||
MPASS(td->td_flags & TDF_SINTR);
|
||||
MPASS(TD_ON_SLEEPQ(td));
|
||||
td->td_flags |= TDF_SINTR;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_release(wchan);
|
||||
|
||||
/* See if there are any pending signals for this thread. */
|
||||
@ -364,15 +364,20 @@ sleepq_catch_signals(void *wchan)
|
||||
|
||||
/*
|
||||
* If there were pending signals and this thread is still on
|
||||
* the sleep queue, remove it from the sleep queue.
|
||||
* the sleep queue, remove it from the sleep queue. If the
|
||||
* thread was removed from the sleep queue while we were blocked
|
||||
* above, then clear TDF_SINTR before returning.
|
||||
*/
|
||||
sq = sleepq_lookup(wchan);
|
||||
mtx_lock_spin(&sched_lock);
|
||||
if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
sleepq_remove_thread(sq, td);
|
||||
} else
|
||||
} else {
|
||||
if (!TD_ON_SLEEPQ(td) && sig == 0)
|
||||
td->td_flags &= ~TDF_SINTR;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
return (sig);
|
||||
}
|
||||
|
||||
@ -465,6 +470,13 @@ sleepq_check_signals(void)
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
td = curthread;
|
||||
|
||||
/*
|
||||
* If TDF_SINTR is clear, then we were awakened while executing
|
||||
* sleepq_catch_signals().
|
||||
*/
|
||||
if (!(td->td_flags & TDF_SINTR))
|
||||
return (0);
|
||||
|
||||
/* We are no longer in an interruptible sleep. */
|
||||
td->td_flags &= ~TDF_SINTR;
|
||||
|
||||
@ -513,6 +525,7 @@ void
|
||||
sleepq_wait(void *wchan)
|
||||
{
|
||||
|
||||
MPASS(!(curthread->td_flags & TDF_SINTR));
|
||||
sleepq_switch(wchan);
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
@ -541,6 +554,7 @@ sleepq_timedwait(void *wchan)
|
||||
{
|
||||
int rval;
|
||||
|
||||
MPASS(!(curthread->td_flags & TDF_SINTR));
|
||||
sleepq_switch(wchan);
|
||||
rval = sleepq_check_timeout();
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
@ -649,7 +663,7 @@ sleepq_signal(void *wchan, int flags, int pri)
|
||||
sleepq_release(wchan);
|
||||
return;
|
||||
}
|
||||
KASSERT(sq->sq_flags == flags,
|
||||
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
|
||||
("%s: mismatch between sleep/wakeup and cv_*", __func__));
|
||||
/* XXX: Do for all sleep queues eventually. */
|
||||
if (flags & SLEEPQ_CONDVAR)
|
||||
@ -679,7 +693,7 @@ sleepq_broadcast(void *wchan, int flags, int pri)
|
||||
sleepq_release(wchan);
|
||||
return;
|
||||
}
|
||||
KASSERT(sq->sq_flags == flags,
|
||||
KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
|
||||
("%s: mismatch between sleep/wakeup and cv_*", __func__));
|
||||
/* XXX: Do for all sleep queues eventually. */
|
||||
if (flags & SLEEPQ_CONDVAR)
|
||||
|
@ -953,6 +953,7 @@ void thread_reap(void);
|
||||
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
|
||||
int thread_single(int how);
|
||||
void thread_single_end(void);
|
||||
int thread_sleep_check(struct thread *td);
|
||||
void thread_stash(struct thread *td);
|
||||
int thread_suspend_check(int how);
|
||||
void thread_suspend_one(struct thread *td);
|
||||
|
@ -82,7 +82,10 @@ struct thread;
|
||||
|
||||
#ifdef _KERNEL
|
||||
|
||||
#define SLEEPQ_CONDVAR 0x1 /* Sleep queue is a cv. */
|
||||
#define SLEEPQ_TYPE 0x0ff /* Mask of sleep queue types. */
|
||||
#define SLEEPQ_MSLEEP 0x00 /* Used by msleep/wakeup. */
|
||||
#define SLEEPQ_CONDVAR 0x01 /* Used for a cv. */
|
||||
#define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
|
||||
|
||||
void init_sleepqueues(void);
|
||||
void sleepq_abort(struct thread *td);
|
||||
|
Loading…
x
Reference in New Issue
Block a user