Switch the sleep/wakeup and condition variable implementations to use the
sleep queue interface:
- Sleep queues attempt to merge some of the benefits of both the old sleep
  queues and condition variables.  Having sleep queues in a hash table
  avoids having to allocate a queue head for each wait channel.  Thus,
  struct cv has shrunk down to just a single char * pointer now.  However,
  the hash table does not hold threads directly, but queue heads.  This
  means that once you have located a queue in the hash bucket, you no
  longer have to walk the rest of the hash chain looking for threads.
  Instead, you have a list of all the threads sleeping on that wait
  channel.  (A sketch of the resulting interface follows this list.)
- Outside of the sleepq code and the sleep/cv code, the kernel no longer
  differentiates between cv's and sleep/wakeup.  For example, calls to
  abortsleep() and cv_abort() are replaced with a call to sleepq_abort().
  Thus, the TDF_CVWAITQ flag is removed.  Also, calls to unsleep() and
  cv_waitq_remove() have been replaced with calls to sleepq_remove().
- The sched_sleep() function no longer accepts a priority argument, as
  sleeps no longer inherently bump the priority.  Instead, this is solely
  a property of msleep(), which explicitly calls sched_prio() before
  blocking.
- The TDF_ONSLEEPQ flag has been dropped as it was never used.  The
  associated TD_SET_ON_SLEEPQ() and TD_CLR_ON_SLEEPQ() macros have also
  been dropped and replaced with a single explicit clearing of td_wchan.
  TD_SET_ON_SLEEPQ() would really have only made sense if it had taken
  the wait channel and message as arguments anyway.  Now that that only
  happens in one place, a macro would be overkill.
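
For orientation, the wait side of the new interface looks roughly like the
minimal sketch below.  It assumes only the sleepqueue calls as they appear
in the diffs that follow (sleepq_lookup(), sleepq_add(), sleepq_wait(),
sleepq_signal()); the event counter, lock, and function names are invented
for illustration, and normal code should keep using msleep() or condition
variables rather than the raw interface.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sleepqueue.h>

static struct mtx event_lock;           /* hypothetical interlock */
static int pending;                     /* hypothetical wait channel */

static void
event_wait(void)
{
        struct sleepqueue *sq;

        mtx_lock(&event_lock);
        while (pending == 0) {
                /* Look up (and lock) the queue for this wait channel. */
                sq = sleepq_lookup(&pending);
                /* Drop the interlock, then queue ourselves and block. */
                mtx_unlock(&event_lock);
                sleepq_add(sq, &pending, &event_lock, "event", 0);
                sleepq_wait(&pending);
                mtx_lock(&event_lock);
        }
        pending--;
        mtx_unlock(&event_lock);
}

static void
event_post(void)
{
        mtx_lock(&event_lock);
        pending++;
        /* Wake one waiter; 0 flags, -1 for no priority adjustment. */
        sleepq_signal(&pending, 0, -1);
        mtx_unlock(&event_lock);
}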
John Baldwin 2004-02-27 18:52:44 +00:00
parent e5bb601d87
commit 44f3b09204
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=126326
15 changed files with 175 additions and 615 deletions


@@ -1151,6 +1151,7 @@ kern/subr_prof.c standard
kern/subr_rman.c standard
kern/subr_sbuf.c standard
kern/subr_scanf.c standard
kern/subr_sleepqueue.c standard
kern/subr_smp.c standard
kern/subr_taskqueue.c standard
kern/subr_trap.c standard


@@ -126,20 +126,8 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
if (p->p_flag & P_SA)
db_printf( " thread %p ksegrp %p ", td, td->td_ksegrp);
if (TD_ON_SLEEPQ(td)) {
if (td->td_flags & TDF_CVWAITQ)
if (TD_IS_SLEEPING(td))
db_printf("[CV]");
else
db_printf("[CVQ");
else
if (TD_IS_SLEEPING(td))
db_printf("[SLP]");
else
db_printf("[SLPQ");
db_printf("%s %p]", td->td_wmesg,
(void *)td->td_wchan);
}
if (TD_ON_SLEEPQ(td))
db_printf("[SLPQ %s %p]", td->td_wmesg, (void *)td->td_wchan);
switch (td->td_state) {
case TDS_INHIBITED:
if (TD_ON_LOCK(td)) {
@@ -147,11 +135,9 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
td->td_lockname,
(void *)td->td_blocked);
}
#if 0 /* covered above */
if (TD_IS_SLEEPING(td)) {
db_printf("[SLP]");
}
#endif
if (TD_IS_SWAPPED(td)) {
db_printf("[SWAP]");
}


@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/condvar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/resourcevar.h>
#ifdef KTRACE
#include <sys/uio.h>
@@ -56,35 +57,6 @@ __FBSDID("$FreeBSD$");
mtx_assert((mp), MA_OWNED | MA_NOTRECURSED); \
} while (0)
#ifdef INVARIANTS
#define CV_WAIT_VALIDATE(cvp, mp) do { \
if (TAILQ_EMPTY(&(cvp)->cv_waitq)) { \
/* Only waiter. */ \
(cvp)->cv_mtx = (mp); \
} else { \
/* \
* Other waiter; assert that we're using the \
* same mutex. \
*/ \
KASSERT((cvp)->cv_mtx == (mp), \
("%s: Multiple mutexes", __func__)); \
} \
} while (0)
#define CV_SIGNAL_VALIDATE(cvp) do { \
if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) { \
KASSERT(mtx_owned((cvp)->cv_mtx), \
("%s: Mutex not owned", __func__)); \
} \
} while (0)
#else
#define CV_WAIT_VALIDATE(cvp, mp)
#define CV_SIGNAL_VALIDATE(cvp)
#endif
static void cv_timedwait_end(void *arg);
/*
* Initialize a condition variable. Must be called before use.
*/
@@ -92,8 +64,6 @@ void
cv_init(struct cv *cvp, const char *desc)
{
TAILQ_INIT(&cvp->cv_waitq);
cvp->cv_mtx = NULL;
cvp->cv_description = desc;
}
@@ -104,82 +74,13 @@ cv_init(struct cv *cvp, const char *desc)
void
cv_destroy(struct cv *cvp)
{
#ifdef INVARIANTS
struct sleepqueue *sq;
KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __func__));
}
/*
* Common code for cv_wait* functions. All require sched_lock.
*/
/*
* Switch context.
*/
static __inline void
cv_switch(struct thread *td)
{
TD_SET_SLEEPING(td);
mi_switch(SW_VOL);
CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
td->td_proc->p_pid, td->td_proc->p_comm);
}
/*
* Switch context, catching signals.
*/
static __inline int
cv_switch_catch(struct thread *td)
{
struct proc *p;
int sig;
/*
* We put ourselves on the sleep queue and start our timeout before
* calling cursig, as we could stop there, and a wakeup or a SIGCONT (or
* both) could occur while we were stopped. A SIGCONT would cause us to
* be marked as TDS_SLP without resuming us, thus we must be ready for
* sleep when cursig is called. If the wakeup happens while we're
* stopped, td->td_wchan will be 0 upon return from cursig,
* and TD_ON_SLEEPQ() will return false.
*/
td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
p = td->td_proc;
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
sig = cursig(td);
mtx_unlock(&p->p_sigacts->ps_mtx);
if (thread_suspend_check(1))
sig = SIGSTOP;
mtx_lock_spin(&sched_lock);
PROC_UNLOCK(p);
if (sig != 0) {
if (TD_ON_SLEEPQ(td))
cv_waitq_remove(td);
TD_SET_RUNNING(td);
} else if (TD_ON_SLEEPQ(td)) {
cv_switch(td);
}
td->td_flags &= ~TDF_SINTR;
return sig;
}
/*
* Add a thread to the wait queue of a condition variable.
*/
static __inline void
cv_waitq_add(struct cv *cvp, struct thread *td)
{
td->td_flags |= TDF_CVWAITQ;
TD_SET_ON_SLEEPQ(td);
td->td_wchan = cvp;
td->td_wmesg = cvp->cv_description;
CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
td->td_proc->p_pid, td->td_proc->p_comm);
TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
sched_sleep(td, td->td_priority);
sq = sleepq_lookup(cvp);
sleepq_release(cvp);
KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
#endif
}
/*
@@ -192,6 +93,7 @@ cv_waitq_add(struct cv *cvp, struct thread *td)
void
cv_wait(struct cv *cvp, struct mtx *mp)
{
struct sleepqueue *sq;
struct thread *td;
WITNESS_SAVE_DECL(mp);
@@ -205,7 +107,7 @@ cv_wait(struct cv *cvp, struct mtx *mp)
"Waiting on \"%s\"", cvp->cv_description);
WITNESS_SAVE(&mp->mtx_object, mp);
if (cold ) {
if (cold || panicstr) {
/*
* During autoconfiguration, just give interrupts
* a chance, then just return. Don't run any other
@@ -215,17 +117,14 @@ cv_wait(struct cv *cvp, struct mtx *mp)
return;
}
mtx_lock_spin(&sched_lock);
CV_WAIT_VALIDATE(cvp, mp);
sq = sleepq_lookup(cvp);
DROP_GIANT();
mtx_unlock(mp);
cv_waitq_add(cvp, td);
cv_switch(td);
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_wait(cvp);
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
@@ -244,10 +143,10 @@ cv_wait(struct cv *cvp, struct mtx *mp)
int
cv_wait_sig(struct cv *cvp, struct mtx *mp)
{
struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval;
int sig;
int rval, sig;
WITNESS_SAVE_DECL(mp);
td = curthread;
@@ -272,32 +171,25 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
return 0;
}
mtx_lock_spin(&sched_lock);
sq = sleepq_lookup(cvp);
CV_WAIT_VALIDATE(cvp, mp);
/* XXX: Missing the threading checks from msleep! */
DROP_GIANT();
mtx_unlock(mp);
cv_waitq_add(cvp, td);
sig = cv_switch_catch(td);
mtx_unlock_spin(&sched_lock);
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sig = sleepq_catch_signals(cvp);
/*
* XXX: Missing magic return value handling for no signal
* caught but thread woken up during check.
*/
rval = sleepq_wait_sig(cvp);
if (rval == 0)
rval = sleepq_calc_signal_retval(sig);
/* XXX: Part of missing threading checks? */
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
if (sig == 0) {
sig = cursig(td); /* XXXKSE */
if (sig == 0 && td->td_flags & TDF_INTERRUPT)
rval = td->td_intrval;
}
if (sig != 0) {
if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
rval = EINTR;
else
rval = ERESTART;
}
mtx_unlock(&p->p_sigacts->ps_mtx);
if (p->p_flag & P_WEXIT)
rval = EINTR;
PROC_UNLOCK(p);
@@ -321,6 +213,7 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
int
cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
{
struct sleepqueue *sq;
struct thread *td;
int rval;
WITNESS_SAVE_DECL(mp);
@@ -346,34 +239,15 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
mtx_lock_spin(&sched_lock);
CV_WAIT_VALIDATE(cvp, mp);
sq = sleepq_lookup(cvp);
DROP_GIANT();
mtx_unlock(mp);
cv_waitq_add(cvp, td);
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
cv_switch(td);
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_set_timeout(sq, cvp, timo);
rval = sleepq_timedwait(cvp, 0);
if (td->td_flags & TDF_TIMEOUT) {
td->td_flags &= ~TDF_TIMEOUT;
rval = EWOULDBLOCK;
} else if (td->td_flags & TDF_TIMOFAIL)
td->td_flags &= ~TDF_TIMOFAIL;
else if (callout_stop(&td->td_slpcallout) == 0) {
/*
* Work around race with cv_timedwait_end similar to that
* between msleep and endtsleep.
* Go back to sleep.
*/
TD_SET_SLEEPING(td);
mi_switch(SW_INVOL);
td->td_flags &= ~TDF_TIMOFAIL;
}
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
@@ -394,6 +268,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
int
cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
{
struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int rval;
@@ -422,48 +297,24 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
return 0;
}
mtx_lock_spin(&sched_lock);
CV_WAIT_VALIDATE(cvp, mp);
sq = sleepq_lookup(cvp);
DROP_GIANT();
mtx_unlock(mp);
cv_waitq_add(cvp, td);
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
sig = cv_switch_catch(td);
if (td->td_flags & TDF_TIMEOUT) {
td->td_flags &= ~TDF_TIMEOUT;
rval = EWOULDBLOCK;
} else if (td->td_flags & TDF_TIMOFAIL)
td->td_flags &= ~TDF_TIMOFAIL;
else if (callout_stop(&td->td_slpcallout) == 0) {
/*
* Work around race with cv_timedwait_end similar to that
* between msleep and endtsleep.
* Go back to sleep.
*/
TD_SET_SLEEPING(td);
mi_switch(SW_INVOL);
td->td_flags &= ~TDF_TIMOFAIL;
}
mtx_unlock_spin(&sched_lock);
sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
sleepq_set_timeout(sq, cvp, timo);
sig = sleepq_catch_signals(cvp);
/*
* XXX: Missing magic return value handling for no signal
* caught but thread woken up during check.
*/
rval = sleepq_timedwait_sig(cvp, sig != 0);
if (rval == 0)
rval = sleepq_calc_signal_retval(sig);
/* XXX: Part of missing threading checks? */
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
if (sig == 0) {
sig = cursig(td);
if (sig == 0 && td->td_flags & TDF_INTERRUPT)
rval = td->td_intrval;
}
if (sig != 0) {
if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
rval = EINTR;
else
rval = ERESTART;
}
mtx_unlock(&p->p_sigacts->ps_mtx);
if (p->p_flag & P_WEXIT)
rval = EINTR;
PROC_UNLOCK(p);
@@ -479,24 +330,6 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
return (rval);
}
/*
* Common code for signal and broadcast. Assumes waitq is not empty. Must be
* called with sched_lock held.
*/
static __inline void
cv_wakeup(struct cv *cvp)
{
struct thread *td;
mtx_assert(&sched_lock, MA_OWNED);
td = TAILQ_FIRST(&cvp->cv_waitq);
KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __func__));
KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __func__));
cv_waitq_remove(td);
TD_CLR_SLEEPING(td);
setrunnable(td);
}
/*
* Signal a condition variable, wakes up one waiting thread. Will also wakeup
* the swapper if the process is not in memory, so that it can bring the
@@ -508,13 +341,7 @@ void
cv_signal(struct cv *cvp)
{
KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
mtx_lock_spin(&sched_lock);
if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
CV_SIGNAL_VALIDATE(cvp);
cv_wakeup(cvp);
}
mtx_unlock_spin(&sched_lock);
sleepq_signal(cvp, SLEEPQ_CONDVAR, -1);
}
/*
@@ -524,82 +351,6 @@ cv_signal(struct cv *cvp)
void
cv_broadcastpri(struct cv *cvp, int pri)
{
struct thread *td;
KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
mtx_lock_spin(&sched_lock);
CV_SIGNAL_VALIDATE(cvp);
while (!TAILQ_EMPTY(&cvp->cv_waitq)) {
if (pri >= PRI_MIN && pri <= PRI_MAX) {
td = TAILQ_FIRST(&cvp->cv_waitq);
if (td->td_priority > pri)
td->td_priority = pri;
}
cv_wakeup(cvp);
}
mtx_unlock_spin(&sched_lock);
sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri);
}
/*
* Remove a thread from the wait queue of its condition variable. This may be
* called externally.
*/
void
cv_waitq_remove(struct thread *td)
{
struct cv *cvp;
mtx_assert(&sched_lock, MA_OWNED);
if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
td->td_flags &= ~TDF_CVWAITQ;
td->td_wmesg = NULL;
TD_CLR_ON_SLEEPQ(td);
}
}
/*
* Timeout function for cv_timedwait. Put the thread on the runqueue and set
* its timeout flag.
*/
static void
cv_timedwait_end(void *arg)
{
struct thread *td;
td = arg;
CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td)) {
cv_waitq_remove(td);
td->td_flags |= TDF_TIMEOUT;
} else {
td->td_flags |= TDF_TIMOFAIL;
}
TD_CLR_SLEEPING(td);
setrunnable(td);
mtx_unlock_spin(&sched_lock);
}
/*
* For now only abort interruptable waits.
* The others will have to either complete on their own or have a timeout.
*/
void
cv_abort(struct thread *td)
{
CTR3(KTR_PROC, "cv_abort: thread %p (pid %d, %s)", td,
td->td_proc->p_pid, td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
if (TD_ON_SLEEPQ(td)) {
cv_waitq_remove(td);
}
TD_CLR_SLEEPING(td);
setrunnable(td);
}
mtx_unlock_spin(&sched_lock);
}

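Note that none of this changes the interface seen by cv consumers: a
condition variable is still initialized once, waited on with its interlock
held, and signalled; only the machinery behind those calls is now the
shared sleep queue code.  A hypothetical consumer for illustration
(donelock, donecv, and done are invented names; cv_init() and mtx_init()
are assumed to have run at attach time):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static struct mtx donelock;
static struct cv donecv;        /* now holds only its description string */
static int done;

static void
wait_for_done(void)
{
        mtx_lock(&donelock);
        while (!done)
                cv_wait(&donecv, &donelock);    /* backed by sleepq_wait() */
        mtx_unlock(&donelock);
}

static void
mark_done(void)
{
        mtx_lock(&donelock);
        done = 1;
        cv_signal(&donecv);     /* now a wrapper around sleepq_signal() */
        mtx_unlock(&donelock);
}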

@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/turnstile.h>
@@ -188,6 +189,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
vm_thread_new(td, 0);
cpu_thread_setup(td);
td->td_sleepqueue = sleepq_alloc();
td->td_turnstile = turnstile_alloc();
td->td_sched = (struct td_sched *)&td[1];
}
@@ -202,6 +204,7 @@ thread_fini(void *mem, int size)
td = (struct thread *)mem;
turnstile_free(td->td_turnstile);
sleepq_free(td->td_sleepqueue);
vm_thread_dispose(td);
}
@@ -456,12 +459,8 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
td2->td_intrval = EINTR;
else
td2->td_intrval = ERESTART;
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
if (td2->td_flags & TDF_CVWAITQ)
cv_abort(td2);
else
abortsleep(td2);
}
if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
sleepq_abort(td2);
mtx_unlock_spin(&sched_lock);
}
PROC_UNLOCK(p);
@@ -648,7 +647,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
((td2->td_wchan == &kg->kg_completed) ||
(td2->td_wchan == &p->p_siglist &&
(ku->ku_mflags & KMF_WAITSIGEVENT)))) {
abortsleep(td2);
sleepq_abort(td2);
} else {
ku->ku_flags |= KUF_DOUPCALL;
}
@@ -1907,10 +1906,7 @@ thread_single(int force_exit)
}
if (TD_ON_SLEEPQ(td2) &&
(td2->td_flags & TDF_SINTR)) {
if (td2->td_flags & TDF_CVWAITQ)
cv_abort(td2);
else
abortsleep(td2);
sleepq_abort(td2);
}
} else {
if (TD_IS_SUSPENDED(td2))


@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sx.h>
@@ -1869,12 +1870,8 @@ do_tdsignal(struct thread *td, int sig, sigtarget_t target)
* It may run a bit until it hits a thread_suspend_check().
*/
mtx_lock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
if (td->td_flags & TDF_CVWAITQ)
cv_abort(td);
else
abortsleep(td);
}
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
sleepq_abort(td);
mtx_unlock_spin(&sched_lock);
goto out;
/*
@@ -1969,9 +1966,8 @@ tdsigwakeup(struct thread *td, int sig, sig_t action)
* be noticed when the process returns through
* trap() or syscall().
*/
if ((td->td_flags & TDF_SINTR) == 0) {
if ((td->td_flags & TDF_SINTR) == 0)
return;
}
/*
* Process is sleeping and traced. Make it runnable
* so it can discover the signal in issignal() and stop
@@ -1999,14 +1995,10 @@ tdsigwakeup(struct thread *td, int sig, sig_t action)
/*
* Raise priority to at least PUSER.
*/
if (td->td_priority > PUSER) {
if (td->td_priority > PUSER)
td->td_priority = PUSER;
}
}
if (td->td_flags & TDF_CVWAITQ)
cv_abort(td);
else
abortsleep(td);
sleepq_abort(td);
}
#ifdef SMP
else {
@@ -2015,9 +2007,8 @@ tdsigwakeup(struct thread *td, int sig, sig_t action)
* other than kicking ourselves if we are running.
* It will either never be noticed, or noticed very soon.
*/
if (TD_IS_RUNNING(td) && td != curthread) {
if (TD_IS_RUNNING(td) && td != curthread)
forward_signal(td);
}
}
#endif
}


@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -95,7 +96,6 @@ static fixpt_t cexp[3] = {
static int fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
static void endtsleep(void *);
static void loadav(void *arg);
static void lboltcb(void *arg);
@@ -116,6 +116,7 @@ sleepinit(void)
hogticks = (hz / 10) * 2; /* Default only. */
for (i = 0; i < TABLESIZE; i++)
TAILQ_INIT(&slpque[i]);
init_sleepqueues();
}
/*
@@ -141,47 +142,26 @@ msleep(ident, mtx, priority, wmesg, timo)
int priority, timo;
const char *wmesg;
{
struct thread *td = curthread;
struct proc *p = td->td_proc;
int sig, catch = priority & PCATCH;
int rval = 0;
struct sleepqueue *sq;
struct thread *td;
struct proc *p;
int catch, rval, sig;
WITNESS_SAVE_DECL(mtx);
td = curthread;
p = td->td_proc;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
/* XXX: mtx == NULL ?? */
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mtx->mtx_object,
"Sleeping on \"%s\"", wmesg);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, mtx == NULL ? NULL :
&mtx->mtx_object, "Sleeping on \"%s\"", wmesg);
KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
("sleeping without a mutex"));
/*
* If we are capable of async syscalls and there isn't already
* another one ready to return, start a new thread
* and queue it as ready to run. Note that there is danger here
* because we need to make sure that we don't sleep allocating
* the thread (recursion here might be bad).
*/
mtx_lock_spin(&sched_lock);
if (p->p_flag & P_SA || p->p_numthreads > 1) {
/*
* Just don't bother if we are exiting
* and not the exiting thread or thread was marked as
* interrupted.
*/
if (catch) {
if ((p->p_flag & P_WEXIT) && p->p_singlethread != td) {
mtx_unlock_spin(&sched_lock);
return (EINTR);
}
if (td->td_flags & TDF_INTERRUPT) {
mtx_unlock_spin(&sched_lock);
return (td->td_intrval);
}
}
}
if (cold ) {
KASSERT(p != NULL, ("msleep1"));
KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
if (cold) {
/*
* During autoconfiguration, just return;
* don't run any other procs or panic below,
@@ -192,9 +172,52 @@ msleep(ident, mtx, priority, wmesg, timo)
*/
if (mtx != NULL && priority & PDROP)
mtx_unlock(mtx);
mtx_unlock_spin(&sched_lock);
return (0);
}
catch = priority & PCATCH;
rval = 0;
/*
* If we are already on a sleep queue, then remove us from that
* sleep queue first. We have to do this to handle recursive
* sleeps.
*/
if (TD_ON_SLEEPQ(td))
sleepq_remove(td, td->td_wchan);
sq = sleepq_lookup(ident);
mtx_lock_spin(&sched_lock);
/*
* If we are capable of async syscalls and there isn't already
* another one ready to return, start a new thread
* and queue it as ready to run. Note that there is danger here
* because we need to make sure that we don't sleep allocating
* the thread (recursion here might be bad).
*/
if (p->p_flag & P_SA || p->p_numthreads > 1) {
/*
* Just don't bother if we are exiting
* and not the exiting thread or thread was marked as
* interrupted.
*/
if (catch) {
if ((p->p_flag & P_WEXIT) && p->p_singlethread != td) {
mtx_unlock_spin(&sched_lock);
sleepq_release(ident);
return (EINTR);
}
if (td->td_flags & TDF_INTERRUPT) {
mtx_unlock_spin(&sched_lock);
sleepq_release(ident);
return (td->td_intrval);
}
}
}
mtx_unlock_spin(&sched_lock);
CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
td, p->p_pid, p->p_comm, wmesg, ident);
DROP_GIANT();
if (mtx != NULL) {
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
@@ -203,101 +226,55 @@ msleep(ident, mtx, priority, wmesg, timo)
if (priority & PDROP)
mtx = NULL;
}
KASSERT(p != NULL, ("msleep1"));
KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
td, p->p_pid, p->p_comm, wmesg, ident);
td->td_wchan = ident;
td->td_wmesg = wmesg;
TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
TD_SET_ON_SLEEPQ(td);
if (timo)
callout_reset(&td->td_slpcallout, timo, endtsleep, td);
/*
* We put ourselves on the sleep queue and start our timeout
* before calling thread_suspend_check, as we could stop there, and
* a wakeup or a SIGCONT (or both) could occur while we were stopped.
* without resuming us, thus we must be ready for sleep
* when cursig is called. If the wakeup happens while we're
* stopped, td->td_wchan will be 0 upon return from cursig.
* before calling thread_suspend_check, as we could stop there,
* and a wakeup or a SIGCONT (or both) could occur while we were
* stopped without resuming us. Thus, we must be ready for sleep
* when cursig() is called. If the wakeup happens while we're
* stopped, then td will no longer be on a sleep queue upon
* return from cursig().
*/
sleepq_add(sq, ident, mtx, wmesg, 0);
if (timo)
sleepq_set_timeout(sq, ident, timo);
if (catch) {
CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
p->p_pid, p->p_comm);
td->td_flags |= TDF_SINTR;
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
sig = cursig(td);
mtx_unlock(&p->p_sigacts->ps_mtx);
if (sig == 0 && thread_suspend_check(1))
sig = SIGSTOP;
mtx_lock_spin(&sched_lock);
PROC_UNLOCK(p);
if (sig != 0) {
if (TD_ON_SLEEPQ(td))
unsleep(td);
} else if (!TD_ON_SLEEPQ(td))
sig = sleepq_catch_signals(ident);
if (sig == 0 && !TD_ON_SLEEPQ(td)) {
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_SINTR;
mtx_unlock_spin(&sched_lock);
catch = 0;
}
} else
sig = 0;
/*
* Let the scheduler know we're about to voluntarily go to sleep.
* Adjust this threads priority.
*
* XXX: Do we need to save priority in td_base_pri?
*/
sched_sleep(td, priority & PRIMASK);
mtx_lock_spin(&sched_lock);
sched_prio(td, priority & PRIMASK);
mtx_unlock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td)) {
TD_SET_SLEEPING(td);
mi_switch(SW_VOL);
if (timo && catch)
rval = sleepq_timedwait_sig(ident, sig != 0);
else if (timo)
rval = sleepq_timedwait(ident, sig != 0);
else if (catch)
rval = sleepq_wait_sig(ident);
else {
sleepq_wait(ident);
rval = 0;
}
/*
* We're awake from voluntary sleep.
*/
CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
td->td_flags &= ~TDF_SINTR;
if (td->td_flags & TDF_TIMEOUT) {
td->td_flags &= ~TDF_TIMEOUT;
if (sig == 0)
rval = EWOULDBLOCK;
} else if (td->td_flags & TDF_TIMOFAIL) {
td->td_flags &= ~TDF_TIMOFAIL;
} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
/*
* This isn't supposed to be pretty. If we are here, then
* the endtsleep() callout is currently executing on another
* CPU and is either spinning on the sched_lock or will be
* soon. If we don't synchronize here, there is a chance
* that this process may msleep() again before the callout
* has a chance to run and the callout may end up waking up
* the wrong msleep(). Yuck.
*/
TD_SET_SLEEPING(td);
mi_switch(SW_INVOL);
td->td_flags &= ~TDF_TIMOFAIL;
}
if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
(rval == 0)) {
rval = td->td_intrval;
}
mtx_unlock_spin(&sched_lock);
if (rval == 0 && catch) {
PROC_LOCK(p);
/* XXX: shouldn't we always be calling cursig()? */
mtx_lock(&p->p_sigacts->ps_mtx);
if (sig != 0 || (sig = cursig(td))) {
if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
rval = EINTR;
else
rval = ERESTART;
}
mtx_unlock(&p->p_sigacts->ps_mtx);
PROC_UNLOCK(p);
}
if (rval == 0 && catch)
rval = sleepq_calc_signal_retval(sig);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
@@ -310,82 +287,6 @@ msleep(ident, mtx, priority, wmesg, timo)
return (rval);
}
/*
* Implement timeout for msleep().
*
* If process hasn't been awakened (wchan non-zero),
* set timeout flag and undo the sleep. If proc
* is stopped, just unsleep so it will remain stopped.
* MP-safe, called without the Giant mutex.
*/
static void
endtsleep(arg)
void *arg;
{
register struct thread *td;
td = (struct thread *)arg;
CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
td, td->td_proc->p_pid, td->td_proc->p_comm);
mtx_lock_spin(&sched_lock);
/*
* This is the other half of the synchronization with msleep()
* described above. If the TDS_TIMEOUT flag is set, we lost the
* race and just need to put the process back on the runqueue.
*/
if (TD_ON_SLEEPQ(td)) {
TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
TD_CLR_ON_SLEEPQ(td);
td->td_flags |= TDF_TIMEOUT;
td->td_wmesg = NULL;
} else
td->td_flags |= TDF_TIMOFAIL;
TD_CLR_SLEEPING(td);
setrunnable(td);
mtx_unlock_spin(&sched_lock);
}
/*
* Abort a thread, as if an interrupt had occured. Only abort
* interruptable waits (unfortunatly it isn't only safe to abort others).
* This is about identical to cv_abort().
* Think about merging them?
* Also, whatever the signal code does...
*/
void
abortsleep(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
/*
* If the TDF_TIMEOUT flag is set, just leave. A
* timeout is scheduled anyhow.
*/
if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
if (TD_ON_SLEEPQ(td)) {
unsleep(td);
TD_CLR_SLEEPING(td);
setrunnable(td);
}
}
}
/*
* Remove a process from its wait queue
*/
void
unsleep(struct thread *td)
{
mtx_lock_spin(&sched_lock);
if (TD_ON_SLEEPQ(td)) {
TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
TD_CLR_ON_SLEEPQ(td);
td->td_wmesg = NULL;
}
mtx_unlock_spin(&sched_lock);
}
/*
* Make all processes sleeping on the specified identifier runnable.
*/
@@ -393,27 +294,8 @@ void
wakeup(ident)
register void *ident;
{
register struct slpquehead *qp;
register struct thread *td;
struct thread *ntd;
struct proc *p;
mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
restart:
for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
ntd = TAILQ_NEXT(td, td_slpq);
if (td->td_wchan == ident) {
unsleep(td);
TD_CLR_SLEEPING(td);
setrunnable(td);
p = td->td_proc;
CTR3(KTR_PROC,"wakeup: thread %p (pid %d, %s)",
td, p->p_pid, p->p_comm);
goto restart;
}
}
mtx_unlock_spin(&sched_lock);
sleepq_broadcast(ident, 0, -1);
}
/*
@@ -425,26 +307,8 @@ void
wakeup_one(ident)
register void *ident;
{
register struct proc *p;
register struct slpquehead *qp;
register struct thread *td;
struct thread *ntd;
mtx_lock_spin(&sched_lock);
qp = &slpque[LOOKUP(ident)];
for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
ntd = TAILQ_NEXT(td, td_slpq);
if (td->td_wchan == ident) {
unsleep(td);
TD_CLR_SLEEPING(td);
setrunnable(td);
p = td->td_proc;
CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
td, p->p_pid, p->p_comm);
break;
}
}
mtx_unlock_spin(&sched_lock);
sleepq_signal(ident, 0, -1);
}
/*

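tsleep()/msleep()/wakeup() consumers are likewise untouched; wakeup() and
wakeup_one() are now one-line wrappers over sleepq_broadcast() and
sleepq_signal(), as the hunks above show.  A hypothetical pair of routines
(the ready flag and function names are invented for illustration):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>         /* for hz */

static int ready;               /* hypothetical wait channel */

static void
producer(void)
{
        ready = 1;
        wakeup(&ready);         /* now sleepq_broadcast(&ready, 0, -1) */
}

static void
consumer(void)
{
        /*
         * No interlock is held here, so the timeout bounds the window
         * for a lost wakeup; msleep() with a mutex avoids it entirely.
         */
        while (!ready)
                (void)tsleep(&ready, PWAIT, "ready", hz);
}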


@@ -622,12 +622,12 @@ sched_prio(struct thread *td, u_char prio)
}
void
sched_sleep(struct thread *td, u_char prio)
sched_sleep(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
td->td_ksegrp->kg_slptime = 0;
td->td_priority = prio;
td->td_base_pri = td->td_priority;
}
void


@@ -1219,12 +1219,12 @@ sched_nice(struct ksegrp *kg, int nice)
}
void
sched_sleep(struct thread *td, u_char prio)
sched_sleep(struct thread *td)
{
mtx_assert(&sched_lock, MA_OWNED);
td->td_slptime = ticks;
td->td_priority = prio;
td->td_base_pri = td->td_priority;
CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
td->td_kse, td->td_slptime);


@@ -60,6 +60,7 @@ __FBSDID("$FreeBSD$");
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
@@ -1212,15 +1213,9 @@ doselwakeup(sip, pri)
TAILQ_REMOVE(&td->td_selq, sip, si_thrlist);
sip->si_thread = NULL;
mtx_lock_spin(&sched_lock);
if (td->td_wchan == &selwait) {
cv_waitq_remove(td);
TD_CLR_SLEEPING(td);
if (pri >= PRI_MIN && pri <= PRI_MAX && td->td_priority > pri)
td->td_priority = pri;
setrunnable(td);
} else
td->td_flags &= ~TDF_SELECT;
td->td_flags &= ~TDF_SELECT;
mtx_unlock_spin(&sched_lock);
sleepq_remove(td, &selwait);
mtx_unlock(&sellock);
}


@@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/sleepqueue.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
@@ -1598,13 +1599,7 @@ speedup_syncer()
int ret = 0;
td = FIRST_THREAD_IN_PROC(updateproc);
mtx_lock_spin(&sched_lock);
if (td->td_wchan == &lbolt) {
unsleep(td);
TD_CLR_SLEEPING(td);
setrunnable(td);
}
mtx_unlock_spin(&sched_lock);
sleepq_remove(td, &lbolt);
mtx_lock(&sync_mtx);
if (rushjob < syncdelay / 2) {
rushjob += 1;


@@ -41,11 +41,6 @@ TAILQ_HEAD(cv_waitq, thread);
* Condition variable.
*/
struct cv {
struct cv_waitq cv_waitq; /* Queue of condition waiters. */
struct mtx *cv_mtx; /*
* Mutex passed in by cv_*wait*(),
* currently only used for INVARIANTS.
*/
const char *cv_description;
};
@@ -63,10 +58,6 @@ void cv_broadcastpri(struct cv *cvp, int);
#define cv_broadcast(cvp) cv_broadcastpri(cvp, -1)
void cv_waitq_remove(struct thread *td);
void cv_abort(struct thread *td);
#define cv_waitq_empty(cvp) (TAILQ_EMPTY(&(cvp)->cv_waitq))
#define cv_wmesg(cvp) ((cvp)->cv_description)
#endif /* _KERNEL */


@@ -157,6 +157,7 @@ struct ke_sched;
struct kg_sched;
struct nlminfo;
struct p_sched;
struct sleepqueue;
struct td_sched;
struct trapframe;
struct turnstile;
@@ -265,6 +266,7 @@ struct thread {
TAILQ_ENTRY(thread) td_runq; /* (j/z) Run queue(s). XXXKSE */
TAILQ_HEAD(, selinfo) td_selq; /* (p) List of selinfos. */
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
struct turnstile *td_turnstile; /* (k) Associated turnstile. */
/* Cleared during fork1() or thread_sched_upcall(). */
@@ -344,9 +346,7 @@ struct thread {
#define TDF_TIMEOUT 0x000010 /* Timing out during sleep. */
#define TDF_IDLETD 0x000020 /* This is one of the per-CPU idle threads. */
#define TDF_SELECT 0x000040 /* Selecting; wakeup/waiting danger. */
#define TDF_CVWAITQ 0x000080 /* Thread is on a cv_waitq (not slpq). */
#define TDF_TSNOBLOCK 0x000100 /* Don't block on a turnstile due to race. */
#define TDF_ONSLEEPQ 0x000200 /* On the sleep queue. */
#define TDF_ASTPENDING 0x000800 /* Thread has some asynchronous events. */
#define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
#define TDF_INTERRUPT 0x002000 /* Thread is marked as interrupted. */
@@ -414,11 +414,6 @@ struct thread {
#define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
#define TD_SET_ON_SLEEPQ(td) do {(td)->td_flags |= TDF_ONSLEEPQ; } while (0)
#define TD_CLR_ON_SLEEPQ(td) do { \
(td)->td_flags &= ~TDF_ONSLEEPQ; \
(td)->td_wchan = NULL; \
} while (0)
/*
* The schedulable entity that can be given a context to run.


@@ -65,7 +65,7 @@ void sched_exit_thread(struct thread *td, struct thread *child);
void sched_fork_thread(struct thread *td, struct thread *child);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td, u_char prio);
void sched_sleep(struct thread *td);
void sched_switch(struct thread *td);
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);


@@ -312,7 +312,6 @@ extern watchdog_tickle_fn wdog_tickler;
*/
int msleep(void *chan, struct mtx *mtx, int pri, const char *wmesg,
int timo);
void abortsleep(struct thread *td);
#define tsleep(chan, pri, wmesg, timo) msleep(chan, NULL, pri, wmesg, timo)
void wakeup(void *chan) __nonnull(1);
void wakeup_one(void *chan) __nonnull(1);