/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
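
/*
 * Illustrative arithmetic for the assertion above (the sizes are example
 * assumptions, not taken from this file): with RQ_NQS = 64 run queues and a
 * 32-bit rqb_word_t, RQB_BPW = 32 and RQB_LEN = 2, so RQB_BPW * RQB_LEN ==
 * 64 and the status bitmap covers exactly one bit per queue.
 */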

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
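
/*
 * Example (a userland sketch, not compiled here): the read-only knob
 * declared above can be queried through sysctlbyname(3).
 *
 *	int v;
 *	size_t len = sizeof(v);
 *
 *	if (sysctlbyname("kern.sched.preemption", &v, &len, NULL, 0) == 0)
 *		printf("preemption compiled %s\n", v ? "in" : "out");
 */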

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "switch stats");

/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(sleepqtimo,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_stats_reset, "I",
    "Reset scheduler statistics");
#endif
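
/*
 * Example (a userland sketch, assuming a kernel built with SCHED_STATS):
 * each counter appears as a read-only node under kern.sched.stats, and
 * writing 1 to kern.sched.stats.reset clears every per-CPU counter through
 * sysctl_stats_reset() above.
 *
 *	int one = 1;
 *
 *	if (sysctlbyname("kern.sched.stats.reset", NULL, NULL, &one,
 *	    sizeof(one)) != 0)
 *		warn("could not reset scheduler stats");
 */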

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */

static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
retry:
	if (((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		td = sched_choose();
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();

	if (KERNEL_PANICKED())
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 *
 * It might seem a good idea to inline critical_enter() but, in order
 * to prevent instructions reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as sched_pin()).  The performance
 * penalty imposed by the membar could, then, produce slower code than
 * the function call itself, for most cases.
 */
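/*
 * Typical usage sketch (the standard kernel idiom, not code specific to
 * this file): critical_enter() increments td_critnest and critical_exit()
 * decrements it; a preemption requested while the count is non-zero is
 * deferred until the outermost critical_exit().
 *
 *	critical_enter();
 *	... touch per-CPU state without being switched away ...
 *	critical_exit();
 */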
void
critical_enter_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_enter();
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	/*
	 * If td_critnest is 0, it is possible that we are going to get
	 * preempted again before reaching the code below.  This happens
	 * rarely and is harmless.  However, this means td_owepreempt may
	 * now be unset.
	 */
	td = curthread;
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: we committed to switch,
	 * disable preemption in interrupt handlers
	 * while spinning for the thread lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags);
}

void
critical_exit_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_exit();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
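
/*
 * Worked example for runq_findbit(), with illustrative sizes: assuming
 * 32-bit status words (RQB_BPW = 32, RQB_L2BPW = 5) and RQB_FFS() yielding
 * the zero-based index of the lowest set bit, a first set bit at position 3
 * of word i = 1 gives pri = 3 + (1 << 5) = 35.  Lower words and lower bits
 * are found first, so the scan returns the lowest-numbered (best priority)
 * non-empty queue.
 */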

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}
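
/*
 * Worked example for the start mask above (illustrative, 32-bit words):
 * for pri = 37, RQB_WORD(37) = 1 and 37 & 31 = 5, so the scan begins in
 * word 1 with mask 0xffffffe0, ignoring bits 0-4 of that word (queues
 * 32-36).  If nothing is found through the last word, the search wraps
 * back to pri = 0 exactly once.
 */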

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
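
/*
 * Example of the priority-to-queue mapping used above (illustrative
 * values): assuming RQ_PPQ = 4 priorities per queue, a thread with
 * td_priority = 100 is inserted on rq_queues[25], and priorities 100-103
 * all share that queue.  SRQ_PREEMPTED callers insert at the head so a
 * preempted thread runs again before its queue-mates.
 */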

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}
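
/*
 * Illustrative behaviour of the fuzz check above: with fuzz = 2 and a
 * queue holding [tdA (td_lastcpu = 1), tdB (td_lastcpu = 0)], a call on
 * CPU 0 inspects the first two entries and returns tdB, trading strict
 * FIFO order within the queue for a little cache affinity.
 */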

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}
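
/*
 * Usage note (a sketch of the intended pattern, not code from this file):
 * a caller that wants round-robin behaviour across queues can remember the
 * index it last served and restart the search there, e.g.
 *
 *	td = runq_choose_from(rq, start_idx);
 *	...
 *	runq_remove_idx(rq, td, &start_idx);
 *
 * runq_remove_idx() below advances the saved index past a queue that it
 * empties, which is what keeps the rotation moving.
 */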

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
|