2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
2001-02-12 00:20:08 +00:00
|
|
|
* Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
|
|
|
|
* All rights reserved.
|
1999-08-19 00:06:53 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
|
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
|
2004-09-01 06:42:02 +00:00
|
|
|
#include "opt_sched.h"
|
2004-07-02 20:21:44 +00:00
|
|
|
|
2004-09-05 02:09:54 +00:00
|
|
|
#ifndef KERN_SWITCH_INCLUDE
|
1999-08-19 00:06:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2004-07-10 21:36:01 +00:00
|
|
|
#include <sys/kdb.h>
|
1999-08-19 00:06:53 +00:00
|
|
|
#include <sys/kernel.h>
|
2000-09-07 01:33:02 +00:00
|
|
|
#include <sys/ktr.h>
|
2001-03-28 09:17:56 +00:00
|
|
|
#include <sys/lock.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
1999-08-19 00:06:53 +00:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/queue.h>
|
2002-10-12 05:32:24 +00:00
|
|
|
#include <sys/sched.h>
|
2004-09-05 02:09:54 +00:00
|
|
|
#else /* KERN_SWITCH_INCLUDE */
|
2003-11-17 08:58:16 +00:00
|
|
|
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
|
2003-04-02 23:53:30 +00:00
|
|
|
#include <sys/smp.h>
|
|
|
|
#endif
|
2004-09-01 06:42:02 +00:00
|
|
|
#if defined(SMP) && defined(SCHED_4BSD)
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#endif
|
|
|
|
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
#include <machine/cpu.h>
|
|
|
|
|
2005-12-18 18:10:57 +00:00
|
|
|
/* Uncomment this to enable logging of critical_enter/exit. */
|
|
|
|
#if 0
|
|
|
|
#define KTR_CRITICAL KTR_SCHED
|
|
|
|
#else
|
|
|
|
#define KTR_CRITICAL 0
|
|
|
|
#endif
|
|
|
|
|
2004-09-02 18:59:15 +00:00
|
|
|
#ifdef FULL_PREEMPTION
|
|
|
|
#ifndef PREEMPTION
|
|
|
|
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
|
|
|
|
#endif
|
|
|
|
#endif
|
1999-08-19 00:06:53 +00:00
|
|
|
|
2002-05-25 01:12:23 +00:00
|
|
|
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
|
|
|
|
|
2005-03-20 17:05:12 +00:00
|
|
|
/*
|
|
|
|
* kern.sched.preemption allows user space to determine if preemption support
|
|
|
|
* is compiled in or not. It is not currently a boot or runtime flag that
|
|
|
|
* can be changed.
|
|
|
|
*/
|
|
|
|
#ifdef PREEMPTION
|
|
|
|
static int kern_sched_preemption = 1;
|
|
|
|
#else
|
|
|
|
static int kern_sched_preemption = 0;
|
|
|
|
#endif
|
|
|
|
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
|
|
|
|
&kern_sched_preemption, 0, "Kernel preemption enabled");
|
|
|
|
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
#ifdef SCHED_STATS
|
|
|
|
long switch_preempt;
|
|
|
|
long switch_owepreempt;
|
|
|
|
long switch_turnstile;
|
|
|
|
long switch_sleepq;
|
|
|
|
long switch_sleepqtimo;
|
|
|
|
long switch_relinquish;
|
|
|
|
long switch_needresched;
|
|
|
|
static SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, preempt, CTLFLAG_RD, &switch_preempt, 0, "");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, owepreempt, CTLFLAG_RD, &switch_owepreempt, 0, "");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, turnstile, CTLFLAG_RD, &switch_turnstile, 0, "");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, sleepq, CTLFLAG_RD, &switch_sleepq, 0, "");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, sleepqtimo, CTLFLAG_RD, &switch_sleepqtimo, 0, "");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, relinquish, CTLFLAG_RD, &switch_relinquish, 0, "");
|
|
|
|
SYSCTL_INT(_kern_sched_stats, OID_AUTO, needresched, CTLFLAG_RD, &switch_needresched, 0, "");
|
|
|
|
static int
|
|
|
|
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int val;
|
|
|
|
|
|
|
|
val = 0;
|
|
|
|
error = sysctl_handle_int(oidp, &val, 0, req);
|
|
|
|
if (error != 0 || req->newptr == NULL)
|
|
|
|
return (error);
|
|
|
|
if (val == 0)
|
|
|
|
return (0);
|
|
|
|
switch_preempt = 0;
|
|
|
|
switch_owepreempt = 0;
|
|
|
|
switch_turnstile = 0;
|
|
|
|
switch_sleepq = 0;
|
|
|
|
switch_sleepqtimo = 0;
|
|
|
|
switch_relinquish = 0;
|
|
|
|
switch_needresched = 0;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
|
|
|
|
0, sysctl_stats_reset, "I", "Reset scheduler statistics");
|
|
|
|
#endif
|
|
|
|
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
/************************************************************************
|
|
|
|
* Functions that manipulate runnability from a thread perspective. *
|
|
|
|
************************************************************************/
|
2006-10-26 21:42:22 +00:00
|
|
|
/*
|
|
|
|
* Select the thread that will be run next.
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *
|
|
|
|
choosethread(void)
|
1999-08-19 00:06:53 +00:00
|
|
|
{
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
struct thread *td;
|
|
|
|
|
2003-11-17 08:58:16 +00:00
|
|
|
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
|
2003-04-02 23:53:30 +00:00
|
|
|
if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
|
|
|
|
/* Shutting down, run idlethread on AP's */
|
|
|
|
td = PCPU_GET(idlethread);
|
|
|
|
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
|
|
|
|
TD_SET_RUNNING(td);
|
|
|
|
return (td);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2002-07-17 02:23:44 +00:00
|
|
|
retry:
|
2007-01-23 08:46:51 +00:00
|
|
|
td = sched_choose();
|
2002-12-28 01:23:07 +00:00
|
|
|
|
|
|
|
/*
|
2003-05-21 18:53:25 +00:00
|
|
|
* If we are in panic, only allow system threads,
|
|
|
|
* plus the one we are running in, to be run.
|
2002-12-28 01:23:07 +00:00
|
|
|
*/
|
2002-07-17 02:23:44 +00:00
|
|
|
if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
|
2003-05-21 18:53:25 +00:00
|
|
|
(td->td_flags & TDF_INPANIC) == 0)) {
|
|
|
|
/* note that it is no longer on the run queue */
|
|
|
|
TD_SET_CAN_RUN(td);
|
2002-07-17 02:23:44 +00:00
|
|
|
goto retry;
|
2003-05-21 18:53:25 +00:00
|
|
|
}
|
2002-12-28 01:23:07 +00:00
|
|
|
|
2002-09-11 08:13:56 +00:00
|
|
|
TD_SET_RUNNING(td);
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
return (td);
|
|
|
|
}
|
|
|
|
|
2004-07-02 20:21:44 +00:00
|
|
|
/*
|
|
|
|
* Kernel thread preemption implementation. Critical sections mark
|
|
|
|
* regions of code in which preemptions are not allowed.
|
|
|
|
*/
|
2001-12-18 00:27:18 +00:00
|
|
|
void
|
|
|
|
critical_enter(void)
|
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
|
|
|
|
td = curthread;
|
|
|
|
td->td_critnest++;
|
2005-12-18 18:10:57 +00:00
|
|
|
CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
|
2004-11-07 23:11:32 +00:00
|
|
|
(long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
|
2001-12-18 00:27:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
critical_exit(void)
|
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
|
|
|
|
td = curthread;
|
2004-02-02 08:13:27 +00:00
|
|
|
KASSERT(td->td_critnest != 0,
|
|
|
|
("critical_exit: td_critnest == 0"));
|
2005-05-23 23:01:53 +00:00
|
|
|
#ifdef PREEMPTION
|
2001-12-18 00:27:18 +00:00
|
|
|
if (td->td_critnest == 1) {
|
2005-04-08 03:37:53 +00:00
|
|
|
td->td_critnest = 0;
|
|
|
|
if (td->td_owepreempt) {
|
|
|
|
td->td_critnest = 1;
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
thread_lock(td);
|
2005-04-08 03:37:53 +00:00
|
|
|
td->td_critnest--;
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
SCHED_STAT_INC(switch_owepreempt);
|
2007-08-03 23:35:35 +00:00
|
|
|
mi_switch(SW_INVOL|SW_PREEMPT, NULL);
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
thread_unlock(td);
|
2004-07-02 20:21:44 +00:00
|
|
|
}
|
2005-12-28 17:13:31 +00:00
|
|
|
} else
|
2004-07-02 20:21:44 +00:00
|
|
|
#endif
|
2001-12-18 00:27:18 +00:00
|
|
|
td->td_critnest--;
|
2005-12-28 17:13:31 +00:00
|
|
|
|
2005-12-18 18:10:57 +00:00
|
|
|
CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
|
2004-11-07 23:11:32 +00:00
|
|
|
(long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
|
2001-12-18 00:27:18 +00:00
|
|
|
}
|
|
|
|
|
2004-07-02 20:21:44 +00:00
|
|
|
/*
|
|
|
|
* This function is called when a thread is about to be put on run queue
|
|
|
|
* because it has been made runnable or its priority has been adjusted. It
|
|
|
|
* determines if the new thread should be immediately preempted to. If so,
|
|
|
|
* it switches to it and eventually returns true. If not, it returns false
|
|
|
|
* so that the caller may place the thread on an appropriate run queue.
|
|
|
|
*/
|
|
|
|
/*
 * Called when a thread is about to be put on a run queue because it has
 * been made runnable or had its priority adjusted.  Decide whether the
 * new thread should preempt the current one; if so switch to it and
 * return 1, otherwise return 0 so the caller queues the thread normally.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * Do not preempt when any of the following hold:
	 *  - the kernel is panicking (panicstr);
	 *  - curthread has higher (numerically lower) or equal priority
	 *    (this also keeps curthread from preempting to itself);
	 *  - it is too early in boot for context switches (cold);
	 *  - curthread has an inhibitor set or is exiting — it is about
	 *    to switch out anyway and would not be resumed properly;
	 *  - without FULL_PREEMPTION, the new thread is not realtime
	 *    and curthread is not idle-priority.
	 * If none of those hold but curthread is in a nested critical
	 * section, defer the preemption until the section is exited;
	 * otherwise switch immediately.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		/* Deferred: critical_exit() will perform the switch. */
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	SCHED_STAT_INC(switch_preempt);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	/*
	 * td's lock pointer may have changed across the switch; we must
	 * return with td locked, so swap the locks inside a spinlock
	 * section to keep the transition atomic with respect to us.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}
|
|
|
|
|
2004-08-06 15:49:44 +00:00
|
|
|
#if 0
|
2004-07-02 20:21:44 +00:00
|
|
|
#ifndef PREEMPTION
|
|
|
|
/* XXX: There should be a non-static version of this. */
|
|
|
|
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	/* SYSINIT callback: the payload is a NUL-terminated message. */
	printf("%s", (char *)data);
}
|
|
|
|
static char preempt_warning[] =
|
|
|
|
"WARNING: Kernel preemption is disabled, expect reduced performance.\n";
|
|
|
|
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
|
|
|
|
preempt_warning)
|
|
|
|
#endif
|
2004-08-06 15:49:44 +00:00
|
|
|
#endif
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
* SYSTEM RUN QUEUE manipulations and tests *
|
|
|
|
************************************************************************/
|
|
|
|
/*
|
|
|
|
* Initialize a run structure.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
runq_init(struct runq *rq)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
bzero(rq, sizeof *rq);
|
|
|
|
for (i = 0; i < RQ_NQS; i++)
|
|
|
|
TAILQ_INIT(&rq->rq_queues[i]);
|
|
|
|
}
|
|
|
|
|
2001-02-12 00:20:08 +00:00
|
|
|
/*
|
|
|
|
* Clear the status bit of the queue corresponding to priority level pri,
|
|
|
|
* indicating that it is empty.
|
|
|
|
*/
|
|
|
|
static __inline void
|
|
|
|
runq_clrbit(struct runq *rq, int pri)
|
|
|
|
{
|
|
|
|
struct rqbits *rqb;
|
2000-09-07 01:33:02 +00:00
|
|
|
|
2001-02-12 00:20:08 +00:00
|
|
|
rqb = &rq->rq_status;
|
|
|
|
CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
|
|
|
|
rqb->rqb_bits[RQB_WORD(pri)],
|
|
|
|
rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
|
|
|
|
RQB_BIT(pri), RQB_WORD(pri));
|
|
|
|
rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the index of the first non-empty run queue. This is done by
|
|
|
|
* scanning the status bits, a set bit indicates a non-empty queue.
|
|
|
|
*/
|
|
|
|
static __inline int
|
|
|
|
runq_findbit(struct runq *rq)
|
|
|
|
{
|
|
|
|
struct rqbits *rqb;
|
|
|
|
int pri;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
rqb = &rq->rq_status;
|
|
|
|
for (i = 0; i < RQB_LEN; i++)
|
|
|
|
if (rqb->rqb_bits[i]) {
|
2002-06-20 06:21:20 +00:00
|
|
|
pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
|
2001-02-12 00:20:08 +00:00
|
|
|
CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
|
|
|
|
rqb->rqb_bits[i], i, pri);
|
|
|
|
return (pri);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2007-01-04 08:39:58 +00:00
|
|
|
static __inline int
|
2007-08-20 06:36:12 +00:00
|
|
|
runq_findbit_from(struct runq *rq, u_char pri)
|
2007-01-04 08:39:58 +00:00
|
|
|
{
|
|
|
|
struct rqbits *rqb;
|
2007-08-20 06:36:12 +00:00
|
|
|
rqb_word_t mask;
|
2007-01-04 08:39:58 +00:00
|
|
|
int i;
|
|
|
|
|
2007-08-20 06:36:12 +00:00
|
|
|
/*
|
|
|
|
* Set the mask for the first word so we ignore priorities before 'pri'.
|
|
|
|
*/
|
|
|
|
mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
|
2007-01-04 08:39:58 +00:00
|
|
|
rqb = &rq->rq_status;
|
|
|
|
again:
|
2007-08-20 06:36:12 +00:00
|
|
|
for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
|
|
|
|
mask = rqb->rqb_bits[i] & mask;
|
|
|
|
if (mask == 0)
|
|
|
|
continue;
|
|
|
|
pri = RQB_FFS(mask) + (i << RQB_L2BPW);
|
|
|
|
CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
|
|
|
|
mask, i, pri);
|
|
|
|
return (pri);
|
2007-01-04 08:39:58 +00:00
|
|
|
}
|
2007-08-20 06:36:12 +00:00
|
|
|
if (pri == 0)
|
|
|
|
return (-1);
|
|
|
|
/*
|
|
|
|
* Wrap back around to the beginning of the list just once so we
|
|
|
|
* scan the whole thing.
|
|
|
|
*/
|
|
|
|
pri = 0;
|
|
|
|
goto again;
|
2007-01-04 08:39:58 +00:00
|
|
|
}
|
|
|
|
|
2001-02-12 00:20:08 +00:00
|
|
|
/*
|
|
|
|
* Set the status bit of the queue corresponding to priority level pri,
|
|
|
|
* indicating that it is non-empty.
|
|
|
|
*/
|
|
|
|
static __inline void
|
|
|
|
runq_setbit(struct runq *rq, int pri)
|
|
|
|
{
|
|
|
|
struct rqbits *rqb;
|
|
|
|
|
|
|
|
rqb = &rq->rq_status;
|
|
|
|
CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
|
|
|
|
rqb->rqb_bits[RQB_WORD(pri)],
|
|
|
|
rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
|
|
|
|
RQB_BIT(pri), RQB_WORD(pri));
|
|
|
|
rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
|
1999-08-19 00:06:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-12-06 06:34:57 +00:00
|
|
|
* Add the thread to the queue specified by its priority, and set the
|
2001-02-12 00:20:08 +00:00
|
|
|
* corresponding status bit.
|
1999-08-19 00:06:53 +00:00
|
|
|
*/
|
|
|
|
void
|
2006-12-06 06:34:57 +00:00
|
|
|
runq_add(struct runq *rq, struct td_sched *ts, int flags)
|
1999-08-19 00:06:53 +00:00
|
|
|
{
|
2001-02-12 00:20:08 +00:00
|
|
|
struct rqhead *rqh;
|
|
|
|
int pri;
|
1999-08-19 00:06:53 +00:00
|
|
|
|
2006-12-06 06:34:57 +00:00
|
|
|
pri = ts->ts_thread->td_priority / RQ_PPQ;
|
|
|
|
ts->ts_rqindex = pri;
|
2001-02-12 00:20:08 +00:00
|
|
|
runq_setbit(rq, pri);
|
|
|
|
rqh = &rq->rq_queues[pri];
|
2006-12-06 06:34:57 +00:00
|
|
|
CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
|
|
|
|
ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
|
2004-10-05 22:03:10 +00:00
|
|
|
if (flags & SRQ_PREEMPTED) {
|
2006-12-06 06:34:57 +00:00
|
|
|
TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
|
2004-10-05 22:03:10 +00:00
|
|
|
} else {
|
2006-12-06 06:34:57 +00:00
|
|
|
TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
|
2004-10-05 22:03:10 +00:00
|
|
|
}
|
1999-08-19 00:06:53 +00:00
|
|
|
}
|
|
|
|
|
2007-01-04 08:39:58 +00:00
|
|
|
void
|
2007-02-08 01:52:25 +00:00
|
|
|
runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
|
2007-01-04 08:39:58 +00:00
|
|
|
{
|
|
|
|
struct rqhead *rqh;
|
|
|
|
|
|
|
|
KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
|
|
|
|
ts->ts_rqindex = pri;
|
|
|
|
runq_setbit(rq, pri);
|
|
|
|
rqh = &rq->rq_queues[pri];
|
|
|
|
CTR5(KTR_RUNQ, "runq_add_pri: td=%p ke=%p pri=%d idx=%d rqh=%p",
|
|
|
|
ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
|
|
|
|
if (flags & SRQ_PREEMPTED) {
|
|
|
|
TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
|
|
|
|
} else {
|
|
|
|
TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
|
|
|
|
}
|
|
|
|
}
|
1999-08-19 00:06:53 +00:00
|
|
|
/*
|
2001-02-12 00:20:08 +00:00
|
|
|
* Return true if there are runnable processes of any priority on the run
|
|
|
|
* queue, false otherwise. Has no side effects, does not modify the run
|
|
|
|
* queue structure.
|
1999-08-19 00:06:53 +00:00
|
|
|
*/
|
2001-02-12 00:20:08 +00:00
|
|
|
int
|
|
|
|
runq_check(struct runq *rq)
|
1999-08-19 00:06:53 +00:00
|
|
|
{
|
2001-02-12 00:20:08 +00:00
|
|
|
struct rqbits *rqb;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
rqb = &rq->rq_status;
|
|
|
|
for (i = 0; i < RQB_LEN; i++)
|
|
|
|
if (rqb->rqb_bits[i]) {
|
|
|
|
CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
|
|
|
|
rqb->rqb_bits[i], i);
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
CTR0(KTR_RUNQ, "runq_check: empty");
|
|
|
|
|
|
|
|
return (0);
|
1999-08-19 00:06:53 +00:00
|
|
|
}
|
|
|
|
|
2004-09-01 06:42:02 +00:00
|
|
|
#if defined(SMP) && defined(SCHED_4BSD)
|
|
|
|
int runq_fuzz = 1;
|
|
|
|
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
|
|
|
|
#endif
|
|
|
|
|
1999-08-19 00:06:53 +00:00
|
|
|
/*
|
2002-10-12 05:32:24 +00:00
|
|
|
* Find the highest priority process on the run queue.
|
1999-08-19 00:06:53 +00:00
|
|
|
*/
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *
|
2001-02-12 00:20:08 +00:00
|
|
|
runq_choose(struct runq *rq)
|
1999-08-19 00:06:53 +00:00
|
|
|
{
|
2001-02-12 00:20:08 +00:00
|
|
|
struct rqhead *rqh;
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *ts;
|
2001-02-12 00:20:08 +00:00
|
|
|
int pri;
|
1999-08-19 00:06:53 +00:00
|
|
|
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
while ((pri = runq_findbit(rq)) != -1) {
|
2001-02-12 00:20:08 +00:00
|
|
|
rqh = &rq->rq_queues[pri];
|
2004-09-01 06:42:02 +00:00
|
|
|
#if defined(SMP) && defined(SCHED_4BSD)
|
|
|
|
/* fuzz == 1 is normal.. 0 or less are ignored */
|
|
|
|
if (runq_fuzz > 1) {
|
|
|
|
/*
|
|
|
|
* In the first couple of entries, check if
|
|
|
|
* there is one for our CPU as a preference.
|
|
|
|
*/
|
|
|
|
int count = runq_fuzz;
|
|
|
|
int cpu = PCPU_GET(cpuid);
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *ts2;
|
|
|
|
ts2 = ts = TAILQ_FIRST(rqh);
|
2004-09-01 06:42:02 +00:00
|
|
|
|
2006-12-06 06:34:57 +00:00
|
|
|
while (count-- && ts2) {
|
|
|
|
if (ts->ts_thread->td_lastcpu == cpu) {
|
|
|
|
ts = ts2;
|
2004-09-01 06:42:02 +00:00
|
|
|
break;
|
|
|
|
}
|
2006-12-06 06:34:57 +00:00
|
|
|
ts2 = TAILQ_NEXT(ts2, ts_procq);
|
2004-09-01 06:42:02 +00:00
|
|
|
}
|
2005-12-28 17:13:31 +00:00
|
|
|
} else
|
2004-09-01 06:42:02 +00:00
|
|
|
#endif
|
2006-12-06 06:34:57 +00:00
|
|
|
ts = TAILQ_FIRST(rqh);
|
|
|
|
KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
CTR3(KTR_RUNQ,
|
2006-12-06 06:34:57 +00:00
|
|
|
"runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
|
|
|
|
return (ts);
|
2001-02-12 00:20:08 +00:00
|
|
|
}
|
|
|
|
CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
|
|
|
|
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
return (NULL);
|
2001-02-12 00:20:08 +00:00
|
|
|
}
|
|
|
|
|
2007-01-04 08:39:58 +00:00
|
|
|
struct td_sched *
|
2007-02-08 01:52:25 +00:00
|
|
|
runq_choose_from(struct runq *rq, u_char idx)
|
2007-01-04 08:39:58 +00:00
|
|
|
{
|
|
|
|
struct rqhead *rqh;
|
|
|
|
struct td_sched *ts;
|
|
|
|
int pri;
|
|
|
|
|
2007-01-04 12:10:58 +00:00
|
|
|
if ((pri = runq_findbit_from(rq, idx)) != -1) {
|
2007-01-04 08:39:58 +00:00
|
|
|
rqh = &rq->rq_queues[pri];
|
|
|
|
ts = TAILQ_FIRST(rqh);
|
|
|
|
KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
|
|
|
|
CTR4(KTR_RUNQ,
|
|
|
|
"runq_choose_from: pri=%d kse=%p idx=%d rqh=%p",
|
|
|
|
pri, ts, ts->ts_rqindex, rqh);
|
|
|
|
return (ts);
|
|
|
|
}
|
|
|
|
CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
2001-02-12 00:20:08 +00:00
|
|
|
/*
|
2006-12-06 06:34:57 +00:00
|
|
|
* Remove the thread from the queue specified by its priority, and clear the
|
2001-02-12 00:20:08 +00:00
|
|
|
* corresponding status bit if the queue becomes empty.
|
2007-01-23 08:46:51 +00:00
|
|
|
* Caller must set state afterwards.
|
2001-02-12 00:20:08 +00:00
|
|
|
*/
|
|
|
|
void
|
2006-12-06 06:34:57 +00:00
|
|
|
runq_remove(struct runq *rq, struct td_sched *ts)
|
2007-01-04 08:39:58 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
runq_remove_idx(rq, ts, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2007-02-08 01:52:25 +00:00
|
|
|
runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
|
2001-02-12 00:20:08 +00:00
|
|
|
{
|
|
|
|
struct rqhead *rqh;
|
2007-02-08 01:52:25 +00:00
|
|
|
u_char pri;
|
2001-02-12 00:20:08 +00:00
|
|
|
|
2006-12-06 06:34:57 +00:00
|
|
|
KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
|
2007-01-04 08:39:58 +00:00
|
|
|
("runq_remove_idx: process swapped out"));
|
2006-12-06 06:34:57 +00:00
|
|
|
pri = ts->ts_rqindex;
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
|
2001-02-12 00:20:08 +00:00
|
|
|
rqh = &rq->rq_queues[pri];
|
2007-01-04 08:39:58 +00:00
|
|
|
CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
|
2006-12-06 06:34:57 +00:00
|
|
|
ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
{
|
|
|
|
struct td_sched *nts;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(nts, rqh, ts_procq)
|
|
|
|
if (nts == ts)
|
|
|
|
break;
|
|
|
|
if (ts != nts)
|
|
|
|
panic("runq_remove_idx: ts %p not on rqindex %d",
|
|
|
|
ts, pri);
|
|
|
|
}
|
2006-12-06 06:34:57 +00:00
|
|
|
TAILQ_REMOVE(rqh, ts, ts_procq);
|
2001-02-12 00:20:08 +00:00
|
|
|
if (TAILQ_EMPTY(rqh)) {
|
2007-01-04 08:39:58 +00:00
|
|
|
CTR0(KTR_RUNQ, "runq_remove_idx: empty");
|
2001-02-12 00:20:08 +00:00
|
|
|
runq_clrbit(rq, pri);
|
2007-01-04 08:39:58 +00:00
|
|
|
if (idx != NULL && *idx == pri)
|
|
|
|
*idx = (pri + 1) % RQ_NQS;
|
1999-08-19 00:06:53 +00:00
|
|
|
}
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
}
|
|
|
|
|
2004-09-05 02:09:54 +00:00
|
|
|
/****** functions that are temporarily here ***********/
|
|
|
|
#include <vm/uma.h>
|
|
|
|
extern struct mtx kse_zombie_lock;
|
|
|
|
|
|
|
|
/*
 * Allocate scheduler specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
	/* Intentionally empty: no per-process state is allocated here. */
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* thread is being either created or recycled.
|
|
|
|
* Fix up the per-scheduler resources associated with it.
|
|
|
|
* Called from:
|
|
|
|
* sched_fork_thread()
|
|
|
|
* thread_dtor() (*may go away)
|
|
|
|
* thread_init() (*may go away)
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
sched_newthread(struct thread *td)
|
|
|
|
{
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *ts;
|
2004-09-05 02:09:54 +00:00
|
|
|
|
2006-12-06 06:34:57 +00:00
|
|
|
ts = (struct td_sched *) (td + 1);
|
|
|
|
bzero(ts, sizeof(*ts));
|
|
|
|
td->td_sched = ts;
|
|
|
|
ts->ts_thread = td;
|
2004-09-05 02:09:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* KERN_SWITCH_INCLUDE */
|