Threading cleanup.. part 2 of several.

Make part of John Birrell's KSE patch permanent.

Specifically, remove:
Any reference of the ksegrp structure. This feature was never fully
utilised and made things overly complicated.
All code in the scheduler that tried to make threaded programs fair to
unthreaded programs. Libpthread processes will already do this to some
extent and libthr processes already disable it.

Also:
Since this makes such a big change to the scheduler(s), take the
opportunity to rename some structures and elements that had to be moved
anyhow. This makes the code a lot more readable.

The ULE scheduler compiles again but I have no idea if it works.

The 4bsd scheduler still requires a little cleaning and some functions
that now do ALMOST nothing will go away, but I thought I'd do that as a
separate commit.

Tested by David Xu and Dan Eischen using libthr and libpthread.
commit ad1e7d285a
parent 3541d6d881
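
Before the hunks: the one structural move this diff repeats everywhere is
that scheduling state which used to hang off struct ksegrp now lives
directly on struct thread or struct proc. As a reading aid, here is a
minimal sketch of that field migration. The member names are taken from
the hunks below; the struct bodies are abbreviated to just those members,
and the u_char/int types shown are illustrative assumptions, not the full
kernel definitions.

/* Sketch only -- abbreviated from the fields touched in this diff. */

/* Old three-level model: priority and KSE upcall state per ksegrp. */
struct ksegrp {
	u_char	kg_pri_class;		/* PRI_TIMESHARE, PRI_ITHD, ... */
	u_char	kg_user_pri;		/* user-mode priority */
	u_char	kg_base_user_pri;
	struct kse_thr_mailbox *kg_completed;	/* completed-context list */
	int	kg_numupcalls;		/* upcalls in this group */
};

/* New two-level model: priority state moves onto the thread... */
struct thread {
	u_char	td_pri_class;
	u_char	td_user_pri;
	u_char	td_base_user_pri;
	/* ... */
};

/* ...and KSE upcall bookkeeping moves onto the process. */
struct proc {
	struct kse_thr_mailbox *p_completed;
	int	p_numupcalls;
	int	p_upquantum;		/* upcall quantum, set in kse_create() */
	/* ... */
};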
@@ -1121,11 +1121,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif

preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
preload_bootstrap_relocate(KERNBASE);

@@ -455,11 +455,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

@@ -422,11 +422,7 @@ initarm(void *arg, void *arg2)

/* Set stack for exception handlers */

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

@@ -429,11 +429,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

@@ -427,11 +427,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

@@ -493,11 +493,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

@@ -292,12 +292,7 @@ DB_SHOW_COMMAND(thread, db_show_thread)
td = kdb_thread;

db_printf("Thread %d at %p:\n", td->td_tid, td);
#ifdef KSE
db_printf(" proc (pid %d): %p ", td->td_proc->p_pid, td->td_proc);
db_printf(" ksegrp: %p\n", td->td_ksegrp);
#else
db_printf(" proc (pid %d): %p\n", td->td_proc->p_pid, td->td_proc);
#endif
if (td->td_name[0] != '\0')
db_printf(" name: %s\n", td->td_name);
db_printf(" flags: %#x ", td->td_flags);
@@ -116,7 +116,9 @@ procfs_doprocstatus(PFS_FILL_ARGS)
#ifdef KSE
if (p->p_flag & P_SA)
wmesg = "-kse- ";
else {
else
#endif
{
tdfirst = FIRST_THREAD_IN_PROC(p);
if (tdfirst->td_wchan != NULL) {
KASSERT(tdfirst->td_wmesg != NULL,

@@ -125,15 +127,6 @@ procfs_doprocstatus(PFS_FILL_ARGS)
} else
wmesg = "nochan";
}
#else
tdfirst = FIRST_THREAD_IN_PROC(p);
if (tdfirst->td_wchan != NULL) {
KASSERT(tdfirst->td_wmesg != NULL,
("wchan %p has no wmesg", tdfirst->td_wchan));
wmesg = tdfirst->td_wmesg;
} else
wmesg = "nochan";
#endif
mtx_unlock_spin(&sched_lock);

if (p->p_sflag & PS_INMEM) {
@@ -2058,11 +2058,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif

metadata_missing = 0;
if (bootinfo.bi_modulep) {

@@ -2297,7 +2293,7 @@ init386(first)
_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

/* setup proc 0's pcb */
thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
thread0.td_pcb->pcb_flags = 0;
#ifdef PAE
thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else

@@ -776,12 +776,10 @@ ia64_init(void)
msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
msgbufinit(msgbufp, MSGBUF_SIZE);

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif

/*
* Init mapping for kernel stack for proc 0
*/
proc0kstack = (vm_offset_t)kstack;
thread0.td_kstack = proc0kstack;
thread0.td_kstack_pages = KSTACK_PAGES;
@@ -95,9 +95,6 @@ static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0 __aligned(8);
#ifdef KSE
struct ksegrp ksegrp0;
#endif
struct vmspace vmspace0;
struct proc *initproc;

@@ -366,34 +363,16 @@ proc0_init(void *dummy __unused)
struct proc *p;
unsigned i;
struct thread *td;
#ifdef KSE
struct ksegrp *kg;
#endif

GIANT_REQUIRED;
p = &proc0;
td = &thread0;
#ifdef KSE
kg = &ksegrp0;
#endif

/*
* Initialize magic number.
*/
p->p_magic = P_MAGIC;

#ifdef KSE
/*
* Initialize thread, process and ksegrp structures.
*/
procinit(); /* set up proc zone */
threadinit(); /* set up thead, upcall and KSEGRP zones */

/*
* Initialise scheduler resources.
* Add scheduler specific parts to proc, ksegrp, thread as needed.
*/
#else
/*
* Initialize thread and process structures.
*/

@@ -404,7 +383,6 @@ proc0_init(void *dummy __unused)
* Initialise scheduler resources.
* Add scheduler specific parts to proc, thread as needed.
*/
#endif
schedinit(); /* scheduler gets its house in order */
/*
* Initialize sleep queue hash table

@@ -440,15 +418,9 @@ proc0_init(void *dummy __unused)
STAILQ_INIT(&p->p_ktr);
p->p_nice = NZERO;
td->td_state = TDS_RUNNING;
#ifdef KSE
kg->kg_pri_class = PRI_TIMESHARE;
kg->kg_user_pri = PUSER;
kg->kg_base_user_pri = PUSER;
#else
td->td_pri_class = PRI_TIMESHARE;
td->td_user_pri = PUSER;
td->td_base_user_pri = PUSER;
#endif
td->td_priority = PVM;
td->td_base_pri = PUSER;
td->td_oncpu = 0;

@@ -758,11 +730,7 @@ kick_init(const void *udata __unused)
td = FIRST_THREAD_IN_PROC(initproc);
mtx_lock_spin(&sched_lock);
TD_SET_CAN_RUN(td);
#ifdef KSE
setrunqueue(td, SRQ_BORING); /* XXXKSE */
#else
setrunqueue(td, SRQ_BORING);
#endif
mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
@@ -203,23 +203,12 @@ hardclock_cpu(int usermode)
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
sched_tick();
#ifdef KSE
#if 0 /* for now do nothing */
if (p->p_flag & P_SA) {
/* XXXKSE What to do? */
} else {
pstats = p->p_stats;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
p->p_sflag |= PS_ALRMPEND;
td->td_flags |= TDF_ASTPENDING;
}
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
p->p_sflag |= PS_PROFPEND;
td->td_flags |= TDF_ASTPENDING;
}
/* XXXKSE What to do? Should do more. */
}
#else
#endif
#endif
pstats = p->p_stats;
if (usermode &&
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&

@@ -232,7 +221,6 @@ hardclock_cpu(int usermode)
p->p_sflag |= PS_PROFPEND;
td->td_flags |= TDF_ASTPENDING;
}
#endif
mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);

#ifdef HWPMC_HOOKS
@@ -205,9 +205,6 @@ fork1(td, flags, pages, procp)
struct filedesc *fd;
struct filedesc_to_leader *fdtol;
struct thread *td2;
#ifdef KSE
struct ksegrp *kg2;
#endif
struct sigacts *newsigacts;
int error;

@@ -477,9 +474,6 @@ fork1(td, flags, pages, procp)
* then copy the section that is copied directly from the parent.
*/
td2 = FIRST_THREAD_IN_PROC(p2);
#ifdef KSE
kg2 = FIRST_KSEGRP_IN_PROC(p2);
#endif

/* Allocate and switch to an alternate kstack if specified. */
if (pages != 0)

@@ -492,19 +486,11 @@ fork1(td, flags, pages, procp)
__rangeof(struct proc, p_startzero, p_endzero));
bzero(&td2->td_startzero,
__rangeof(struct thread, td_startzero, td_endzero));
#ifdef KSE
bzero(&kg2->kg_startzero,
__rangeof(struct ksegrp, kg_startzero, kg_endzero));
#endif

bcopy(&p1->p_startcopy, &p2->p_startcopy,
__rangeof(struct proc, p_startcopy, p_endcopy));
bcopy(&td->td_startcopy, &td2->td_startcopy,
__rangeof(struct thread, td_startcopy, td_endcopy));
#ifdef KSE
bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
__rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
#endif

td2->td_sigstk = td->td_sigstk;
td2->td_sigmask = td->td_sigmask;

@@ -526,11 +512,7 @@ fork1(td, flags, pages, procp)

mtx_unlock_spin(&sched_lock);
p2->p_ucred = crhold(td->td_ucred);
#ifdef KSE
td2->td_ucred = crhold(p2->p_ucred); /* XXXKSE */
#else
td2->td_ucred = crhold(p2->p_ucred);
#endif
#ifdef AUDIT
audit_proc_fork(p1, p2);
#endif
@@ -79,11 +79,7 @@ idle_setup(void *dummy)
td = FIRST_THREAD_IN_PROC(p);
TD_SET_CAN_RUN(td);
td->td_flags |= TDF_IDLETD;
#ifdef KSE
sched_class(td->td_ksegrp, PRI_IDLE);
#else
sched_class(td, PRI_IDLE);
#endif
sched_prio(td, PRI_MAX_IDLE);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);

@@ -296,11 +296,7 @@ ithread_create(const char *name)
panic("kthread_create() failed with %d", error);
td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
mtx_lock_spin(&sched_lock);
#ifdef KSE
td->td_ksegrp->kg_pri_class = PRI_ITHD;
#else
td->td_pri_class = PRI_ITHD;
#endif
sched_class(td, PRI_ITHD);
TD_SET_IWAIT(td);
mtx_unlock_spin(&sched_lock);
td->td_pflags |= TDP_ITHREAD;
@@ -48,9 +48,6 @@ __FBSDID("$FreeBSD$");
#include <vm/uma.h>

#ifdef KSE
/*
* KSEGRP related storage.
*/
static uma_zone_t upcall_zone;

/* DEBUG ONLY */

@@ -86,24 +83,24 @@ upcall_free(struct kse_upcall *ku)
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
upcall_link(struct kse_upcall *ku, struct proc *p)
{

mtx_assert(&sched_lock, MA_OWNED);
TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
ku->ku_ksegrp = kg;
kg->kg_numupcalls++;
TAILQ_INSERT_TAIL(&p->p_upcalls, ku, ku_link);
ku->ku_proc = p;
p->p_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
struct ksegrp *kg = ku->ku_ksegrp;
struct proc *p = ku->ku_proc;

mtx_assert(&sched_lock, MA_OWNED);
KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
kg->kg_numupcalls--;
TAILQ_REMOVE(&p->p_upcalls, ku, ku_link);
p->p_numupcalls--;
upcall_stash(ku);
}

@@ -305,7 +302,6 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
{
#ifdef KSE
struct proc *p;
struct ksegrp *kg;
struct kse_upcall *ku, *ku2;
int error, count;

@@ -316,11 +312,10 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
return (EINVAL);

kg = td->td_ksegrp;
count = 0;

/*
* Calculate the existing non-exiting upcalls in this ksegroup.
* Calculate the existing non-exiting upcalls in this process.
* If we are the last upcall but there are still other threads,
* then do not exit. We need the other threads to be able to
* complete whatever they are doing.

@@ -330,12 +325,12 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
*/
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
FOREACH_UPCALL_IN_GROUP(kg, ku2) {
FOREACH_UPCALL_IN_PROC(p, ku2) {
if (ku2->ku_flags & KUF_EXITING)
count++;
}
if ((kg->kg_numupcalls - count) == 1 &&
(kg->kg_numthreads > 1)) {
if ((p->p_numupcalls - count) == 1 &&
(p->p_numthreads > 1)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return (EDEADLK);

@@ -360,20 +355,12 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
mtx_lock_spin(&sched_lock);
upcall_remove(td);
if (p->p_numthreads != 1) {
/*
* If we are not the last thread, but we are the last
* thread in this ksegrp, then by definition this is not
* the last group and we need to clean it up as well.
* thread_exit will clean up the kseg as needed.
*/
thread_stopped(p);
thread_exit();
/* NOTREACHED */
}
/*
* This is the last thread. Just return to the user.
* We know that there is only one ksegrp too, as any others
* would have been discarded in previous calls to thread_exit().
* Effectively we have left threading mode..
* The only real thing left to do is ensure that the
* scheduler sets out concurrency back to 1 as that may be a

@@ -409,7 +396,6 @@ kse_release(struct thread *td, struct kse_release_args *uap)
{
#ifdef KSE
struct proc *p;
struct ksegrp *kg;
struct kse_upcall *ku;
struct timespec timeout;
struct timeval tv;

@@ -417,7 +403,6 @@ kse_release(struct thread *td, struct kse_release_args *uap)
int error;

p = td->td_proc;
kg = td->td_ksegrp;
if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
return (EINVAL);
if (uap->timeout != NULL) {

@@ -452,14 +437,14 @@ kse_release(struct thread *td, struct kse_release_args *uap)
} else {
if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
((ku->ku_mflags & KMF_NOCOMPLETED) ||
(kg->kg_completed == NULL))) {
kg->kg_upsleeps++;
(p->p_completed == NULL))) {
p->p_upsleeps++;
td->td_kflags |= TDK_KSEREL;
error = msleep(&kg->kg_completed, &p->p_mtx,
error = msleep(&p->p_completed, &p->p_mtx,
PPAUSE|PCATCH, "kserel",
(uap->timeout ? tvtohz(&tv) : 0));
td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
kg->kg_upsleeps--;
p->p_upsleeps--;
}
PROC_UNLOCK(p);
}

@@ -482,7 +467,6 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
#ifdef KSE
struct proc *p;
struct ksegrp *kg;
struct kse_upcall *ku;
struct thread *td2;

@@ -495,23 +479,18 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (uap->mbx) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
FOREACH_UPCALL_IN_GROUP(kg, ku) {
if (ku->ku_mailbox == uap->mbx)
break;
}
if (ku)
FOREACH_UPCALL_IN_PROC(p, ku) {
if (ku->ku_mailbox == uap->mbx)
break;
}
} else {
kg = td->td_ksegrp;
if (kg->kg_upsleeps) {
if (p->p_upsleeps) {
mtx_unlock_spin(&sched_lock);
wakeup(&kg->kg_completed);
wakeup(&p->p_completed);
PROC_UNLOCK(p);
return (0);
}
ku = TAILQ_FIRST(&kg->kg_upcalls);
ku = TAILQ_FIRST(&p->p_upcalls);
}
if (ku == NULL) {
mtx_unlock_spin(&sched_lock);

@@ -526,7 +505,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
if (!(td2->td_kflags & TDK_WAKEUP)) {
td2->td_kflags |= TDK_WAKEUP;
if (td2->td_kflags & TDK_KSEREL)
sleepq_remove(td2, &kg->kg_completed);
sleepq_remove(td2, &p->p_completed);
else
sleepq_remove(td2, &p->p_siglist);
}

@@ -542,11 +521,11 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
}

/*
* No new KSEG: first call: use current KSE, don't schedule an upcall
* newgroup == 0: first call: use current KSE, don't schedule an upcall
* All other situations, do allocate max new KSEs and schedule an upcall.
*
* XXX should be changed so that 'first' behaviour lasts for as long
* as you have not made a kse in this ksegrp. i.e. as long as we do not have
* as you have not made a thread in this proc. i.e. as long as we do not have
* a mailbox..
*/
/* struct kse_create_args {
@@ -557,8 +536,6 @@ int
kse_create(struct thread *td, struct kse_create_args *uap)
{
#ifdef KSE
struct ksegrp *newkg;
struct ksegrp *kg;
struct proc *p;
struct kse_mailbox mbx;
struct kse_upcall *newku;

@@ -566,7 +543,21 @@ kse_create(struct thread *td, struct kse_create_args *uap)
struct thread *newtd;

p = td->td_proc;
kg = td->td_ksegrp;

/*
* Processes using the other threading model can't
* suddenly start calling this one
* XXX maybe...
*/
if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
PROC_UNLOCK(p);
return (EINVAL);
}
if (!(p->p_flag & P_SA)) {
first = 1;
p->p_flag |= P_SA|P_HADTHREADS;
}

if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
return (err);

@@ -577,123 +568,40 @@ kse_create(struct thread *td, struct kse_create_args *uap)
* If the new UTS mailbox says that this
* will be a BOUND lwp, then it had better
* have its thread mailbox already there.
* In addition, this ksegrp will be limited to
* a concurrency of 1. There is more on this later.
*/
if (mbx.km_flags & KMF_BOUND) {
if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) {
if (mbx.km_curthread == NULL)
return (EINVAL);
ncpus = 1;
if (!(uap->newgroup || first))
return (EINVAL);
} else {
sa = TDP_SA;
}

PROC_LOCK(p);
/*
* Processes using the other threading model can't
* suddenly start calling this one
*/
if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
PROC_UNLOCK(p);
return (EINVAL);
}

/*
* Limit it to NCPU upcall contexts per ksegrp in any case.
* There is a small race here as we don't hold proclock
* until we inc the ksegrp count, but it's not really a big problem
* if we get one too many, but we save a proc lock.
*/
if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) {
PROC_UNLOCK(p);
return (EPROCLIM);
}

if (!(p->p_flag & P_SA)) {
first = 1;
p->p_flag |= P_SA|P_HADTHREADS;
}

PROC_UNLOCK(p);
/*
* Now pay attention!
* If we are going to be bound, then we need to be either
* a new group, or the first call ever. In either
* case we will be creating (or be) the only thread in a group.
* and the concurrency will be set to 1.
* This is not quite right, as we may still make ourself
* bound after making other ksegrps but it will do for now.
* The library will only try do this much.
*/
if (!sa && !(uap->newgroup || first))
return (EINVAL);

if (uap->newgroup) {
newkg = ksegrp_alloc();
bzero(&newkg->kg_startzero,
__rangeof(struct ksegrp, kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
__rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
sched_init_concurrency(newkg);
PROC_LOCK(p);
if (p->p_numksegrps >= max_groups_per_proc) {
/*
* Limit it to NCPU upcall contexts per proc in any case.
*/
if (p->p_numupcalls >= ncpus) {
PROC_UNLOCK(p);
ksegrp_free(newkg);
return (EPROCLIM);
}
ksegrp_link(newkg, p);
mtx_lock_spin(&sched_lock);
sched_fork_ksegrp(td, newkg);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
/*
* We want to make a thread in our own ksegrp.
* We want to make a thread (bound or unbound).
* If we are just the first call, either kind
* is ok, but if not then either we must be
* already an upcallable thread to make another,
* or a bound thread to make one of those.
* Once again, not quite right but good enough for now.. XXXKSE
* XXX bogus
*/
PROC_UNLOCK(p);
if (!first && ((td->td_pflags & TDP_SA) != sa))
return (EINVAL);

newkg = kg;
if (p->p_numupcalls == 0) {
sched_set_concurrency(p, ncpus);
}
}

/*
* This test is a bit "indirect".
* It might simplify things if we made a direct way of testing
* if a ksegrp has been worked on before.
* In the case of a bound request and the concurrency being set to
* one, the concurrency will already be 1 so it's just inefficient
* but not dangerous to call this again. XXX
*/
if (newkg->kg_numupcalls == 0) {
/*
* Initialize KSE group with the appropriate
* concurrency.
*
* For a multiplexed group, create as as much concurrency
* as the number of physical cpus.
* This increases concurrency in the kernel even if the
* userland is not MP safe and can only run on a single CPU.
* In an ideal world, every physical cpu should execute a
* thread. If there is enough concurrency, threads in the
* kernel can be executed parallel on different cpus at
* full speed without being restricted by the number of
* upcalls the userland provides.
* Adding more upcall structures only increases concurrency
* in userland.
*
* For a bound thread group, because there is only one thread
* in the group, we only set the concurrency for the group
* to 1. A thread in this kind of group will never schedule
* an upcall when blocked. This simulates pthread system
* scope thread behaviour.
*/
sched_set_concurrency(newkg, ncpus);
}
/*
* Even bound LWPs get a mailbox and an upcall to hold it.
*/
@@ -711,33 +619,38 @@ kse_create(struct thread *td, struct kse_create_args *uap)

PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (newkg->kg_numupcalls >= ncpus) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
upcall_free(newku);
return (EPROCLIM);
if (sa) {
if( p->p_numupcalls >= ncpus) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
upcall_free(newku);
return (EPROCLIM);
}

/*
* If we are the first time, and a normal thread,
* then transfer all the signals back to the 'process'.
* SA threading will make a special thread to handle them.
*/
if (first) {
sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
&td->td_sigqueue.sq_signals);
SIGFILLSET(td->td_sigmask);
SIG_CANTMASK(td->td_sigmask);
}
} else {
/* should subtract from process count (later) */
}

/*
* If we are the first time, and a normal thread,
* then transfer all the signals back to the 'process'.
* SA threading will make a special thread to handle them.
*/
if (first && sa) {
sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
&td->td_sigqueue.sq_signals);
SIGFILLSET(td->td_sigmask);
SIG_CANTMASK(td->td_sigmask);
}

/*
* Make the new upcall available to the ksegrp.
* Make the new upcall available to the process.
* It may or may not use it, but it's available.
*/
upcall_link(newku, newkg);
upcall_link(newku, p);
PROC_UNLOCK(p);
if (mbx.km_quantum)
newkg->kg_upquantum = max(1, mbx.km_quantum / tick);
/* XXX should this be in the thread? */
p->p_upquantum = max(1, mbx.km_quantum / tick);

/*
* Each upcall structure has an owner thread, find which

@@ -745,8 +658,11 @@ kse_create(struct thread *td, struct kse_create_args *uap)
*/
if (uap->newgroup) {
/*
* Because the new ksegrp hasn't a thread,
* create an initial upcall thread to own it.
* The newgroup parameter now means
* "bound, non SA, system scope"
* It is only used for the interrupt thread at the
* moment I think
* We'll rename it later.
*/
newtd = thread_schedule_upcall(td, newku);
} else {

@@ -771,6 +687,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
/*
* Let the UTS instance know its LWPID.
* It doesn't really care. But the debugger will.
* XXX warning.. remember that this moves.
*/
suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

@@ -785,6 +702,14 @@ kse_create(struct thread *td, struct kse_create_args *uap)

if (sa) {
newtd->td_pflags |= TDP_SA;
/*
* If we are starting a new thread, kick it off.
*/
if (newtd != td) {
mtx_lock_spin(&sched_lock);
setrunqueue(newtd, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
} else {
newtd->td_pflags &= ~TDP_SA;

@@ -816,17 +741,11 @@ kse_create(struct thread *td, struct kse_create_args *uap)
_PRELE(p);
}
PROC_UNLOCK(p);
mtx_lock_spin(&sched_lock);
setrunqueue(newtd, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
}

/*
* If we are starting a new thread, kick it off.
*/
if (newtd != td) {
mtx_lock_spin(&sched_lock);
setrunqueue(newtd, SRQ_BORING);
mtx_unlock_spin(&sched_lock);
}
return (0);
#else /* !KSE */
return (EOPNOTSUPP);
@@ -886,20 +805,18 @@ kse_GC(void)
/*
* Store the thread context in the UTS's mailbox.
* then add the mailbox at the head of a list we are building in user space.
* The list is anchored in the ksegrp structure.
* The list is anchored in the proc structure.
*/
int
thread_export_context(struct thread *td, int willexit)
{
struct proc *p;
struct ksegrp *kg;
uintptr_t mbx;
void *addr;
int error = 0, sig;
mcontext_t mc;

p = td->td_proc;
kg = td->td_ksegrp;

/*
* Post sync signal, or process SIGKILL and SIGSTOP.

@@ -940,14 +857,14 @@ thread_export_context(struct thread *td, int willexit)
* entry into this one
*/
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
mbx = (uintptr_t)p->p_completed;
if (suword(addr, mbx)) {
error = EFAULT;
goto bad;
}
PROC_LOCK(p);
if (mbx == (uintptr_t)kg->kg_completed) {
kg->kg_completed = td->td_mailbox;
if (mbx == (uintptr_t)p->p_completed) {
p->p_completed = td->td_mailbox;
/*
* The thread context may be taken away by
* other upcall threads when we unlock

@@ -970,19 +887,18 @@ thread_export_context(struct thread *td, int willexit)
}

/*
* Take the list of completed mailboxes for this KSEGRP and put them on this
* Take the list of completed mailboxes for this Process and put them on this
* upcall's mailbox as it's the next one going up.
*/
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
thread_link_mboxes(struct proc *p, struct kse_upcall *ku)
{
struct proc *p = kg->kg_proc;
void *addr;
uintptr_t mbx;

addr = (void *)(&ku->ku_mailbox->km_completed);
for (;;) {
mbx = (uintptr_t)kg->kg_completed;
mbx = (uintptr_t)p->p_completed;
if (suword(addr, mbx)) {
PROC_LOCK(p);
psignal(p, SIGSEGV);

@@ -990,8 +906,8 @@ thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
return (EFAULT);
}
PROC_LOCK(p);
if (mbx == (uintptr_t)kg->kg_completed) {
kg->kg_completed = NULL;
if (mbx == (uintptr_t)p->p_completed) {
p->p_completed = NULL;
PROC_UNLOCK(p);
break;
}

@@ -1109,7 +1025,7 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
*/
bcopy(&td->td_startcopy, &td2->td_startcopy,
__rangeof(struct thread, td_startcopy, td_endcopy));
thread_link(td2, ku->ku_ksegrp);
thread_link(td2, ku->ku_proc);
/* inherit parts of blocked thread's context as a good template */
cpu_set_upcall(td2, td);
/* Let the new thread become owner of the upcall */

@@ -1210,7 +1126,6 @@ void
thread_user_enter(struct thread *td)
{
struct proc *p = td->td_proc;
struct ksegrp *kg;
struct kse_upcall *ku;
struct kse_thr_mailbox *tmbx;
uint32_t flags;

@@ -1233,7 +1148,6 @@ thread_user_enter(struct thread *td)
* note where our mailbox is.
*/

kg = td->td_ksegrp;
ku = td->td_upcall;

KASSERT(ku != NULL, ("no upcall owned"));
@@ -1291,10 +1205,9 @@ int
thread_userret(struct thread *td, struct trapframe *frame)
{
struct kse_upcall *ku;
struct ksegrp *kg, *kg2;
struct proc *p;
struct timespec ts;
int error = 0, upcalls, uts_crit;
int error = 0, uts_crit;

/* Nothing to do with bound thread */
if (!(td->td_pflags & TDP_SA))

@@ -1311,7 +1224,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
}

p = td->td_proc;
kg = td->td_ksegrp;
ku = td->td_upcall;

/*

@@ -1323,9 +1235,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
if (TD_CAN_UNBIND(td)) {
td->td_pflags &= ~TDP_CAN_UNBIND;
if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
(kg->kg_completed == NULL) &&
(p->p_completed == NULL) &&
(ku->ku_flags & KUF_DOUPCALL) == 0 &&
(kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
(p->p_upquantum && ticks < p->p_nextupcall)) {
nanotime(&ts);
error = copyout(&ts,
(caddr_t)&ku->ku_mailbox->km_timeofday,

@@ -1346,8 +1258,8 @@ thread_userret(struct thread *td, struct trapframe *frame)
} else if (td->td_mailbox && (ku == NULL)) {
thread_export_context(td, 1);
PROC_LOCK(p);
if (kg->kg_upsleeps)
wakeup(&kg->kg_completed);
if (p->p_upsleeps)
wakeup(&p->p_completed);
WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
"thread exiting in userret");
sigqueue_flush(&td->td_sigqueue);

@@ -1366,14 +1278,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
mtx_lock_spin(&sched_lock);
p->p_maxthrwaits++;
while (p->p_numthreads > max_threads_per_proc) {
upcalls = 0;
FOREACH_KSEGRP_IN_PROC(p, kg2) {
if (kg2->kg_numupcalls == 0)
upcalls++;
else
upcalls += kg2->kg_numupcalls;
}
if (upcalls >= max_threads_per_proc)
if (p->p_numupcalls >= max_threads_per_proc)
break;
mtx_unlock_spin(&sched_lock);
if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,

@@ -1391,7 +1296,7 @@ thread_userret(struct thread *td, struct trapframe *frame)

if (td->td_pflags & TDP_UPCALLING) {
uts_crit = 0;
kg->kg_nextupcall = ticks + kg->kg_upquantum;
p->p_nextupcall = ticks + p->p_upquantum;
/*
* There is no more work to do and we are going to ride
* this thread up to userland as an upcall.

@@ -1436,7 +1341,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
* this KSE's mailbox.
*/
if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
(error = thread_link_mboxes(kg, ku)) != 0)
(error = thread_link_mboxes(p, ku)) != 0)
goto out;
}
if (!uts_crit) {

@@ -1479,7 +1384,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
void
thread_continued(struct proc *p)
{
struct ksegrp *kg;
struct kse_upcall *ku;
struct thread *td;

@@ -1490,18 +1394,13 @@ thread_continued(struct proc *p)
return;

if (p->p_flag & P_TRACED) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
td = TAILQ_FIRST(&kg->kg_threads);
if (td == NULL)
continue;
/* not a SA group, nothing to do */
if (!(td->td_pflags & TDP_SA))
continue;
FOREACH_UPCALL_IN_GROUP(kg, ku) {
td = TAILQ_FIRST(&p->p_threads);
if (td && (td->td_pflags & TDP_SA)) {
FOREACH_UPCALL_IN_PROC(p, ku) {
mtx_lock_spin(&sched_lock);
ku->ku_flags |= KUF_DOUPCALL;
mtx_unlock_spin(&sched_lock);
wakeup(&kg->kg_completed);
wakeup(&p->p_completed);
}
}
}
@@ -581,11 +581,7 @@ poll_idle(void)
rtp.prio = RTP_PRIO_MAX; /* lowest priority */
rtp.type = RTP_PRIO_IDLE;
mtx_lock_spin(&sched_lock);
#ifdef KSE
rtp_to_pri(&rtp, td->td_ksegrp);
#else
rtp_to_pri(&rtp, td);
#endif
mtx_unlock_spin(&sched_lock);

for (;;) {
@@ -141,9 +141,6 @@ proc_dtor(void *mem, int size, void *arg)
{
struct proc *p;
struct thread *td;
#if defined(INVARIANTS) && defined(KSE)
struct ksegrp *kg;
#endif

/* INVARIANTS checks go here */
p = (struct proc *)mem;

@@ -151,14 +148,7 @@ proc_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
#ifdef KSE
KASSERT((p->p_numksegrps == 1), ("free proc with > 1 ksegrp"));
#endif
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
#ifdef KSE
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
#endif
KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif

@@ -181,25 +171,15 @@ proc_init(void *mem, int size, int flags)
{
struct proc *p;
struct thread *td;
#ifdef KSE
struct ksegrp *kg;
#endif

p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
td = thread_alloc();
#ifdef KSE
kg = ksegrp_alloc();
#endif
bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
p->p_stats = pstats_alloc();
#ifdef KSE
proc_linkup(p, kg, td);
sched_newproc(p, kg, td);
#else
proc_linkup(p, td);
#endif
sched_newproc(p, td);
return (0);
}

@@ -215,9 +195,6 @@ proc_fini(void *mem, int size)

p = (struct proc *)mem;
pstats_free(p->p_stats);
#ifdef KSE
ksegrp_free(FIRST_KSEGRP_IN_PROC(p));
#endif
thread_free(FIRST_THREAD_IN_PROC(p));
mtx_destroy(&p->p_mtx);
if (p->p_ksi != NULL)

@@ -782,9 +759,6 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
{
#ifdef KSE
struct ksegrp *kg;
#endif
struct proc *p;

p = td->td_proc;

@@ -824,15 +798,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_stat = SIDL;
}

#ifdef KSE
kg = td->td_ksegrp;

/* things in the KSE GROUP */
kp->ki_estcpu = kg->kg_estcpu;
kp->ki_slptime = kg->kg_slptime;
kp->ki_pri.pri_user = kg->kg_user_pri;
kp->ki_pri.pri_class = kg->kg_pri_class;
#endif
/* Things in the thread */
kp->ki_wchan = td->td_wchan;
kp->ki_pri.pri_level = td->td_priority;

@@ -845,12 +810,10 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
kp->ki_pctcpu = sched_pctcpu(td);
#ifndef KSE
kp->ki_estcpu = td->td_estcpu;
kp->ki_slptime = td->td_slptime;
kp->ki_pri.pri_class = td->td_pri_class;
kp->ki_pri.pri_user = td->td_user_pri;
#endif

/* We can't get this anymore but ps etc never used it anyway. */
kp->ki_rqindex = 0;
@@ -319,11 +319,7 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
else
td1 = thread_find(p, uap->lwpid);
if (td1 != NULL)
#ifdef KSE
pri_to_rtp(td1->td_ksegrp, &rtp);
#else
pri_to_rtp(td1, &rtp);
#endif
else
error = ESRCH;
mtx_unlock_spin(&sched_lock);

@@ -359,11 +355,7 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
else
td1 = thread_find(p, uap->lwpid);
if (td1 != NULL)
#ifdef KSE
error = rtp_to_pri(&rtp, td1->td_ksegrp);
#else
error = rtp_to_pri(&rtp, td1);
#endif
else
error = ESRCH;
mtx_unlock_spin(&sched_lock);

@@ -396,11 +388,7 @@ rtprio(td, uap)
{
struct proc *curp;
struct proc *p;
#ifdef KSE
struct ksegrp *kg;
#else
struct thread *tdp;
#endif
struct rtprio rtp;
int cierror, error;

@@ -436,23 +424,14 @@ rtprio(td, uap)
* as leaving it zero.
*/
if (uap->pid == 0) {
#ifdef KSE
pri_to_rtp(td->td_ksegrp, &rtp);
#else
pri_to_rtp(td, &rtp);
#endif
} else {
struct rtprio rtp2;

rtp.type = RTP_PRIO_IDLE;
rtp.prio = RTP_PRIO_MAX;
#ifdef KSE
FOREACH_KSEGRP_IN_PROC(p, kg) {
pri_to_rtp(kg, &rtp2);
#else
FOREACH_THREAD_IN_PROC(p, tdp) {
pri_to_rtp(tdp, &rtp2);
#endif
if (rtp2.type < rtp.type ||
(rtp2.type == rtp.type &&
rtp2.prio < rtp.prio)) {

@@ -493,39 +472,19 @@ rtprio(td, uap)
}
}

#ifdef KSE
/*
* If we are setting our own priority, set just our
* KSEGRP but if we are doing another process,
* do all the groups on that process. If we
* specify our own pid we do the latter.
*/
#else
/*
* If we are setting our own priority, set just our
* thread but if we are doing another process,
* do all the threads on that process. If we
* specify our own pid we do the latter.
*/
#endif
mtx_lock_spin(&sched_lock);
if (uap->pid == 0) {
#ifdef KSE
error = rtp_to_pri(&rtp, td->td_ksegrp);
#else
error = rtp_to_pri(&rtp, td);
#endif
} else {
#ifdef KSE
FOREACH_KSEGRP_IN_PROC(p, kg) {
if ((error = rtp_to_pri(&rtp, kg)) != 0) {
break;
}
#else
FOREACH_THREAD_IN_PROC(p, td) {
if ((error = rtp_to_pri(&rtp, td)) != 0)
break;
#endif
}
}
mtx_unlock_spin(&sched_lock);

@@ -539,11 +498,7 @@ rtprio(td, uap)
}

int
#ifdef KSE
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
#else
rtp_to_pri(struct rtprio *rtp, struct thread *td)
#endif
{
u_char newpri;
@@ -552,87 +507,43 @@ rtp_to_pri(struct rtprio *rtp, struct thread *td)
return (EINVAL);
switch (RTP_PRIO_BASE(rtp->type)) {
case RTP_PRIO_REALTIME:
#ifdef KSE
newpri = PRI_MIN_REALTIME + rtp->prio;
#else
newpri = PRI_MIN_REALTIME + rtp->prio;
#endif
break;
case RTP_PRIO_NORMAL:
#ifdef KSE
newpri = PRI_MIN_TIMESHARE + rtp->prio;
#else
newpri = PRI_MIN_TIMESHARE + rtp->prio;
#endif
break;
case RTP_PRIO_IDLE:
#ifdef KSE
newpri = PRI_MIN_IDLE + rtp->prio;
#else
newpri = PRI_MIN_IDLE + rtp->prio;
#endif
break;
default:
return (EINVAL);
}
#ifdef KSE
sched_class(kg, rtp->type);
sched_user_prio(kg, newpri);
if (curthread->td_ksegrp == kg) {
sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
}
#else
sched_class(td, rtp->type); /* XXX fix */
sched_user_prio(td, newpri);
if (curthread == td)
sched_prio(curthread, td->td_user_pri); /* XXX dubious */
#endif
return (0);
}

void
#ifdef KSE
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
#else
pri_to_rtp(struct thread *td, struct rtprio *rtp)
#endif
{

mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
switch (PRI_BASE(kg->kg_pri_class)) {
#else
switch (PRI_BASE(td->td_pri_class)) {
#endif
case PRI_REALTIME:
#ifdef KSE
rtp->prio = kg->kg_base_user_pri - PRI_MIN_REALTIME;
#else
rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
#endif
break;
case PRI_TIMESHARE:
#ifdef KSE
rtp->prio = kg->kg_base_user_pri - PRI_MIN_TIMESHARE;
#else
rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
#endif
break;
case PRI_IDLE:
#ifdef KSE
rtp->prio = kg->kg_base_user_pri - PRI_MIN_IDLE;
#else
rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
#endif
break;
default:
break;
}
#ifdef KSE
rtp->type = kg->kg_pri_class;
#else
rtp->type = td->td_pri_class;
#endif
}

#if defined(COMPAT_43)
@@ -430,11 +430,7 @@ uio_yield(void)
td = curthread;
mtx_lock_spin(&sched_lock);
DROP_GIANT();
#ifdef KSE
sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */
#else
sched_prio(td, td->td_user_pri);
#endif
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PICKUP_GIANT();
@@ -24,68 +24,6 @@
* SUCH DAMAGE.
*/

#ifdef KSE
/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (N-X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned as all the available KSEs are activly running, or because there
are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued that pointer must be
removed from it. Since we know there were no more KSEs available,
(M was 1 and is now 0) and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later.. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the approipriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
             \    \____
              \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherrited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -126,8 +64,6 @@ __FBSDID("$FreeBSD$");

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/*
* kern.sched.preemption allows user space to determine if preemption support
* is compiled in or not. It is not currently a boot or runtime flag that

@@ -144,79 +80,40 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
/************************************************************************
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
#ifdef KSE
/*
* Select the KSE that will be run next. From that find the thread, and
* remove it from the KSEGRP's run queue. If there is thread clustering,
* this will be what does it.
*/
#else
/*
* Select the thread that will be run next.
*/
#endif
struct thread *
choosethread(void)
{
#ifdef KSE
struct kse *ke;
#endif
struct td_sched *ts;
struct thread *td;
#ifdef KSE
struct ksegrp *kg;
#endif

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
/* Shutting down, run idlethread on AP's */
td = PCPU_GET(idlethread);
#ifdef KSE
ke = td->td_kse;
#endif
ts = td->td_sched;
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
#ifdef KSE
ke->ke_flags |= KEF_DIDRUN;
#else
td->td_kse->ke_flags |= KEF_DIDRUN;
#endif
ts->ts_flags |= TSF_DIDRUN;
TD_SET_RUNNING(td);
return (td);
}
#endif

retry:
#ifdef KSE
ke = sched_choose();
if (ke) {
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
if (td->td_proc->p_flag & P_HADTHREADS) {
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned = TAILQ_PREV(td,
threadqueue, td_runq);
}
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}
#else
td = sched_choose();
if (td) {
#endif
ts = sched_choose();
if (ts) {
td = ts->ts_thread;
CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
td, td->td_priority);
} else {
/* Simulate runq_choose() having returned the idle thread */
td = PCPU_GET(idlethread);
#ifdef KSE
ke = td->td_kse;
#endif
ts = td->td_sched;
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
}
#ifdef KSE
ke->ke_flags |= KEF_DIDRUN;
#else
td->td_kse->ke_flags |= KEF_DIDRUN;
#endif
ts->ts_flags |= TSF_DIDRUN;

/*
* If we are in panic, only allow system threads,
@@ -233,91 +130,24 @@ choosethread(void)
return (td);
}

#ifdef KSE

#if 0
/*
* Given a surplus system slot, try assign a new runnable thread to it.
* Called from:
* sched_thread_exit() (local)
* sched_switch() (local)
* sched_thread_exit() (local)
* remrunqueue() (local) (not at the moment)
*/
static void
slot_fill(struct ksegrp *kg)
{
struct thread *td;

mtx_assert(&sched_lock, MA_OWNED);
while (kg->kg_avail_opennings > 0) {
/*
* Find the first unassigned thread
*/
if ((td = kg->kg_last_assigned) != NULL)
td = TAILQ_NEXT(td, td_runq);
else
td = TAILQ_FIRST(&kg->kg_runq);

/*
* If we found one, send it to the system scheduler.
*/
if (td) {
kg->kg_last_assigned = td;
sched_add(td, SRQ_YIELDING);
CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
} else {
/* no threads to use up the slots. quit now */
break;
}
}
}

#ifdef SCHED_4BSD
/*
* Remove a thread from its KSEGRP's run queue.
* This in turn may remove it from a KSE if it was already assigned
* to one, possibly causing a new thread to be assigned to the KSE
* and the KSE getting a new priority.
* currently not used.. threads remove themselves from the
* run queue by running.
*/
static void
remrunqueue(struct thread *td)
{
struct thread *td2, *td3;
struct ksegrp *kg;
struct kse *ke;

mtx_assert(&sched_lock, MA_OWNED);
KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
kg = td->td_ksegrp;
ke = td->td_kse;
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
TD_SET_CAN_RUN(td);
/*
* If it is not a threaded process, take the shortcut.
*/
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
/* remve from sys run queue and free up a slot */
sched_rem(td);
return;
}
td3 = TAILQ_PREV(td, threadqueue, td_runq);
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
if (ke->ke_state == KES_ONRUNQ) {
/*
* This thread has been assigned to the system run queue.
* We need to dissociate it and try assign the
* KSE to the next available thread. Then, we should
* see if we need to move the KSE in the run queues.
*/
sched_rem(td);
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value"));
if (td2 == td)
kg->kg_last_assigned = td3;
/* slot_fill(kg); */ /* will replace it with another */
}
/* remove from sys run queue */
sched_rem(td);
return;
}
#endif
#endif

/*
* Change the priority of a thread that is on the run queue.
@ -325,229 +155,32 @@ remrunqueue(struct thread *td)
|
||||
void
|
||||
adjustrunqueue( struct thread *td, int newpri)
|
||||
{
|
||||
#ifdef KSE
|
||||
struct ksegrp *kg;
|
||||
#endif
|
||||
struct kse *ke;
|
||||
struct td_sched *ts;
|
||||
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
|
||||
|
||||
ke = td->td_kse;
|
||||
ts = td->td_sched;
|
||||
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
|
||||
#ifdef KSE
|
||||
/*
|
||||
* If it is not a threaded process, take the shortcut.
|
||||
*/
|
||||
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
|
||||
/* We only care about the kse in the run queue. */
|
||||
td->td_priority = newpri;
|
||||
#ifndef SCHED_CORE
|
||||
if (ke->ke_rqindex != (newpri / RQ_PPQ))
|
||||
#else
|
||||
if (ke->ke_rqindex != newpri)
|
||||
#endif
|
||||
{
|
||||
sched_rem(td);
|
||||
sched_add(td, SRQ_BORING);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/* It is a threaded process */
|
||||
kg = td->td_ksegrp;
|
||||
if (ke->ke_state == KES_ONRUNQ
|
||||
#ifdef SCHED_ULE
|
||||
|| ((ke->ke_flags & KEF_ASSIGNED) != 0 &&
|
||||
(ke->ke_flags & KEF_REMOVED) == 0)
|
||||
#endif
|
||||
) {
|
||||
if (kg->kg_last_assigned == td) {
|
||||
kg->kg_last_assigned =
|
||||
TAILQ_PREV(td, threadqueue, td_runq);
|
||||
}
|
||||
sched_rem(td);
|
||||
}
|
||||
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
|
||||
TD_SET_CAN_RUN(td);
|
||||
td->td_priority = newpri;
|
||||
setrunqueue(td, SRQ_BORING);
|
||||
#else
|
||||
/* We only care about the kse in the run queue. */
|
||||
/* We only care about the td_sched in the run queue. */
|
||||
td->td_priority = newpri;
|
||||
#ifndef SCHED_CORE
|
||||
if (ke->ke_rqindex != (newpri / RQ_PPQ))
|
||||
if (ts->ts_rqindex != (newpri / RQ_PPQ))
|
||||
#else
|
||||
if (ke->ke_rqindex != newpri)
|
||||
if (ts->ts_rqindex != newpri)
|
||||
#endif
|
||||
{
|
||||
sched_rem(td);
|
||||
sched_add(td, SRQ_BORING);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef KSE
/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot. It determines if a thread from the same ksegrp
 * should be preempted. If so, it tries to switch threads
 * if the thread is on the same cpu or notifies another cpu that
 * it should switch threads.
 */

static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
struct thread *running_thread;

mtx_assert(&sched_lock, MA_OWNED);
running_thread = curthread;

if (running_thread->td_ksegrp != td->td_ksegrp)
return;

if (td->td_priority >= running_thread->td_priority)
return;
#ifdef PREEMPTION
#ifndef FULL_PREEMPTION
if (td->td_priority > PRI_MAX_ITHD) {
running_thread->td_flags |= TDF_NEEDRESCHED;
return;
}
#endif /* FULL_PREEMPTION */

if (running_thread->td_critnest > 1)
running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
return;
}

#else /* SMP */
{
struct thread *running_thread;
int worst_pri;
struct ksegrp *kg;
cpumask_t cpumask,dontuse;
struct pcpu *pc;
struct pcpu *best_pcpu;
struct thread *cputhread;

mtx_assert(&sched_lock, MA_OWNED);

running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
kg = td->td_ksegrp;

/* if someone is ahead of this thread, wait our turn */
if (td != TAILQ_FIRST(&kg->kg_runq))
return;

worst_pri = td->td_priority;
best_pcpu = NULL;
dontuse = stopped_cpus | idle_cpus_mask;

/*
 * Find a cpu with the worst priority that runs a thread from
 * the same ksegrp - if multiple exist give first the last run
 * cpu and then the current cpu priority
 */

SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
cpumask = pc->pc_cpumask;
cputhread = pc->pc_curthread;

if ((cpumask & dontuse) ||
cputhread->td_ksegrp != kg)
continue;

if (cputhread->td_priority > worst_pri) {
worst_pri = cputhread->td_priority;
best_pcpu = pc;
continue;
}

if (cputhread->td_priority == worst_pri &&
best_pcpu != NULL &&
(td->td_lastcpu == pc->pc_cpuid ||
(PCPU_GET(cpumask) == cpumask &&
td->td_lastcpu != best_pcpu->pc_cpuid)))
best_pcpu = pc;
}

/* Check if we need to preempt someone */
if (best_pcpu == NULL)
return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
if (td->td_priority <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
{
ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
return;
}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
return;
}
#if !defined(KSEG_PEEMPT_BEST_CPU)
}
#endif

if (td->td_priority >= running_thread->td_priority)
return;
#ifdef PREEMPTION

#if !defined(FULL_PREEMPTION)
if (td->td_priority > PRI_MAX_ITHD) {
running_thread->td_flags |= TDF_NEEDRESCHED;
}
#endif /* ! FULL_PREEMPTION */

if (running_thread->td_critnest > 1)
running_thread->td_owepreempt = 1;
else
mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
return;
}
#endif /* !SMP */


int limitcount;
#endif
void
setrunqueue(struct thread *td, int flags)
{
#ifdef KSE
struct ksegrp *kg;
struct thread *td2;
struct thread *tda;

CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
td, td->td_ksegrp, td->td_proc->p_pid);
#else
CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
td, td->td_proc->p_pid);
#endif
CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
td, td->td_proc->p_comm, td->td_priority, curthread,
curthread->td_proc->p_comm);
@@ -557,101 +190,7 @@ setrunqueue(struct thread *td, int flags)
KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
("setrunqueue: bad thread state"));
TD_SET_RUNQ(td);
#ifdef KSE
kg = td->td_ksegrp;
if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
/*
 * Common path optimisation: Only one of everything
 * and the KSE is always already attached.
 * Totally ignore the ksegrp run queue.
 */
if (kg->kg_avail_opennings != 1) {
if (limitcount < 1) {
limitcount++;
printf("pid %d: corrected slot count (%d->1)\n",
td->td_proc->p_pid, kg->kg_avail_opennings);

}
kg->kg_avail_opennings = 1;
}
sched_add(td, flags);
return;
}

/*
 * If the concurrency has reduced, and we would go in the
 * assigned section, then keep removing entries from the
 * system run queue, until we are not in that section
 * or there is room for us to be put in that section.
 * What we MUST avoid is the case where there are threads of less
 * priority than the new one scheduled, but it can not
 * be scheduled itself. That would lead to a non contiguous set
 * of scheduled threads, and everything would break.
 */
tda = kg->kg_last_assigned;
while ((kg->kg_avail_opennings <= 0) &&
(tda && (tda->td_priority > td->td_priority))) {
/*
 * None free, but there is one we can commandeer.
 */
CTR2(KTR_RUNQ,
"setrunqueue: kg:%p: take slot from td: %p", kg, tda);
sched_rem(tda);
tda = kg->kg_last_assigned =
TAILQ_PREV(tda, threadqueue, td_runq);
}

/*
 * Add the thread to the ksegrp's run queue at
 * the appropriate place.
 */
TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
if (td2->td_priority > td->td_priority) {
TAILQ_INSERT_BEFORE(td2, td, td_runq);
break;
}
}
if (td2 == NULL) {
/* We ran off the end of the TAILQ or it was empty. */
TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
}

/*
 * If we have a slot to use, then put the thread on the system
 * run queue and if needed, readjust the last_assigned pointer.
 * it may be that we need to schedule something anyhow
 * even if the available slots are -ve so that
 * all the items < last_assigned are scheduled.
 */
if (kg->kg_avail_opennings > 0) {
if (tda == NULL) {
/*
 * No pre-existing last assigned so whoever is first
 * gets the slot.. (maybe us)
 */
td2 = TAILQ_FIRST(&kg->kg_runq);
kg->kg_last_assigned = td2;
} else if (tda->td_priority > td->td_priority) {
td2 = td;
} else {
/*
 * We are past last_assigned, so
 * give the next slot to whatever is next,
 * which may or may not be us.
 */
td2 = TAILQ_NEXT(tda, td_runq);
kg->kg_last_assigned = td2;
}
sched_add(td2, flags);
} else {
CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
td, td->td_ksegrp, td->td_proc->p_pid);
if ((flags & SRQ_YIELDING) == 0)
maybe_preempt_in_ksegrp(td);
}
#else
sched_add(td, flags);
#endif
}

/*
@@ -737,14 +276,14 @@ maybe_preempt(struct thread *td)
 * to the new thread.
 */
ctd = curthread;
KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
("thread has no (or wrong) sched-private part."));
KASSERT((td->td_inhibitors == 0),
("maybe_preempt: trying to run inhibitted thread"));
pri = td->td_priority;
cpri = ctd->td_priority;
if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
TD_IS_INHIBITED(ctd) || td->td_sched->ts_state != TSS_THREAD)
return (0);
#ifndef FULL_PREEMPTION
if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
@@ -762,25 +301,7 @@ maybe_preempt(struct thread *td)
 * Thread is runnable but not yet put on system run queue.
 */
MPASS(TD_ON_RUNQ(td));
MPASS(td->td_sched->ke_state != KES_ONRUNQ);
#ifdef KSE
if (td->td_proc->p_flag & P_HADTHREADS) {
/*
 * If this is a threaded process we actually ARE on the
 * ksegrp run queue so take it off that first.
 * Also undo any damage done to the last_assigned pointer.
 * XXX Fix setrunqueue so this isn't needed
 */
struct ksegrp *kg;

kg = td->td_ksegrp;
if (kg->kg_last_assigned == td)
kg->kg_last_assigned =
TAILQ_PREV(td, threadqueue, td_runq);
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}

#endif
MPASS(td->td_sched->ts_state != TSS_ONRUNQ);
TD_SET_RUNNING(td);
CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
td->td_proc->p_pid, td->td_proc->p_comm);
@@ -880,25 +401,25 @@ runq_setbit(struct runq *rq, int pri)
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
struct rqhead *rqh;
int pri;

pri = ke->ke_thread->td_priority / RQ_PPQ;
ke->ke_rqindex = pri;
pri = ts->ts_thread->td_priority / RQ_PPQ;
ts->ts_rqindex = pri;
runq_setbit(rq, pri);
rqh = &rq->rq_queues[pri];
CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
if (flags & SRQ_PREEMPTED) {
TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
} else {
TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
}
}

@@ -933,11 +454,11 @@ SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
/*
 * Find the highest priority process on the run queue.
 */
struct kse *
struct td_sched *
runq_choose(struct runq *rq)
{
struct rqhead *rqh;
struct kse *ke;
struct td_sched *ts;
int pri;

mtx_assert(&sched_lock, MA_OWNED);
@@ -952,23 +473,23 @@ runq_choose(struct runq *rq)
 */
int count = runq_fuzz;
int cpu = PCPU_GET(cpuid);
struct kse *ke2;
ke2 = ke = TAILQ_FIRST(rqh);
struct td_sched *ts2;
ts2 = ts = TAILQ_FIRST(rqh);

while (count-- && ke2) {
if (ke->ke_thread->td_lastcpu == cpu) {
ke = ke2;
while (count-- && ts2) {
if (ts->ts_thread->td_lastcpu == cpu) {
ts = ts2;
break;
}
ke2 = TAILQ_NEXT(ke2, ke_procq);
ts2 = TAILQ_NEXT(ts2, ts_procq);
}
} else
#endif
ke = TAILQ_FIRST(rqh);
KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
ts = TAILQ_FIRST(rqh);
KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
CTR3(KTR_RUNQ,
"runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
return (ke);
"runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
return (ts);
}
CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

@@ -976,28 +497,24 @@ runq_choose(struct runq *rq)
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 * Caller must set ts->ts_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
runq_remove(struct runq *rq, struct td_sched *ts)
{
struct rqhead *rqh;
int pri;

#ifdef KSE
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
#else
KASSERT(ke->ke_thread->td_proc->p_sflag & PS_INMEM,
#endif
KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
("runq_remove: process swapped out"));
pri = ke->ke_rqindex;
pri = ts->ts_rqindex;
rqh = &rq->rq_queues[pri];
CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
TAILQ_REMOVE(rqh, ke, ke_procq);
CTR5(KTR_RUNQ, "runq_remove: td=%p, ts=%p pri=%d %d rqh=%p",
ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
KASSERT(ts != NULL, ("runq_remove: no proc on busy queue"));
TAILQ_REMOVE(rqh, ts, ts_procq);
if (TAILQ_EMPTY(rqh)) {
CTR0(KTR_RUNQ, "runq_remove: empty");
runq_clrbit(rq, pri);
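
The run queue operated on by runq_add()/runq_choose()/runq_remove() above is an array of TAILQs indexed by td_priority / RQ_PPQ, plus a status bitmap of non-empty queues that is searched with a find-first-set. The following is a minimal userspace sketch of that pattern, under the assumption that the demo_* names are invented stand-ins; the kernel keeps choose and remove as separate steps, asserts sched_lock, and logs KTR events, while this sketch folds choose+remove together for brevity.

#include <stdio.h>
#include <strings.h>		/* ffs() */
#include <sys/queue.h>

#define DEMO_NQS	64	/* number of queues, like RQ_NQS */
#define DEMO_PPQ	4	/* priorities per queue, like RQ_PPQ */
#define DEMO_NWORDS	((DEMO_NQS + 31) / 32)

struct demo_td_sched {
	int pri;
	TAILQ_ENTRY(demo_td_sched) procq;	/* like ts_procq */
};
TAILQ_HEAD(demo_rqhead, demo_td_sched);

struct demo_runq {
	unsigned status[DEMO_NWORDS];		/* bitmap of non-empty queues */
	struct demo_rqhead queues[DEMO_NQS];
};

static void
demo_runq_add(struct demo_runq *rq, struct demo_td_sched *ts)
{
	int qi = ts->pri / DEMO_PPQ;

	TAILQ_INSERT_TAIL(&rq->queues[qi], ts, procq);
	rq->status[qi / 32] |= 1u << (qi % 32);		/* runq_setbit() */
}

static struct demo_td_sched *
demo_runq_choose(struct demo_runq *rq)
{
	int w, bit, qi;
	struct demo_td_sched *ts;

	for (w = 0; w < DEMO_NWORDS; w++) {
		if ((bit = ffs(rq->status[w])) == 0)
			continue;			/* word empty */
		qi = w * 32 + bit - 1;			/* lowest set = best pri */
		ts = TAILQ_FIRST(&rq->queues[qi]);
		TAILQ_REMOVE(&rq->queues[qi], ts, procq);
		if (TAILQ_EMPTY(&rq->queues[qi]))
			rq->status[w] &= ~(1u << (qi % 32));	/* runq_clrbit() */
		return (ts);
	}
	return (NULL);
}

int
main(void)
{
	struct demo_runq rq = { { 0 } };
	struct demo_td_sched a = { 40 }, b = { 8 };
	int i;

	for (i = 0; i < DEMO_NQS; i++)
		TAILQ_INIT(&rq.queues[i]);
	demo_runq_add(&rq, &a);
	demo_runq_add(&rq, &b);
	printf("first chosen pri: %d\n", demo_runq_choose(&rq)->pri);	/* 8 */
	return (0);
}
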
@@ -1008,23 +525,17 @@ runq_remove(struct runq *rq, struct kse *ke)
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

#ifdef KSE
/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 * The thread and proc have already been linked in.
 *
 * Called from:
 * proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
sched_newproc(struct proc *p, struct thread *td)
{

/* This can go in sched_fork */
sched_init_concurrency(kg);
}
#endif

/*
 * thread is being either created or recycled.
@@ -1037,37 +548,27 @@ sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
void
sched_newthread(struct thread *td)
{
struct td_sched *ke;
struct td_sched *ts;

ke = (struct td_sched *) (td + 1);
bzero(ke, sizeof(*ke));
td->td_sched = ke;
ke->ke_thread = td;
ke->ke_state = KES_THREAD;
ts = (struct td_sched *) (td + 1);
bzero(ts, sizeof(*ts));
td->td_sched = ts;
ts->ts_thread = td;
ts->ts_state = TSS_THREAD;
}
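
sched_newthread() above relies on the thread zone handing out one block sized for struct thread plus the scheduler-private part, so td->td_sched can simply point just past the thread structure. A hedged userspace rendering of that co-allocation trick, with invented demo_* names:

#include <stdio.h>
#include <stdlib.h>

struct demo_td_sched;

struct demo_thread {
	int td_tid;
	struct demo_td_sched *td_sched;
};

struct demo_td_sched {
	struct demo_thread *ts_thread;	/* back pointer, like ts_thread */
	int ts_state;
};

static struct demo_thread *
demo_thread_alloc(void)
{
	struct demo_thread *td;

	/* One allocation holds both; the scheduler part sits right after. */
	td = calloc(1, sizeof(struct demo_thread) +
	    sizeof(struct demo_td_sched));
	if (td == NULL)
		abort();
	td->td_sched = (struct demo_td_sched *)(td + 1);
	td->td_sched->ts_thread = td;
	return (td);
}

int
main(void)
{
	struct demo_thread *td = demo_thread_alloc();

	printf("sched data lives %zu bytes past the thread struct\n",
	    (size_t)((char *)td->td_sched - (char *)td));
	free(td);
	return (0);
}
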
#ifdef KSE
/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline"..before the ksegrp is attached to the world
 * and thus wouldn't need schedlock in that case.
 * Called from:
 * thr_create()
 * proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
sched_init_concurrency(struct proc *p)
{

CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
kg->kg_concurrency = 1;
kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N
 * Change the concurrency of an existing proc to N
 * Called from:
 * kse_create()
 * kse_exit()
@@ -1075,16 +576,8 @@ sched_init_concurrency(struct ksegrp *kg)
 * thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
sched_set_concurrency(struct proc *p, int concurrency)
{

CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
kg,
concurrency,
kg->kg_avail_opennings,
kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
kg->kg_concurrency = concurrency;
}
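
sched_set_concurrency() above moves the free-slot count by the same delta as the concurrency target, so slots already handed out stay accounted for; as the setrunqueue() comment earlier notes, the available count can legitimately go negative when the process is over-committed. A standalone sketch of that bookkeeping, with hypothetical names:

#include <stdio.h>

struct demo_conc {
	int concurrency;	/* like kg_concurrency */
	int avail_opennings;	/* like kg_avail_opennings */
};

static void
demo_set_concurrency(struct demo_conc *c, int concurrency)
{
	/* Free slots shift by the same delta as the target. */
	c->avail_opennings += concurrency - c->concurrency;
	c->concurrency = concurrency;
}

int
main(void)
{
	struct demo_conc c = { 1, 1 };

	demo_set_concurrency(&c, 4);	/* three more slots appear */
	printf("avail %d\n", c.avail_opennings);	/* 4 */
	c.avail_opennings -= 2;		/* two threads occupy slots */
	demo_set_concurrency(&c, 1);	/* goes negative: over-committed */
	printf("avail %d\n", c.avail_opennings);	/* -1 */
	return (0);
}
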
/*
@@ -1099,10 +592,6 @@ sched_set_concurrency(struct ksegrp *kg, int concurrency)
void
sched_thread_exit(struct thread *td)
{

SLOT_RELEASE(td->td_ksegrp);
slot_fill(td->td_ksegrp);
}
#endif

#endif /* KERN_SWITCH_INCLUDE */

@@ -142,18 +142,12 @@ create_thread(struct thread *td, mcontext_t *ctx,
{
stack_t stack;
struct thread *newtd;
#ifdef KSE
struct ksegrp *kg, *newkg;
#endif
struct proc *p;
long id;
int error;

error = 0;
p = td->td_proc;
#ifdef KSE
kg = td->td_ksegrp;
#endif

/* Have race condition but it is cheap. */
if (p->p_numthreads >= max_threads_per_proc)
@@ -177,7 +171,7 @@ create_thread(struct thread *td, mcontext_t *ctx,
}
}

/* Initialize our td and new ksegrp.. */
/* Initialize our td */
newtd = thread_alloc();

/*
@@ -229,42 +223,15 @@ create_thread(struct thread *td, mcontext_t *ctx,
}
}

#ifdef KSE
newkg = ksegrp_alloc();
bzero(&newkg->kg_startzero,
__rangeof(struct ksegrp, kg_startzero, kg_endzero));
bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
__rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
sched_init_concurrency(newkg);
PROC_LOCK(td->td_proc);
td->td_proc->p_flag |= P_HADTHREADS;
newtd->td_sigmask = td->td_sigmask;
mtx_lock_spin(&sched_lock);
ksegrp_link(newkg, p);
thread_link(newtd, newkg);
PROC_UNLOCK(p);
#else
PROC_LOCK(td->td_proc);
td->td_proc->p_flag |= P_HADTHREADS;
newtd->td_sigmask = td->td_sigmask;
mtx_lock_spin(&sched_lock);
thread_link(newtd, p);
PROC_UNLOCK(p);
#endif

#ifdef KSE
/* let the scheduler know about these things. */
sched_fork_ksegrp(td, newkg);
sched_fork_thread(td, newtd);
if (rtp != NULL) {
if (!(kg->kg_pri_class == PRI_TIMESHARE &&
rtp->type == RTP_PRIO_NORMAL)) {
rtp_to_pri(rtp, newkg);
sched_prio(newtd, newkg->kg_user_pri);
} /* ignore timesharing class */
}
#else
sched_fork(td, newtd);
if (rtp != NULL) {
if (!(td->td_pri_class == PRI_TIMESHARE &&
rtp->type == RTP_PRIO_NORMAL)) {
@@ -272,7 +239,6 @@ create_thread(struct thread *td, mcontext_t *ctx,
sched_prio(newtd, newtd->td_user_pri);
} /* ignore timesharing class */
}
#endif
TD_SET_CAN_RUN(newtd);
/* if ((flags & THR_SUSPENDED) == 0) */
setrunqueue(newtd, SRQ_BORING);
@@ -50,16 +50,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>

#ifdef KSE
/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
#else
/*
 * thread related storage.
 */
#endif
static uma_zone_t thread_zone;

/* DEBUG ONLY */
@@ -85,9 +78,6 @@ int virtual_cpu;

#endif
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
#ifdef KSE
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
#endif
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

@@ -228,59 +218,6 @@ thread_fini(void *mem, int size)
vm_thread_dispose(td);
}

#ifdef KSE
/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
struct ksegrp *kg;

kg = (struct ksegrp *)mem;
bzero(mem, size);
kg->kg_sched = (struct kg_sched *)&kg[1];
return (0);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

TAILQ_INIT(&kg->kg_threads);
TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
kg->kg_proc = p;
/*
 * the following counters are in the -zero- section
 * and may not need clearing
 */
kg->kg_numthreads = 0;
kg->kg_numupcalls = 0;
/* link it in now that it's consistent */
p->p_numksegrps++;
TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

/*
 * Called from:
 * thread-exit()
 */
void
ksegrp_unlink(struct ksegrp *kg)
{
struct proc *p;

mtx_assert(&sched_lock, MA_OWNED);
KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

p = kg->kg_proc;
TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
p->p_numksegrps--;
}
#endif

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
@@ -290,18 +227,10 @@ ksegrp_unlink(struct ksegrp *kg)
 * proc_init()
 */
void
#ifdef KSE
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
#else
proc_linkup(struct proc *p, struct thread *td)
#endif
{

#ifdef KSE
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
#endif
TAILQ_INIT(&p->p_threads); /* all threads in proc */
TAILQ_INIT(&p->p_suspended); /* Threads suspended */
TAILQ_INIT(&p->p_upcalls); /* upcall list */
sigqueue_init(&p->p_sigqueue, p);
p->p_ksi = ksiginfo_alloc(1);
if (p->p_ksi != NULL) {
@@ -309,17 +238,8 @@ proc_linkup(struct proc *p, struct thread *td)
p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
}
LIST_INIT(&p->p_mqnotifier);
#ifdef KSE
p->p_numksegrps = 0;
#endif
p->p_numthreads = 0;

#ifdef KSE
ksegrp_link(kg, p);
thread_link(td, kg);
#else
thread_link(td, p);
#endif
}

/*
@@ -336,37 +256,22 @@ threadinit(void)
thread_ctor, thread_dtor, thread_init, thread_fini,
UMA_ALIGN_CACHE, 0);
#ifdef KSE
ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
ksegrp_ctor, NULL, NULL, NULL,
UMA_ALIGN_CACHE, 0);
kseinit(); /* set up kse specific stuff e.g. upcall zone*/
#endif
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 * Use the slpq as that must be unused by now.
 */
void
thread_stash(struct thread *td)
{
mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
mtx_unlock_spin(&kse_zombie_lock);
}

#ifdef KSE
/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
mtx_lock_spin(&kse_zombie_lock);
TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
mtx_unlock_spin(&kse_zombie_lock);
}
#endif

/*
 * Reap zombie kse resource.
 */
@@ -374,65 +279,27 @@ void
thread_reap(void)
{
struct thread *td_first, *td_next;
#ifdef KSE
struct ksegrp *kg_first, * kg_next;
#endif

/*
 * Don't even bother to lock if none at this instant,
 * we really don't care about the next instant..
 */
#ifdef KSE
if ((!TAILQ_EMPTY(&zombie_threads))
|| (!TAILQ_EMPTY(&zombie_ksegrps))) {
#else
if (!TAILQ_EMPTY(&zombie_threads)) {
#endif
mtx_lock_spin(&kse_zombie_lock);
td_first = TAILQ_FIRST(&zombie_threads);
#ifdef KSE
kg_first = TAILQ_FIRST(&zombie_ksegrps);
#endif
if (td_first)
TAILQ_INIT(&zombie_threads);
#ifdef KSE
if (kg_first)
TAILQ_INIT(&zombie_ksegrps);
#endif
mtx_unlock_spin(&kse_zombie_lock);
while (td_first) {
td_next = TAILQ_NEXT(td_first, td_runq);
td_next = TAILQ_NEXT(td_first, td_slpq);
if (td_first->td_ucred)
crfree(td_first->td_ucred);
thread_free(td_first);
td_first = td_next;
}
#ifdef KSE
while (kg_first) {
kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
ksegrp_free(kg_first);
kg_first = kg_next;
}
/*
 * there will always be a thread on the list if one of these
 * is there.
 */
kse_GC();
#endif
}
}
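
thread_stash()/thread_reap() above use a classic deferred-free pattern: exiting items are pushed onto a zombie list under a spin lock, and the reaper detaches the whole list in one short critical section, then frees at leisure outside the lock. A userspace sketch with a pthread mutex standing in for kse_zombie_lock; the names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct demo_zombie {
	int id;
	TAILQ_ENTRY(demo_zombie) link;	/* like the reused td_slpq */
};

static TAILQ_HEAD(, demo_zombie) zombies = TAILQ_HEAD_INITIALIZER(zombies);
static pthread_mutex_t zombie_lock = PTHREAD_MUTEX_INITIALIZER;

static void
demo_stash(struct demo_zombie *z)
{
	pthread_mutex_lock(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombies, z, link);
	pthread_mutex_unlock(&zombie_lock);
}

static void
demo_reap(void)
{
	struct demo_zombie *first, *next;

	if (TAILQ_EMPTY(&zombies))	/* unlocked peek, as in thread_reap() */
		return;
	pthread_mutex_lock(&zombie_lock);
	first = TAILQ_FIRST(&zombies);
	TAILQ_INIT(&zombies);		/* detach everything at once */
	pthread_mutex_unlock(&zombie_lock);
	while (first != NULL) {		/* free with no lock held */
		next = TAILQ_NEXT(first, link);
		printf("reaping %d\n", first->id);
		free(first);
		first = next;
	}
}

int
main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct demo_zombie *z = malloc(sizeof(*z));

		z->id = i;
		demo_stash(z);
	}
	demo_reap();
	return (0);
}
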
#ifdef KSE
/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
return (uma_zalloc(ksegrp_zone, M_WAITOK));
}
#endif

/*
 * Allocate a thread.
 */
@@ -444,16 +311,6 @@ thread_alloc(void)
return (uma_zalloc(thread_zone, M_WAITOK));
}

#ifdef KSE
/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
uma_zfree(ksegrp_zone, td);
}
#endif

/*
 * Deallocate a thread.
@@ -503,23 +360,14 @@ thread_exit(void)
uint64_t new_switchtime;
struct thread *td;
struct proc *p;
#ifdef KSE
struct ksegrp *kg;
#endif

td = curthread;
#ifdef KSE
kg = td->td_ksegrp;
#endif
p = td->td_proc;

mtx_assert(&sched_lock, MA_OWNED);
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT(p != NULL, ("thread exiting without a process"));
#ifdef KSE
KASSERT(kg != NULL, ("thread exiting without a kse group"));
#endif
CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
(long)p->p_pid, p->p_comm);
KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
@@ -583,13 +431,8 @@ thread_exit(void)
if (p->p_flag & P_HADTHREADS) {
if (p->p_numthreads > 1) {
thread_unlink(td);
#ifdef KSE

/* XXX first arg not used in 4BSD or ULE */
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
#else
sched_exit(p, td);
#endif

/*
 * The test below is NOT true if we are the
@@ -614,38 +457,9 @@ thread_exit(void)
 * there somehow.
 */
upcall_remove(td);
#endif

/*
 * If the thread we unlinked above was the last one,
 * then this ksegrp should go away too.
 */
if (kg->kg_numthreads == 0) {
/*
 * let the scheduler know about this in case
 * it needs to recover stats or resources.
 * Theoretically we could let
 * sched_exit_ksegrp() do the equivalent of
 * setting the concurrency to 0
 * but don't do it yet to avoid changing
 * the existing scheduler code until we
 * are ready.
 * We supply a random other ksegrp
 * as the recipient of any built up
 * cpu usage etc. (If the scheduler wants it).
 * XXXKSE
 * This is probably not fair so think of
 * a better answer.
 */
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
sched_set_concurrency(kg, 0); /* XXX TEMP */
ksegrp_unlink(kg);
ksegrp_stash(kg);
}
#endif
PROC_UNLOCK(p);
#ifdef KSE
td->td_ksegrp = NULL;
#endif
PCPU_SET(deadthread, td);
} else {
/*
@@ -689,9 +503,6 @@ thread_wait(struct proc *p)

mtx_assert(&Giant, MA_NOTOWNED);
KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
#ifdef KSE
KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
#endif
FOREACH_THREAD_IN_PROC(p, td) {
#ifdef KSE
if (td->td_standin != NULL) {
@@ -718,46 +529,22 @@ thread_wait(struct proc *p)
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 * proc_linkup()
 * ifdef KSE
 * thread_schedule_upcall()
 * endif
 * thr_create()
 */
void
#ifdef KSE
thread_link(struct thread *td, struct ksegrp *kg)
#else
thread_link(struct thread *td, struct proc *p)
#endif
{
#ifdef KSE
struct proc *p;
#endif

#ifdef KSE
p = kg->kg_proc;
#endif
td->td_state = TDS_INACTIVE;
td->td_proc = p;
#ifdef KSE
td->td_ksegrp = kg;
#endif
td->td_flags = 0;
#ifdef KSE
td->td_kflags = 0;
#endif

LIST_INIT(&td->td_contested);
sigqueue_init(&td->td_sigqueue, p);
callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
#ifdef KSE
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
#endif
p->p_numthreads++;
#ifdef KSE
kg->kg_numthreads++;
#endif
}

/*
@@ -781,7 +568,7 @@ thread_unthread(struct thread *td)
thread_stash(td->td_standin);
td->td_standin = NULL;
}
sched_set_concurrency(td->td_ksegrp, 1);
sched_set_concurrency(p, 1);
#else
p->p_flag &= ~P_HADTHREADS;
#endif
@@ -795,23 +582,12 @@ void
thread_unlink(struct thread *td)
{
struct proc *p = td->td_proc;
#ifdef KSE
struct ksegrp *kg = td->td_ksegrp;
#endif

mtx_assert(&sched_lock, MA_OWNED);
TAILQ_REMOVE(&p->p_threads, td, td_plist);
p->p_numthreads--;
#ifdef KSE
TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
kg->kg_numthreads--;
#endif
/* could clear a few other things here */
#ifdef KSE
/* Must NOT clear links to proc and ksegrp! */
#else
/* Must NOT clear links to proc! */
#endif
}

/*
@@ -1040,8 +816,7 @@ thread_suspend_check(int return_instead)

/*
 * When a thread suspends, it just
 * moves to the processes's suspend queue
 * and stays there.
 * gets taken off all queues.
 */
thread_suspend_one(td);
if (return_instead == 0) {
@@ -1074,7 +849,6 @@ thread_suspend_one(struct thread *td)
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
p->p_suspcount++;
TD_SET_SUSPENDED(td);
TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}

void
@@ -1084,7 +858,7 @@ thread_unsuspend_one(struct thread *td)

mtx_assert(&sched_lock, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
TAILQ_REMOVE(&p->p_suspended, td, td_runq);
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
p->p_suspcount--;
setrunnable(td);
@@ -1101,8 +875,10 @@ thread_unsuspend(struct proc *p)
mtx_assert(&sched_lock, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
if (!P_SHOULDSTOP(p)) {
while ((td = TAILQ_FIRST(&p->p_suspended))) {
thread_unsuspend_one(td);
FOREACH_THREAD_IN_PROC(p, td) {
if (TD_IS_SUSPENDED(td)) {
thread_unsuspend_one(td);
}
}
} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
(p->p_numthreads == p->p_suspcount)) {
@@ -1137,8 +913,10 @@ thread_single_end(void)
 * to continue however as this is a bad place to stop.
 */
if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
while ((td = TAILQ_FIRST(&p->p_suspended))) {
thread_unsuspend_one(td);
FOREACH_THREAD_IN_PROC(p, td) {
if (TD_IS_SUSPENDED(td)) {
thread_unsuspend_one(td);
}
}
}
mtx_unlock_spin(&sched_lock);
@@ -167,15 +167,9 @@ struct umtxq_chain {
 * if it is using 100%CPU, this is unfair to other processes.
 */

#ifdef KSE
#define UPRI(td) (((td)->td_ksegrp->kg_user_pri >= PRI_MIN_TIMESHARE &&\
(td)->td_ksegrp->kg_user_pri <= PRI_MAX_TIMESHARE) ?\
PRI_MAX_TIMESHARE : (td)->td_ksegrp->kg_user_pri)
#else
#define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
(td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
PRI_MAX_TIMESHARE : (td)->td_user_pri)
#endif

#define GOLDEN_RATIO_PRIME 2654404609U
#define UMTX_CHAINS 128
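
The UPRI() macro above flattens any user priority inside the timeshare band to PRI_MAX_TIMESHARE, so umtx queueing does not rank timeshare threads against each other, while realtime and idle priorities pass through unchanged. A small sketch of that clamp; the band limits used here are assumed, illustrative values rather than figures quoted from the headers:

#include <stdio.h>

#define DEMO_PRI_MIN_TIMESHARE 160	/* assumed band start */
#define DEMO_PRI_MAX_TIMESHARE 223	/* assumed band end */

static int
demo_upri(int user_pri)
{
	/* Inside the timeshare band everyone is treated alike. */
	return (user_pri >= DEMO_PRI_MIN_TIMESHARE &&
	    user_pri <= DEMO_PRI_MAX_TIMESHARE) ?
	    DEMO_PRI_MAX_TIMESHARE : user_pri;
}

int
main(void)
{
	printf("%d %d %d\n",
	    demo_upri(100),	/* realtime: unchanged */
	    demo_upri(180),	/* timeshare: clamped to 223 */
	    demo_upri(250));	/* idle: unchanged */
	return (0);
}
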
@@ -105,11 +105,7 @@ getscheduler(struct ksched *ksched, struct thread *td, int *policy)
int e = 0;

mtx_lock_spin(&sched_lock);
#ifdef KSE
pri_to_rtp(td->td_ksegrp, &rtp);
#else
pri_to_rtp(td, &rtp);
#endif
mtx_unlock_spin(&sched_lock);
switch (rtp.type)
{
@@ -156,11 +152,7 @@ ksched_getparam(struct ksched *ksched,
struct rtprio rtp;

mtx_lock_spin(&sched_lock);
#ifdef KSE
pri_to_rtp(td->td_ksegrp, &rtp);
#else
pri_to_rtp(td, &rtp);
#endif
mtx_unlock_spin(&sched_lock);
if (RTP_PRIO_IS_REALTIME(rtp.type))
param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -181,9 +173,6 @@ ksched_setscheduler(struct ksched *ksched,
{
int e = 0;
struct rtprio rtp;
#ifdef KSE
struct ksegrp *kg = td->td_ksegrp;
#endif

switch(policy)
{
@@ -198,20 +187,7 @@ ksched_setscheduler(struct ksched *ksched,
? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;

mtx_lock_spin(&sched_lock);
#ifdef KSE
rtp_to_pri(&rtp, kg);
FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
if (TD_IS_RUNNING(td)) {
td->td_flags |= TDF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
sched_prio(td, kg->kg_user_pri);
}
}
}
#else
rtp_to_pri(&rtp, td);
#endif
mtx_unlock_spin(&sched_lock);
}
else
@@ -225,28 +201,7 @@ ksched_setscheduler(struct ksched *ksched,
rtp.type = RTP_PRIO_NORMAL;
rtp.prio = p4prio_to_rtpprio(param->sched_priority);
mtx_lock_spin(&sched_lock);
#ifdef KSE
rtp_to_pri(&rtp, kg);

/* XXX Simply revert to whatever we had for last
 * normal scheduler priorities.
 * This puts a requirement
 * on the scheduling code: You must leave the
 * scheduling info alone.
 */
FOREACH_THREAD_IN_GROUP(kg, td) {
if (TD_IS_RUNNING(td)) {
td->td_flags |= TDF_NEEDRESCHED;
} else if (TD_ON_RUNQ(td)) {
if (td->td_priority > kg->kg_user_pri) {
sched_prio(td, kg->kg_user_pri);
}
}

}
#else
rtp_to_pri(&rtp, td);
#endif
mtx_unlock_spin(&sched_lock);
}
break;
[file diff suppressed because it is too large]

 1125	sys/kern/sched_ule.c
[file diff suppressed because it is too large]
@@ -149,9 +149,6 @@ ast(struct trapframe *framep)
{
struct thread *td;
struct proc *p;
#ifdef KSE
struct ksegrp *kg;
#endif
struct rlimit rlim;
int sflag;
int flags;
@@ -163,9 +160,6 @@ ast(struct trapframe *framep)

td = curthread;
p = td->td_proc;
#ifdef KSE
kg = td->td_ksegrp;
#endif

CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
@@ -204,7 +198,7 @@ ast(struct trapframe *framep)

/*
 * XXXKSE While the fact that we owe a user profiling
 * tick is stored per KSE in this code, the statistics
 * tick is stored per thread in this code, the statistics
 * themselves are still stored per process.
 * This should probably change, by which I mean that
 * possibly the location of both might change.
@@ -264,11 +258,7 @@ ast(struct trapframe *framep)
ktrcsw(1, 1);
#endif
mtx_lock_spin(&sched_lock);
#ifdef KSE
sched_prio(td, kg->kg_user_pri);
#else
sched_prio(td, td->td_user_pri);
#endif
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
@@ -2578,18 +2578,8 @@ ttyinfo(struct tty *tp)
if (proc_compare(pick, p))
pick = p;

td = FIRST_THREAD_IN_PROC(pick); /* XXXKSE */
#if 0
KASSERT(td != NULL, ("ttyinfo: no thread"));
#else
if (td == NULL) {
mtx_unlock_spin(&sched_lock);
PGRP_UNLOCK(tp->t_pgrp);
ttyprintf(tp, "foreground process without thread\n");
tp->t_rocount = 0;
return;
}
#endif
/*^T can only show state for 1 thread. just pick the first. */
td = FIRST_THREAD_IN_PROC(pick);
stateprefix = "";
if (TD_IS_RUNNING(td))
state = "running";
@@ -2669,11 +2659,7 @@ proc_compare(struct proc *p1, struct proc *p2)
{

int esta, estb;
#ifdef KSE
struct ksegrp *kg;
#else
struct thread *td;
#endif
mtx_assert(&sched_lock, MA_OWNED);
if (p1 == NULL)
return (1);
@@ -2694,19 +2680,10 @@ proc_compare(struct proc *p1, struct proc *p2)
 * tie - favor one with highest recent cpu utilization
 */
esta = estb = 0;
#ifdef KSE
FOREACH_KSEGRP_IN_PROC(p1,kg) {
esta += kg->kg_estcpu;
}
FOREACH_KSEGRP_IN_PROC(p2,kg) {
estb += kg->kg_estcpu;
}
#else
FOREACH_THREAD_IN_PROC(p1, td)
esta += td->td_estcpu;
FOREACH_THREAD_IN_PROC(p2, td)
estb += td->td_estcpu;
#endif
if (estb > esta)
return (1);
if (esta > estb)
@@ -1906,11 +1906,7 @@ init386(first)
 * This may be done better later if it gets more high level
 * components in it. If so just link td->td_proc here.
 */
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif

/*
 * Initialize DMAC

@@ -295,11 +295,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
 * Start initializing proc0 and thread0.
 */
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_frame = &frame0;

/*

@@ -295,11 +295,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
 * Start initializing proc0 and thread0.
 */
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
thread0.td_frame = &frame0;

/*

@@ -391,11 +391,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
/*
 * Initialize proc0 stuff (p_contested needs to be done early).
 */
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
thread0.td_kstack = kstack0;

@@ -343,11 +343,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 * Initialize proc0 stuff (p_contested needs to be done early).
 */

#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
#endif
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
 234	sys/sys/proc.h
@@ -152,41 +152,32 @@ struct pargs {
 */
struct auditinfo;
struct kaudit_record;
#ifdef KSE
struct kg_sched;
#else
struct td_sched;
#endif
struct nlminfo;
struct kaioinfo;
struct p_sched;
struct proc;
struct sleepqueue;
#ifdef KSE
struct td_sched;
#else
struct thread;
#endif
struct trapframe;
struct turnstile;
struct mqueue_notifier;

#ifdef KSE
/*
 * Here we define the three structures used for process information.
 * Here we define the two structures used for process information.
 *
 * The first is the thread. It might be thought of as a "Kernel
 * Schedulable Entity Context".
 * This structure contains all the information as to where a thread of
 * execution is now, or was when it was suspended, why it was suspended,
 * and anything else that will be needed to restart it when it is
 * rescheduled. Always associated with a KSE when running, but can be
 * reassigned to an equivalent KSE when being restarted for
 * load balancing. Each of these is associated with a kernel stack
 * and a pcb.
 * rescheduled. It includes a scheduler-specific substructure that is different
 * for each scheduler.
 *
 * It is important to remember that a particular thread structure may only
 * exist as long as the system call or kernel entrance (e.g. by pagefault)
 * M:N notes.
 * It is important to remember that when using M:N threading,
 * a particular thread structure may only exist as long as
 * the system call or kernel entrance (e.g. by pagefault)
 * which it is currently executing. It should therefore NEVER be referenced
 * by pointers in long lived structures that live longer than a single
 * request. If several threads complete their work at the same time,
@@ -198,87 +189,37 @@ struct mqueue_notifier;
 * get one when it needs a new one. There is also a system
 * cache of free threads. Threads have priority and partake in priority
 * inheritance schemes.
 *
 * The second is the proc (process) which owns all the resources of a process
 * other than CPU cycles, which are parcelled out to the threads.
 */
struct thread;

/*
 * The KSEGRP is allocated resources across a number of CPUs.
 * (Including a number of CPUxQUANTA. It parcels these QUANTA up among
 * its threads, each of which should be running in a different CPU.
 * BASE priority and total available quanta are properties of a KSEGRP.
 * Multiple KSEGRPs in a single process compete against each other
 * for total quanta in the same way that a forked child competes against
 * its parent process.
 */
struct ksegrp;

/*
 * A process is the owner of all system resources allocated to a task
 * except CPU quanta.
 * All KSEGs under one process see, and have the same access to, these
 * resources (e.g. files, memory, sockets, credential, kqueues).
 * A process may compete for CPU cycles on the same basis as a
 * forked process cluster by spawning several KSEGRPs.
 */
struct proc;

/***************
 * In pictures:
 * Threads are the unit of execution
With a single run queue used by all processors:

RUNQ: --->KSE---KSE--... SLEEPQ:[]---THREAD---THREAD---THREAD
\ \ []---THREAD
KSEG---THREAD--THREAD--THREAD []
[]---THREAD---THREAD

(processors run THREADs from the KSEG until they are exhausted or
the KSEG exhausts its quantum)

With PER-CPU run queues:
KSEs on the separate run queues directly
They would be given priorities calculated from the KSEG.
RUNQ: --->THREAD---THREAD--... SLEEPQ:[]---THREAD---THREAD---THREAD
[]---THREAD
[]
[]---THREAD---THREAD

With PER-CPU run queues:
it gets more complicated.
*
*****************/
#endif

#ifdef KSE
/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
 * The first KSE available in the correct group will run this thread.
 * If several are available, use the one on the same CPU as last time.
 * When waiting to be run, threads are hung off the KSEGRP in priority order.
 * With N runnable and queued KSEs in the KSEGRP, the first N threads
 * are linked to them. Other threads are not yet assigned.
 */
#else
/*
 * Thread context. Processes may have multiple threads.
 */
#endif
struct thread {
struct proc *td_proc; /* (*) Associated process. */
#ifdef KSE
struct ksegrp *td_ksegrp; /* (*) Associated KSEG. */
#else
void *was_td_ksegrp; /* Temporary padding. */
#endif
TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */
#ifdef KSE
TAILQ_ENTRY(thread) td_kglist; /* (*) All threads in this ksegrp. */
#else
TAILQ_ENTRY(thread) was_td_kglist; /* Temporary padding. */
#endif

/* The two queues below should someday be merged. */
TAILQ_ENTRY(thread) td_slpq; /* (j) Sleep queue. */
TAILQ_ENTRY(thread) td_lockq; /* (j) Lock queue. */
#ifdef KSE
TAILQ_ENTRY(thread) td_runq; /* (j/z) Run queue(s). XXXKSE */
#else
TAILQ_ENTRY(thread) td_runq; /* (j/z) Run queue(s). */
#endif

TAILQ_HEAD(, selinfo) td_selq; /* (p) List of selinfos. */
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
@@ -307,23 +248,12 @@ struct thread {
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
int td_intr_nesting_level; /* (k) Interrupt recursion. */
int td_pinned; /* (k) Temporary cpu pin count. */
#ifdef KSE
struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
#else
void *was_td_mailbox; /* Temporary padding. */
#endif
struct ucred *td_ucred; /* (k) Reference to credentials. */
#ifdef KSE
struct thread *td_standin; /* (k + a) Use this for an upcall. */
struct kse_upcall *td_upcall; /* (k + j) Upcall structure. */
u_int new_td_estcpu; /* Temporary padding. */
u_int new_td_slptime; /* Temporary padding. */
#else
void *was_td_standin; /* Temporary padding. */
void *was_td_upcall; /* Temporary padding. */
u_int td_estcpu; /* (j) Sum of the same field in KSEs. */
u_int td_slptime; /* (j) How long completely blocked. */
#endif
u_int td_pticks; /* (k) Statclock hits for profiling */
u_int td_sticks; /* (k) Statclock hits in system mode. */
u_int td_iticks; /* (k) Statclock hits in intr mode. */
@@ -335,11 +265,7 @@ struct thread {
sigset_t td_sigmask; /* (c) Current signal mask. */
volatile u_int td_generation; /* (k) For detection of preemption */
stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
#ifdef KSE
int td_kflags; /* (c) Flags for KSE threading. */
#else
int was_td_kflags; /* Temporary padding. */
#endif
int td_xsig; /* (c) Signal for ptrace */
u_long td_profil_addr; /* (k) Temporary addr until AST. */
u_int td_profil_ticks; /* (k) Temporary ticks until AST. */
@@ -350,15 +276,9 @@ struct thread {
#define td_startcopy td_endzero
u_char td_base_pri; /* (j) Thread base kernel priority. */
u_char td_priority; /* (j) Thread active priority. */
#ifdef KSE
u_char new_td_pri_class; /* Temporary padding. */
u_char new_td_user_pri; /* Temporary padding. */
u_char new_td_base_user_pri; /* Temporary padding. */
#else
u_char td_pri_class; /* (j) Scheduling class. */
u_char td_user_pri; /* (j) User pri from estcpu and nice. */
u_char td_base_user_pri; /* (j) Base user pri */
#endif
u_char td_base_user_pri; /* (j) Base user pri */
#define td_endcopy td_pcb

/*
@@ -427,27 +347,15 @@ struct thread {
#define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */
#define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */
#define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */
#ifdef KSE
#define TDP_UPCALLING 0x00000008 /* This thread is doing an upcall. */
#else
/* 0x00000008 */
#endif
#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */
#ifdef KSE
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
#else
/* 0x00000080 */
#endif
#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
#ifdef KSE
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
#else
/* 0x00000800 */
#endif
#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
#define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */
@@ -467,18 +375,16 @@ struct thread {
#define TDI_LOCK 0x0008 /* Stopped on a lock. */
#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */

#ifdef KSE
/*
 * flags (in kflags) related to M:N threading.
 */
#define TDK_KSEREL 0x0001 /* Blocked in msleep on kg->kg_completed. */
#define TDK_KSEREL 0x0001 /* Blocked in msleep on p->p_completed. */
#define TDK_KSERELSIG 0x0002 /* Blocked in msleep on p->p_siglist. */
#define TDK_WAKEUP 0x0004 /* Thread has been woken by kse_wakeup. */

#define TD_CAN_UNBIND(td) \
(((td)->td_pflags & TDP_CAN_UNBIND) && \
((td)->td_upcall != NULL))
#endif

#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
@@ -520,14 +426,13 @@ struct thread {
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN

#ifdef KSE
/*
 * An upcall is used when returning to userland. If a thread does not have
 * an upcall on return to userland the thread exports its context and exits.
 */
struct kse_upcall {
TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in KSEG. */
struct ksegrp *ku_ksegrp; /* Associated KSEG. */
TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in proc. */
struct proc *ku_proc; /* Associated proc. */
struct thread *ku_owner; /* Owning thread. */
int ku_flags; /* KUF_* flags. */
struct kse_mailbox *ku_mailbox; /* Userland mailbox address. */
@@ -539,38 +444,6 @@ struct kse_upcall {
#define KUF_DOUPCALL 0x00001 /* Do upcall now; don't wait. */
#define KUF_EXITING 0x00002 /* Upcall structure is exiting. */

/*
 * Kernel-scheduled entity group (KSEG). The scheduler considers each KSEG to
 * be an indivisible unit from a time-sharing perspective, though each KSEG may
 * contain multiple KSEs.
 */
struct ksegrp {
struct proc *kg_proc; /* (*) Proc that contains this KSEG. */
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* (*) Queue of KSEGs in kg_proc. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group. */

#define kg_startzero kg_estcpu
u_int kg_estcpu; /* (j) Sum of the same field in KSEs. */
u_int kg_slptime; /* (j) How long completely blocked. */
int kg_numupcalls; /* (j) Num upcalls. */
int kg_upsleeps; /* (c) Num threads in kse_release(). */
struct kse_thr_mailbox *kg_completed; /* (c) Completed thread mboxes. */
int kg_nextupcall; /* (n) Next upcall time. */
int kg_upquantum; /* (n) Quantum to schedule an upcall. */
#define kg_endzero kg_pri_class

#define kg_startcopy kg_endzero
u_char kg_pri_class; /* (j) Scheduling class. */
u_char kg_user_pri; /* (j) User pri from estcpu and nice. */
u_char kg_base_user_pri; /* (j) Base user pri */
#define kg_endcopy kg_numthreads
int kg_numthreads; /* (j) Num threads in total. */
struct kg_sched *kg_sched; /* (*) Scheduler-specific data. */
};
#endif
/*
 * XXX: Does this belong in resource.h or resourcevar.h instead?
 * Resource usage extension. The times in rusage structs in the kernel are
@@ -592,18 +465,12 @@ struct rusage_ext {
};

/*
 * The old fashioned process. May have multiple threads, KSEGRPs
 * and KSEs. Starts off with a single embedded KSEGRP and THREAD.
 * The old fashioned process. May have multiple threads.
 * Starts off with a single embedded THREAD.
 */
struct proc {
LIST_ENTRY(proc) p_list; /* (d) List of all processes. */
#ifdef KSE
TAILQ_HEAD(, ksegrp) p_ksegrps; /* (c)(kg_ksegrp) All KSEGs. */
#else
TAILQ_HEAD(, thread) was_p_ksegrps; /* Temporary padding. */
#endif
TAILQ_HEAD(, thread) p_threads; /* (j)(td_plist) Threads. (shortcut) */
TAILQ_HEAD(, thread) p_suspended; /* (td_runq) Suspended threads. */
struct ucred *p_ucred; /* (c) Process owner's identity. */
struct filedesc *p_fd; /* (b) Open files. */
struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
@@ -611,6 +478,7 @@ struct proc {
struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
struct plimit *p_limit; /* (c) Process limits. */
struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
TAILQ_HEAD(, kse_upcall) p_upcalls; /* All upcalls in the proc. */

/*
 * The following don't make too much sense.
@@ -666,6 +534,14 @@ struct proc {
int p_boundary_count;/* (c) Num threads at user boundary */
int p_pendingcnt; /* how many signals are pending */
struct itimers *p_itimers; /* (c) POSIX interval timers. */
/* from ksegrp */
u_int p_estcpu; /* (j) Sum of the field in threads. */
u_int p_slptime; /* (j) How long completely blocked. */
int p_numupcalls; /* (j) Num upcalls. */
int p_upsleeps; /* (c) Num threads in kse_release(). */
struct kse_thr_mailbox *p_completed; /* (c) Completed thread mboxes. */
int p_nextupcall; /* (n) Next upcall time. */
int p_upquantum; /* (n) Quantum to schedule an upcall. */
/* End area that is zeroed on creation. */
#define p_endzero p_magic

@@ -684,11 +560,6 @@ struct proc {
u_short p_xstat; /* (c) Exit status; also stop sig. */
struct knlist p_klist; /* (c) Knotes attached to this proc. */
int p_numthreads; /* (j) Number of threads. */
#ifdef KSE
int p_numksegrps; /* (c) Number of ksegrps. */
#else
int was_p_numksegrps; /* Temporary padding. */
#endif
struct mdproc p_md; /* Any machine-dependent fields. */
struct callout p_itcallout; /* (h + c) Interval timer callout. */
u_short p_acflag; /* (c) Accounting flags. */
@@ -797,22 +668,13 @@ MALLOC_DECLARE(M_ZOMBIE);

#define FOREACH_PROC_IN_SYSTEM(p) \
LIST_FOREACH((p), &allproc, p_list)
#ifdef KSE
#define FOREACH_KSEGRP_IN_PROC(p, kg) \
TAILQ_FOREACH((kg), &(p)->p_ksegrps, kg_ksegrp)
#define FOREACH_THREAD_IN_GROUP(kg, td) \
TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define FOREACH_UPCALL_IN_GROUP(kg, ku) \
TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
#endif
#define FOREACH_THREAD_IN_PROC(p, td) \
TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
#define FOREACH_UPCALL_IN_PROC(p, ku) \
TAILQ_FOREACH((ku), &(p)->p_upcalls, ku_link)

/* XXXKSE the following lines should probably only be used in 1:1 code: */
#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads)
#ifdef KSE
#define FIRST_KSEGRP_IN_PROC(p) TAILQ_FIRST(&(p)->p_ksegrps)
#endif
/*
|
||||
* We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
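
With ksegrps gone, a per-process walk is a single loop over p_threads
rather than the old group/thread double loop. A minimal sketch of the
non-KSE form (p, and the counting itself, are illustrative, not part of
this change; the caller is assumed to hold the proc lock):

	struct thread *td;
	int count = 0;

	FOREACH_THREAD_IN_PROC(p, td)	/* flat list; no FOREACH_THREAD_IN_GROUP hop */
		count++;
	KASSERT(count == p->p_numthreads, ("thread count mismatch"));
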
@ -923,9 +785,6 @@ extern u_long pgrphash;
extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
#ifdef KSE
extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0. */
#endif
extern struct proc proc0; /* Process slot for swapper. */
extern struct thread thread0; /* Primary thread in proc0. */
extern struct vmspace vmspace0; /* VM space for proc0. */
@ -976,11 +835,7 @@ void pargs_drop(struct pargs *pa);
void pargs_free(struct pargs *pa);
void pargs_hold(struct pargs *pa);
void procinit(void);
#ifdef KSE
void proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td);
#else
void proc_linkup(struct proc *p, struct thread *td);
#endif
void proc_reparent(struct proc *child, struct proc *newparent);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
@ -1008,11 +863,6 @@ void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);

/* New in KSE. */
#ifdef KSE
struct ksegrp *ksegrp_alloc(void);
void ksegrp_free(struct ksegrp *kg);
void ksegrp_stash(struct ksegrp *kg);
#endif
void kse_GC(void);
void kseinit(void);
void cpu_set_upcall(struct thread *td, struct thread *td0);
@ -1023,24 +873,14 @@ void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
void cpu_thread_swapin(struct thread *);
void cpu_thread_swapout(struct thread *);
#ifdef KSE
void ksegrp_link(struct ksegrp *kg, struct proc *p);
void ksegrp_unlink(struct ksegrp *kg);
#endif
struct thread *thread_alloc(void);
void thread_continued(struct proc *p);
void thread_exit(void) __dead2;
int thread_export_context(struct thread *td, int willexit);
void thread_free(struct thread *td);
#ifdef KSE
void thread_link(struct thread *td, struct ksegrp *kg);
#else
void thread_link(struct thread *td, struct proc *p);
#endif
void thread_reap(void);
#ifdef KSE
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
#endif
void thread_signal_add(struct thread *td, ksiginfo_t *);
int thread_single(int how);
void thread_single_end(void);
@ -1058,21 +898,17 @@ void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
void thread_unsuspend_one(struct thread *td);
void thread_unthread(struct thread *td);
#ifdef KSE
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct thread *td);
#endif
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
void thr_exit1(void);
#ifdef KSE
struct kse_upcall *upcall_alloc(void);
void upcall_free(struct kse_upcall *ku);
void upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void upcall_link(struct kse_upcall *ku, struct proc *p);
void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
void upcall_stash(struct kse_upcall *ke);
#endif

#endif /* _KERNEL */

|
@ -75,17 +75,11 @@ struct rtprio {
};

#ifdef _KERNEL
#ifdef KSE
struct ksegrp;
int rtp_to_pri(struct rtprio *, struct ksegrp *);
void pri_to_rtp(struct ksegrp *, struct rtprio *);
#else
struct thread;
int rtp_to_pri(struct rtprio *, struct thread *);
void pri_to_rtp(struct thread *, struct rtprio *);
#endif
#endif
#endif

#ifndef _KERNEL
#include <sys/cdefs.h>
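
The realtime-priority conversions now take a thread instead of a ksegrp.
A sketch of the non-KSE calling convention (td is a placeholder thread;
RTP_PRIO_NORMAL just picks the timeshare class for the example):

	struct rtprio rtp;
	int error;

	rtp.type = RTP_PRIO_NORMAL;
	rtp.prio = 0;
	error = rtp_to_pri(&rtp, td);	/* was rtp_to_pri(&rtp, td->td_ksegrp) */
	if (error == 0)
		pri_to_rtp(td, &rtp);	/* read back the effective class/priority */
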
|
@ -31,7 +31,7 @@

#include <machine/runq.h>

struct kse;
struct td_sched;

/*
* Run queue parameters.
@ -43,7 +43,7 @@ struct kse;
/*
* Head of run queues.
*/
TAILQ_HEAD(rqhead, kse);
TAILQ_HEAD(rqhead, td_sched);

/*
* Bit array which maintains the status of a run queue. When a queue is
@ -62,10 +62,10 @@ struct runq {
struct rqhead rq_queues[RQ_NQS];
};

void runq_add(struct runq *, struct kse *, int flags);
void runq_add(struct runq *, struct td_sched *, int flags);
int runq_check(struct runq *);
struct kse *runq_choose(struct runq *);
struct td_sched *runq_choose(struct runq *);
void runq_init(struct runq *);
void runq_remove(struct runq *, struct kse *);
void runq_remove(struct runq *, struct td_sched *);

#endif
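
The run queue now holds td_sched entries instead of kses; the function
signatures above change in lock step. A sketch of the renamed API (the
helper and its arguments are illustrative; SRQ_BORING is the same flag
setrunqueue() uses elsewhere in this change):

	static void
	runq_cycle(struct runq *rq, struct td_sched *ts)
	{
		runq_add(rq, ts, SRQ_BORING);	/* queue at its current priority */
		if (runq_check(rq)) {		/* any queue non-empty? */
			ts = runq_choose(rq);	/* highest-priority entry */
			runq_remove(rq, ts);	/* dequeue before dispatch */
		}
	}
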
|
@ -86,23 +86,15 @@ void sched_fork(struct thread *td, struct thread *childtd);
* KSE Groups contain scheduling priority information. They record the
* behavior of groups of KSEs and threads.
*/
#ifdef KSE
void sched_class(struct ksegrp *kg, int class);
void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd);
void sched_fork_ksegrp(struct thread *td, struct ksegrp *child);
#else
void sched_class(struct thread *td, int class);
#endif
void sched_nice(struct proc *p, int nice);

/*
* Threads are switched in and out, block on resources, have temporary
* priorities inherited from their ksegs, and use up cpu time.
* priorities inherited from their procs, and use up cpu time.
*/
#ifdef KSE
void sched_exit_thread(struct thread *td, struct thread *child);
void sched_fork_thread(struct thread *td, struct thread *child);
#endif
void sched_lend_prio(struct thread *td, u_char prio);
void sched_lend_user_prio(struct thread *td, u_char pri);
fixpt_t sched_pctcpu(struct thread *td);
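
sched_class() now tags a thread rather than a ksegrp, matching the
pagezero_start() hunk at the end of this change. A sketch of the non-KSE
usage (p is a placeholder single-threaded process):

	struct thread *td;

	mtx_lock_spin(&sched_lock);
	td = FIRST_THREAD_IN_PROC(p);
	sched_class(td, PRI_TIMESHARE);	/* was sched_class(td->td_ksegrp, ...) */
	mtx_unlock_spin(&sched_lock);
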
@ -111,11 +103,7 @@ void sched_sleep(struct thread *td);
void sched_switch(struct thread *td, struct thread *newtd, int flags);
void sched_unlend_prio(struct thread *td, u_char prio);
void sched_unlend_user_prio(struct thread *td, u_char pri);
#ifdef KSE
void sched_user_prio(struct ksegrp *kg, u_char prio);
#else
void sched_user_prio(struct thread *td, u_char prio);
#endif
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);

@ -142,9 +130,6 @@ int sched_is_bound(struct thread *td);
* These procedures tell the process data structure allocation code how
* many bytes to actually allocate.
*/
#ifdef KSE
int sched_sizeof_ksegrp(void);
#endif
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);

@ -162,15 +147,11 @@ sched_unpin(void)

/* temporarily here */
void schedinit(void);
#ifdef KSE
void sched_init_concurrency(struct ksegrp *kg);
void sched_set_concurrency(struct ksegrp *kg, int cuncurrency);
#endif
void sched_init_concurrency(struct proc *p);
void sched_set_concurrency(struct proc *p, int concurrency);
void sched_schedinit(void);
#ifdef KSE
void sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td);
void sched_newproc(struct proc *p, struct thread *td);
void sched_thread_exit(struct thread *td);
#endif
void sched_newthread(struct thread *td);
#endif /* _KERNEL */
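
The concurrency hooks likewise move from ksegrp to proc. A sketch of the
non-KSE form (the CPU count is an arbitrary example value, not something
this change prescribes):

	sched_init_concurrency(p);		/* was sched_init_concurrency(kg) */
	sched_set_concurrency(p, mp_ncpus);	/* e.g. one slot per CPU */
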

|
@ -682,9 +682,6 @@ scheduler(dummy)
ppri = INT_MIN;
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
#ifdef KSE
struct ksegrp *kg;
#endif
if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
continue;
}
@ -696,18 +693,13 @@ scheduler(dummy)
*
*/
if (td->td_inhibitors == TDI_SWAPPED) {
#ifdef KSE
kg = td->td_ksegrp;
pri = p->p_swtime + kg->kg_slptime;
#else
pri = p->p_swtime + td->td_slptime;
#endif
if ((p->p_sflag & PS_SWAPINREQ) == 0) {
pri -= p->p_nice * 8;
}

/*
* if this ksegrp/thread is higher priority
* if this thread is higher priority
* and there is enough space, then select
* this process instead of the previous
* selection.
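
Worked example of the swap-in score in the non-KSE branch (numbers are
illustrative): a process swapped out for 10 ticks whose candidate thread
has slept 15 gives pri = 10 + 15 = 25; with no PS_SWAPINREQ pending and
p_nice = -4, the nice bonus raises it to 25 - (-4 * 8) = 57, so nicer
(positive-nice) processes score lower and are faulted back in later.
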
@ -816,9 +808,6 @@ int action;
{
struct proc *p;
struct thread *td;
#ifdef KSE
struct ksegrp *kg;
#endif
int didswap = 0;

retry:
@ -892,24 +881,15 @@ int action;
* do not swapout a realtime process
* Check all the thread groups..
*/
#ifdef KSE
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (PRI_IS_REALTIME(kg->kg_pri_class))
#else
FOREACH_THREAD_IN_PROC(p, td) {
if (PRI_IS_REALTIME(td->td_pri_class))
#endif
goto nextproc;

/*
* Guarantee swap_idle_threshold1
* time in memory.
*/
#ifdef KSE
if (kg->kg_slptime < swap_idle_threshold1)
#else
if (td->td_slptime < swap_idle_threshold1)
#endif
goto nextproc;

/*
@ -921,16 +901,9 @@ int action;
* This could be refined to support
* swapping out a thread.
*/
#ifdef KSE
FOREACH_THREAD_IN_GROUP(kg, td) {
if ((td->td_priority) < PSOCK ||
!thread_safetoswapout(td))
goto nextproc;
}
#else
if ((td->td_priority) < PSOCK || !thread_safetoswapout(td))
if ((td->td_priority) < PSOCK ||
!thread_safetoswapout(td))
goto nextproc;
#endif
/*
* If the system is under memory stress,
* or if we are swapping
@ -939,20 +912,11 @@ int action;
*/
if (((action & VM_SWAP_NORMAL) == 0) &&
(((action & VM_SWAP_IDLE) == 0) ||
#ifdef KSE
(kg->kg_slptime < swap_idle_threshold2)))
#else
(td->td_slptime < swap_idle_threshold2)))
#endif
goto nextproc;

#ifdef KSE
if (minslptime > kg->kg_slptime)
minslptime = kg->kg_slptime;
#else
if (minslptime > td->td_slptime)
minslptime = td->td_slptime;
#endif
}

/*
|
@ -179,11 +179,7 @@ pagezero_start(void __unused *arg)
PROC_UNLOCK(pagezero_proc);
mtx_lock_spin(&sched_lock);
td = FIRST_THREAD_IN_PROC(pagezero_proc);
#ifdef KSE
sched_class(td->td_ksegrp, PRI_IDLE);
#else
sched_class(td, PRI_IDLE);
#endif
sched_prio(td, PRI_MAX_IDLE);
setrunqueue(td, SRQ_BORING);
mtx_unlock_spin(&sched_lock);