- Change mi_switch() and sched_switch() to accept an optional thread to
  switch to.  If a non-NULL thread pointer is passed in, then the CPU will
  switch to that thread directly rather than calling choosethread() to pick
  a thread to switch to.
- Make sched_switch() aware of idle threads and know to do TD_SET_CAN_RUN()
  instead of sticking them on the run queue, rather than requiring all
  callers of mi_switch() to know to do this if they can be called from an
  idle thread.
- Move constants for arguments to mi_switch() and thread_single() out of
  the middle of the function prototypes and up above into their own
  section.
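For illustration only (not part of the diff below): a minimal sketch of the new calling convention, where example_switch() and its target argument are hypothetical names. A caller that already knows which thread should run next can hand it straight to mi_switch(); passing NULL keeps the old behaviour of letting choosethread() pick.

    static void
    example_switch(struct thread *target)
    {
        mtx_lock_spin(&sched_lock);
        if (target != NULL)
            /* Switch directly to the caller-chosen thread. */
            mi_switch(SW_VOL, target);
        else
            /* NULL: fall back to choosethread(), as before this change. */
            mi_switch(SW_VOL, NULL);
        mtx_unlock_spin(&sched_lock);
    }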
commit 1b16b181d1
parent 4b39413aeb
@@ -58,7 +58,7 @@ idle_setup(void *dummy)
     p->p_flag |= P_NOLOAD;
     mtx_lock_spin(&sched_lock);
     td = FIRST_THREAD_IN_PROC(p);
-    td->td_state = TDS_CAN_RUN;
+    TD_SET_CAN_RUN(td);
     td->td_flags |= TDF_IDLETD;
     td->td_priority = PRI_MAX_IDLE;
     mtx_unlock_spin(&sched_lock);
@@ -86,8 +86,7 @@ idle_proc(void *dummy)
             cpu_idle();
 
         mtx_lock_spin(&sched_lock);
-        td->td_state = TDS_CAN_RUN;
-        mi_switch(SW_VOL);
+        mi_switch(SW_VOL, NULL);
         mtx_unlock_spin(&sched_lock);
     }
 }
@@ -416,7 +416,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
            ("ithread_schedule: Bad state for curthread."));
        if (ctd->td_flags & TDF_IDLETD)
            ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
-       mi_switch(SW_INVOL);
+       mi_switch(SW_INVOL, NULL);
    } else {
        curthread->td_flags |= TDF_NEEDRESCHED;
    }
@@ -618,7 +618,7 @@ restart:
        if (!ithd->it_need) {
            TD_SET_IWAIT(td);
            CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
-           mi_switch(SW_VOL);
+           mi_switch(SW_VOL, NULL);
            CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
        }
        mtx_unlock_spin(&sched_lock);
@@ -695,7 +695,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
            "_mtx_unlock_sleep: %p switching out lock=%p", m,
            (void *)m->mtx_lock);
 
-   mi_switch(SW_INVOL);
+   mi_switch(SW_INVOL, NULL);
    if (LOCK_LOG_TEST(&m->mtx_object, opts))
        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
            m, (void *)m->mtx_lock);
@@ -307,7 +307,7 @@ boot(int howto)
            /*
             * Allow interrupt threads to run
             */
-           mi_switch(SW_VOL);
+           mi_switch(SW_VOL, NULL);
            mtx_unlock_spin(&sched_lock);
            DELAY(1000);
        }
@@ -2019,7 +2019,7 @@ ptracestop(struct thread *td, int sig)
        thread_suspend_one(td);
        PROC_UNLOCK(p);
        DROP_GIANT();
-       mi_switch(SW_INVOL);
+       mi_switch(SW_INVOL, NULL);
        mtx_unlock_spin(&sched_lock);
        PICKUP_GIANT();
    }
@@ -2168,7 +2168,7 @@ issignal(td)
            thread_suspend_one(td);
            PROC_UNLOCK(p);
            DROP_GIANT();
-           mi_switch(SW_INVOL);
+           mi_switch(SW_INVOL, NULL);
            mtx_unlock_spin(&sched_lock);
            PICKUP_GIANT();
            PROC_LOCK(p);
@@ -430,7 +430,7 @@ uio_yield(void)
    mtx_lock_spin(&sched_lock);
    DROP_GIANT();
    sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */
-   mi_switch(SW_INVOL);
+   mi_switch(SW_INVOL, NULL);
    mtx_unlock_spin(&sched_lock);
    PICKUP_GIANT();
 }
@@ -285,7 +285,7 @@ wakeup_one(ident)
  * The machine independent parts of context switching.
  */
 void
-mi_switch(int flags)
+mi_switch(int flags, struct thread *newtd)
 {
    struct bintime new_switchtime;
    struct thread *td;
@@ -349,7 +349,7 @@ mi_switch(int flags)
        (void *)td, (long)p->p_pid, p->p_comm);
    if (td->td_proc->p_flag & P_SA)
        thread_switchout(td);
-   sched_switch(td);
+   sched_switch(td, newtd);
 
    CTR3(KTR_PROC, "mi_switch: new thread %p (pid %ld, %s)",
        (void *)td, (long)p->p_pid, p->p_comm);
@@ -468,7 +468,7 @@ yield(struct thread *td, struct yield_args *uap)
    mtx_assert(&Giant, MA_NOTOWNED);
    mtx_lock_spin(&sched_lock);
    sched_prio(td, PRI_MAX_TIMESHARE);
-   mi_switch(SW_VOL);
+   mi_switch(SW_VOL, NULL);
    mtx_unlock_spin(&sched_lock);
    td->td_retval[0] = 0;
    return (0);
@@ -908,7 +908,7 @@ thread_single(int force_exit)
         */
        thread_suspend_one(td);
        PROC_UNLOCK(p);
-       mi_switch(SW_VOL);
+       mi_switch(SW_VOL, NULL);
        mtx_unlock_spin(&sched_lock);
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
@@ -1011,7 +1011,7 @@ thread_suspend_check(int return_instead)
            }
        }
        PROC_UNLOCK(p);
-       mi_switch(SW_INVOL);
+       mi_switch(SW_INVOL, NULL);
        mtx_unlock_spin(&sched_lock);
        PROC_LOCK(p);
    }
@@ -637,9 +637,8 @@ sched_sleep(struct thread *td)
 }
 
 void
-sched_switch(struct thread *td)
+sched_switch(struct thread *td, struct thread *newtd)
 {
-   struct thread *newtd;
    struct kse *ke;
    struct proc *p;
 
@@ -651,6 +650,8 @@ sched_switch(struct thread *td)
 
    if ((p->p_flag & P_NOLOAD) == 0)
        sched_tdcnt--;
+   if (newtd != NULL && (newtd->td_proc->p_flag & P_NOLOAD) == 0)
+       sched_tdcnt++;
    td->td_lastcpu = td->td_oncpu;
    td->td_last_kse = ke;
    td->td_flags &= ~TDF_NEEDRESCHED;
@@ -658,9 +659,12 @@ sched_switch(struct thread *td)
    /*
     * At the last moment, if this thread is still marked RUNNING,
     * then put it back on the run queue as it has not been suspended
-    * or stopped or any thing else similar.
+    * or stopped or any thing else similar.  We never put the idle
+    * threads on the run queue, however.
     */
-   if (TD_IS_RUNNING(td)) {
+   if (td == PCPU_GET(idlethread))
+       TD_SET_CAN_RUN(td);
+   else if (TD_IS_RUNNING(td)) {
        /* Put us back on the run queue (kse and all). */
        setrunqueue(td);
    } else if (p->p_flag & P_SA) {
@@ -671,7 +675,8 @@ sched_switch(struct thread *td)
         */
        kse_reassign(ke);
    }
-   newtd = choosethread();
+   if (newtd == NULL)
+       newtd = choosethread();
    if (td != newtd)
        cpu_switch(td, newtd);
    sched_lock.mtx_lock = (uintptr_t)td;
@@ -830,7 +835,7 @@ sched_bind(struct thread *td, int cpu)
 
    ke->ke_state = KES_THREAD;
 
-   mi_switch(SW_VOL);
+   mi_switch(SW_VOL, NULL);
 #endif
 }
 
@@ -788,7 +788,7 @@ kseq_choose(struct kseq *kseq)
        ke = runq_choose(kseq->ksq_curr);
        if (ke == NULL) {
            /*
-            * We already swaped once and didn't get anywhere.
+            * We already swapped once and didn't get anywhere.
             */
            if (swap)
                break;
@@ -1128,9 +1128,8 @@ sched_prio(struct thread *td, u_char prio)
 }
 
 void
-sched_switch(struct thread *td)
+sched_switch(struct thread *td, struct thread *newtd)
 {
-   struct thread *newtd;
    struct kse *ke;
 
    mtx_assert(&sched_lock, MA_OWNED);
@@ -1147,7 +1146,9 @@ sched_switch(struct thread *td)
     * to the new cpu. This is the case in sched_bind().
     */
    if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
-       if (TD_IS_RUNNING(td)) {
+       if (td == PCPU_GET(idlethread))
+           TD_SET_CAN_RUN(td);
+       else if (TD_IS_RUNNING(td)) {
            kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
            setrunqueue(td);
        } else {
@@ -1163,7 +1164,10 @@ sched_switch(struct thread *td)
            kse_reassign(ke);
        }
    }
-   newtd = choosethread();
+   if (newtd == NULL)
+       newtd = choosethread();
+   else
+       kseq_load_add(KSEQ_SELF(), newtd->td_kse);
    if (td != newtd)
        cpu_switch(td, newtd);
    sched_lock.mtx_lock = (uintptr_t)td;
@@ -409,7 +409,7 @@ sleepq_switch(void *wchan)
 
    sched_sleep(td);
    TD_SET_SLEEPING(td);
-   mi_switch(SW_VOL);
+   mi_switch(SW_VOL, NULL);
    KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
    CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
        (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
@@ -449,7 +449,7 @@ sleepq_check_timeout(void)
    else if (callout_stop(&td->td_slpcallout) == 0) {
        td->td_flags |= TDF_TIMEOUT;
        TD_SET_SLEEPING(td);
-       mi_switch(SW_INVOL);
+       mi_switch(SW_INVOL, NULL);
    }
    return (0);
 }
@@ -242,7 +242,7 @@ ast(struct trapframe *framep)
 #endif
        mtx_lock_spin(&sched_lock);
        sched_prio(td, kg->kg_user_pri);
-       mi_switch(SW_INVOL);
+       mi_switch(SW_INVOL, NULL);
        mtx_unlock_spin(&sched_lock);
 #ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
@@ -559,7 +559,7 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock,
        CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
            lock, lock->lo_name);
 
-   mi_switch(SW_VOL);
+   mi_switch(SW_VOL, NULL);
 
    if (LOCK_LOG_TEST(lock, 0))
        CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
@@ -675,6 +675,16 @@ struct proc {
 
 #ifdef _KERNEL
 
+/* Flags for mi_switch(). */
+#define SW_VOL      0x0001      /* Voluntary switch. */
+#define SW_INVOL    0x0002      /* Involuntary switch. */
+
+/* How values for thread_single(). */
+#define SINGLE_NO_EXIT  0
+#define SINGLE_EXIT     1
+
+/* XXXKSE: Missing values for thread_signal_check(). */
+
 #ifdef MALLOC_DECLARE
 MALLOC_DECLARE(M_PARGS);
 MALLOC_DECLARE(M_PGRP);
@@ -840,10 +850,7 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
 void   fork_return(struct thread *, struct trapframe *);
 int    inferior(struct proc *p);
 int    leavepgrp(struct proc *p);
-void   mi_switch(int flags);
-/* Flags for mi_switch(). */
-#define SW_VOL      0x0001      /* Voluntary switch. */
-#define SW_INVOL    0x0002      /* Involuntary switch. */
+void   mi_switch(int flags, struct thread *newtd);
 int    p_candebug(struct thread *td, struct proc *p);
 int    p_cansee(struct thread *td, struct proc *p);
 int    p_cansched(struct thread *td, struct proc *p);
@@ -906,8 +913,6 @@ void thread_link(struct thread *td, struct ksegrp *kg);
 void   thread_reap(void);
 struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
 int    thread_single(int how);
-#define SINGLE_NO_EXIT 0    /* values for 'how' */
-#define SINGLE_EXIT 1
 void   thread_single_end(void);
 void   thread_stash(struct thread *td);
 int    thread_suspend_check(int how);
|
@ -66,7 +66,7 @@ void sched_fork_thread(struct thread *td, struct thread *child);
|
||||
fixpt_t sched_pctcpu(struct thread *td);
|
||||
void sched_prio(struct thread *td, u_char prio);
|
||||
void sched_sleep(struct thread *td);
|
||||
void sched_switch(struct thread *td);
|
||||
void sched_switch(struct thread *td, struct thread *newtd);
|
||||
void sched_userret(struct thread *td);
|
||||
void sched_wakeup(struct thread *td);
|
||||
|
||||
|
@@ -153,7 +153,7 @@ vm_pagezero(void __unused *arg)
        pages += vm_page_zero_idle();
        if (pages > idlezero_maxrun || sched_runnable()) {
            mtx_lock_spin(&sched_lock);
-           mi_switch(SW_VOL);
+           mi_switch(SW_VOL, NULL);
            mtx_unlock_spin(&sched_lock);
            pages = 0;
        }