From 060563ec50bb91f09342fce9faf1878fdaafff0b Mon Sep 17 00:00:00 2001 From: Julian Elischer Date: Thu, 10 Apr 2003 17:35:44 +0000 Subject: [PATCH] Move the _oncpu entry from the KSE to the thread. The entry in the KSE still exists but its purpose will change a bit when we add the ability to lock a KSE to a cpu. --- sys/amd64/amd64/genassym.c | 1 - sys/ddb/db_ps.c | 2 +- sys/i386/i386/genassym.c | 1 - sys/kern/init_main.c | 2 +- sys/kern/kern_fork.c | 2 +- sys/kern/kern_kse.c | 1 + sys/kern/kern_mutex.c | 3 ++- sys/kern/kern_proc.c | 2 +- sys/kern/kern_thread.c | 1 + sys/kern/sched_4bsd.c | 6 +++--- sys/kern/sched_ule.c | 6 +++--- sys/kern/subr_smp.c | 2 +- sys/kern/subr_turnstile.c | 3 ++- sys/kern/subr_witness.c | 2 +- sys/sys/proc.h | 1 + 15 files changed, 19 insertions(+), 16 deletions(-) diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c index 710b7b0a3670..0021bd4d1527 100644 --- a/sys/amd64/amd64/genassym.c +++ b/sys/amd64/amd64/genassym.c @@ -86,7 +86,6 @@ ASSYM(P_UAREA, offsetof(struct proc, p_uarea)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); -ASSYM(TD_KSE, offsetof(struct thread, td_kse)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level)); ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest)); diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c index 430dae8acee9..17992ea47b0e 100644 --- a/sys/ddb/db_ps.c +++ b/sys/ddb/db_ps.c @@ -178,7 +178,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td) db_printf("[RUNQ]"); break; case TDS_RUNNING: - db_printf("[CPU %d]", td->td_kse->ke_oncpu); + db_printf("[CPU %d]", td->td_oncpu); break; default: panic("unknown thread state"); diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c index 710b7b0a3670..0021bd4d1527 100644 --- a/sys/i386/i386/genassym.c +++ b/sys/i386/i386/genassym.c @@ 
-86,7 +86,6 @@ ASSYM(P_UAREA, offsetof(struct proc, p_uarea)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); -ASSYM(TD_KSE, offsetof(struct thread, td_kse)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level)); ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest)); diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index d135c9ba422b..08f273ac0755 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -376,7 +376,7 @@ proc0_init(void *dummy __unused) td->td_priority = PVM; td->td_base_pri = PUSER; td->td_kse = ke; /* XXXKSE */ - ke->ke_oncpu = 0; + td->td_oncpu = 0; ke->ke_state = KES_THREAD; ke->ke_thread = td; p->p_peers = 0; diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 9a74d2516b42..4cec7e317ec6 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -767,7 +767,7 @@ fork_exit(callout, arg, frame) } td = curthread; p = td->td_proc; - td->td_kse->ke_oncpu = PCPU_GET(cpuid); + td->td_oncpu = PCPU_GET(cpuid); p->p_state = PRS_NORMAL; /* * Finish setting up thread glue. We need to initialize diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c index 5ffa8f6872e1..bfc97ad597fe 100644 --- a/sys/kern/kern_kse.c +++ b/sys/kern/kern_kse.c @@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg) td = (struct thread *)mem; td->td_state = TDS_INACTIVE; + td->td_oncpu = NOCPU; } /* diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c index 6a734d05bfce..072293156115 100644 --- a/sys/kern/kern_mutex.c +++ b/sys/kern/kern_mutex.c @@ -72,7 +72,8 @@ /* XXXKSE This test will change. */ #define thread_running(td) \ - ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU) + (td->td_state == TDS_RUNNING) + /* ((td)->td_oncpu != NOCPU) */ /* * Lock classes for sleep and spin mutexes. 
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index 26fb2b7c228d..aa1e5c5da530 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -737,13 +737,13 @@ fill_kinfo_proc(p, kp) kp->ki_pri.pri_level = td->td_priority; kp->ki_pri.pri_native = td->td_base_pri; kp->ki_lastcpu = td->td_lastcpu; + kp->ki_oncpu = td->td_oncpu; kp->ki_tdflags = td->td_flags; kp->ki_pcb = td->td_pcb; kp->ki_kstack = (void *)td->td_kstack; /* Things in the kse */ kp->ki_rqindex = ke->ke_rqindex; - kp->ki_oncpu = ke->ke_oncpu; kp->ki_pctcpu = sched_pctcpu(ke); } else { kp->ki_oncpu = -1; diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 5ffa8f6872e1..bfc97ad597fe 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg) td = (struct thread *)mem; td->td_state = TDS_INACTIVE; + td->td_oncpu = NOCPU; } /* diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c index fe6dfd9d9aa6..3c4f0fe4b541 100644 --- a/sys/kern/sched_4bsd.c +++ b/sys/kern/sched_4bsd.c @@ -518,7 +518,7 @@ sched_sleep(struct thread *td, u_char prio) void sched_switchin(struct thread *td) { - td->td_kse->ke_oncpu = PCPU_GET(cpuid); + td->td_oncpu = PCPU_GET(cpuid); } void @@ -532,9 +532,9 @@ sched_switchout(struct thread *td) KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?")); - td->td_lastcpu = ke->ke_oncpu; + td->td_lastcpu = td->td_oncpu; td->td_last_kse = ke; - ke->ke_oncpu = NOCPU; + td->td_oncpu = NOCPU; td->td_flags &= ~TDF_NEEDRESCHED; /* * At the last moment, if this thread is still marked RUNNING, diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c index 190b4d64ee9a..bd9759cd92a9 100644 --- a/sys/kern/sched_ule.c +++ b/sys/kern/sched_ule.c @@ -643,8 +643,8 @@ sched_switchout(struct thread *td) ke = td->td_kse; td->td_last_kse = ke; - td->td_lastcpu = ke->ke_oncpu; - ke->ke_oncpu = NOCPU; + td->td_lastcpu = td->td_oncpu; + td->td_oncpu = NOCPU; td->td_flags &= ~TDF_NEEDRESCHED; if 
(TD_IS_RUNNING(td)) { @@ -667,7 +667,7 @@ sched_switchin(struct thread *td) /* struct kse *ke = td->td_kse; */ mtx_assert(&sched_lock, MA_OWNED); - td->td_kse->ke_oncpu = PCPU_GET(cpuid); + td->td_oncpu = PCPU_GET(cpuid); #if SCHED_STRICT_RESCHED if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE && td->td_priority != td->td_ksegrp->kg_user_pri) diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c index f65987f53091..897e0f0bb213 100644 --- a/sys/kern/subr_smp.c +++ b/sys/kern/subr_smp.c @@ -142,7 +142,7 @@ forward_signal(struct thread *td) if (td == curthread) return; - id = td->td_kse->ke_oncpu; + id = td->td_oncpu; if (id == NOCPU) return; ipi_selected(1 << id, IPI_AST); diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index 6a734d05bfce..072293156115 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -72,7 +72,8 @@ /* XXXKSE This test will change. */ #define thread_running(td) \ - ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU) + (td->td_state == TDS_RUNNING) + /* ((td)->td_oncpu != NOCPU) */ /* * Lock classes for sleep and spin mutexes. diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c index d89698b41d33..b2171fdbe061 100644 --- a/sys/kern/subr_witness.c +++ b/sys/kern/subr_witness.c @@ -1714,7 +1714,7 @@ witness_list(struct thread *td) * if td is currently executing on some other CPU and holds spin locks * as we won't display those locks. If we had a MI way of getting * the per-cpu data for a given cpu then we could use - * td->td_kse->ke_oncpu to get the list of spinlocks for this thread + * td->td_oncpu to get the list of spinlocks for this thread * and "fix" this. * * That still wouldn't really fix this unless we locked sched_lock diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 5d2bb04f7aaa..bc921a85f638 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -287,6 +287,7 @@ struct thread { u_char td_lastcpu; /* (j) Last cpu we were on. */ u_char td_inktr; /* (k) Currently handling a KTR. 
*/ u_char td_inktrace; /* (k) Currently handling a KTRACE. */ + u_char td_oncpu; /* (j) Which cpu we are on. */ short td_locks; /* (k) DEBUG: lockmgr count of locks */ struct mtx *td_blocked; /* (j) Mutex process is blocked on. */ struct ithd *td_ithd; /* (b) For interrupt threads only. */