Move the _oncpu entry from the KSE to the thread.

The entry in the KSE still exists but its purpose will change a bit
when we add the ability to lock a KSE to a cpu.
Julian Elischer 2003-04-10 17:35:44 +00:00
parent fff890d0e8
commit 060563ec50
15 changed files with 19 additions and 16 deletions
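
The change is mechanical at every consumer: code that used to chase the thread's KSE pointer to find the CPU number now reads it straight off the thread. The sketch below is illustrative only and is not code from this commit; the structures are simplified stand-ins for the real ones in sys/proc.h, and the NOCPU value shown is a placeholder.

#include <stddef.h>

typedef unsigned char u_char;
#define NOCPU   ((u_char)0xff)         /* "not on any CPU"; illustrative value */

/* Simplified stand-ins; the real structures carry many more fields. */
struct kse {
        u_char  ke_oncpu;               /* old home of the running-CPU index */
};

struct thread {
        struct kse *td_kse;             /* may be NULL when no KSE is attached */
        u_char  td_lastcpu;             /* last CPU the thread ran on */
        u_char  td_oncpu;               /* new home: CPU we are on, or NOCPU */
};

/* Before: two dereferences, and td_kse has to be checked against NULL. */
static u_char
oncpu_before(const struct thread *td)
{
        return (td->td_kse != NULL ? td->td_kse->ke_oncpu : NOCPU);
}

/* After: a single field read that is always valid for the thread. */
static u_char
oncpu_after(const struct thread *td)
{
        return (td->td_oncpu);
}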


@@ -86,7 +86,6 @@ ASSYM(P_UAREA, offsetof(struct proc, p_uarea));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan));
 ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
-ASSYM(TD_KSE, offsetof(struct thread, td_kse));
 ASSYM(TD_PROC, offsetof(struct thread, td_proc));
 ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level));
 ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest));


@@ -178,7 +178,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
                 db_printf("[RUNQ]");
                 break;
         case TDS_RUNNING:
-                db_printf("[CPU %d]", td->td_kse->ke_oncpu);
+                db_printf("[CPU %d]", td->td_oncpu);
                 break;
         default:
                 panic("unknown thread state");


@@ -86,7 +86,6 @@ ASSYM(P_UAREA, offsetof(struct proc, p_uarea));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TD_WCHAN, offsetof(struct thread, td_wchan));
 ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
-ASSYM(TD_KSE, offsetof(struct thread, td_kse));
 ASSYM(TD_PROC, offsetof(struct thread, td_proc));
 ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level));
 ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest));


@@ -376,7 +376,7 @@ proc0_init(void *dummy __unused)
         td->td_priority = PVM;
         td->td_base_pri = PUSER;
         td->td_kse = ke;        /* XXXKSE */
-        ke->ke_oncpu = 0;
+        td->td_oncpu = 0;
         ke->ke_state = KES_THREAD;
         ke->ke_thread = td;
         p->p_peers = 0;


@@ -767,7 +767,7 @@ fork_exit(callout, arg, frame)
         }
         td = curthread;
         p = td->td_proc;
-        td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+        td->td_oncpu = PCPU_GET(cpuid);
         p->p_state = PRS_NORMAL;
         /*
          * Finish setting up thread glue. We need to initialize


@@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg)
         td = (struct thread *)mem;
         td->td_state = TDS_INACTIVE;
+        td->td_oncpu = NOCPU;
 }
 /*


@@ -72,7 +72,8 @@
 /* XXXKSE This test will change. */
 #define thread_running(td) \
-        ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
+        (td->td_state == TDS_RUNNING)
+        /* ((td)->td_oncpu != NOCPU) */
 /*
  * Lock classes for sleep and spin mutexes.


@@ -737,13 +737,13 @@ fill_kinfo_proc(p, kp)
                 kp->ki_pri.pri_level = td->td_priority;
                 kp->ki_pri.pri_native = td->td_base_pri;
                 kp->ki_lastcpu = td->td_lastcpu;
+                kp->ki_oncpu = td->td_oncpu;
                 kp->ki_tdflags = td->td_flags;
                 kp->ki_pcb = td->td_pcb;
                 kp->ki_kstack = (void *)td->td_kstack;
                 /* Things in the kse */
                 kp->ki_rqindex = ke->ke_rqindex;
-                kp->ki_oncpu = ke->ke_oncpu;
                 kp->ki_pctcpu = sched_pctcpu(ke);
         } else {
                 kp->ki_oncpu = -1;


@@ -139,6 +139,7 @@ thread_ctor(void *mem, int size, void *arg)
         td = (struct thread *)mem;
         td->td_state = TDS_INACTIVE;
+        td->td_oncpu = NOCPU;
 }
 /*


@@ -518,7 +518,7 @@ sched_sleep(struct thread *td, u_char prio)
 void
 sched_switchin(struct thread *td)
 {
-        td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+        td->td_oncpu = PCPU_GET(cpuid);
 }
 void
@@ -532,9 +532,9 @@ sched_switchout(struct thread *td)
         KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
-        td->td_lastcpu = ke->ke_oncpu;
+        td->td_lastcpu = td->td_oncpu;
         td->td_last_kse = ke;
-        ke->ke_oncpu = NOCPU;
+        td->td_oncpu = NOCPU;
         td->td_flags &= ~TDF_NEEDRESCHED;
         /*
          * At the last moment, if this thread is still marked RUNNING,


@@ -643,8 +643,8 @@ sched_switchout(struct thread *td)
         ke = td->td_kse;
         td->td_last_kse = ke;
-        td->td_lastcpu = ke->ke_oncpu;
-        ke->ke_oncpu = NOCPU;
+        td->td_lastcpu = td->td_oncpu;
+        td->td_oncpu = NOCPU;
         td->td_flags &= ~TDF_NEEDRESCHED;
         if (TD_IS_RUNNING(td)) {
@@ -667,7 +667,7 @@ sched_switchin(struct thread *td)
         /* struct kse *ke = td->td_kse; */
         mtx_assert(&sched_lock, MA_OWNED);
-        td->td_kse->ke_oncpu = PCPU_GET(cpuid);
+        td->td_oncpu = PCPU_GET(cpuid);
 #if SCHED_STRICT_RESCHED
         if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
             td->td_priority != td->td_ksegrp->kg_user_pri)


@@ -142,7 +142,7 @@ forward_signal(struct thread *td)
         if (td == curthread)
                 return;
-        id = td->td_kse->ke_oncpu;
+        id = td->td_oncpu;
         if (id == NOCPU)
                 return;
         ipi_selected(1 << id, IPI_AST);


@@ -72,7 +72,8 @@
 /* XXXKSE This test will change. */
 #define thread_running(td) \
-        ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
+        (td->td_state == TDS_RUNNING)
+        /* ((td)->td_oncpu != NOCPU) */
 /*
  * Lock classes for sleep and spin mutexes.


@@ -1714,7 +1714,7 @@ witness_list(struct thread *td)
          * if td is currently executing on some other CPU and holds spin locks
          * as we won't display those locks. If we had a MI way of getting
          * the per-cpu data for a given cpu then we could use
-         * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
+         * td->td_oncpu to get the list of spinlocks for this thread
          * and "fix" this.
          *
          * That still wouldn't really fix this unless we locked sched_lock


@@ -287,6 +287,7 @@ struct thread {
         u_char          td_lastcpu;     /* (j) Last cpu we were on. */
         u_char          td_inktr;       /* (k) Currently handling a KTR. */
         u_char          td_inktrace;    /* (k) Currently handling a KTRACE. */
+        u_char          td_oncpu;       /* (j) Which cpu we are on. */
         short           td_locks;       /* (k) DEBUG: lockmgr count of locks */
         struct mtx      *td_blocked;    /* (j) Mutex process is blocked on. */
         struct ithd     *td_ithd;       /* (b) For interrupt threads only. */
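
The new td_oncpu field carries the same (j) locking annotation as td_lastcpu, consistent with the mtx_assert(&sched_lock, MA_OWNED) that precedes the td_oncpu assignment in one of the scheduler hunks above. Continuing the stand-in definitions from the sketch near the top (the function names here are hypothetical, and the current CPU id is passed in explicitly where the kernel would use PCPU_GET(cpuid)), the switch-out/switch-in bookkeeping shown in the scheduler hunks reduces to this pattern:

/* Leaving the CPU: remember where we ran, then mark the thread off-CPU. */
static void
switchout_bookkeeping(struct thread *td)
{
        td->td_lastcpu = td->td_oncpu;
        td->td_oncpu = NOCPU;
}

/* Coming back onto a CPU: record which one we are now running on. */
static void
switchin_bookkeeping(struct thread *td, u_char curcpu)
{
        td->td_oncpu = curcpu;
}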