Get rid of struct proc p_sched and struct thread td_sched pointers.

p_sched is unused.

The struct td_sched is always co-allocated with the struct thread,
except for thread0.  Avoid the useless indirection; instead, compute
the td_sched location with simple pointer arithmetic in td_get_sched(9)
(see the sketch after the change summary below).  For thread0, which is
statically allocated, create a structure that emulates the layout of
the dynamic allocation.

Reviewed by:	jhb (previous version)
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D6711
Konstantin Belousov 2016-06-05 17:04:03 +00:00
parent 314381b529
commit 93ccd6bf87
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=301456
9 changed files with 99 additions and 84 deletions
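
In outline, the trick looks like this (a standalone C sketch with toy
stand-ins for the kernel structures, not the kernel code itself; the
real definitions appear in the sys/sys/proc.h hunk below):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins; the real structures are much larger, but the layout
 * trick is the same. */
struct thread { uint64_t td_tid; };
struct td_sched { uint64_t ts_slice; };

/* One allocation holds both structures; the scheduler data sits
 * immediately after the thread, so its address is simply &td[1]. */
static struct td_sched *
td_get_sched(struct thread *td)
{

	return ((struct td_sched *)&td[1]);
}

/* Static case (thread0): a wrapper that emulates the dynamic layout. */
static struct thread0_storage {
	struct thread t0st_thread;
	uint64_t t0st_sched[10];
} thread0_st;

int
main(void)
{
	struct thread *td;

	/* Dynamic case: co-allocate, as the kernel's thread zone does. */
	td = malloc(sizeof(struct thread) + sizeof(struct td_sched));
	if (td == NULL)
		return (1);
	td_get_sched(td)->ts_slice = 1;
	td_get_sched(&thread0_st.t0st_thread)->ts_slice = 2;
	printf("%ju %ju\n", (uintmax_t)td_get_sched(td)->ts_slice,
	    (uintmax_t)td_get_sched(&thread0_st.t0st_thread)->ts_slice);
	free(td);
	return (0);
}

Since the result depends only on td, the real td_get_sched(9) is marked
__pure2, which lets the compiler coalesce the repeated calls visible in
the hunks below.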

sys/kern/init_main.c

@@ -99,7 +99,7 @@ void mi_startup(void); /* Should be elsewhere */
 static struct session session0;
 static struct pgrp pgrp0;
 struct proc proc0;
-struct thread thread0 __aligned(16);
+struct thread0_storage thread0_st __aligned(16);
 struct vmspace vmspace0;
 struct proc *initproc;

sys/kern/kern_fork.c

@@ -1011,7 +1011,7 @@ fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
 	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
 	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
-	    td, td->td_sched, p->p_pid, td->td_name);
+	    td, td_get_sched(td), p->p_pid, td->td_name);
 	sched_fork_exit(td);
 	/*

sys/kern/kern_proc.c

@@ -237,7 +237,6 @@ proc_init(void *mem, int size, int flags)
 	p = (struct proc *)mem;
 	SDT_PROBE3(proc, , init, entry, p, size, flags);
-	p->p_sched = (struct p_sched *)&p[1];
 	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK | MTX_NEW);
 	mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_NEW);
 	mtx_init(&p->p_statmtx, "pstatl", NULL, MTX_SPIN | MTX_NEW);

sys/kern/kern_synch.c

@@ -441,7 +441,7 @@ mi_switch(int flags, struct thread *newtd)
 	PCPU_INC(cnt.v_swtch);
 	PCPU_SET(switchticks, ticks);
 	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
-	    td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
+	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
 #if (KTR_COMPILE & KTR_SCHED) != 0
 	if (TD_IS_IDLETHREAD(td))
 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
@@ -457,7 +457,7 @@ mi_switch(int flags, struct thread *newtd)
 	    "prio:%d", td->td_priority);
 	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
-	    td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
+	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
 	/*
 	 * If the last thread was exiting, finish cleaning it up.

sys/kern/kern_thread.c

@@ -211,7 +211,6 @@ thread_init(void *mem, int size, int flags)
 	td->td_turnstile = turnstile_alloc();
 	td->td_rlqe = NULL;
 	EVENTHANDLER_INVOKE(thread_init, td);
-	td->td_sched = (struct td_sched *)&td[1];
 	umtx_thread_init(td);
 	td->td_kstack = 0;
 	td->td_sel = NULL;

sys/kern/sched_4bsd.c

@@ -117,7 +117,10 @@ struct td_sched {
 #define THREAD_CAN_SCHED(td, cpu) \
 	CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

-static struct td_sched td_sched0;
+_Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
+    sizeof(struct thread0_storage),
+    "increase struct thread0_storage.t0st_sched size");

 static struct mtx sched_lock;

 static int realstathz = 127; /* stathz is sometimes 0 and run off of hz. */
@@ -491,8 +494,8 @@ schedcpu(void)
 		}
 		FOREACH_THREAD_IN_PROC(p, td) {
 			awake = 0;
+			ts = td_get_sched(td);
 			thread_lock(td);
-			ts = td->td_sched;
 			/*
 			 * Increment sleep time (if sleeping). We
 			 * ignore overflow, as above.
@@ -596,7 +599,7 @@ updatepri(struct thread *td)
 	fixpt_t loadfac;
 	unsigned int newcpu;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	loadfac = loadfactor(averunnable.ldavg[0]);
 	if (ts->ts_slptime > 5 * loadfac)
 		ts->ts_estcpu = 0;
@@ -621,7 +624,8 @@ resetpriority(struct thread *td)
 	if (td->td_pri_class != PRI_TIMESHARE)
 		return;
-	newpriority = PUSER + td->td_sched->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
+	newpriority = PUSER +
+	    td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
 	    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
 	newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
 	    PRI_MAX_TIMESHARE);
@@ -682,13 +686,12 @@ sched_initticks(void *dummy)
 void
 schedinit(void)
 {
+
 	/*
-	 * Set up the scheduler specific parts of proc0.
+	 * Set up the scheduler specific parts of thread0.
 	 */
-	proc0.p_sched = NULL; /* XXX */
-	thread0.td_sched = &td_sched0;
 	thread0.td_lock = &sched_lock;
-	td_sched0.ts_slice = sched_slice;
+	td_get_sched(&thread0)->ts_slice = sched_slice;
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
 }
@@ -731,7 +734,7 @@ sched_clock(struct thread *td)
 	struct td_sched *ts;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = td_get_sched(td);

 	ts->ts_cpticks++;
 	ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
@@ -775,8 +778,8 @@ sched_exit_thread(struct thread *td, struct thread *child)
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
 	    "prio:%d", child->td_priority);
 	thread_lock(td);
-	td->td_sched->ts_estcpu = ESTCPULIM(td->td_sched->ts_estcpu +
-	    child->td_sched->ts_estcpu);
+	td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu +
+	    td_get_sched(child)->ts_estcpu);
 	thread_unlock(td);
 	thread_lock(child);
 	if ((child->td_flags & TDF_NOLOAD) == 0)
@@ -793,17 +796,18 @@ sched_fork(struct thread *td, struct thread *childtd)
 void
 sched_fork_thread(struct thread *td, struct thread *childtd)
 {
-	struct td_sched *ts;
+	struct td_sched *ts, *tsc;

 	childtd->td_oncpu = NOCPU;
 	childtd->td_lastcpu = NOCPU;
 	childtd->td_lock = &sched_lock;
 	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
 	childtd->td_priority = childtd->td_base_pri;
-	ts = childtd->td_sched;
+	ts = td_get_sched(childtd);
 	bzero(ts, sizeof(*ts));
-	ts->ts_estcpu = td->td_sched->ts_estcpu;
-	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
+	tsc = td_get_sched(td);
+	ts->ts_estcpu = tsc->ts_estcpu;
+	ts->ts_flags |= (tsc->ts_flags & TSF_AFFINITY);
 	ts->ts_slice = 1;
 }
@@ -952,7 +956,7 @@ sched_sleep(struct thread *td, int pri)

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	td->td_slptick = ticks;
-	td->td_sched->ts_slptime = 0;
+	td_get_sched(td)->ts_slptime = 0;
 	if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
 		sched_prio(td, pri);
 	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
@@ -968,7 +972,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	int preempted;

 	tmtx = NULL;
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	p = td->td_proc;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -1095,7 +1099,7 @@ sched_wakeup(struct thread *td)
 	struct td_sched *ts;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	td->td_flags &= ~TDF_CANSWAP;
 	if (ts->ts_slptime > 1) {
 		updatepri(td);
@@ -1266,7 +1270,7 @@ sched_add(struct thread *td, int flags)
 	int forwarded = 0;
 	int single_cpu = 0;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
 	    ("sched_add: trying to run inhibited thread"));
@@ -1361,7 +1365,7 @@ sched_add(struct thread *td, int flags)
 {
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
 	    ("sched_add: trying to run inhibited thread"));
@@ -1414,7 +1418,7 @@ sched_rem(struct thread *td)
 {
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_rem: thread swapped out"));
 	KASSERT(TD_ON_RUNQ(td),
@@ -1527,7 +1531,7 @@ sched_bind(struct thread *td, int cpu)
 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));

-	ts = td->td_sched;
+	ts = td_get_sched(td);

 	td->td_flags |= TDF_BOUND;
 #ifdef SMP
@@ -1586,7 +1590,7 @@ sched_pctcpu(struct thread *td)
 	struct td_sched *ts;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	return (ts->ts_pctcpu);
 }
@@ -1603,7 +1607,7 @@ sched_pctcpu_delta(struct thread *td)
 	int realstathz;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	delta = 0;
 	realstathz = stathz ? stathz : hz;
 	if (ts->ts_cpticks != 0) {
@@ -1628,7 +1632,7 @@ u_int
 sched_estcpu(struct thread *td)
 {

-	return (td->td_sched->ts_estcpu);
+	return (td_get_sched(td)->ts_estcpu);
 }

 /*
@@ -1707,7 +1711,7 @@ sched_tdname(struct thread *td)
 #ifdef KTR
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	if (ts->ts_name[0] == '\0')
 		snprintf(ts->ts_name, sizeof(ts->ts_name),
 		    "%s tid %d", td->td_name, td->td_tid);
@@ -1723,7 +1727,7 @@ sched_clear_tdname(struct thread *td)
 {
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	ts->ts_name[0] = '\0';
 }
 #endif
@@ -1741,7 +1745,7 @@ sched_affinity(struct thread *td)
 	 * Set the TSF_AFFINITY flag if there is at least one CPU this
 	 * thread can't run on.
 	 */
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	ts->ts_flags &= ~TSF_AFFINITY;
 	CPU_FOREACH(cpu) {
 		if (!THREAD_CAN_SCHED(td, cpu)) {

sys/kern/sched_ule.c

@@ -106,12 +106,14 @@ struct td_sched {
 #define TSF_BOUND 0x0001 /* Thread can not migrate. */
 #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */

-static struct td_sched td_sched0;
-
 #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0)
 #define THREAD_CAN_SCHED(td, cpu) \
 	CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

+_Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
+    sizeof(struct thread0_storage),
+    "increase struct thread0_storage.t0st_sched size");
+
 /*
  * Priority ranges used for interactive and non-interactive timeshare
  * threads.  The timeshare priorities are split up into four ranges.
@@ -460,7 +462,7 @@ tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
 	THREAD_LOCK_ASSERT(td, MA_OWNED);

 	pri = td->td_priority;
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	TD_SET_RUNQ(td);
 	if (THREAD_CAN_MIGRATE(td)) {
 		tdq->tdq_transferable++;
@@ -506,7 +508,7 @@ tdq_runq_rem(struct tdq *tdq, struct thread *td)
 {
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT(ts->ts_runq != NULL,
 	    ("tdq_runq_remove: thread %p null ts_runq", td));
@@ -962,7 +964,7 @@ tdq_move(struct tdq *from, struct tdq *to)
 	td = tdq_steal(tdq, cpu);
 	if (td == NULL)
 		return (0);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	/*
 	 * Although the run queue is locked the thread may be blocked.  Lock
 	 * it to clear this and acquire the run-queue lock.
@@ -1046,7 +1048,7 @@ tdq_notify(struct tdq *tdq, struct thread *td)

 	if (tdq->tdq_ipipending)
 		return;
-	cpu = td->td_sched->ts_cpu;
+	cpu = td_get_sched(td)->ts_cpu;
 	pri = td->td_priority;
 	ctd = pcpu_find(cpu)->pc_curthread;
 	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
@@ -1174,7 +1176,7 @@ sched_setcpu(struct thread *td, int cpu, int flags)

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	tdq = TDQ_CPU(cpu);
-	td->td_sched->ts_cpu = cpu;
+	td_get_sched(td)->ts_cpu = cpu;
 	/*
 	 * If the lock matches just return the queue.
 	 */
@@ -1221,7 +1223,7 @@ sched_pickcpu(struct thread *td, int flags)
 	int cpu, pri, self;

 	self = PCPU_GET(cpuid);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	if (smp_started == 0)
 		return (self);
 	/*
@@ -1472,7 +1474,7 @@ sched_interact_score(struct thread *td)
 	struct td_sched *ts;
 	int div;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	/*
 	 * The score is only needed if this is likely to be an interactive
 	 * task.  Don't go through the expense of computing it if there's
@@ -1537,16 +1539,16 @@ sched_priority(struct thread *td)
 		    pri, score));
 	} else {
 		pri = SCHED_PRI_MIN;
-		if (td->td_sched->ts_ticks)
-			pri += min(SCHED_PRI_TICKS(td->td_sched),
+		if (td_get_sched(td)->ts_ticks)
+			pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
 			    SCHED_PRI_RANGE - 1);
 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
 		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
 		    ("sched_priority: invalid priority %d: nice %d, "
 		    "ticks %d ftick %d ltick %d tick pri %d",
-		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
-		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
-		    SCHED_PRI_TICKS(td->td_sched)));
+		    pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
+		    td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
+		    SCHED_PRI_TICKS(td_get_sched(td))));
 	}
 	sched_user_prio(td, pri);
@@ -1564,7 +1566,7 @@ sched_interact_update(struct thread *td)
 	struct td_sched *ts;
 	u_int sum;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	sum = ts->ts_runtime + ts->ts_slptime;
 	if (sum < SCHED_SLP_RUN_MAX)
 		return;
@@ -1606,14 +1608,16 @@ sched_interact_update(struct thread *td)
 static void
 sched_interact_fork(struct thread *td)
 {
+	struct td_sched *ts;
 	int ratio;
 	int sum;

-	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
+	ts = td_get_sched(td);
+	sum = ts->ts_runtime + ts->ts_slptime;
 	if (sum > SCHED_SLP_RUN_FORK) {
 		ratio = sum / SCHED_SLP_RUN_FORK;
-		td->td_sched->ts_runtime /= ratio;
-		td->td_sched->ts_slptime /= ratio;
+		ts->ts_runtime /= ratio;
+		ts->ts_slptime /= ratio;
 	}
 }
@@ -1623,15 +1627,15 @@ sched_interact_fork(struct thread *td)
 void
 schedinit(void)
 {
+	struct td_sched *ts0;

 	/*
-	 * Set up the scheduler specific parts of proc0.
+	 * Set up the scheduler specific parts of thread0.
 	 */
-	proc0.p_sched = NULL; /* XXX */
-	thread0.td_sched = &td_sched0;
-	td_sched0.ts_ltick = ticks;
-	td_sched0.ts_ftick = ticks;
-	td_sched0.ts_slice = 0;
+	ts0 = td_get_sched(&thread0);
+	ts0->ts_ltick = ticks;
+	ts0->ts_ftick = ticks;
+	ts0->ts_slice = 0;
 }

 /*
@@ -1694,7 +1698,7 @@ sched_thread_priority(struct thread *td, u_char prio)
 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
 		    curthread);
 	}
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
@@ -1829,7 +1833,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 {
 	struct tdq *tdn;

-	tdn = TDQ_CPU(td->td_sched->ts_cpu);
+	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
 #ifdef SMP
 	tdq_load_rem(tdq, td);
 	/*
@@ -1888,7 +1892,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)

 	cpuid = PCPU_GET(cpuid);
 	tdq = TDQ_CPU(cpuid);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	mtx = td->td_lock;
 	sched_pctcpu_update(ts, 1);
 	ts->ts_rltick = ticks;
@@ -1948,7 +1952,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
 		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
-		sched_pctcpu_update(newtd->td_sched, 0);
+		sched_pctcpu_update(td_get_sched(newtd), 0);

 #ifdef KDTRACE_HOOKS
 		/*
@@ -2038,7 +2042,7 @@ sched_wakeup(struct thread *td)
 	int slptick;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	td->td_flags &= ~TDF_CANSWAP;
 	/*
 	 * If we slept for more than a tick update our interactivity and
@@ -2066,14 +2070,14 @@ void
 sched_fork(struct thread *td, struct thread *child)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	sched_pctcpu_update(td->td_sched, 1);
+	sched_pctcpu_update(td_get_sched(td), 1);
 	sched_fork_thread(td, child);
 	/*
 	 * Penalize the parent and child for forking.
 	 */
 	sched_interact_fork(child);
 	sched_priority(child);
-	td->td_sched->ts_runtime += tickincr;
+	td_get_sched(td)->ts_runtime += tickincr;
 	sched_interact_update(td);
 	sched_priority(td);
 }
@@ -2093,8 +2097,8 @@ sched_fork_thread(struct thread *td, struct thread *child)
 	/*
 	 * Initialize child.
 	 */
-	ts = td->td_sched;
-	ts2 = child->td_sched;
+	ts = td_get_sched(td);
+	ts2 = td_get_sched(child);
 	child->td_oncpu = NOCPU;
 	child->td_lastcpu = NOCPU;
 	child->td_lock = TDQ_LOCKPTR(tdq);
@@ -2169,7 +2173,7 @@ sched_exit_thread(struct thread *td, struct thread *child)
 	 * launch expensive things to mark their children as expensive.
 	 */
 	thread_lock(td);
-	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
+	td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
 	sched_interact_update(td);
 	sched_priority(td);
 	thread_unlock(td);
@@ -2264,7 +2268,7 @@ sched_clock(struct thread *td)
 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
 			tdq->tdq_ridx = tdq->tdq_idx;
 	}
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	sched_pctcpu_update(ts, 1);
 	if (td->td_pri_class & PRI_FIFO_BIT)
 		return;
@@ -2273,7 +2277,7 @@ sched_clock(struct thread *td)
 	 * We used a tick; charge it to the thread so
 	 * that we can compute our interactivity.
 	 */
-	td->td_sched->ts_runtime += tickincr;
+	td_get_sched(td)->ts_runtime += tickincr;
 	sched_interact_update(td);
 	sched_priority(td);
 }
@@ -2455,7 +2459,7 @@ sched_rem(struct thread *td)
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
 	    "prio:%d", td->td_priority);
 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
-	tdq = TDQ_CPU(td->td_sched->ts_cpu);
+	tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
 	KASSERT(TD_ON_RUNQ(td),
@@ -2477,9 +2481,7 @@ sched_pctcpu(struct thread *td)
 	struct td_sched *ts;

 	pctcpu = 0;
-	ts = td->td_sched;
-	if (ts == NULL)
-		return (0);
+	ts = td_get_sched(td);

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	sched_pctcpu_update(ts, TD_IS_RUNNING(td));
@@ -2505,7 +2507,7 @@ sched_affinity(struct thread *td)
 	struct td_sched *ts;

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
 		return;
 	if (TD_ON_RUNQ(td)) {
@@ -2536,7 +2538,7 @@ sched_bind(struct thread *td, int cpu)

 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	if (ts->ts_flags & TSF_BOUND)
 		sched_unbind(td);
 	KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
@@ -2559,7 +2561,7 @@ sched_unbind(struct thread *td)

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	if ((ts->ts_flags & TSF_BOUND) == 0)
 		return;
 	ts->ts_flags &= ~TSF_BOUND;
@@ -2570,7 +2572,7 @@ int
 sched_is_bound(struct thread *td)
 {

 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	return (td->td_sched->ts_flags & TSF_BOUND);
+	return (td_get_sched(td)->ts_flags & TSF_BOUND);
 }

 /*
@@ -2761,7 +2763,7 @@ sched_tdname(struct thread *td)
 #ifdef KTR
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	if (ts->ts_name[0] == '\0')
 		snprintf(ts->ts_name, sizeof(ts->ts_name),
 		    "%s tid %d", td->td_name, td->td_tid);
@@ -2777,7 +2779,7 @@ sched_clear_tdname(struct thread *td)
 {
 	struct td_sched *ts;

-	ts = td->td_sched;
+	ts = td_get_sched(td);
 	ts->ts_name[0] = '\0';
 }
 #endif

sys/mips/mips/locore.S

@@ -173,7 +173,7 @@ VECTOR(_locore, unknown)
 	jal	_C_LABEL(platform_start)
 	nop

-	PTR_LA	sp, _C_LABEL(thread0)
+	PTR_LA	sp, _C_LABEL(thread0_st)
 	PTR_L	a0, TD_PCB(sp)
 	REG_LI	t0, ~7
 	and	a0, a0, t0

sys/sys/proc.h

@@ -325,7 +325,6 @@ struct thread {
 	int td_kstack_pages;	/* (a) Size of the kstack. */
 	volatile u_int td_critnest; /* (k*) Critical section nest level. */
 	struct mdthread td_md;	/* (k) Any machine-dependent fields. */
-	struct td_sched *td_sched; /* (*) Scheduler-specific data. */
 	struct kaudit_record *td_ar; /* (k) Active audit record, if any. */
 	struct lpohead td_lprof[2]; /* (a) lock profiling objects. */
 	struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
@@ -341,6 +340,11 @@ struct thread {
 	int td_oncpu;		/* (t) Which cpu we are on. */
 };

+struct thread0_storage {
+	struct thread t0st_thread;
+	uint64_t t0st_sched[10];
+};
+
 struct mtx *thread_lock_block(struct thread *);
 void thread_lock_unblock(struct thread *, struct mtx *);
 void thread_lock_set(struct thread *, struct mtx *);
@@ -616,7 +620,6 @@ struct proc {
 	struct proc *p_leader;	/* (b) */
 	void *p_emuldata;	/* (c) Emulator state data. */
 	struct label *p_label;	/* (*) Proc (not subject) MAC label. */
-	struct p_sched *p_sched; /* (*) Scheduler-specific data. */
 	STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
 	LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
 	struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */
@@ -890,7 +893,8 @@ extern int allproc_gen;
 extern struct sx proctree_lock;
 extern struct mtx ppeers_lock;
 extern struct proc proc0;	/* Process slot for swapper. */
-extern struct thread thread0;	/* Primary thread in proc0. */
+extern struct thread0_storage thread0_st; /* Primary thread in proc0. */
+#define thread0 (thread0_st.t0st_thread)
 extern struct vmspace vmspace0;	/* VM space for proc0. */
 extern int hogticks;		/* Limit on kernel cpu hogs. */
 extern int lastpid;
@@ -1065,6 +1069,13 @@ curthread_pflags_restore(int save)
 	curthread->td_pflags &= save;
 }

+static __inline __pure2 struct td_sched *
+td_get_sched(struct thread *td)
+{
+
+	return ((struct td_sched *)&td[1]);
+}
+
 #endif	/* _KERNEL */

 #endif	/* !_SYS_PROC_H_ */
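
A closing note on sizing: t0st_sched is an opaque 80-byte (ten
uint64_t) reservation, so each scheduler carries a compile-time guard
(seen in the sched_4bsd.c and sched_ule.c hunks above) that breaks the
build the moment struct td_sched outgrows thread0's static storage.  A
minimal standalone rendering of that guard, with toy structure sizes:

#include <stdint.h>

struct thread { uint64_t td_dummy; };		/* toy stand-in */
struct td_sched { uint64_t ts_dummy[4]; };	/* toy stand-in */

struct thread0_storage {
	struct thread t0st_thread;
	uint64_t t0st_sched[10];
};

/* Fails to compile if td_sched no longer fits behind thread0. */
_Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
    sizeof(struct thread0_storage),
    "increase struct thread0_storage.t0st_sched size");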