Instead of doing comparisons using the pcpu area to see if
a thread is an idle thread, just see if it has the IDLETD flag set. That flag will probably move to the pflags word as it's permanent and never changes for the life of the system, so it doesn't need locking.
This commit is contained in:
parent
3483dab550
commit
80d6cde009
@ -450,7 +450,7 @@ statclock(int usermode)
|
||||
#endif
|
||||
td->td_pticks++;
|
||||
td->td_sticks++;
|
||||
if (td != PCPU_GET(idlethread))
|
||||
if (!TD_IS_IDLETHREAD(td))
|
||||
cp_time[CP_SYS]++;
|
||||
else
|
||||
cp_time[CP_IDLE]++;
|
||||
|
@ -428,7 +428,7 @@ mi_switch(int flags, struct thread *newtd)
|
||||
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
|
||||
td->td_tid, td->td_sched, p->p_pid, p->p_comm);
|
||||
#if (KTR_COMPILE & KTR_SCHED) != 0
|
||||
if (td == PCPU_GET(idlethread))
|
||||
if (TD_IS_IDLETHREAD(td))
|
||||
CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
|
||||
td, td->td_proc->p_comm, td->td_priority);
|
||||
else if (newtd != NULL)
|
||||
|
@ -983,7 +983,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
td->td_flags &= ~TDF_NEEDRESCHED;
|
||||
td->td_owepreempt = 0;
|
||||
|
||||
if (td == PCPU_GET(idlethread)) {
|
||||
if (TD_IS_IDLETHREAD(td)) {
|
||||
TD_SET_CAN_RUN(td);
|
||||
} else {
|
||||
sched_update_runtime(ts, now);
|
||||
@ -1230,7 +1230,7 @@ sched_tick(void)
|
||||
/*
|
||||
* Processes of equal idle priority are run round-robin.
|
||||
*/
|
||||
if (td != PCPU_GET(idlethread) && --ts->ts_slice <= 0) {
|
||||
if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
|
||||
ts->ts_slice = def_timeslice;
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
}
|
||||
|
@ -1436,7 +1436,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
* If the thread has been assigned it may be in the process of switching
|
||||
* to the new cpu. This is the case in sched_bind().
|
||||
*/
|
||||
if (td == PCPU_GET(idlethread)) {
|
||||
if (TD_IS_IDLETHREAD(td)) {
|
||||
TD_SET_CAN_RUN(td);
|
||||
} else {
|
||||
tdq_load_rem(tdq, ts);
|
||||
|
@ -133,7 +133,7 @@ uprintf(const char *fmt, ...)
|
||||
struct putchar_arg pca;
|
||||
int retval;
|
||||
|
||||
if (td == NULL || td == PCPU_GET(idlethread))
|
||||
if (td == NULL || TD_IS_IDLETHREAD(td))
|
||||
return (0);
|
||||
|
||||
mtx_lock(&Giant);
|
||||
|
@ -201,7 +201,7 @@ forward_roundrobin(void)
|
||||
td = pc->pc_curthread;
|
||||
id = pc->pc_cpumask;
|
||||
if (id != me && (id & stopped_cpus) == 0 &&
|
||||
td != pc->pc_idlethread) {
|
||||
!TD_IS_IDLETHREAD(td)) {
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
map |= id;
|
||||
}
|
||||
|
@ -749,7 +749,7 @@ breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
|
||||
rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
|
||||
|
||||
if ((rabp->b_flags & B_CACHE) == 0) {
|
||||
if (curthread != PCPU_GET(idlethread))
|
||||
if (!TD_IS_IDLETHREAD(curthread))
|
||||
curthread->td_proc->p_stats->p_ru.ru_inblock++;
|
||||
rabp->b_flags |= B_ASYNC;
|
||||
rabp->b_flags &= ~B_INVAL;
|
||||
@ -784,7 +784,7 @@ breadn(struct vnode * vp, daddr_t blkno, int size,
|
||||
|
||||
/* if not found in cache, do some I/O */
|
||||
if ((bp->b_flags & B_CACHE) == 0) {
|
||||
if (curthread != PCPU_GET(idlethread))
|
||||
if (!TD_IS_IDLETHREAD(curthread))
|
||||
curthread->td_proc->p_stats->p_ru.ru_inblock++;
|
||||
bp->b_iocmd = BIO_READ;
|
||||
bp->b_flags &= ~B_INVAL;
|
||||
@ -863,7 +863,7 @@ bufwrite(struct buf *bp)
|
||||
bp->b_runningbufspace = bp->b_bufsize;
|
||||
atomic_add_int(&runningbufspace, bp->b_runningbufspace);
|
||||
|
||||
if (curthread != PCPU_GET(idlethread))
|
||||
if (!TD_IS_IDLETHREAD(curthread))
|
||||
curthread->td_proc->p_stats->p_ru.ru_oublock++;
|
||||
if (oldflags & B_ASYNC)
|
||||
BUF_KERNPROC(bp);
|
||||
@ -2445,7 +2445,7 @@ loop:
|
||||
* XXX remove if 0 sections (clean this up after its proven)
|
||||
*/
|
||||
if (numfreebuffers == 0) {
|
||||
if (curthread == PCPU_GET(idlethread))
|
||||
if (TD_IS_IDLETHREAD(curthread))
|
||||
return NULL;
|
||||
mtx_lock(&nblock);
|
||||
needsbuffer |= VFS_BIO_NEED_ANY;
|
||||
|
@ -342,8 +342,7 @@ BUF_KERNPROC(struct buf *bp)
|
||||
{
|
||||
struct thread *td = curthread;
|
||||
|
||||
if ((td != PCPU_GET(idlethread))
|
||||
&& bp->b_lock.lk_lockholder == td)
|
||||
if (!TD_IS_IDLETHREAD(td) && bp->b_lock.lk_lockholder == td)
|
||||
td->td_locks--;
|
||||
bp->b_lock.lk_lockholder = LK_KERNPROC;
|
||||
}
|
||||
|
@ -398,6 +398,12 @@ struct thread {
|
||||
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
|
||||
#define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED)
|
||||
#define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED)
|
||||
#if 0
|
||||
#define TD_IS_IDLETHREAD(td) ((td) == pcpu(idlethread))
|
||||
#else
|
||||
#define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD)
|
||||
#endif
|
||||
|
||||
|
||||
#define TD_SET_INHIB(td, inhib) do { \
|
||||
(td)->td_state = TDS_INHIBITED; \
|
||||
|
Loading…
x
Reference in New Issue
Block a user