diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 91d00b2c25a9..472d1247a053 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -105,6 +105,7 @@ struct td_sched {
 /* flags kept in td_flags */
 #define	TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
 #define	TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */
+#define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
 
 /* flags kept in ts_flags */
 #define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */
@@ -727,7 +728,7 @@ sched_clock(struct thread *td)
 	 */
 	if (!TD_IS_IDLETHREAD(td) && (--ts->ts_slice <= 0)) {
 		ts->ts_slice = sched_slice;
-		td->td_flags |= TDF_NEEDRESCHED;
+		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}
 
 	stat = DPCPU_PTR(idlestat);
@@ -943,6 +944,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
+	int preempted;
 
 	tmtx = NULL;
 	ts = td->td_sched;
@@ -964,8 +966,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		sched_load_rem();
 
 	td->td_lastcpu = td->td_oncpu;
-	if (!(flags & SW_PREEMPT))
-		td->td_flags &= ~TDF_NEEDRESCHED;
+	preempted = !(td->td_flags & TDF_SLICEEND);
+	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
 	td->td_owepreempt = 0;
 	td->td_oncpu = NOCPU;
 
@@ -983,7 +985,7 @@
 	} else {
 		if (TD_IS_RUNNING(td)) {
 			/* Put us back on the run queue. */
-			sched_add(td, (flags & SW_PREEMPT) ?
+			sched_add(td, preempted ?
 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 			    SRQ_OURSELF|SRQ_YIELDING);
 		}
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 726362d3af24..719592ab201b 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -189,6 +189,9 @@ static struct td_sched td_sched0;
 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
 #define	SCHED_INTERACT_THRESH	(30)
 
+/* Flags kept in td_flags. */
+#define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
+
 /*
  * tickincr:		Converts a stathz tick into a hz domain scaled by
  *			the shift factor.  Without the shift the error rate
@@ -1841,7 +1844,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	struct td_sched *ts;
 	struct mtx *mtx;
 	int srqflag;
-	int cpuid;
+	int cpuid, preempted;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument"));
@@ -1854,8 +1857,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	ts->ts_rltick = ticks;
 	td->td_lastcpu = td->td_oncpu;
 	td->td_oncpu = NOCPU;
-	if (!(flags & SW_PREEMPT))
-		td->td_flags &= ~TDF_NEEDRESCHED;
+	preempted = !(td->td_flags & TDF_SLICEEND);
+	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
 	td->td_owepreempt = 0;
 	tdq->tdq_switchcnt++;
 	/*
@@ -1867,7 +1870,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		TD_SET_CAN_RUN(td);
 	} else if (TD_IS_RUNNING(td)) {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
-		srqflag = (flags & SW_PREEMPT) ?
+		srqflag = preempted ?
 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
 		    SRQ_OURSELF|SRQ_YIELDING;
 #ifdef SMP
@@ -2237,7 +2240,7 @@ sched_clock(struct thread *td)
 		 * We're out of time, force a requeue at userret().
 		 */
 		ts->ts_slice = sched_slice;
-		td->td_flags |= TDF_NEEDRESCHED;
+		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}
 
 	/*