Introduce two new helper macros to define the priority ranges used for
interactive timeshare threads (PRI_*_INTERACT) and non-interactive
timeshare threads (PRI_*_BATCH) and use these instead of PRI_*_REALTIME
and PRI_*_TIMESHARE.  No functional change.

Reviewed by:	jeff
John Baldwin 2011-01-13 14:22:27 +00:00
parent 6a9736a878
commit 12d56c0f63


@@ -116,6 +116,15 @@ static struct td_sched td_sched0;
 #define	THREAD_CAN_SCHED(td, cpu)	\
     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
 
+/*
+ * Priority ranges used for interactive and non-interactive timeshare
+ * threads.  Interactive threads use realtime priorities.
+ */
+#define	PRI_MIN_INTERACT	PRI_MIN_REALTIME
+#define	PRI_MAX_INTERACT	PRI_MAX_REALTIME
+#define	PRI_MIN_BATCH		PRI_MIN_TIMESHARE
+#define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE
+
 /*
  * Cpu percentage computation macros and defines.
  *
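For illustration only, not part of the commit: a minimal sketch of how code can classify a priority using the new range names. The helpers and the numeric stand-ins below are hypothetical; the real PRI_* values come from sys/priority.h.

/* Hypothetical stand-ins so the sketch is self-contained. */
#define PRI_MIN_INTERACT  48    /* aliases PRI_MIN_REALTIME in the commit */
#define PRI_MAX_INTERACT  79    /* aliases PRI_MAX_REALTIME */
#define PRI_MIN_BATCH     160   /* aliases PRI_MIN_TIMESHARE */
#define PRI_MAX_BATCH     223   /* aliases PRI_MAX_TIMESHARE */

/* Hypothetical helpers: classify a priority with the new range names. */
static int
pri_is_interact(int pri)
{
	return (pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT);
}

static int
pri_is_batch(int pri)
{
	return (pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH);
}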
@@ -147,8 +156,8 @@ static struct td_sched td_sched0;
  */
 #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
-#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
-#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
+#define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
+#define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
 #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
 #define	SCHED_PRI_TICKS(ts)						\
     (SCHED_TICK_HZ((ts)) /						\
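For a sense of what these evaluate to, assuming the standard nice bounds PRIO_MIN = -20 and PRIO_MAX = 20 from sys/resource.h: SCHED_PRI_NRESV is 40 and SCHED_PRI_NHALF is 20, so the CPU-usage-derived priority is confined to [PRI_MIN_BATCH + 20, PRI_MAX_BATCH - 20] and a later nice adjustment cannot push it outside the batch range. A compilable sketch of that arithmetic:

#include <stdio.h>

/* Assumed nice bounds, as in <sys/resource.h>. */
#define PRIO_MIN  (-20)
#define PRIO_MAX  20

#define SCHED_PRI_NRESV  (PRIO_MAX - PRIO_MIN)  /* slots reserved for nice */
#define SCHED_PRI_NHALF  (SCHED_PRI_NRESV / 2)  /* reserved on each side */

int
main(void)
{
	printf("NRESV=%d NHALF=%d\n", SCHED_PRI_NRESV, SCHED_PRI_NHALF);
	/* prints: NRESV=40 NHALF=20 */
	return (0);
}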
@@ -194,7 +203,7 @@ static int preempt_thresh = PRI_MIN_KERN;
 #else
 static int preempt_thresh = 0;
 #endif
-static int static_boost = PRI_MIN_TIMESHARE;
+static int static_boost = PRI_MIN_BATCH;
 static int sched_idlespins = 10000;
 static int sched_idlespinthresh = 16;
@@ -393,15 +402,15 @@ sched_shouldpreempt(int pri, int cpri, int remote)
 	if (pri <= preempt_thresh)
 		return (1);
 	/*
-	 * If we're realtime or better and there is timeshare or worse running
-	 * preempt only remote processors.
+	 * If we're interactive or better and there is non-interactive
+	 * or worse running preempt only remote processors.
 	 */
-	if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
+	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
 		return (1);
 	return (0);
 }
-#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
+#define	TS_RQ_PPQ	(((PRI_MAX_BATCH - PRI_MIN_BATCH) + 1) / RQ_NQS)
 
 /*
  * Add a thread to the actual run-queue.  Keeps transferable counts up to
  * date with what is actually on the run-queue.  Selects the correct
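For illustration, a condensed user-space sketch of just the decision visible in this hunk; the earlier checks in the real sched_shouldpreempt() are omitted and the constant is a stand-in. Lower numeric priority means more important.

#define PRI_MAX_INTERACT  79   /* illustrative stand-in, not the kernel value */

static int
should_preempt_sketch(int pri, int cpri, int remote, int preempt_thresh)
{
	/* Priorities at or better than the threshold always preempt. */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * An interactive-or-better thread displaces a non-interactive one,
	 * but only on a remote CPU; the local CPU instead waits for the
	 * next scheduling point.
	 */
	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
		return (1);
	return (0);
}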
@@ -423,18 +432,18 @@ tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
 		tdq->tdq_transferable++;
 		ts->ts_flags |= TSF_XFERABLE;
 	}
-	if (pri <= PRI_MAX_REALTIME) {
+	if (pri < PRI_MIN_BATCH) {
 		ts->ts_runq = &tdq->tdq_realtime;
-	} else if (pri <= PRI_MAX_TIMESHARE) {
+	} else if (pri <= PRI_MAX_BATCH) {
 		ts->ts_runq = &tdq->tdq_timeshare;
-		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
+		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
 			("Invalid priority %d on timeshare runq", pri));
 		/*
 		 * This queue contains only priorities between MIN and MAX
 		 * realtime.  Use the whole queue to represent these values.
 		 */
 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
-			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
+			pri = (pri - PRI_MIN_BATCH) / TS_RQ_PPQ;
 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
 			/*
 			 * This effectively shortens the queue by one so we
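To make the calendar-queue indexing above concrete, here is a standalone sketch of those two lines. The range bounds and RQ_NQS are illustrative stand-ins, not necessarily the kernel's values.

#include <stdio.h>

/* Illustrative stand-ins; the real values come from priority.h and runq.h. */
#define PRI_MIN_BATCH  160
#define PRI_MAX_BATCH  223
#define RQ_NQS         64
#define TS_RQ_PPQ      (((PRI_MAX_BATCH - PRI_MIN_BATCH) + 1) / RQ_NQS)

/* Map a batch priority onto the circular timeshare runqueue. */
static int
ts_runq_index(int pri, int tdq_idx)
{
	int idx;

	idx = (pri - PRI_MIN_BATCH) / TS_RQ_PPQ;   /* bucket within the range */
	idx = (idx + tdq_idx) % RQ_NQS;            /* rotate by the current head */
	return (idx);
}

int
main(void)
{
	/* With these stand-ins TS_RQ_PPQ is 1, so each priority gets a bucket. */
	printf("%d\n", ts_runq_index(PRI_MIN_BATCH + 5, 10));  /* prints 15 */
	return (0);
}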
@@ -1205,7 +1214,7 @@ tdq_choose(struct tdq *tdq)
 		return (td);
 	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
 	if (td != NULL) {
-		KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
+		KASSERT(td->td_priority >= PRI_MIN_BATCH,
 		    ("tdq_choose: Invalid priority on timeshare queue %d",
 		    td->td_priority));
 		return (td);
@@ -1405,10 +1414,10 @@ sched_priority(struct thread *td)
 	 */
 	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
 	if (score < sched_interact) {
-		pri = PRI_MIN_REALTIME;
-		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME + 1) /
+		pri = PRI_MIN_INTERACT;
+		pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) /
 		    sched_interact) * score;
-		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
+		KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
 		    ("sched_priority: invalid interactive priority %d score %d",
 		    pri, score));
 	} else {
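The interactive branch above maps a score below the interactivity threshold linearly onto the interactive range. A minimal sketch of that mapping, using an illustrative threshold of 30 and illustrative range bounds (the real values come from the sched_interact tunable and priority.h):

/* Illustrative stand-ins. */
#define PRI_MIN_INTERACT  48
#define PRI_MAX_INTERACT  79
#define SCHED_INTERACT    30   /* stand-in for the sched_interact tunable */

/*
 * Map an interactivity score in [0, SCHED_INTERACT) onto
 * [PRI_MIN_INTERACT, PRI_MAX_INTERACT]: lower score, better priority.
 */
static int
interact_pri_sketch(int score)
{
	int pri;

	pri = PRI_MIN_INTERACT;
	pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) / SCHED_INTERACT) * score;
	return (pri);
}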
@@ -1416,7 +1425,7 @@ sched_priority(struct thread *td)
 		if (td->td_sched->ts_ticks)
 			pri += SCHED_PRI_TICKS(td->td_sched);
 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
-		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
+		KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
 		    ("sched_priority: invalid priority %d: nice %d, "
 		    "ticks %d ftick %d ltick %d tick pri %d",
 		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,