Some minor tunings/cleanups inspired by bde@ after previous commits:
 - remove extra dynamic variable initializations;
 - restore (4BSD) and implement (ULE) hogticks variable setting;
 - make sched_rr_interval() more tolerant to options;
 - restore (4BSD) and implement (ULE) kern.sched.quantum sysctl, a more
   user-friendly wrapper for sched_slice;
 - tune some sysctl descriptions;
 - make some style fixes.
parent 9d18043979
commit 579895df01
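For illustration only (not part of the diff), the sketch below walks through the arithmetic the new sysctl_kern_quantum() handlers and sched_initticks() use: kern.sched.quantum converts sched_slice (in stathz ticks) to microseconds on read and back on write, and hogticks follows the slice. The values realstathz = 127 and sched_slice = 12 are the new static defaults from the diff; hz = 1000 and the written value of 100000 us are assumptions chosen for the example.

/*
 * Illustrative sketch, not part of the commit: the quantum <-> slice math
 * used by the new sysctl_kern_quantum() handlers and sched_initticks().
 * Assumes hz = 1000 and a written value of 100000 us.
 */
#include <stdio.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	int hz = 1000;			/* assumed kernel tick rate */
	int realstathz = 127;		/* new static default */
	int sched_slice = 12;		/* new static default, in stathz ticks */
	int period, quantum, new_val, hogticks;

	/* Reading kern.sched.quantum: slice converted to microseconds. */
	period = 1000000 / realstathz;		/* one stathz tick, in us */
	quantum = period * sched_slice;
	printf("quantum = %d us\n", quantum);	/* roughly 94 ms */

	/* Writing kern.sched.quantum: microseconds back to stathz ticks. */
	new_val = 100000;			/* e.g. sysctl kern.sched.quantum=100000 */
	sched_slice = MAX(1, (new_val + period / 2) / period);

	/* hogticks is derived from the slice, as in sched_initticks(). */
	hogticks = MAX(1, 2 * hz * sched_slice / realstathz);
	printf("sched_slice = %d stathz ticks, hogticks = %d hz ticks\n",
	    sched_slice, hogticks);
	return (0);
}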
@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd January 21, 2008
+.Dd August 10, 2012
 .Dt SCHED_ULE 4
 .Os
 .Sh NAME
@@ -59,6 +59,9 @@ The following sysctls are relevant to the operation of
 .Bl -tag -width indent
 .It Va kern.sched.name
 This read-only sysctl reports the name of the active scheduler.
+.It Va kern.sched.quantum
+This read-write sysctl reports or sets the length of the quantum (in
+micro-seconds) granted to a thread.
 .El
 .Sh SEE ALSO
 .Xr sched_4bsd 4 ,
@@ -119,9 +119,9 @@ struct td_sched {
 static struct td_sched td_sched0;
 struct mtx sched_lock;
 
-static int	realstathz;	/* stathz is sometimes 0 and run off of hz. */
+static int	realstathz = 127; /* stathz is sometimes 0 and run off of hz. */
 static int	sched_tdcnt;	/* Total runnable threads in the system. */
-static int	sched_slice = 1; /* Thread run time before rescheduling. */
+static int	sched_slice = 12; /* Thread run time before rescheduling. */
 
 static void	setup_runqs(void);
 static void	schedcpu(void);
@@ -185,12 +185,32 @@ setup_runqs(void)
 	runq_init(&runq);
 }
 
+static int
+sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
+{
+	int error, new_val, period;
+
+	period = 1000000 / realstathz;
+	new_val = period * sched_slice;
+	error = sysctl_handle_int(oidp, &new_val, 0, req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	if (new_val <= 0)
+		return (EINVAL);
+	sched_slice = max(1, (new_val + period / 2) / period);
+	hogticks = max(1, 2 * hz * sched_slice / realstathz);
+	return (0);
+}
+
 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
 
 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
     "Scheduler name");
+SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
+    NULL, 0, sysctl_kern_quantum, "I",
+    "Length of time granted to timeshare threads in microseconds");
 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
-    "Slice size for timeshare threads");
+    "Length of time granted to timeshare threads in stathz ticks");
 #ifdef SMP
 /* Enable forwarding of wakeups to all other cpus */
 static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL,
@@ -629,21 +649,15 @@ resetpriority_thread(struct thread *td)
 static void
 sched_setup(void *dummy)
 {
-	setup_runqs();
 
-	/*
-	 * To avoid divide-by-zero, we set realstathz a dummy value
-	 * in case which sched_clock() called before sched_initticks().
-	 */
-	realstathz = hz;
-	sched_slice = realstathz / 10;	/* ~100ms */
+	setup_runqs();
 
 	/* Account for thread0. */
 	sched_load_add();
 }
 
 /*
- * This routine determines the sched_slice after stathz and hz are setup.
+ * This routine determines time constants after stathz and hz are setup.
  */
 static void
 sched_initticks(void *dummy)
@@ -651,6 +665,7 @@ sched_initticks(void *dummy)
 
 	realstathz = stathz ? stathz : hz;
 	sched_slice = realstathz / 10;	/* ~100ms */
+	hogticks = max(1, 2 * hz * sched_slice / realstathz);
 }
 
 /* External interfaces start here */
@@ -689,7 +704,7 @@ sched_rr_interval(void)
 {
 
 	/* Convert sched_slice from stathz to hz. */
-	return (hz / (realstathz / sched_slice));
+	return (max(1, (sched_slice * hz + realstathz / 2) / realstathz));
 }
 
 /*
@@ -724,9 +739,9 @@ sched_clock(struct thread *td)
 
 	/*
 	 * Force a context switch if the current thread has used up a full
-	 * quantum (default quantum is 100ms).
+	 * time slice (default is 100ms).
 	 */
-	if (!TD_IS_IDLETHREAD(td) && (--ts->ts_slice <= 0)) {
+	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
 		ts->ts_slice = sched_slice;
 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
 	}
@@ -201,9 +201,9 @@ static struct td_sched td_sched0;
  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
  */
 static int sched_interact = SCHED_INTERACT_THRESH;
-static int realstathz;
-static int tickincr;
-static int sched_slice = 1;
+static int realstathz = 127;
+static int tickincr = 8 << SCHED_TICK_SHIFT;;
+static int sched_slice = 12;
 #ifdef PREEMPTION
 #ifdef FULL_PREEMPTION
 static int preempt_thresh = PRI_MAX_IDLE;
@@ -1363,13 +1363,6 @@ sched_setup(void *dummy)
 #else
 	tdq_setup(tdq);
 #endif
-	/*
-	 * To avoid divide-by-zero, we set realstathz a dummy value
-	 * in case which sched_clock() called before sched_initticks().
-	 */
-	realstathz = hz;
-	sched_slice = (realstathz/10);	/* ~100ms */
-	tickincr = 1 << SCHED_TICK_SHIFT;
 
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
@@ -1380,7 +1373,7 @@ sched_setup(void *dummy)
 }
 
 /*
- * This routine determines the tickincr after stathz and hz are setup.
+ * This routine determines time constants after stathz and hz are setup.
  */
 /* ARGSUSED */
 static void
@@ -1389,7 +1382,8 @@ sched_initticks(void *dummy)
 	int incr;
 
 	realstathz = stathz ? stathz : hz;
-	sched_slice = (realstathz/10);	/* ~100ms */
+	sched_slice = realstathz / 10;	/* ~100ms */
+	hogticks = max(1, 2 * hz * sched_slice / realstathz);
 
 	/*
 	 * tickincr is shifted out by 10 to avoid rounding errors due to
@@ -1606,8 +1600,8 @@ int
 sched_rr_interval(void)
 {
 
-	/* Convert sched_slice to hz */
-	return (hz/(realstathz/sched_slice));
+	/* Convert sched_slice from stathz to hz. */
+	return (max(1, (sched_slice * hz + realstathz / 2) / realstathz));
 }
 
 /*
@@ -2231,16 +2225,15 @@ sched_clock(struct thread *td)
 		sched_interact_update(td);
 		sched_priority(td);
 	}
+
 	/*
-	 * We used up one time slice.
-	 */
-	if (--ts->ts_slice > 0)
-		return;
-	/*
-	 * We're out of time, force a requeue at userret().
+	 * Force a context switch if the current thread has used up a full
+	 * time slice (default is 100ms).
 	 */
-	ts->ts_slice = sched_slice;
-	td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
+		ts->ts_slice = sched_slice;
+		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
+	}
 }
 
 /*
@@ -2795,11 +2788,31 @@ sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
 
 #endif
 
+static int
+sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
+{
+	int error, new_val, period;
+
+	period = 1000000 / realstathz;
+	new_val = period * sched_slice;
+	error = sysctl_handle_int(oidp, &new_val, 0, req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	if (new_val <= 0)
+		return (EINVAL);
+	sched_slice = max(1, (new_val + period / 2) / period);
+	hogticks = max(1, 2 * hz * sched_slice / realstathz);
+	return (0);
+}
+
 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
     "Scheduler name");
+SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
+    NULL, 0, sysctl_kern_quantum, "I",
+    "Length of time granted to timeshare threads in microseconds");
 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
-    "Slice size for timeshare threads");
+    "Length of time granted to timeshare threads in stathz ticks");
 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
     "Interactivity score threshold");
 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
@@ -2807,9 +2820,9 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
 SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
     0,"Controls whether static kernel priorities are assigned to sleeping threads.");
 SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins,
-    0,"Number of times idle will spin waiting for new work.");
+    0,"Number of times idle thread will spin waiting for new work.");
 SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW, &sched_idlespinthresh,
-    0,"Threshold before we will permit idle spinning.");
+    0,"Threshold before we will permit idle thread spinning.");
 #ifdef SMP
 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
     "Number of hz ticks to keep thread affinity for");
@@ -2817,17 +2830,14 @@ SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
     "Enables the long-term load balancer");
 SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
     &balance_interval, 0,
-    "Average frequency in stathz ticks to run the long-term balancer");
+    "Average period in stathz ticks to run the long-term balancer");
 SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
     "Attempts to steal work from other cores before idling");
 SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
     "Minimum load on remote cpu before we'll steal");
-
-/* Retrieve SMP topology */
 SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
     CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
     "XML dump of detected CPU topology");
-
 #endif
 
 /* ps compat.  All cpu percentages from ULE are weighted. */
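As a usage note (not from the commit): because kern.sched.quantum is now a read-write integer sysctl in both schedulers, it can be queried or set from userland, for example with sysctl(8) (`sysctl kern.sched.quantum=100000`, root required) or programmatically via sysctlbyname(3); the handler rejects non-positive values with EINVAL. A minimal sketch:

/*
 * Illustrative userland sketch, not part of the commit: read and optionally
 * set kern.sched.quantum via sysctlbyname(3).  Values are in microseconds.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char *argv[])
{
	int quantum, new_quantum;
	size_t len = sizeof(quantum);

	/* Read the current quantum. */
	if (sysctlbyname("kern.sched.quantum", &quantum, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(kern.sched.quantum)");
	printf("current quantum: %d us\n", quantum);

	/* Optionally write a new quantum, e.g. an argument of 100000 for ~100 ms. */
	if (argc > 1) {
		new_quantum = atoi(argv[1]);
		if (sysctlbyname("kern.sched.quantum", NULL, NULL,
		    &new_quantum, sizeof(new_quantum)) == -1)
			err(1, "sysctlbyname(kern.sched.quantum)");
	}
	return (0);
}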