Nice is a property of a process as a whole.

I mistakenly moved it to the ksegroup when breaking up the process
structure. Put it back in the proc structure.
Julian Elischer 2004-06-16 00:26:31 +00:00
parent 7421be0528
commit fa88511615
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=130551
13 changed files with 63 additions and 81 deletions
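The essence of the change: the nice value moves from struct ksegrp back into struct proc, and sched_nice() now takes the process and fans the new value out to every scheduling group. A runnable toy model of that fan-out (userland C, not kernel code; the two-field structs and the 160 priority base are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

#define NGROUPS 2

struct ksegrp { int kg_user_pri; };

struct proc {
	signed char p_nice;                /* the single per-process value */
	struct ksegrp p_ksegrps[NGROUPS];  /* stand-in for the kernel's group list */
};

/* Stand-in for resetpriority(): recompute one group's user priority. */
static void resetpriority(struct proc *p, struct ksegrp *kg)
{
	kg->kg_user_pri = 160 + p->p_nice; /* 160 is a PUSER-like base */
}

/* Mirrors the new sched_nice(): store once, then refresh every group. */
static void sched_nice(struct proc *p, int nice)
{
	p->p_nice = (signed char)nice;
	for (int i = 0; i < NGROUPS; i++)
		resetpriority(p, &p->p_ksegrps[i]);
}

int main(void)
{
	struct proc p = { 0 };

	sched_nice(&p, 10);
	for (int i = 0; i < NGROUPS; i++)
		printf("group %d: user_pri %d\n", i, p.p_ksegrps[i].kg_user_pri);
	return 0;
}

In the real commit the fan-out is the FOREACH_KSEGRP_IN_PROC loops added to both schedulers below.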

View File

@ -470,11 +470,11 @@ spec_xstrategy(struct vnode *vp, struct buf *bp)
/*
* Slow down disk requests for niced processes.
*/
if (doslowdown && td && td->td_ksegrp->kg_nice > 0) {
if (doslowdown && td && td->td_proc->p_nice > 0) {
mtx_lock(&strategy_mtx);
msleep(&strategy_mtx, &strategy_mtx,
PPAUSE | PCATCH | PDROP, "ioslow",
td->td_ksegrp->kg_nice);
td->td_proc->p_nice);
}
/*
* Collect statistics on synchronous and asynchronous read

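The throttle above is unchanged in shape; only the source of the delay moves. A positive nice becomes the msleep() timeout in ticks, so the pause per I/O request grows linearly with nice. A toy conversion to wall-clock time (the hz value is an assumption):

#include <stdio.h>

#define HZ 100	/* assumed scheduler tick rate */

int main(void)
{
	/* The msleep() timeout is p_nice ticks; convert to milliseconds. */
	for (int nice = 0; nice <= 20; nice += 5)
		printf("nice %2d -> %3d ms pause per request\n",
		    nice, nice * 1000 / HZ);
	return 0;
}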
View File

@ -951,10 +951,10 @@ ibcs2_nice(td, uap)
sa.which = PRIO_PROCESS;
sa.who = 0;
sa.prio = td->td_ksegrp->kg_nice + uap->incr;
sa.prio = td->td_proc->p_nice + uap->incr;
if ((error = setpriority(td, &sa)) != 0)
return EPERM;
td->td_retval[0] = td->td_ksegrp->kg_nice;
td->td_retval[0] = td->td_proc->p_nice;
return 0;
}

View File

@ -382,8 +382,8 @@ proc0_init(void *dummy __unused)
p->p_flag = P_SYSTEM;
p->p_sflag = PS_INMEM;
p->p_state = PRS_NORMAL;
p->p_nice = NZERO;
td->td_state = TDS_RUNNING;
kg->kg_nice = NZERO;
kg->kg_pri_class = PRI_TIMESHARE;
kg->kg_user_pri = PUSER;
td->td_priority = PVM;

View File

@ -392,7 +392,7 @@ statclock(frame)
if (p->p_flag & P_SA)
thread_statclock(1);
p->p_uticks++;
if (td->td_ksegrp->kg_nice > NZERO)
if (p->p_nice > NZERO)
cp_time[CP_NICE]++;
else
cp_time[CP_USER]++;

View File

@ -740,6 +740,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_sflag = p->p_sflag;
kp->ki_swtime = p->p_swtime;
kp->ki_pid = p->p_pid;
kp->ki_nice = p->p_nice;
kg = td->td_ksegrp;
ke = td->td_kse;
bintime2timeval(&p->p_runtime, &tv);
@ -751,7 +752,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
kp->ki_slptime = kg->kg_slptime;
kp->ki_pri.pri_user = kg->kg_user_pri;
kp->ki_pri.pri_class = kg->kg_pri_class;
kp->ki_nice = kg->kg_nice;
/* Things in the thread */
kp->ki_wchan = td->td_wchan;

View File

@ -88,7 +88,6 @@ getpriority(td, uap)
struct thread *td;
register struct getpriority_args *uap;
{
struct ksegrp *kg;
struct proc *p;
int error, low;
@ -98,16 +97,13 @@ getpriority(td, uap)
case PRIO_PROCESS:
if (uap->who == 0)
low = td->td_ksegrp->kg_nice;
low = td->td_proc->p_nice;
else {
p = pfind(uap->who);
if (p == NULL)
break;
if (p_cansee(td, p) == 0) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
low = p->p_nice;
}
PROC_UNLOCK(p);
}
@ -131,10 +127,8 @@ getpriority(td, uap)
LIST_FOREACH(p, &pg->pg_members, p_pglist) {
PROC_LOCK(p);
if (!p_cansee(td, p)) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
if (p->p_nice < low)
low = p->p_nice;
}
PROC_UNLOCK(p);
}
@ -150,10 +144,8 @@ getpriority(td, uap)
PROC_LOCK(p);
if (!p_cansee(td, p) &&
p->p_ucred->cr_uid == uap->who) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
if (p->p_nice < low)
low = p->p_nice;
}
PROC_UNLOCK(p);
}
@ -260,19 +252,13 @@ setpriority(td, uap)
}
/*
* Set "nice" for a process. Doesn't really understand threaded processes
* well but does try. Has the unfortunate side effect of making all the NICE
* values for a process's ksegrps the same. This suggests that
* NICE values should be stored as a process nice and deltas for the ksegrps.
* (but not yet).
* Set "nice" for a (whole) process.
*/
static int
donice(struct thread *td, struct proc *p, int n)
{
struct ksegrp *kg;
int error, low;
int error;
low = PRIO_MAX + 1;
PROC_LOCK_ASSERT(p, MA_OWNED);
if ((error = p_cansched(td, p)))
return (error);
@ -280,20 +266,10 @@ donice(struct thread *td, struct proc *p, int n)
n = PRIO_MAX;
if (n < PRIO_MIN)
n = PRIO_MIN;
/*
* Only allow nicing if to more than the lowest nice.
* E.g., for nices of 4,3,2 allow nice to 3 but not 1
*/
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
if (n < low && suser(td) != 0)
if (n < p->p_nice && suser(td) != 0)
return (EACCES);
mtx_lock_spin(&sched_lock);
FOREACH_KSEGRP_IN_PROC(p, kg) {
sched_nice(kg, n);
}
sched_nice(p, n);
mtx_unlock_spin(&sched_lock);
return (0);
}

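From userland the syscall interface is unchanged, but the semantics simplify: setpriority(2) on a threaded process now writes one per-process value, and getpriority(2) reads it back directly instead of scanning the ksegrps for the lowest. A minimal check using only the standard calls:

#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* Renice ourselves (who == 0 means the calling process). */
	if (setpriority(PRIO_PROCESS, 0, 10) == -1) {
		perror("setpriority");
		return 1;
	}
	/* One value for the whole process, however many threads it has.
	 * getpriority() can legitimately return -1, so clear errno first. */
	errno = 0;
	printf("nice is now %d\n", getpriority(PRIO_PROCESS, 0));
	return 0;
}

Note the EACCES path in donice() above: lowering nice below the current p_nice still requires superuser credentials.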
View File

@ -439,7 +439,7 @@ resetpriority(struct ksegrp *kg)
if (kg->kg_pri_class == PRI_TIMESHARE) {
newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
PRI_MAX_TIMESHARE);
kg->kg_user_pri = newpriority;
@ -583,13 +583,16 @@ sched_fork_thread(struct thread *td, struct thread *child)
}
void
sched_nice(struct ksegrp *kg, int nice)
sched_nice(struct proc *p, int nice)
{
struct ksegrp *kg;
PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_assert(&sched_lock, MA_OWNED);
kg->kg_nice = nice;
resetpriority(kg);
p->p_nice = nice;
FOREACH_KSEGRP_IN_PROC(p, kg) {
resetpriority(kg);
}
}
void

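For a feel of the numbers in resetpriority() above, here is the same formula with the kernel constants stubbed in. PUSER, PRIO_MIN, and the timeshare bounds match their historical values; NICE_WEIGHT and INVERSE_ESTCPU_WEIGHT are assumptions (the real definitions live in the scheduler source):

#include <stdio.h>

#define PUSER                 160
#define PRI_MIN_TIMESHARE     160
#define PRI_MAX_TIMESHARE     223
#define PRIO_MIN              (-20)
#define NICE_WEIGHT           1	/* assumed */
#define INVERSE_ESTCPU_WEIGHT 8	/* assumed */

static int imin(int a, int b) { return a < b ? a : b; }
static int imax(int a, int b) { return a > b ? a : b; }

/* p_nice now comes from the process; estcpu still lives in the ksegrp. */
static int user_pri(int p_nice, int kg_estcpu)
{
	int newpriority = PUSER + kg_estcpu / INVERSE_ESTCPU_WEIGHT +
	    NICE_WEIGHT * (p_nice - PRIO_MIN);
	return imin(imax(newpriority, PRI_MIN_TIMESHARE), PRI_MAX_TIMESHARE);
}

int main(void)
{
	/* Same estcpu, different nice: the niced process sits lower. */
	printf("nice  0: pri %d\n", user_pri(0, 40));	/* 185 */
	printf("nice 10: pri %d\n", user_pri(10, 40));	/* 195 */
	return 0;
}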
View File

@ -366,9 +366,9 @@ kseq_load_add(struct kseq *kseq, struct kse *ke)
CTR6(KTR_ULE,
"Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
ke->ke_proc->p_nice, kseq->ksq_nicemin);
if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
kseq_nice_add(kseq, ke->ke_proc->p_nice);
}
static void
@ -388,7 +388,7 @@ kseq_load_rem(struct kseq *kseq, struct kse *ke)
kseq->ksq_load--;
ke->ke_runq = NULL;
if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}
static void
@ -929,7 +929,7 @@ sched_priority(struct ksegrp *kg)
pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
pri += SCHED_PRI_BASE;
pri += kg->kg_nice;
pri += kg->kg_proc->p_nice;
if (pri > PRI_MAX_TIMESHARE)
pri = PRI_MAX_TIMESHARE;
@ -980,13 +980,13 @@ sched_slice(struct kse *ke)
if (!SCHED_INTERACTIVE(kg)) {
int nice;
nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
if (kseq->ksq_load_timeshare == 0 ||
kg->kg_nice < kseq->ksq_nicemin)
kg->kg_proc->p_nice < kseq->ksq_nicemin)
ke->ke_slice = SCHED_SLICE_MAX;
else if (nice <= SCHED_SLICE_NTHRESH)
ke->ke_slice = SCHED_SLICE_NICE(nice);
else if (kg->kg_nice == 0)
else if (kg->kg_proc->p_nice == 0)
ke->ke_slice = SCHED_SLICE_MIN;
else
ke->ke_slice = 0;
@ -995,7 +995,7 @@ sched_slice(struct kse *ke)
CTR6(KTR_ULE,
"Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
return;
@ -1167,29 +1167,35 @@ sched_switch(struct thread *td)
}
void
sched_nice(struct ksegrp *kg, int nice)
sched_nice(struct proc *p, int nice)
{
struct ksegrp *kg;
struct kse *ke;
struct thread *td;
struct kseq *kseq;
PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_assert(&sched_lock, MA_OWNED);
/*
* We need to adjust the nice counts for running KSEs.
*/
if (kg->kg_pri_class == PRI_TIMESHARE)
FOREACH_KSE_IN_GROUP(kg, ke) {
if (ke->ke_runq == NULL)
continue;
kseq = KSEQ_CPU(ke->ke_cpu);
kseq_nice_rem(kseq, kg->kg_nice);
kseq_nice_add(kseq, nice);
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_pri_class == PRI_TIMESHARE) {
FOREACH_KSE_IN_GROUP(kg, ke) {
if (ke->ke_runq == NULL)
continue;
kseq = KSEQ_CPU(ke->ke_cpu);
kseq_nice_rem(kseq, p->p_nice);
kseq_nice_add(kseq, nice);
}
}
kg->kg_nice = nice;
sched_priority(kg);
FOREACH_THREAD_IN_GROUP(kg, td)
td->td_flags |= TDF_NEEDRESCHED;
}
p->p_nice = nice;
FOREACH_KSEGRP_IN_PROC(p, kg) {
sched_priority(kg);
FOREACH_THREAD_IN_GROUP(kg, td)
td->td_flags |= TDF_NEEDRESCHED;
}
}
void
@ -1246,6 +1252,7 @@ sched_fork(struct proc *p, struct proc *p1)
mtx_assert(&sched_lock, MA_OWNED);
p1->p_nice = p->p_nice;
sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
@ -1273,7 +1280,6 @@ sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
child->kg_slptime = kg->kg_slptime;
child->kg_runtime = kg->kg_runtime;
child->kg_user_pri = kg->kg_user_pri;
child->kg_nice = kg->kg_nice;
sched_interact_fork(child);
kg->kg_runtime += tickincr << 10;
sched_interact_update(kg);
@ -1327,11 +1333,11 @@ sched_class(struct ksegrp *kg, int class)
#endif
if (oclass == PRI_TIMESHARE) {
kseq->ksq_load_timeshare--;
kseq_nice_rem(kseq, kg->kg_nice);
kseq_nice_rem(kseq, kg->kg_proc->p_nice);
}
if (nclass == PRI_TIMESHARE) {
kseq->ksq_load_timeshare++;
kseq_nice_add(kseq, kg->kg_nice);
kseq_nice_add(kseq, kg->kg_proc->p_nice);
}
}

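The sched_slice() hunks above decide how large a time slice a kse earns from its process's nice relative to ksq_nicemin, the least-nice timeshare process on that queue. A toy rendering of the rule (the slice constants and the linear taper standing in for SCHED_SLICE_NICE() are assumptions):

#include <stdio.h>

#define SLICE_MAX    140	/* assumed, in ticks */
#define SLICE_MIN    10		/* assumed */
#define NICE_NTHRESH 13		/* assumed: nice spread that still earns a slice */

static int slice(int p_nice, int nicemin, int timeshare_load)
{
	int nice = p_nice - nicemin;

	if (timeshare_load == 0 || p_nice < nicemin)
		return SLICE_MAX;	/* least nice on the queue: full slice */
	if (nice <= NICE_NTHRESH)	/* taper from MAX toward MIN */
		return SLICE_MAX - (SLICE_MAX - SLICE_MIN) * nice / NICE_NTHRESH;
	if (p_nice == 0)
		return SLICE_MIN;
	return 0;			/* too nice: waits until others drain */
}

int main(void)
{
	for (int n = -5; n <= 20; n += 5)
		printf("nice %3d -> slice %d\n", n, slice(n, -5, 4));
	return 0;
}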
View File

@ -509,7 +509,6 @@ struct ksegrp {
#define kg_startcopy kg_endzero
u_char kg_pri_class; /* (j) Scheduling class. */
u_char kg_user_pri; /* (j) User pri from estcpu and nice. */
signed char kg_nice; /* (c + j) Process "nice" value. */
#define kg_endcopy kg_numthreads
int kg_numthreads; /* (j) Num threads in total. */
int kg_kses; /* (j) Num KSEs in group. */
@ -597,6 +596,7 @@ struct proc {
struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */
struct pargs *p_args; /* (c) Process arguments. */
rlim_t p_cpulimit; /* (j) Current CPU limit in seconds. */
signed char p_nice; /* (c + j) Process "nice" value. */
/* End area that is copied on creation. */
#define p_endcopy p_xstat

View File

@ -55,7 +55,7 @@ void sched_fork(struct proc *p, struct proc *child);
void sched_class(struct ksegrp *kg, int class);
void sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_nice(struct ksegrp *kg, int nice);
void sched_nice(struct proc *p, int nice);
/*
* Threads are switched in and out, block on resources, have temporary

View File

@ -301,11 +301,11 @@ ffs_snapshot(mp, snapfile)
*
* Rescind nice scheduling while running with the filesystem suspended.
*/
if (td->td_ksegrp->kg_nice > 0) {
if (td->td_proc->p_nice > 0) {
PROC_LOCK(td->td_proc);
mtx_lock_spin(&sched_lock);
saved_nice = td->td_ksegrp->kg_nice;
sched_nice(td->td_ksegrp, 0);
saved_nice = td->td_proc->p_nice;
sched_nice(td->td_proc, 0);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(td->td_proc);
}
@ -665,7 +665,7 @@ ffs_snapshot(mp, snapfile)
if (saved_nice > 0) {
PROC_LOCK(td->td_proc);
mtx_lock_spin(&sched_lock);
sched_nice(td->td_ksegrp, saved_nice);
sched_nice(td->td_proc, saved_nice);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(td->td_proc);
}

View File

@ -824,7 +824,7 @@ scheduler(dummy)
kg = td->td_ksegrp;
pri = p->p_swtime + kg->kg_slptime;
if ((p->p_sflag & PS_SWAPINREQ) == 0) {
pri -= kg->kg_nice * 8;
pri -= p->p_nice * 8;
}
/*

View File

@ -1225,12 +1225,9 @@ vm_pageout_scan(int pass)
}
sx_sunlock(&allproc_lock);
if (bigproc != NULL) {
struct ksegrp *kg;
killproc(bigproc, "out of swap space");
mtx_lock_spin(&sched_lock);
FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
}
sched_nice(bigproc, PRIO_MIN);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(bigproc);
wakeup(&cnt.v_free_count);