- Various style fixes in both code and comments.

- Update some stale comments.
- Sort a couple of includes.
- Only set 'newcpu' in updatepri() if we use it.
- No functional changes.

Obtained from:	bde (via an old diff I got a long time ago)
John Baldwin 2003-08-15 21:29:06 +00:00
parent b71ba4ff1a
commit 70fca4277e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=118972
3 changed files with 75 additions and 66 deletions

sys/kern/kern_fork.c

@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/eventhandler.h>
 #include <sys/filedesc.h>
 #include <sys/kernel.h>
+#include <sys/kthread.h>
 #include <sys/sysctl.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
@@ -59,12 +60,12 @@ __FBSDID("$FreeBSD$");
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/syscall.h>
+#include <sys/vmmeter.h>
 #include <sys/vnode.h>
 #include <sys/acct.h>
 #include <sys/mac.h>
 #include <sys/ktr.h>
 #include <sys/ktrace.h>
-#include <sys/kthread.h>
 #include <sys/unistd.h>
 #include <sys/jail.h>
 #include <sys/sx.h>
@@ -75,7 +76,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
-#include <sys/vmmeter.h>
 #include <sys/user.h>
 #include <machine/critical.h>
@@ -104,7 +104,7 @@ fork(td, uap)
 		td->td_retval[0] = p2->p_pid;
 		td->td_retval[1] = 0;
 	}
-	return error;
+	return (error);
 }
 
 /*
@@ -124,7 +124,7 @@ vfork(td, uap)
 		td->td_retval[0] = p2->p_pid;
 		td->td_retval[1] = 0;
 	}
-	return error;
+	return (error);
 }
 
 /*
@@ -146,11 +146,10 @@ rfork(td, uap)
 		td->td_retval[0] = p2 ? p2->p_pid : 0;
 		td->td_retval[1] = 0;
 	}
-	return error;
+	return (error);
 }
 
-int nprocs = 1;	/* process 0 */
-
+int	nprocs = 1;		/* process 0 */
 int lastpid = 0;
 SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
     "Last used PID");
@@ -192,32 +191,32 @@ SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
 int
 fork1(td, flags, pages, procp)
-	struct thread *td;		/* parent proc */
+	struct thread *td;
 	int flags;
 	int pages;
-	struct proc **procp;		/* child proc */
+	struct proc **procp;
 {
-	struct proc *p2, *pptr;
+	struct proc *p1, *p2, *pptr;
 	uid_t uid;
 	struct proc *newproc;
-	int trypid;
-	int ok;
+	int ok, trypid;
 	static int curfail, pidchecked = 0;
 	static struct timeval lastfail;
 	struct filedesc *fd;
 	struct filedesc_to_leader *fdtol;
-	struct proc *p1 = td->td_proc;
 	struct thread *td2;
 	struct kse *ke2;
 	struct ksegrp *kg2;
 	struct sigacts *newsigacts;
 	int error;
 
-	/* Can't copy and clear */
+	/* Can't copy and clear. */
 	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
 		return (EINVAL);
 
+	p1 = td->td_proc;
 	mtx_lock(&Giant);
 
 	/*
 	 * Here we don't create a new process, but we divorce
 	 * certain parts of a process from itself.
@@ -332,9 +331,8 @@ fork1(td, flags, pages, procp)
 	 */
 	trypid = lastpid + 1;
 	if (flags & RFHIGHPID) {
-		if (trypid < 10) {
+		if (trypid < 10)
 			trypid = 10;
-		}
 	} else {
 		if (randompid)
 			trypid += arc4random() % randompid;
@@ -582,7 +580,7 @@ fork1(td, flags, pages, procp)
 	 * Preserve some more flags in subprocess.  P_PROFIL has already
 	 * been preserved.
 	 */
-	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
+	p2->p_flag |= p1->p_flag & (P_ALTSTACK | P_SUGID);
 	SESS_LOCK(p1->p_session);
 	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
 		p2->p_flag |= P_CONTROLT;
@@ -700,9 +698,10 @@ fork1(td, flags, pages, procp)
 	_PRELE(p1);
 
 	/*
-	 * tell any interested parties about the new process
+	 * Tell any interested parties about the new process.
 	 */
 	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
+
 	PROC_UNLOCK(p1);
 
 	/*
@@ -756,8 +755,14 @@ fork_exit(callout, arg, frame)
 	void *arg;
 	struct trapframe *frame;
 {
-	struct thread *td;
 	struct proc *p;
+	struct thread *td;
 
+	/*
+	 * Processes normally resume in mi_switch() after being
+	 * cpu_switch()'ed to, but when children start up they arrive here
+	 * instead, so we must do much the same things as mi_switch() would.
+	 */
+
 	if ((td = PCPU_GET(deadthread))) {
 		PCPU_SET(deadthread, NULL);
@@ -767,6 +772,7 @@ fork_exit(callout, arg, frame)
 	p = td->td_proc;
 	td->td_oncpu = PCPU_GET(cpuid);
 	p->p_state = PRS_NORMAL;
+
 	/*
 	 * Finish setting up thread glue so that it begins execution in a
 	 * non-nested critical section with sched_lock held but not recursed.
@@ -783,9 +789,9 @@ fork_exit(callout, arg, frame)
 	/*
 	 * cpu_set_fork_handler intercepts this function call to
-         * have this call a non-return function to stay in kernel mode.
-         * initproc has its own fork handler, but it does return.
-         */
+	 * have this call a non-return function to stay in kernel mode.
+	 * initproc has its own fork handler, but it does return.
+	 */
 	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
 	callout(arg, frame);

sys/kern/kern_synch.c

@@ -183,19 +183,19 @@ msleep(ident, mtx, priority, wmesg, timo)
 	}
 	if (cold ) {
 		/*
-		 * During autoconfiguration, just give interrupts
-		 * a chance, then just return.
-		 * Don't run any other procs or panic below,
+		 * During autoconfiguration, just return;
+		 * don't run any other procs or panic below,
 		 * in case this is the idle process and already asleep.
+		 * XXX: this used to do "s = splhigh(); splx(safepri);
+		 * splx(s);" to give interrupts a chance, but there is
+		 * no way to give interrupts a chance now.
 		 */
 		if (mtx != NULL && priority & PDROP)
 			mtx_unlock(mtx);
 		mtx_unlock_spin(&sched_lock);
 		return (0);
 	}
-
 	DROP_GIANT();
-
 	if (mtx != NULL) {
 		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
 		WITNESS_SAVE(&mtx->mtx_object, mtx);
@@ -203,7 +203,6 @@ msleep(ident, mtx, priority, wmesg, timo)
 		if (priority & PDROP)
 			mtx = NULL;
 	}
-
 	KASSERT(p != NULL, ("msleep1"));
 	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
@@ -288,10 +287,9 @@ msleep(ident, mtx, priority, wmesg, timo)
 		rval = td->td_intrval;
 	}
 	mtx_unlock_spin(&sched_lock);
-
 	if (rval == 0 && catch) {
 		PROC_LOCK(p);
-		/* XXX: shouldn't we always be calling cursig() */
+		/* XXX: shouldn't we always be calling cursig()? */
 		mtx_lock(&p->p_sigacts->ps_mtx);
 		if (sig != 0 || (sig = cursig(td))) {
 			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
@@ -315,7 +313,7 @@ msleep(ident, mtx, priority, wmesg, timo)
 }
 
 /*
- * Implement timeout for msleep()
+ * Implement timeout for msleep().
  *
  * If process hasn't been awakened (wchan non-zero),
  * set timeout flag and undo the sleep.  If proc
@@ -326,8 +324,9 @@ static void
 endtsleep(arg)
 	void *arg;
 {
-	register struct thread *td = arg;
+	register struct thread *td;
 
+	td = (struct thread *)arg;
 	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
 	    td, td->td_proc->p_pid, td->td_proc->p_comm);
 	mtx_lock_spin(&sched_lock);
@@ -341,9 +340,8 @@ endtsleep(arg)
 		TD_CLR_ON_SLEEPQ(td);
 		td->td_flags |= TDF_TIMEOUT;
 		td->td_wmesg = NULL;
-	} else {
+	} else
 		td->td_flags |= TDF_TIMOFAIL;
-	}
 	TD_CLR_SLEEPING(td);
 	setrunnable(td);
 	mtx_unlock_spin(&sched_lock);
@@ -429,9 +427,9 @@ void
 wakeup_one(ident)
 	register void *ident;
 {
-	register struct proc *p;
 	register struct slpquehead *qp;
 	register struct thread *td;
+	register struct proc *p;
 	struct thread *ntd;
 
 	mtx_lock_spin(&sched_lock);
@@ -489,7 +487,7 @@ mi_switch(void)
 	if (db_active) {
 		mtx_unlock_spin(&sched_lock);
 		db_print_backtrace();
-		db_error("Context switches not allowed in the debugger.");
+		db_error("Context switches not allowed in the debugger");
 	}
 #endif
@@ -558,8 +556,9 @@ mi_switch(void)
 void
 setrunnable(struct thread *td)
 {
-	struct proc *p = td->td_proc;
+	struct proc *p;
 
+	p = td->td_proc;
 	mtx_assert(&sched_lock, MA_OWNED);
 	switch (p->p_state) {
 	case PRS_ZOMBIE:
@@ -666,8 +665,9 @@ sched_setup(dummy)
 int
 yield(struct thread *td, struct yield_args *uap)
 {
-	struct ksegrp *kg = td->td_ksegrp;
+	struct ksegrp *kg;
 
+	kg = td->td_ksegrp;
 	mtx_assert(&Giant, MA_NOTOWNED);
 	mtx_lock_spin(&sched_lock);
 	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
@@ -675,7 +675,5 @@ yield(struct thread *td, struct yield_args *uap)
 	mi_switch();
 	mtx_unlock_spin(&sched_lock);
 	td->td_retval[0] = 0;
-
 	return (0);
 }
-

sys/kern/sched_4bsd.c

@@ -150,20 +150,20 @@ roundrobin(void *arg)
 /*
  * Constants for digital decay and forget:
- *	90% of (p_estcpu) usage in 5 * loadav time
- *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
+ *	90% of (kg_estcpu) usage in 5 * loadav time
+ *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
  *	Note that, as ps(1) mentions, this can let percentages
  *	total over 100% (I've seen 137.9% for 3 processes).
  *
- * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
+ * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
  *
- * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
+ * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
  * That is, the system wants to compute a value of decay such
  * that the following for loop:
  * 	for (i = 0; i < (5 * loadavg); i++)
- *		p_estcpu *= decay;
+ *		kg_estcpu *= decay;
  * will compute
- *	p_estcpu *= 0.1;
+ *	kg_estcpu *= 0.1;
  * for all values of loadavg:
  *
  * Mathematically this loop can be expressed by saying:
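[Editorial note: the 90%-in-(5 * loadavg)-seconds claim in the comment above is easy to check numerically. The standalone sketch below is not part of this commit; it reuses the loadfactor()/decay_cpu() definitions from the next hunk and assumes the stock FSHIFT of 11 from <sys/param.h>.]

	#include <stdio.h>

	#define	FSHIFT	11			/* assumed, as in <sys/param.h> */
	#define	FSCALE	(1 << FSHIFT)

	#define	loadfactor(loadav)	(2 * (loadav))
	#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

	int
	main(void)
	{
		int loadav, i;

		for (loadav = 1; loadav <= 4; loadav++) {
			/* The kernel passes a fixed-point load average here. */
			long loadfac = loadfactor((long)loadav * FSCALE);
			long cpu = 255;		/* maximum estcpu */

			for (i = 0; i < 5 * loadav; i++)
				cpu = decay_cpu(loadfac, cpu);
			/* Prints 33, 25, 23, 22: roughly a tenth of 255,
			   and the approximation tightens as loadavg grows. */
			printf("loadav %d: 255 -> %ld\n", loadav, cpu);
		}
		return (0);
	}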
@@ -216,7 +216,7 @@ roundrobin(void *arg)
 #define	loadfactor(loadav)	(2 * (loadav))
 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
 
-/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
+/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
 static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
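[Editorial note: a quick sanity check on the constant, not from the commit. schedcpu() runs once per second, so scaling by exp(-1/20) each run leaves exp(-3), about 5%, after 60 seconds, i.e. 95% has decayed.]

	#include <math.h>
	#include <stdio.h>

	int
	main(void)
	{
		double ccpu = exp(-1.0 / 20.0);

		/* Matches the literal above: 0.95122942450071400909... */
		printf("ccpu    = %.20f\n", ccpu);
		/* After 60 one-second decays only ~4.98% remains. */
		printf("ccpu^60 = %.4f\n", pow(ccpu, 60.0));
		return (0);
	}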
@@ -247,22 +247,26 @@ schedcpu(void *arg)
 	struct proc *p;
 	struct kse *ke;
 	struct ksegrp *kg;
-	int realstathz;
-	int awake;
+	int awake, realstathz;
 
 	realstathz = stathz ? stathz : hz;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
+		/*
+		 * Prevent state changes and protect run queue.
+		 */
 		mtx_lock_spin(&sched_lock);
+		/*
+		 * Increment time in/out of memory.  We ignore overflow; with
+		 * 16-bit int's (remember them?) overflow takes 45 days.
+		 */
 		p->p_swtime++;
 		FOREACH_KSEGRP_IN_PROC(p, kg) {
 			awake = 0;
 			FOREACH_KSE_IN_GROUP(kg, ke) {
 				/*
-				 * Increment time in/out of memory and sleep
-				 * time (if sleeping).  We ignore overflow;
-				 * with 16-bit int's (remember them?)
-				 * overflow takes 45 days.
+				 * Increment sleep time (if sleeping).  We
+				 * ignore overflow, as above.
 				 */
 				/*
				 * The kse slptimes are not touched in wakeup
@@ -281,12 +285,11 @@ schedcpu(void *arg)
 			}
 
 			/*
-			 * pctcpu is only for ps?
-			 * Do it per kse.. and add them up at the end?
+			 * ke_pctcpu is only for ps and ttyinfo().
+			 * Do it per kse, and add them up at the end?
 			 * XXXKSE
 			 */
-			ke->ke_pctcpu
-			    = (ke->ke_pctcpu * ccpu) >>
+			ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
 			    FSHIFT;
 			/*
 			 * If the kse has been idle the entire second,
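[Editorial note: the shift in the rewrapped statement is ordinary fixed-point scaling. ke_pctcpu and ccpu are both fixpt_t values carrying FSHIFT fraction bits, so their product carries twice that and must be shifted back once. A small illustration, again assuming FSHIFT is 11 as in <sys/param.h>.]

	#include <stdint.h>
	#include <stdio.h>

	#define	FSHIFT	11			/* assumed, as in <sys/param.h> */
	#define	FSCALE	(1 << FSHIFT)

	int
	main(void)
	{
		uint64_t pctcpu = FSCALE / 2;	/* 0.5 fixed point: 50% CPU */
		uint64_t ccpu = (uint64_t)(0.95122942450071400909 * FSCALE);

		/* One second of decay: the product has 2 * FSHIFT fraction
		   bits, so shift right by FSHIFT to renormalize. */
		pctcpu = (pctcpu * ccpu) >> FSHIFT;
		printf("%.4f\n", (double)pctcpu / FSCALE);	/* ~0.4756 */
		return (0);
	}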
@@ -326,9 +329,8 @@ schedcpu(void *arg)
 					updatepri(kg);
 				}
 				kg->kg_slptime = 0;
-			} else {
+			} else
 				kg->kg_slptime++;
-			}
 			if (kg->kg_slptime > 1)
 				continue;
 			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
@@ -347,20 +349,21 @@ schedcpu(void *arg)
 /*
  * Recalculate the priority of a process after it has slept for a while.
- * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
- * least six times the loadfactor will decay p_estcpu to zero.
+ * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
+ * least six times the loadfactor will decay kg_estcpu to zero.
  */
 static void
 updatepri(struct ksegrp *kg)
 {
+	register fixpt_t loadfac;
 	register unsigned int newcpu;
-	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 
-	newcpu = kg->kg_estcpu;
+	loadfac = loadfactor(averunnable.ldavg[0]);
 	if (kg->kg_slptime > 5 * loadfac)
 		kg->kg_estcpu = 0;
 	else {
-		kg->kg_slptime--;	/* the first time was done in schedcpu */
+		newcpu = kg->kg_estcpu;
+		kg->kg_slptime--;	/* was incremented in schedcpu() */
 		while (newcpu && --kg->kg_slptime)
 			newcpu = decay_cpu(loadfac, newcpu);
 		kg->kg_estcpu = newcpu;
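[Editorial note: this hunk is the "Only set 'newcpu' in updatepri() if we use it" item from the commit message. On the long-sleep path kg_estcpu is simply zeroed and newcpu is never read, so the assignment moves into the else branch. A condensed sketch of the resulting control flow, with the kernel types reduced to plain integers, follows; it is an illustration, not the commit's code.]

	#define	FSCALE	(1 << 11)
	#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

	/* Caller only invokes this when slptime > 1, as schedcpu() does. */
	static unsigned int
	updatepri_sketch(unsigned int estcpu, unsigned int slptime, long loadfac)
	{
		unsigned int newcpu;

		if (slptime > 5 * loadfac)
			estcpu = 0;		/* newcpu would never be read here */
		else {
			newcpu = estcpu;	/* moved down from the declarations */
			slptime--;		/* first tick was counted in schedcpu() */
			while (newcpu && --slptime)
				newcpu = decay_cpu(loadfac, newcpu);
			estcpu = newcpu;
		}
		return (estcpu);
	}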
@@ -395,6 +398,7 @@ resetpriority(struct ksegrp *kg)
 static void
 sched_setup(void *dummy)
 {
+
 	if (sched_quantum == 0)
 		sched_quantum = SCHED_QUANTUM;
 	hogticks = 2 * sched_quantum;
@@ -425,8 +429,8 @@ sched_rr_interval(void)
 /*
  * We adjust the priority of the current process.  The priority of
  * a process gets worse as it accumulates CPU time.  The cpu usage
- * estimator (p_estcpu) is increased here.  resetpriority() will
- * compute a different priority each time p_estcpu increases by
+ * estimator (kg_estcpu) is increased here.  resetpriority() will
+ * compute a different priority each time kg_estcpu increases by
  * INVERSE_ESTCPU_WEIGHT
  * (until MAXPRI is reached).  The cpu usage estimator ramps up
  * quite quickly when the process is running (linearly), and decays
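[Editorial note: the relationship this comment describes is roughly linear until it clips. The sketch below is modeled only on the comment, since resetpriority() itself is not part of this hunk; INVERSE_ESTCPU_WEIGHT is the identifier the comment names, while its value and the PUSER_SKETCH/MAXPRI_SKETCH bounds are hypothetical stand-ins.]

	#define	INVERSE_ESTCPU_WEIGHT	8	/* assumed value */
	#define	PUSER_SKETCH		160	/* hypothetical base user priority */
	#define	MAXPRI_SKETCH		255	/* hypothetical worst priority */

	/* Worse (numerically larger) priority as kg_estcpu accumulates. */
	static int
	user_pri_sketch(unsigned int estcpu)
	{
		int pri = PUSER_SKETCH + estcpu / INVERSE_ESTCPU_WEIGHT;

		if (pri > MAXPRI_SKETCH)
			pri = MAXPRI_SKETCH;	/* "until MAXPRI is reached" */
		return (pri);
	}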
@@ -454,6 +458,7 @@ sched_clock(struct kse *ke)
 		td->td_priority = kg->kg_user_pri;
 	}
 }
+
 /*
  * charge childs scheduling cpu usage to parent.
  *