- Move all of the PS_ flags into either p_flag or td_flags.
- p_sflag was mostly protected by PROC_LOCK rather than the PROC_SLOCK or
  previously the sched_lock.  These bugs have existed for some time.
- Allow swapout to try each thread in a process individually and then
  swapin the whole process if any of these fail.  This allows us to move
  most scheduler related swap flags into td_flags.
- Keep ki_sflag for backwards compat but change all in-source tools to
  use the new and more correct location of P_INMEM.

Reported by:	pho
Reviewed by:	attilio, kib
Approved by:	re (kensmith)
commit 3fc0f8b973
parent 74666fdfce
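The change described above can be pictured with a small stand-alone sketch (illustrative code, not part of the commit: the structs, helper names, and main() are invented stand-ins, and only the flag values are copied from the sys/proc.h hunk further down). It models how an "is this process resident?" test moves from the spinlock-protected p_sflag (PS_INMEM) to p_flag (P_INMEM), and how a wakeup now marks the individual thread with TDF_SWAPINREQ instead of setting PS_SWAPINREQ on the whole process:

#include <stdio.h>

/* Flag values as defined in the sys/proc.h hunk of this diff. */
#define P_INMEM		0x10000000	/* Process is loaded into memory. */
#define TDF_INMEM	0x00000004	/* Thread's stack is in memory. */
#define TDF_SWAPINREQ	0x00400000	/* Swapin request due to wakeup. */

/* Simplified stand-ins for struct proc / struct thread. */
struct thread { int td_flags; };
struct proc   { int p_flag; struct thread td0; };

/* Old: tested (p->p_sflag & PS_INMEM) under the proc spinlock.
 * New: the same information lives in p_flag under the proc lock. */
static int
proc_in_memory(const struct proc *p)
{
	return ((p->p_flag & P_INMEM) != 0);
}

/* Models the new setrunnable() path: a swapped-out thread is marked
 * with TDF_SWAPINREQ instead of setting PS_SWAPINREQ on the process. */
static void
request_swapin(struct thread *td)
{
	if ((td->td_flags & TDF_INMEM) == 0)
		td->td_flags |= TDF_SWAPINREQ;
}

int
main(void)
{
	struct proc p = { 0, { 0 } };	/* swapped out: neither flag set */

	request_swapin(&p.td0);
	printf("resident=%d swapin_requested=%d\n",
	    proc_in_memory(&p), (p.td0.td_flags & TDF_SWAPINREQ) != 0);
	return (0);
}

Userland keeps working because ki_sflag is still filled in: fill_kinfo_proc_only() now synthesizes PS_INMEM from P_INMEM, and sys/user.h retains PS_INMEM as a compatibility constant, as the hunks below show.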
@@ -197,14 +197,13 @@ logname(KINFO *k, VARENT *ve)
 void
 state(KINFO *k, VARENT *ve)
 {
-	int flag, sflag, tdflags;
+	int flag, tdflags;
 	char *cp;
 	VAR *v;
 	char buf[16];

 	v = ve->var;
 	flag = k->ki_p->ki_flag;
-	sflag = k->ki_p->ki_sflag;
 	tdflags = k->ki_p->ki_tdflags; /* XXXKSE */
 	cp = buf;

@@ -242,7 +241,7 @@ state(KINFO *k, VARENT *ve)
 		*cp = '?';
 	}
 	cp++;
-	if (!(sflag & PS_INMEM))
+	if (!(flag & P_INMEM))
 		*cp++ = 'W';
 	if (k->ki_p->ki_nice < NZERO)
 		*cp++ = '<';
@@ -591,7 +590,7 @@ getpcpu(const KINFO *k)
 #define fxtofl(fixpt) ((double)(fixpt) / fscale)

 	/* XXX - I don't like this */
-	if (k->ki_p->ki_swtime == 0 || (k->ki_p->ki_sflag & PS_INMEM) == 0)
+	if (k->ki_p->ki_swtime == 0 || (k->ki_p->ki_flag & P_INMEM) == 0)
 		return (0.0);
 	if (rawcpu)
 		return (100.0 * fxtofl(k->ki_p->ki_pctcpu));
@@ -619,7 +618,7 @@ getpmem(KINFO *k)
 	if (failure)
 		return (0.0);

-	if ((k->ki_p->ki_sflag & PS_INMEM) == 0)
+	if ((k->ki_p->ki_flag & P_INMEM) == 0)
 		return (0.0);
 	/* XXX want pmap ptpages, segtab, etc. (per architecture) */
 	/* XXX don't have info about shared */
@@ -994,13 +994,13 @@ fmt(char **(*fn)(kvm_t *, const struct kinfo_proc *, int), KINFO *ki,
 	return (s);
 }

-#define UREADOK(ki) (forceuread || (ki->ki_p->ki_sflag & PS_INMEM))
+#define UREADOK(ki) (forceuread || (ki->ki_p->ki_flag & P_INMEM))

 static void
 saveuser(KINFO *ki)
 {

-	if (ki->ki_p->ki_sflag & PS_INMEM) {
+	if (ki->ki_p->ki_flag & P_INMEM) {
 		/*
 		 * The u-area might be swapped out, and we can't get
 		 * at it because we have a crashdump and no swap.
@@ -209,7 +209,7 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
 			kp->ki_sigcatch = sigacts.ps_sigcatch;
 		}
 #if 0
-		if ((proc.p_sflag & PS_INMEM) && proc.p_stats != NULL) {
+		if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
 			if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
 				_kvm_err(kd, kd->program,
 				    "can't read stats at %x", proc.p_stats);
@@ -370,7 +370,7 @@ nopgrp:
 	if (proc.p_state != PRS_ZOMBIE) {
 		kp->ki_swtime = proc.p_swtime;
 		kp->ki_flag = proc.p_flag;
-		kp->ki_sflag = proc.p_sflag;
+		kp->ki_sflag = 0;
 		kp->ki_nice = proc.p_nice;
 		kp->ki_traceflag = proc.p_traceflag;
 		if (proc.p_state == PRS_NORMAL) {
@@ -74,7 +74,6 @@ __FBSDID("$FreeBSD$");
 ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
 ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
-ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));

 ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
@@ -162,7 +162,7 @@ db_ps(db_expr_t addr, boolean_t hasaddr, db_expr_t count, char *modif)
 		state[1] = '\0';

 		/* Additional process state flags. */
-		if (!p->p_sflag & PS_INMEM)
+		if (!p->p_flag & P_INMEM)
 			strlcat(state, "W", sizeof(state));
 		if (p->p_flag & P_TRACED)
 			strlcat(state, "X", sizeof(state));
@@ -112,7 +112,6 @@ procfs_doprocstatus(PFS_FILL_ARGS)
 		sbuf_printf(sb, "noflags");
 	}

-	PROC_SLOCK(p);
 #ifdef KSE
 	if (p->p_flag & P_SA)
 		wmesg = "-kse- ";
@@ -128,9 +127,10 @@ procfs_doprocstatus(PFS_FILL_ARGS)
 		wmesg = "nochan";
 	}

-	if (p->p_sflag & PS_INMEM) {
+	if (p->p_flag & P_INMEM) {
 		struct timeval start, ut, st;

+		PROC_SLOCK(p);
 		calcru(p, &ut, &st);
+		PROC_SUNLOCK(p);
 		start = p->p_stats->p_start;
@@ -139,10 +139,8 @@ procfs_doprocstatus(PFS_FILL_ARGS)
 		    (intmax_t)start.tv_sec, start.tv_usec,
 		    (intmax_t)ut.tv_sec, ut.tv_usec,
 		    (intmax_t)st.tv_sec, st.tv_usec);
-	} else {
-		PROC_SUNLOCK(p);
+	} else
 		sbuf_printf(sb, " -1,-1 -1,-1 -1,-1");
-	}

 	sbuf_printf(sb, " %s", wmesg);

@@ -78,7 +78,6 @@ __FBSDID("$FreeBSD$");
 ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
 ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
-ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));

 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
 ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
@@ -222,7 +222,7 @@ linux_proc_read_fpxregs(struct thread *td, struct linux_pt_fpxreg *fpxregs)
 {

 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
-	if (cpu_fxsr == 0 || (td->td_proc->p_sflag & PS_INMEM) == 0)
+	if (cpu_fxsr == 0 || (td->td_proc->p_flag & P_INMEM) == 0)
 		return (EIO);
 	bcopy(&td->td_pcb->pcb_save.sv_xmm, fpxregs, sizeof(*fpxregs));
 	return (0);
@@ -233,7 +233,7 @@ linux_proc_write_fpxregs(struct thread *td, struct linux_pt_fpxreg *fpxregs)
 {

 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
-	if (cpu_fxsr == 0 || (td->td_proc->p_sflag & PS_INMEM) == 0)
+	if (cpu_fxsr == 0 || (td->td_proc->p_flag & P_INMEM) == 0)
 		return (EIO);
 	bcopy(fpxregs, &td->td_pcb->pcb_save.sv_xmm, sizeof(*fpxregs));
 	return (0);
@@ -415,8 +415,7 @@ proc0_init(void *dummy __unused)
 	session0.s_leader = p;

 	p->p_sysent = &null_sysvec;
-	p->p_flag = P_SYSTEM;
-	p->p_sflag = PS_INMEM;
+	p->p_flag = P_SYSTEM | P_INMEM;
 	p->p_state = PRS_NORMAL;
 	knlist_init(&p->p_klist, &p->p_mtx, NULL, NULL, NULL);
 	STAILQ_INIT(&p->p_ktr);
@@ -428,6 +427,7 @@ proc0_init(void *dummy __unused)
 	td->td_priority = PVM;
 	td->td_base_pri = PUSER;
 	td->td_oncpu = 0;
+	td->td_flags = TDF_INMEM;
 	p->p_peers = 0;
 	p->p_leader = p;

@@ -710,7 +710,7 @@ create_init(const void *udata __unused)
 	/* divorce init's credentials from the kernel's */
 	newcred = crget();
 	PROC_LOCK(initproc);
-	initproc->p_flag |= P_SYSTEM;
+	initproc->p_flag |= P_SYSTEM | P_INMEM;
 	oldcred = initproc->p_ucred;
 	crcopy(newcred, oldcred);
 #ifdef MAC
@@ -723,9 +723,6 @@ create_init(const void *udata __unused)
 	PROC_UNLOCK(initproc);
 	crfree(oldcred);
 	cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
-	PROC_SLOCK(initproc);
-	initproc->p_sflag |= PS_INMEM;
-	PROC_SUNLOCK(initproc);
 	cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
 }
 SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
@@ -201,34 +201,29 @@ hardclock_cpu(int usermode)
 	struct pstats *pstats;
 	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
-	int ast;
+	int flags;

 	/*
 	 * Run current process's virtual and profile time, as needed.
 	 */
 	pstats = p->p_stats;
-	ast = 0;
+	flags = 0;
 	if (usermode &&
 	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
 		PROC_SLOCK(p);
-		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
-			p->p_sflag |= PS_ALRMPEND;
-			ast = 1;
-		}
+		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
+			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
 		PROC_SUNLOCK(p);
 	}
 	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
 		PROC_SLOCK(p);
-		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
-			p->p_sflag |= PS_PROFPEND;
-			ast = 1;
-		}
+		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
+			flags |= TDF_PROFPEND | TDF_ASTPENDING;
 		PROC_SUNLOCK(p);
 	}
 	thread_lock(td);
 	sched_tick();
-	if (ast)
-		td->td_flags |= TDF_ASTPENDING;
+	td->td_flags |= flags;
 	thread_unlock(td);

 #ifdef HWPMC_HOOKS
@@ -493,17 +493,15 @@ again:
 	td2->td_sigstk = td->td_sigstk;
 	td2->td_sigmask = td->td_sigmask;
+	td2->td_flags = TDF_INMEM;

 	/*
 	 * Duplicate sub-structures as needed.
 	 * Increase reference counts on shared objects.
 	 */
-	p2->p_flag = 0;
+	p2->p_flag = P_INMEM;
 	if (p1->p_flag & P_PROFIL)
 		startprofclock(p2);
-	PROC_SLOCK(p2);
-	p2->p_sflag = PS_INMEM;
-	PROC_SUNLOCK(p2);
 	td2->td_ucred = crhold(p2->p_ucred);
 	pargs_hold(p2->p_args);

@@ -1002,6 +1002,7 @@ thread_alloc_spare(struct thread *td)
 	    __rangeof(struct thread, td_startzero, td_endzero));
 	spare->td_proc = td->td_proc;
 	spare->td_ucred = crhold(td->td_ucred);
+	spare->td_flags = TDF_INMEM;
 }

 /*
@@ -1042,7 +1043,6 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
 	/* Let the new thread become owner of the upcall */
 	ku->ku_owner = td2;
 	td2->td_upcall = ku;
-	td2->td_flags = 0;
 	td2->td_pflags = TDP_SA|TDP_UPCALLING;
 	td2->td_state = TDS_CAN_RUN;
 	td2->td_inhibitors = 0;
@@ -690,14 +690,17 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
 		kp->ki_ssize = vm->vm_ssize;
 	} else if (p->p_state == PRS_ZOMBIE)
 		kp->ki_stat = SZOMB;
-	kp->ki_sflag = p->p_sflag;
+	if (kp->ki_flag & P_INMEM)
+		kp->ki_sflag = PS_INMEM;
+	else
+		kp->ki_sflag = 0;
 	kp->ki_swtime = p->p_swtime;
 	kp->ki_pid = p->p_pid;
 	kp->ki_nice = p->p_nice;
 	rufetch(p, &kp->ki_rusage);
 	kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
 	PROC_SUNLOCK(p);
-	if ((p->p_sflag & PS_INMEM) && p->p_stats != NULL) {
+	if ((p->p_flag & P_INMEM) && p->p_stats != NULL) {
 		kp->ki_start = p->p_stats->p_start;
 		timevaladd(&kp->ki_start, &boottime);
 		PROC_SLOCK(p);
@@ -558,8 +558,8 @@ runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
 	struct rqhead *rqh;
 	u_char pri;

-	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
-	    ("runq_remove_idx: process swapped out"));
+	KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+	    ("runq_remove_idx: thread swapped out"));
 	pri = ts->ts_rqindex;
 	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
 	rqh = &rq->rq_queues[pri];
@@ -463,16 +463,10 @@ mi_switch(int flags, struct thread *newtd)
 void
 setrunnable(struct thread *td)
 {
-	struct proc *p;

-	p = td->td_proc;
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	switch (p->p_state) {
-	case PRS_ZOMBIE:
-		panic("setrunnable(1)");
-	default:
-		break;
-	}
+	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
+	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
 	switch (td->td_state) {
 	case TDS_RUNNING:
 	case TDS_RUNQ:
@@ -491,9 +485,9 @@ setrunnable(struct thread *td)
 		printf("state is 0x%x", td->td_state);
 		panic("setrunnable(2)");
 	}
-	if ((p->p_sflag & PS_INMEM) == 0) {
-		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
-			p->p_sflag |= PS_SWAPINREQ;
+	if ((td->td_flags & TDF_INMEM) == 0) {
+		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
+			td->td_flags |= TDF_SWAPINREQ;
 			/*
 			 * due to a LOR between the thread lock and
 			 * the sleepqueue chain locks, use
@@ -531,7 +531,7 @@ thread_link(struct thread *td, struct proc *p)
 	 */
 	td->td_state = TDS_INACTIVE;
 	td->td_proc = p;
-	td->td_flags = 0;
+	td->td_flags = TDF_INMEM;

 	LIST_INIT(&td->td_contested);
 	sigqueue_init(&td->td_sigqueue, p);
@@ -1086,8 +1086,8 @@ sched_add(struct thread *td, int flags)
 	    ("sched_add: trying to run inhibited thread"));
 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 	    ("sched_add: bad thread state"));
-	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-	    ("sched_add: process swapped out"));
+	KASSERT(td->td_flags & TDF_INMEM,
+	    ("sched_add: thread swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
@@ -1155,8 +1155,8 @@ sched_add(struct thread *td, int flags)
 	    ("sched_add: trying to run inhibited thread"));
 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 	    ("sched_add: bad thread state"));
-	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-	    ("sched_add: process swapped out"));
+	KASSERT(td->td_flags & TDF_INMEM,
+	    ("sched_add: thread swapped out"));
 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
 	    td, td->td_proc->p_comm, td->td_priority, curthread,
 	    curthread->td_proc->p_comm);
@@ -1200,8 +1200,8 @@ sched_rem(struct thread *td)
 	struct td_sched *ts;

 	ts = td->td_sched;
-	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-	    ("sched_rem: process swapped out"));
+	KASSERT(td->td_flags & TDF_INMEM,
+	    ("sched_rem: thread swapped out"));
 	KASSERT(TD_ON_RUNQ(td),
 	    ("sched_rem: thread not on run queue"));
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -1253,8 +1253,8 @@ sched_choose(void)
 		runq_remove(rq, ts);
 		ts->ts_flags |= TSF_DIDRUN;

-		KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
-		    ("sched_choose: process swapped out"));
+		KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+		    ("sched_choose: thread swapped out"));
 		return (ts->ts_thread);
 	}
 	return (PCPU_GET(idlethread));
@@ -2287,8 +2287,8 @@ tdq_add(struct tdq *tdq, struct thread *td, int flags)
 	    ("sched_add: trying to run inhibited thread"));
 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
 	    ("sched_add: bad thread state"));
-	KASSERT(td->td_proc->p_sflag & PS_INMEM,
-	    ("sched_add: process swapped out"));
+	KASSERT(td->td_flags & TDF_INMEM,
+	    ("sched_add: thread swapped out"));

 	ts = td->td_sched;
 	class = PRI_BASE(td->td_pri_class);
@@ -400,7 +400,7 @@ kdb_thr_first(void)

 	p = LIST_FIRST(&allproc);
 	while (p != NULL) {
-		if (p->p_sflag & PS_INMEM) {
+		if (p->p_flag & P_INMEM) {
 			thr = FIRST_THREAD_IN_PROC(p);
 			if (thr != NULL)
 				return (thr);
@@ -417,7 +417,7 @@ kdb_thr_from_pid(pid_t pid)

 	p = LIST_FIRST(&allproc);
 	while (p != NULL) {
-		if (p->p_sflag & PS_INMEM && p->p_pid == pid)
+		if (p->p_flag & P_INMEM && p->p_pid == pid)
 			return (FIRST_THREAD_IN_PROC(p));
 		p = LIST_NEXT(p, p_list);
 	}
@@ -446,7 +446,7 @@ kdb_thr_next(struct thread *thr)
 		if (thr != NULL)
 			return (thr);
 		p = LIST_NEXT(p, p_list);
-		if (p != NULL && (p->p_sflag & PS_INMEM))
+		if (p != NULL && (p->p_flag & P_INMEM))
 			thr = FIRST_THREAD_IN_PROC(p);
 	} while (p != NULL);
 	return (NULL);
@@ -148,7 +148,6 @@ ast(struct trapframe *framep)
 {
 	struct thread *td;
 	struct proc *p;
-	int sflag;
 	int flags;
 	int sig;
 #if defined(DEV_NPX) && !defined(SMP)
@@ -174,25 +173,17 @@ ast(struct trapframe *framep)
 #endif

 	/*
-	 * This updates the p_sflag's for the checks below in one
+	 * This updates the td_flag's for the checks below in one
 	 * "atomic" operation with turning off the astpending flag.
 	 * If another AST is triggered while we are handling the
-	 * AST's saved in sflag, the astpending flag will be set and
+	 * AST's saved in flags, the astpending flag will be set and
 	 * ast() will be called again.
 	 */
-	PROC_SLOCK(p);
-	sflag = p->p_sflag;
-	if (p->p_sflag & (PS_ALRMPEND | PS_PROFPEND))
-		p->p_sflag &= ~(PS_ALRMPEND | PS_PROFPEND);
-#ifdef MAC
-	if (p->p_sflag & PS_MACPEND)
-		p->p_sflag &= ~PS_MACPEND;
-#endif
 	thread_lock(td);
-	PROC_SUNLOCK(p);
 	flags = td->td_flags;
 	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
-	    TDF_NEEDRESCHED | TDF_INTERRUPT);
+	    TDF_NEEDRESCHED | TDF_INTERRUPT | TDF_ALRMPEND | TDF_PROFPEND |
+	    TDF_MACPEND);
 	thread_unlock(td);
 	PCPU_INC(cnt.v_trap);

@@ -210,7 +201,7 @@ ast(struct trapframe *framep)
 		td->td_profil_ticks = 0;
 		td->td_pflags &= ~TDP_OWEUPC;
 	}
-	if (sflag & PS_ALRMPEND) {
+	if (flags & TDF_ALRMPEND) {
 		PROC_LOCK(p);
 		psignal(p, SIGVTALRM);
 		PROC_UNLOCK(p);
@@ -228,13 +219,13 @@ ast(struct trapframe *framep)
 		}
 	}
 #endif
-	if (sflag & PS_PROFPEND) {
+	if (flags & TDF_PROFPEND) {
 		PROC_LOCK(p);
 		psignal(p, SIGPROF);
 		PROC_UNLOCK(p);
 	}
 #ifdef MAC
-	if (sflag & PS_MACPEND)
+	if (flags & TDF_MACPEND)
 		mac_thread_userret(td);
 #endif
 	if (flags & TDF_NEEDRESCHED) {
@@ -104,7 +104,7 @@ struct ptrace_io_desc32 {
 	int error;	\
 	\
 	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);	\
-	if ((td->td_proc->p_sflag & PS_INMEM) == 0)	\
+	if ((td->td_proc->p_flag & P_INMEM) == 0)	\
 		error = EIO;	\
 	else	\
 		error = (action);	\
@@ -537,8 +537,7 @@ maybe_demote(struct mac_lomac *subjlabel, struct mac_lomac *objlabel,
 	subj->mac_lomac.ml_rangehigh = objlabel->ml_single;
 	subj->mac_lomac.ml_flags |= MAC_LOMAC_FLAG_UPDATE;
 	thread_lock(curthread);
-	curthread->td_flags |= TDF_ASTPENDING;
-	curthread->td_proc->p_sflag |= PS_MACPEND;
+	curthread->td_flags |= TDF_ASTPENDING | TDF_MACPEND;
 	thread_unlock(curthread);

 	/*
@@ -219,7 +219,6 @@ ASSYM(MD_UTRAP, offsetof(struct mdproc, md_utrap));
 ASSYM(P_COMM, offsetof(struct proc, p_comm));
 ASSYM(P_MD, offsetof(struct proc, p_md));
 ASSYM(P_PID, offsetof(struct proc, p_pid));
-ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
 ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));

 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
@@ -316,6 +316,7 @@ do { \
  */
 #define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */
 #define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */
+#define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */
 #define TDF_SINTR 0x00000008 /* Sleep is interruptible. */
 #define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */
 #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */
@@ -335,12 +336,15 @@ do { \
 #define TDF_UNUSED19 0x00080000 /* Thread is sleeping on a umtx. */
 #define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
 #define TDF_DBSUSPEND 0x00200000 /* Thread is suspended by debugger */
-#define TDF_UNUSED22 0x00400000 /* --available-- */
+#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
 #define TDF_UNUSED23 0x00800000 /* --available-- */
 #define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
 #define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
 #define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
 #define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
+#define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */
+#define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */
+#define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */

 /*
  * "Private" flags kept in td_pflags:
@@ -496,7 +500,6 @@ struct proc {
 	 * See the td_ or ke_ versions of the same flags.
 	 */
 	int p_flag;	/* (c) P_* flags. */
-	int p_sflag;	/* (j) PS_* flags. */
 	enum {
 		PRS_NEW = 0,	/* In creation */
 		PRS_NORMAL,	/* threads can be run. */
@@ -618,19 +621,13 @@ struct proc {
 #define P_JAILED 0x1000000 /* Process is in jail. */
 #define P_INEXEC 0x4000000 /* Process is in execve(). */
 #define P_STATCHILD 0x8000000 /* Child process stopped or exited. */
+#define P_INMEM 0x10000000 /* Loaded into memory. */
+#define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */
+#define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */

 #define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
 #define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED)

-/* These flags are kept in p_sflag and are protected with proc slock. */
-#define PS_INMEM 0x00001 /* Loaded into memory. */
-#define PS_ALRMPEND 0x00020 /* Pending SIGVTALRM needs to be posted. */
-#define PS_PROFPEND 0x00040 /* Pending SIGPROF needs to be posted. */
-#define PS_SWAPINREQ 0x00100 /* Swapin request due to wakeup. */
-#define PS_SWAPPINGOUT 0x00200 /* Process is being swapped out. */
-#define PS_SWAPPINGIN 0x04000 /* Process is being swapped in. */
-#define PS_MACPEND 0x08000 /* AST-based MAC event pending. */
-
 /*
  * These were process status values (p_stat), now they are only used in
  * legacy conversion code.
@@ -743,7 +740,7 @@ MALLOC_DECLARE(M_ZOMBIE);
 	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
 	    ("PHOLD of exiting process")); \
 	(p)->p_lock++; \
-	if (((p)->p_sflag & PS_INMEM) == 0) \
+	if (((p)->p_flag & P_INMEM) == 0) \
 		faultin((p)); \
 } while (0)
 #define PROC_ASSERT_HELD(p) do { \
@@ -207,6 +207,12 @@ void fill_kinfo_proc(struct proc *, struct kinfo_proc *);
 #define ki_childstime ki_rusage_ch.ru_stime
 #define ki_childutime ki_rusage_ch.ru_utime

+/*
+ * Legacy PS_ flag. This moved to p_flag but is maintained for
+ * compatibility.
+ */
+#define PS_INMEM 0x00001 /* Loaded into memory. */
+
 /* ki_sessflag values */
 #define KI_CTTY 0x00000001 /* controlling tty vnode active */
 #define KI_SLEADER 0x00000002 /* session leader */

sys/vm/vm_glue.c (154 changed lines)
@@ -112,7 +112,8 @@ static void scheduler(void *);
 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

 #ifndef NO_SWAPPING
-static void swapout(struct proc *);
+static int swapout(struct proc *);
+static void swapclear(struct proc *);
 #endif

@@ -601,7 +602,7 @@ faultin(p)
 #ifdef NO_SWAPPING

 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	if ((p->p_sflag & PS_INMEM) == 0)
+	if ((p->p_flag & P_INMEM) == 0)
 		panic("faultin: proc swapped out with NO_SWAPPING!");
 #else /* !NO_SWAPPING */
 	struct thread *td;
@@ -611,36 +612,34 @@ faultin(p)
 	 * If another process is swapping in this process,
 	 * just wait until it finishes.
 	 */
-	if (p->p_sflag & PS_SWAPPINGIN)
-		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
-	else if ((p->p_sflag & PS_INMEM) == 0) {
+	if (p->p_flag & P_SWAPPINGIN) {
+		while (p->p_flag & P_SWAPPINGIN)
+			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
+		return;
+	}
+	if ((p->p_flag & P_INMEM) == 0) {
 		/*
 		 * Don't let another thread swap process p out while we are
 		 * busy swapping it in.
 		 */
 		++p->p_lock;
-		PROC_SLOCK(p);
-		p->p_sflag |= PS_SWAPPINGIN;
-		PROC_SUNLOCK(p);
+		p->p_flag |= P_SWAPPINGIN;
 		PROC_UNLOCK(p);

 		/*
 		 * We hold no lock here because the list of threads
 		 * can not change while all threads in the process are
 		 * swapped out.
 		 */
 		FOREACH_THREAD_IN_PROC(p, td)
 			vm_thread_swapin(td);

 		PROC_LOCK(p);
 		PROC_SLOCK(p);
-		p->p_sflag &= ~PS_SWAPPINGIN;
-		p->p_sflag |= PS_INMEM;
-		FOREACH_THREAD_IN_PROC(p, td) {
-			thread_lock(td);
-			TD_CLR_SWAPPED(td);
-			if (TD_CAN_RUN(td))
-				setrunnable(td);
-			thread_unlock(td);
-		}
+		swapclear(p);
 		p->p_swtime = 0;
 		PROC_SUNLOCK(p);

-		wakeup(&p->p_sflag);
+		wakeup(&p->p_flag);

 		/* Allow other threads to swap p out now. */
 		--p->p_lock;
@@ -684,7 +683,9 @@ loop:
 	ppri = INT_MIN;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
-		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
+		PROC_LOCK(p);
+		if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
+			PROC_UNLOCK(p);
 			continue;
 		}
 		PROC_SLOCK(p);
@@ -697,10 +698,8 @@ loop:
 			thread_lock(td);
 			if (td->td_inhibitors == TDI_SWAPPED) {
 				pri = p->p_swtime + td->td_slptime;
-				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
+				if ((td->td_flags & TDF_SWAPINREQ) == 0)
 					pri -= p->p_nice * 8;
-				}

 				/*
 				 * if this thread is higher priority
 				 * and there is enough space, then select
@@ -715,6 +714,7 @@ loop:
 			thread_unlock(td);
 		}
 		PROC_SUNLOCK(p);
+		PROC_UNLOCK(p);
 	}
 	sx_sunlock(&allproc_lock);

@@ -738,7 +738,7 @@ loop:
 	 * brought this process in while we traverse all threads.
 	 * Or, this process may even be being swapped out again.
 	 */
-	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
+	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
 		PROC_UNLOCK(p);
 		thread_lock(&thread0);
 		proc0_rescan = 0;
@@ -746,19 +746,12 @@ loop:
 		goto loop;
 	}

-	PROC_SLOCK(p);
-	p->p_sflag &= ~PS_SWAPINREQ;
-	PROC_SUNLOCK(p);
-
 	/*
 	 * We would like to bring someone in. (only if there is space).
 	 * [What checks the space? ]
 	 */
 	faultin(p);
 	PROC_UNLOCK(p);
-	PROC_SLOCK(p);
-	p->p_swtime = 0;
-	PROC_SUNLOCK(p);
 	thread_lock(&thread0);
 	proc0_rescan = 0;
 	thread_unlock(&thread0);
@@ -804,7 +797,7 @@ SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,

 /*
  * Swapout is driven by the pageout daemon. Very simple, we find eligible
- * procs and unwire their u-areas. We try to always "swap" at least one
+ * procs and swap out their stacks. We try to always "swap" at least one
  * process in case we need the room for a swapin.
  * If any procs have been sleeping/stopped for at least maxslp seconds,
  * they are swapped. Else, we swap the longest-sleeping or stopped process,
@@ -829,13 +822,8 @@ retry:
 		 * creation. It may have no
 		 * address space or lock yet.
 		 */
-		PROC_SLOCK(p);
-		if (p->p_state == PRS_NEW) {
-			PROC_SUNLOCK(p);
+		if (p->p_state == PRS_NEW)
 			continue;
-		}
-		PROC_SUNLOCK(p);

 		/*
 		 * An aio daemon switches its
 		 * address space while running.
@@ -844,7 +832,6 @@ retry:
 		 */
 		if ((p->p_flag & P_SYSTEM) != 0)
 			continue;

 		/*
 		 * Do not swapout a process that
 		 * is waiting for VM data
@@ -874,7 +861,7 @@ retry:
 		 * skipped because of the if statement above checking
 		 * for P_SYSTEM
 		 */
-		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
+		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
 			goto nextproc2;

 		switch (p->p_state) {
@@ -890,15 +877,20 @@ retry:
 		 * Check all the thread groups..
 		 */
 		FOREACH_THREAD_IN_PROC(p, td) {
-			if (PRI_IS_REALTIME(td->td_pri_class))
+			thread_lock(td);
+			if (PRI_IS_REALTIME(td->td_pri_class)) {
+				thread_unlock(td);
 				goto nextproc;
+			}

 			/*
 			 * Guarantee swap_idle_threshold1
 			 * time in memory.
 			 */
-			if (td->td_slptime < swap_idle_threshold1)
+			if (td->td_slptime < swap_idle_threshold1) {
+				thread_unlock(td);
 				goto nextproc;
+			}

 			/*
 			 * Do not swapout a process if it is
@@ -910,8 +902,10 @@ retry:
 			 * swapping out a thread.
 			 */
 			if ((td->td_priority) < PSOCK ||
-			    !thread_safetoswapout(td))
+			    !thread_safetoswapout(td)) {
+				thread_unlock(td);
 				goto nextproc;
+			}
 			/*
 			 * If the system is under memory stress,
 			 * or if we are swapping
@@ -920,11 +914,14 @@ retry:
 			 */
 			if (((action & VM_SWAP_NORMAL) == 0) &&
 			    (((action & VM_SWAP_IDLE) == 0) ||
-			    (td->td_slptime < swap_idle_threshold2)))
+			    (td->td_slptime < swap_idle_threshold2))) {
+				thread_unlock(td);
 				goto nextproc;
+			}

 			if (minslptime > td->td_slptime)
 				minslptime = td->td_slptime;
+			thread_unlock(td);
 		}

 		/*
@@ -935,8 +932,8 @@ retry:
 		if ((action & VM_SWAP_NORMAL) ||
 		    ((action & VM_SWAP_IDLE) &&
 		    (minslptime > swap_idle_threshold2))) {
-			swapout(p);
-			didswap++;
+			if (swapout(p) == 0)
+				didswap++;
 			PROC_SUNLOCK(p);
 			PROC_UNLOCK(p);
 			vm_map_unlock(&vm->vm_map);
@@ -964,13 +961,35 @@ nextproc1:
 }

 static void
+swapclear(p)
+	struct proc *p;
+{
+	struct thread *td;
+
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+	PROC_SLOCK_ASSERT(p, MA_OWNED);
+
+	FOREACH_THREAD_IN_PROC(p, td) {
+		thread_lock(td);
+		td->td_flags |= TDF_INMEM;
+		td->td_flags &= ~TDF_SWAPINREQ;
+		TD_CLR_SWAPPED(td);
+		if (TD_CAN_RUN(td))
+			setrunnable(td);
+		thread_unlock(td);
+	}
+	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
+	p->p_flag |= P_INMEM;
+}
+
+static int
 swapout(p)
 	struct proc *p;
 {
 	struct thread *td;

 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	mtx_assert(&p->p_slock, MA_OWNED | MA_NOTRECURSED);
+	PROC_SLOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
 #if defined(SWAP_DEBUG)
 	printf("swapping out %d\n", p->p_pid);
 #endif
@@ -980,43 +999,46 @@ swapout(p)
 	 * by now. Assuming that there is only one pageout daemon thread,
 	 * this process should still be in memory.
 	 */
-	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
+	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
 	    ("swapout: lost a swapout race?"));

-#if defined(INVARIANTS)
-	/*
-	 * Make sure that all threads are safe to be swapped out.
-	 *
-	 * Alternatively, we could swap out only safe threads.
-	 */
-	FOREACH_THREAD_IN_PROC(p, td) {
-		KASSERT(thread_safetoswapout(td),
-		    ("swapout: there is a thread not safe for swapout"));
-	}
-#endif /* INVARIANTS */
-	td = FIRST_THREAD_IN_PROC(p);
-	++td->td_ru.ru_nswap;
 	/*
 	 * remember the process resident count
 	 */
 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

-	p->p_sflag &= ~PS_INMEM;
-	p->p_sflag |= PS_SWAPPINGOUT;
-	PROC_UNLOCK(p);
+	/*
+	 * Check and mark all threads before we proceed.
+	 */
+	p->p_flag &= ~P_INMEM;
+	p->p_flag |= P_SWAPPINGOUT;
 	FOREACH_THREAD_IN_PROC(p, td) {
 		thread_lock(td);
+		if (!thread_safetoswapout(td)) {
+			thread_unlock(td);
+			swapclear(p);
+			return (EBUSY);
+		}
+		td->td_flags &= ~TDF_INMEM;
 		TD_SET_SWAPPED(td);
 		thread_unlock(td);
 	}
+	td = FIRST_THREAD_IN_PROC(p);
+	++td->td_ru.ru_nswap;
 	PROC_SUNLOCK(p);
+	PROC_UNLOCK(p);

 	/*
 	 * This list is stable because all threads are now prevented from
 	 * running. The list is only modified in the context of a running
 	 * thread in this process.
 	 */
 	FOREACH_THREAD_IN_PROC(p, td)
 		vm_thread_swapout(td);

 	PROC_LOCK(p);
+	p->p_flag &= ~P_SWAPPINGOUT;
 	PROC_SLOCK(p);
-	p->p_sflag &= ~PS_SWAPPINGOUT;
 	p->p_swtime = 0;
+	return (0);
 }
 #endif /* !NO_SWAPPING */
@@ -1620,7 +1620,7 @@ vm_daemon()
 		 * swapped out set the limit to nothing (will force a
 		 * swap-out.)
 		 */
-		if ((p->p_sflag & PS_INMEM) == 0)
+		if ((p->p_flag & P_INMEM) == 0)
 			limit = 0; /* XXX */
 		PROC_UNLOCK(p);

@@ -174,7 +174,7 @@ fetchpigs()
 		pt[i].pt_kp = &kpp[i];
 		pctp = &pt[i].pt_pctcpu;
 		ftime = kpp[i].ki_swtime;
-		if (ftime == 0 || (kpp[i].ki_sflag & PS_INMEM) == 0)
+		if (ftime == 0 || (kpp[i].ki_flag & P_INMEM) == 0)
 			*pctp = 0;
 		else
 			*pctp = ((double) kpp[i].ki_pctcpu /
@@ -675,7 +675,7 @@ format_next_process(caddr_t handle, char *(*get_userid)(int), int flags)
 	hp->remaining--;

 	/* get the process's command name */
-	if ((pp->ki_sflag & PS_INMEM) == 0) {
+	if ((pp->ki_flag & P_INMEM) == 0) {
 		/*
 		 * Print swapped processes as <pname>
 		 */