The process spin lock currently has the following distinct uses:
- Threads lifetime cycle: in particular, counting of the threads in the process, and interlocking with the process mutex and thread lock. The main reason for this is that turnstile locks come after thread locks, so you cannot, e.g., unlock a blockable mutex (think process mutex) while owning a thread lock.
- Virtual and profiling itimers, since timer activation is done from the clock interrupt context. Replace the p_slock by p_itimmtx and PROC_ITIMLOCK().
- Profiling code (profil(2)), for a similar reason. Replace the p_slock by p_profmtx and PROC_PROFLOCK().
- Resource usage accounting. The need for the spinlock there is subtle; my understanding is that the spinlock blocks context switching for the current thread, which prevents td_runtime and similar fields from changing (updates are done at mi_switch()). Replace the p_slock by p_statmtx and PROC_STATLOCK().

The split is done mostly for code clarity, and should not affect scalability.

Tested by: pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
This commit is contained in:
parent
4501dadd00
commit
11cee2ecf7
@ -690,9 +690,9 @@ linux_times(struct thread *td, struct linux_times_args *args)
|
||||
if (args->buf != NULL) {
|
||||
p = td->td_proc;
|
||||
PROC_LOCK(p);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &utime, &stime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
calccru(p, &cutime, &cstime);
|
||||
PROC_UNLOCK(p);
|
||||
|
||||
|
@ -864,9 +864,9 @@ svr4_sys_times(td, uap)
|
||||
|
||||
p = td->td_proc;
|
||||
PROC_LOCK(p);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &utime, &stime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
calccru(p, &cutime, &cstime);
|
||||
PROC_UNLOCK(p);
|
||||
|
||||
@ -1277,9 +1277,9 @@ svr4_sys_waitsys(td, uap)
|
||||
pid = p->p_pid;
|
||||
status = p->p_xstat;
|
||||
ru = p->p_ru;
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &ru.ru_utime, &ru.ru_stime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
sx_sunlock(&proctree_lock);
|
||||
|
||||
@ -1304,9 +1304,9 @@ svr4_sys_waitsys(td, uap)
|
||||
pid = p->p_pid;
|
||||
status = W_STOPCODE(p->p_xstat);
|
||||
ru = p->p_ru;
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &ru.ru_utime, &ru.ru_stime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
|
||||
if (((uap->options & SVR4_WNOWAIT)) == 0) {
|
||||
@ -1328,9 +1328,9 @@ svr4_sys_waitsys(td, uap)
|
||||
pid = p->p_pid;
|
||||
ru = p->p_ru;
|
||||
status = SIGCONT;
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &ru.ru_utime, &ru.ru_stime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
|
||||
if (((uap->options & SVR4_WNOWAIT)) == 0) {
|
||||
|
@ -125,9 +125,9 @@ procfs_doprocstatus(PFS_FILL_ARGS)
|
||||
if (p->p_flag & P_INMEM) {
|
||||
struct timeval start, ut, st;
|
||||
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &ut, &st);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
start = p->p_stats->p_start;
|
||||
timevaladd(&start, &boottime);
|
||||
sbuf_printf(sb, " %jd,%ld %jd,%ld %jd,%ld",
|
||||
|
@ -603,9 +603,9 @@ proc0_post(void *dummy __unused)
|
||||
sx_slock(&allproc_lock);
|
||||
FOREACH_PROC_IN_SYSTEM(p) {
|
||||
microuptime(&p->p_stats->p_start);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
rufetch(p, &ru); /* Clears thread stats */
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
p->p_rux.rux_runtime = 0;
|
||||
p->p_rux.rux_uticks = 0;
|
||||
p->p_rux.rux_sticks = 0;
|
||||
|
@ -432,16 +432,16 @@ hardclock_cpu(int usermode)
|
||||
flags = 0;
|
||||
if (usermode &&
|
||||
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
|
||||
PROC_SLOCK(p);
|
||||
PROC_ITIMLOCK(p);
|
||||
if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
|
||||
flags |= TDF_ALRMPEND | TDF_ASTPENDING;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
|
||||
PROC_SLOCK(p);
|
||||
PROC_ITIMLOCK(p);
|
||||
if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
|
||||
flags |= TDF_PROFPEND | TDF_ASTPENDING;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
thread_lock(td);
|
||||
sched_tick(1);
|
||||
@ -520,18 +520,18 @@ hardclock_cnt(int cnt, int usermode)
|
||||
flags = 0;
|
||||
if (usermode &&
|
||||
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
|
||||
PROC_SLOCK(p);
|
||||
PROC_ITIMLOCK(p);
|
||||
if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
|
||||
tick * cnt) == 0)
|
||||
flags |= TDF_ALRMPEND | TDF_ASTPENDING;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
|
||||
PROC_SLOCK(p);
|
||||
PROC_ITIMLOCK(p);
|
||||
if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
|
||||
tick * cnt) == 0)
|
||||
flags |= TDF_PROFPEND | TDF_ASTPENDING;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
thread_lock(td);
|
||||
sched_tick(cnt);
|
||||
|
@ -614,7 +614,9 @@ exit1(struct thread *td, int rv)
|
||||
/*
|
||||
* Save our children's rusage information in our exit rusage.
|
||||
*/
|
||||
PROC_STATLOCK(p);
|
||||
ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
|
||||
PROC_STATUNLOCK(p);
|
||||
|
||||
/*
|
||||
* Make sure the scheduler takes this thread out of its tables etc.
|
||||
@ -990,8 +992,6 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
|
||||
return (0);
|
||||
}
|
||||
|
||||
PROC_SLOCK(p);
|
||||
|
||||
if (siginfo != NULL) {
|
||||
bzero(siginfo, sizeof(*siginfo));
|
||||
siginfo->si_errno = 0;
|
||||
@ -1038,7 +1038,9 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
|
||||
if (wrusage != NULL) {
|
||||
rup = &wrusage->wru_self;
|
||||
*rup = p->p_ru;
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &rup->ru_utime, &rup->ru_stime);
|
||||
PROC_STATUNLOCK(p);
|
||||
|
||||
rup = &wrusage->wru_children;
|
||||
*rup = p->p_stats->p_cru;
|
||||
@ -1046,10 +1048,10 @@ proc_to_reap(struct thread *td, struct proc *p, idtype_t idtype, id_t id,
|
||||
}
|
||||
|
||||
if (p->p_state == PRS_ZOMBIE) {
|
||||
PROC_SLOCK(p);
|
||||
proc_reap(td, p, status, options);
|
||||
return (-1);
|
||||
}
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
return (1);
|
||||
}
|
||||
|
@ -969,6 +969,9 @@ mutex_init(void)
|
||||
blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */
|
||||
mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
|
||||
mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
|
||||
mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
|
||||
mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
|
||||
mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
|
||||
mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
|
||||
mtx_lock(&Giant);
|
||||
}
|
||||
|
@ -228,6 +228,9 @@ proc_init(void *mem, int size, int flags)
|
||||
bzero(&p->p_mtx, sizeof(struct mtx));
|
||||
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
|
||||
mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
|
||||
mtx_init(&p->p_statmtx, "pstatl", NULL, MTX_SPIN);
|
||||
mtx_init(&p->p_itimmtx, "pitiml", NULL, MTX_SPIN);
|
||||
mtx_init(&p->p_profmtx, "pprofl", NULL, MTX_SPIN);
|
||||
cv_init(&p->p_pwait, "ppwait");
|
||||
cv_init(&p->p_dbgwait, "dbgwait");
|
||||
TAILQ_INIT(&p->p_threads); /* all threads in proc */
|
||||
@ -872,11 +875,11 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
|
||||
kp->ki_fibnum = p->p_fibnum;
|
||||
kp->ki_start = p->p_stats->p_start;
|
||||
timevaladd(&kp->ki_start, &boottime);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
rufetch(p, &kp->ki_rusage);
|
||||
kp->ki_runtime = cputick2usec(p->p_rux.rux_runtime);
|
||||
calcru(p, &kp->ki_rusage.ru_utime, &kp->ki_rusage.ru_stime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
calccru(p, &kp->ki_childutime, &kp->ki_childstime);
|
||||
/* Some callers want child times in a single value. */
|
||||
kp->ki_childtime = kp->ki_childstime;
|
||||
@ -944,7 +947,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
|
||||
PROC_LOCK_ASSERT(p, MA_OWNED);
|
||||
|
||||
if (preferthread)
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
thread_lock(td);
|
||||
if (td->td_wmesg != NULL)
|
||||
strlcpy(kp->ki_wmesg, td->td_wmesg, sizeof(kp->ki_wmesg));
|
||||
@ -1030,7 +1033,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
|
||||
kp->ki_sigmask = td->td_sigmask;
|
||||
thread_unlock(td);
|
||||
if (preferthread)
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1129,11 +1129,11 @@ racctd(void)
|
||||
|
||||
microuptime(&wallclock);
|
||||
timevalsub(&wallclock, &p->p_stats->p_start);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
FOREACH_THREAD_IN_PROC(p, td)
|
||||
ruxagg(p, td);
|
||||
runtime = cputick2usec(p->p_rux.rux_runtime);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
#ifdef notyet
|
||||
KASSERT(runtime >= p->p_prev_runtime,
|
||||
("runtime < p_prev_runtime"));
|
||||
|
@ -619,11 +619,11 @@ lim_cb(void *arg)
|
||||
*/
|
||||
if (p->p_cpulimit == RLIM_INFINITY)
|
||||
return;
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
FOREACH_THREAD_IN_PROC(p, td) {
|
||||
ruxagg(p, td);
|
||||
}
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
|
||||
lim_rlimit(p, RLIMIT_CPU, &rlim);
|
||||
if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
|
||||
@ -825,7 +825,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp)
|
||||
uint64_t runtime, u;
|
||||
|
||||
PROC_LOCK_ASSERT(p, MA_OWNED);
|
||||
PROC_SLOCK_ASSERT(p, MA_OWNED);
|
||||
PROC_STATLOCK_ASSERT(p, MA_OWNED);
|
||||
/*
|
||||
* If we are getting stats for the current process, then add in the
|
||||
* stats that this thread has accumulated in its current time slice.
|
||||
@ -857,7 +857,7 @@ rufetchtd(struct thread *td, struct rusage *ru)
|
||||
uint64_t runtime, u;
|
||||
|
||||
p = td->td_proc;
|
||||
PROC_SLOCK_ASSERT(p, MA_OWNED);
|
||||
PROC_STATLOCK_ASSERT(p, MA_OWNED);
|
||||
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
||||
/*
|
||||
* If we are getting stats for the current thread, then add in the
|
||||
@ -991,11 +991,11 @@ kern_getrusage(struct thread *td, int who, struct rusage *rup)
|
||||
break;
|
||||
|
||||
case RUSAGE_THREAD:
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
thread_lock(td);
|
||||
rufetchtd(td, rup);
|
||||
thread_unlock(td);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -1042,7 +1042,7 @@ ruxagg_locked(struct rusage_ext *rux, struct thread *td)
|
||||
{
|
||||
|
||||
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
||||
PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
|
||||
PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
|
||||
rux->rux_runtime += td->td_incruntime;
|
||||
rux->rux_uticks += td->td_uticks;
|
||||
rux->rux_sticks += td->td_sticks;
|
||||
@ -1072,7 +1072,7 @@ rufetch(struct proc *p, struct rusage *ru)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
PROC_SLOCK_ASSERT(p, MA_OWNED);
|
||||
PROC_STATLOCK_ASSERT(p, MA_OWNED);
|
||||
|
||||
*ru = p->p_ru;
|
||||
if (p->p_numthreads > 0) {
|
||||
@ -1093,10 +1093,10 @@ rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
|
||||
struct timeval *sp)
|
||||
{
|
||||
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
rufetch(p, ru);
|
||||
calcru(p, up, sp);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -470,6 +470,9 @@ thread_exit(void)
|
||||
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
|
||||
#endif
|
||||
PROC_UNLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
thread_lock(td);
|
||||
PROC_SUNLOCK(p);
|
||||
|
||||
/* Do the same timestamp bookkeeping that mi_switch() would do. */
|
||||
new_switchtime = cpu_ticks();
|
||||
@ -484,9 +487,8 @@ thread_exit(void)
|
||||
td->td_ru.ru_nvcsw++;
|
||||
ruxagg(p, td);
|
||||
rucollect(&p->p_ru, &td->td_ru);
|
||||
PROC_STATUNLOCK(p);
|
||||
|
||||
thread_lock(td);
|
||||
PROC_SUNLOCK(p);
|
||||
td->td_state = TDS_INACTIVE;
|
||||
#ifdef WITNESS
|
||||
witness_thread_exit(td);
|
||||
|
@ -276,10 +276,10 @@ get_process_cputime(struct proc *targetp, struct timespec *ats)
|
||||
uint64_t runtime;
|
||||
struct rusage ru;
|
||||
|
||||
PROC_SLOCK(targetp);
|
||||
PROC_STATLOCK(targetp);
|
||||
rufetch(targetp, &ru);
|
||||
runtime = targetp->p_rux.rux_runtime;
|
||||
PROC_SUNLOCK(targetp);
|
||||
PROC_STATUNLOCK(targetp);
|
||||
cputick2timespec(runtime, ats);
|
||||
}
|
||||
|
||||
@ -328,17 +328,17 @@ kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
|
||||
break;
|
||||
case CLOCK_VIRTUAL:
|
||||
PROC_LOCK(p);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &user, &sys);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
TIMEVAL_TO_TIMESPEC(&user, ats);
|
||||
break;
|
||||
case CLOCK_PROF:
|
||||
PROC_LOCK(p);
|
||||
PROC_SLOCK(p);
|
||||
PROC_STATLOCK(p);
|
||||
calcru(p, &user, &sys);
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_STATUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
timevaladd(&user, &sys);
|
||||
TIMEVAL_TO_TIMESPEC(&user, ats);
|
||||
@ -698,9 +698,9 @@ kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
|
||||
timevalsub(&aitv->it_value, &ctv);
|
||||
}
|
||||
} else {
|
||||
PROC_SLOCK(p);
|
||||
PROC_ITIMLOCK(p);
|
||||
*aitv = p->p_stats->p_timer[which];
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
@ -782,10 +782,10 @@ kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
|
||||
aitv->it_value.tv_usec != 0 &&
|
||||
aitv->it_value.tv_usec < tick)
|
||||
aitv->it_value.tv_usec = tick;
|
||||
PROC_SLOCK(p);
|
||||
PROC_ITIMLOCK(p);
|
||||
*oitv = p->p_stats->p_timer[which];
|
||||
p->p_stats->p_timer[which] = *aitv;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
@ -421,12 +421,12 @@ sys_profil(struct thread *td, struct profil_args *uap)
|
||||
}
|
||||
PROC_LOCK(p);
|
||||
upp = &td->td_proc->p_stats->p_prof;
|
||||
PROC_SLOCK(p);
|
||||
PROC_PROFLOCK(p);
|
||||
upp->pr_off = uap->offset;
|
||||
upp->pr_scale = uap->scale;
|
||||
upp->pr_base = uap->samples;
|
||||
upp->pr_size = uap->size;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_PROFUNLOCK(p);
|
||||
startprofclock(p);
|
||||
PROC_UNLOCK(p);
|
||||
|
||||
@ -466,15 +466,15 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
|
||||
if (ticks == 0)
|
||||
return;
|
||||
prof = &td->td_proc->p_stats->p_prof;
|
||||
PROC_SLOCK(td->td_proc);
|
||||
PROC_PROFLOCK(td->td_proc);
|
||||
if (pc < prof->pr_off ||
|
||||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
|
||||
PROC_SUNLOCK(td->td_proc);
|
||||
PROC_PROFUNLOCK(td->td_proc);
|
||||
return; /* out of range; ignore */
|
||||
}
|
||||
|
||||
addr = prof->pr_base + i;
|
||||
PROC_SUNLOCK(td->td_proc);
|
||||
PROC_PROFUNLOCK(td->td_proc);
|
||||
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
|
||||
td->td_profil_addr = pc;
|
||||
td->td_profil_ticks = ticks;
|
||||
@ -509,15 +509,15 @@ addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
|
||||
}
|
||||
p->p_profthreads++;
|
||||
prof = &p->p_stats->p_prof;
|
||||
PROC_SLOCK(p);
|
||||
PROC_PROFLOCK(p);
|
||||
if (pc < prof->pr_off ||
|
||||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_PROFUNLOCK(p);
|
||||
goto out;
|
||||
}
|
||||
|
||||
addr = prof->pr_base + i;
|
||||
PROC_SUNLOCK(p);
|
||||
PROC_PROFUNLOCK(p);
|
||||
PROC_UNLOCK(p);
|
||||
if (copyin(addr, &v, sizeof(v)) == 0) {
|
||||
v += ticks;
|
||||
|
@ -148,6 +148,8 @@ struct pargs {
|
||||
* q - td_contested lock
|
||||
* r - p_peers lock
|
||||
* t - thread lock
|
||||
* u - process stat lock
|
||||
* w - process timer lock
|
||||
* x - created at fork, only changes during single threading in exec
|
||||
* y - created at first aio, doesn't change until exit or exec at which
|
||||
* point we are single-threaded and only curthread changes it
|
||||
@ -183,14 +185,14 @@ struct turnstile;
|
||||
* userland asks for rusage info. Backwards compatibility prevents putting
|
||||
* this directly in the user-visible rusage struct.
|
||||
*
|
||||
* Locking for p_rux: (cj) means (j) for p_rux and (c) for p_crux.
|
||||
* Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux.
|
||||
* Locking for td_rux: (t) for all fields.
|
||||
*/
|
||||
struct rusage_ext {
|
||||
uint64_t rux_runtime; /* (cj) Real time. */
|
||||
uint64_t rux_uticks; /* (cj) Statclock hits in user mode. */
|
||||
uint64_t rux_sticks; /* (cj) Statclock hits in sys mode. */
|
||||
uint64_t rux_iticks; /* (cj) Statclock hits in intr mode. */
|
||||
uint64_t rux_runtime; /* (cu) Real time. */
|
||||
uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */
|
||||
uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */
|
||||
uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */
|
||||
uint64_t rux_uu; /* (c) Previous user time in usec. */
|
||||
uint64_t rux_su; /* (c) Previous sys time in usec. */
|
||||
uint64_t rux_tu; /* (c) Previous total time in usec. */
|
||||
@ -512,6 +514,9 @@ struct proc {
|
||||
LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */
|
||||
LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */
|
||||
struct mtx p_mtx; /* (n) Lock for this struct. */
|
||||
struct mtx p_statmtx; /* Lock for the stats */
|
||||
struct mtx p_itimmtx; /* Lock for the virt/prof timers */
|
||||
struct mtx p_profmtx; /* Lock for the profiling */
|
||||
struct ksiginfo *p_ksi; /* Locked by parent proc lock */
|
||||
sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */
|
||||
#define p_siglist p_sigqueue.sq_signals
|
||||
@ -523,7 +528,7 @@ struct proc {
|
||||
u_int p_swtick; /* (c) Tick when swapped in or out. */
|
||||
struct itimerval p_realtimer; /* (c) Alarm timer. */
|
||||
struct rusage p_ru; /* (a) Exit information. */
|
||||
struct rusage_ext p_rux; /* (cj) Internal resource usage. */
|
||||
struct rusage_ext p_rux; /* (cu) Internal resource usage. */
|
||||
struct rusage_ext p_crux; /* (c) Internal child resource usage. */
|
||||
int p_profthreads; /* (c) Num threads in addupc_task. */
|
||||
volatile int p_exitthreads; /* (j) Number of threads exiting */
|
||||
@ -609,6 +614,18 @@ struct proc {
|
||||
#define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock)
|
||||
#define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type))
|
||||
|
||||
#define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx)
|
||||
#define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx)
|
||||
#define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type))
|
||||
|
||||
#define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx)
|
||||
#define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx)
|
||||
#define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type))
|
||||
|
||||
#define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx)
|
||||
#define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx)
|
||||
#define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type))
|
||||
|
||||
/* These flags are kept in p_flag. */
|
||||
#define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */
|
||||
#define P_CONTROLT 0x00002 /* Has a controlling terminal. */
|
||||
|
@ -47,21 +47,22 @@
|
||||
* Locking key:
|
||||
* b - created at fork, never changes
|
||||
* c - locked by proc mtx
|
||||
* j - locked by proc slock
|
||||
* k - only accessed by curthread
|
||||
* w - locked by proc itim lock
|
||||
* w2 - locked by proc prof lock
|
||||
*/
|
||||
struct pstats {
|
||||
#define pstat_startzero p_cru
|
||||
struct rusage p_cru; /* Stats for reaped children. */
|
||||
struct itimerval p_timer[3]; /* (j) Virtual-time timers. */
|
||||
struct itimerval p_timer[3]; /* (w) Virtual-time timers. */
|
||||
#define pstat_endzero pstat_startcopy
|
||||
|
||||
#define pstat_startcopy p_prof
|
||||
struct uprof { /* Profile arguments. */
|
||||
caddr_t pr_base; /* (c + j) Buffer base. */
|
||||
u_long pr_size; /* (c + j) Buffer size. */
|
||||
u_long pr_off; /* (c + j) PC offset. */
|
||||
u_long pr_scale; /* (c + j) PC scaling. */
|
||||
caddr_t pr_base; /* (c + w2) Buffer base. */
|
||||
u_long pr_size; /* (c + w2) Buffer size. */
|
||||
u_long pr_off; /* (c + w2) PC offset. */
|
||||
u_long pr_scale; /* (c + w2) PC scaling. */
|
||||
} p_prof;
|
||||
#define pstat_endcopy p_start
|
||||
struct timeval p_start; /* (b) Starting time. */
|
||||
|
Loading…
Reference in New Issue
Block a user