Tidy up uprof locking.  Mostly the fields are protected by both the proc
lock and sched_lock so they can be read with either lock held.  Document
the locking as well.  The one remaining bogosity is that pr_addr and
pr_ticks should be per-thread but profiling of multithreaded apps is
currently undefined.
John Baldwin 2004-07-02 03:50:48 +00:00
parent 16f9f20579
commit a3a7017895
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=131437
3 changed files with 41 additions and 29 deletions
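
The locking rule described in the commit message (the uprof fields are written only with both the proc lock and sched_lock held, so a reader holding either lock sees a consistent snapshot) can be pictured with a small userland analogue, using two pthread mutexes in place of the kernel locks. Everything below is an illustration; the names are invented and none of it is part of the commit.

/*
 * Userland analogue of "write with both locks, read with either".
 * proc_lock and sched_lock here are ordinary pthread mutexes, not the
 * kernel objects.  Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;	/* "c" */
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;	/* "j" */

static struct {
	unsigned long pr_off;		/* (c + j) */
	unsigned long pr_scale;		/* (c + j) */
} prof;

/* Writer: must hold BOTH locks while the fields are inconsistent. */
static void
prof_set(unsigned long off, unsigned long scale)
{
	pthread_mutex_lock(&proc_lock);
	pthread_mutex_lock(&sched_lock);
	prof.pr_off = off;
	prof.pr_scale = scale;
	pthread_mutex_unlock(&sched_lock);
	pthread_mutex_unlock(&proc_lock);
}

/* Reader: either lock alone is enough; this one uses only "sched_lock". */
static unsigned long
prof_scale_of(unsigned long pc)
{
	unsigned long r;

	pthread_mutex_lock(&sched_lock);
	r = (pc - prof.pr_off) * prof.pr_scale;
	pthread_mutex_unlock(&sched_lock);
	return (r);
}

int
main(void)
{
	prof_set(0x1000, 2);
	printf("%lu\n", prof_scale_of(0x1010));
	return (0);
}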

sys/kern/subr_prof.c

@@ -419,17 +419,19 @@ profil(td, uap)
 	p = td->td_proc;
 	if (uap->scale == 0) {
-		PROC_LOCK(td->td_proc);
-		stopprofclock(td->td_proc);
-		PROC_UNLOCK(td->td_proc);
+		PROC_LOCK(p);
+		stopprofclock(p);
+		PROC_UNLOCK(p);
 		return (0);
 	}
+	PROC_LOCK(p);
 	upp = &td->td_proc->p_stats->p_prof;
 	mtx_lock_spin(&sched_lock);
 	upp->pr_off = uap->offset;
 	upp->pr_scale = uap->scale;
 	upp->pr_base = uap->samples;
 	upp->pr_size = uap->size;
-	PROC_LOCK(p);
 	mtx_unlock_spin(&sched_lock);
 	startprofclock(p);
 	PROC_UNLOCK(p);
@@ -469,16 +471,20 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
 	if (ticks == 0)
 		return;
 	prof = &td->td_proc->p_stats->p_prof;
+	mtx_lock_spin(&sched_lock);
 	if (pc < prof->pr_off ||
-	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
+	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
+		mtx_unlock_spin(&sched_lock);
 		return;			/* out of range; ignore */
+	}
 	addr = prof->pr_base + i;
+	mtx_unlock_spin(&sched_lock);
 	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
-		mtx_lock_spin(&sched_lock);
 		prof->pr_addr = pc;
 		prof->pr_ticks = ticks;
-		td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING ;
+		mtx_lock_spin(&sched_lock);
+		td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
+		mtx_unlock_spin(&sched_lock);
 	}
 }
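
addupc_intr() above uses a defer-to-AST shape: the statclock-level path makes one fault-free attempt (fuswintr()/suswintr()) to bump the sample bucket, and if that fails it only records pc and ticks and raises TDF_OWEUPC so addupc_task() can redo the work later from a context that may fault. A rough userland sketch of that shape follows; intr_add(), ast_drain() and the owed flag are invented names for the illustration, not kernel interfaces.

/*
 * "Try cheaply now, otherwise remember the work and finish it at the
 * next safe point."  Illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned short buckets[64];
static bool fast_path_ok;		/* stands in for fuswintr() succeeding */

static struct {
	bool		owed;		/* stands in for TDF_OWEUPC */
	unsigned long	pr_addr;
	unsigned int	pr_ticks;
} pending;

/* "Interrupt" path: must not fault, so it may only try the cheap add. */
static void
intr_add(unsigned long idx, unsigned int ticks)
{
	if (fast_path_ok) {
		buckets[idx] += ticks;
		return;
	}
	/* Could not do it here; stash the arguments and owe the work. */
	pending.pr_addr = idx;
	pending.pr_ticks = ticks;
	pending.owed = true;
}

/* "AST" path: runs later in a context where the slow add is allowed. */
static void
ast_drain(void)
{
	if (!pending.owed)
		return;
	pending.owed = false;
	buckets[pending.pr_addr] += pending.pr_ticks;
	pending.pr_ticks = 0;
}

int
main(void)
{
	fast_path_ok = false;
	intr_add(3, 1);			/* deferred */
	ast_drain();			/* completed here */
	printf("bucket 3 = %u\n", buckets[3]);
	return (0);
}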
@@ -506,7 +512,6 @@ addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
 		return;
 	}
 	p->p_profthreads++;
-	PROC_UNLOCK(p);
 	prof = &p->p_stats->p_prof;
 	if (pc < prof->pr_off ||
 	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
@@ -514,15 +519,18 @@ addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
 	}
 	addr = prof->pr_base + i;
+	PROC_UNLOCK(p);
 	if (copyin(addr, &v, sizeof(v)) == 0) {
 		v += ticks;
-		if (copyout(&v, addr, sizeof(v)) == 0)
+		if (copyout(&v, addr, sizeof(v)) == 0) {
+			PROC_LOCK(p);
 			goto out;
+		}
 	}
 	stop = 1;
+	PROC_LOCK(p);
 out:
-	PROC_LOCK(p);
 	if (--p->p_profthreads == 0) {
 		if (p->p_flag & P_STOPPROF) {
 			wakeup(&p->p_profthreads);
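
The hunk above also shows why p_profthreads exists: the count is bumped under the proc lock, the lock is dropped for copyin()/copyout() to user memory, and on the way out the last updater wakes whoever is sleeping in stopprofclock() waiting for in-flight updates to drain. The same counter-plus-wakeup shape in userland terms, with a condition variable standing in for wakeup() and all names invented for the illustration:

/*
 * "In-flight counter drained by a stopper": updaters register, do
 * unlocked work, then deregister and signal when the count hits zero.
 * Illustration only.  Build with: cc -pthread drain.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int profthreads;			/* like p_profthreads */
static int stopping;			/* like P_STOPPROF */

static void
update(void)
{
	pthread_mutex_lock(&lock);
	if (stopping) {			/* profiling is being torn down */
		pthread_mutex_unlock(&lock);
		return;
	}
	profthreads++;
	pthread_mutex_unlock(&lock);

	/* ... unlocked work: copy the bucket in/out of user memory ... */

	pthread_mutex_lock(&lock);
	if (--profthreads == 0 && stopping)
		pthread_cond_broadcast(&drained);	/* like wakeup() */
	pthread_mutex_unlock(&lock);
}

static void
stop_profiling(void)
{
	pthread_mutex_lock(&lock);
	stopping = 1;
	while (profthreads > 0)			/* like the msleep() loop */
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
	printf("all updates drained\n");
}

int
main(void)
{
	update();
	stop_profiling();
	return (0);
}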

sys/kern/subr_trap.c

@@ -142,7 +142,7 @@ ast(struct trapframe *framep)
 	struct proc *p;
 	struct ksegrp *kg;
 	struct rlimit rlim;
-	u_int prticks, sticks;
+	u_int sticks;
 	int sflag;
 	int flags;
 	int sig;
@@ -180,11 +180,6 @@ ast(struct trapframe *framep)
 	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
 	    TDF_NEEDRESCHED | TDF_OWEUPC | TDF_INTERRUPT);
 	cnt.v_soft++;
-	prticks = 0;
-	if (flags & TDF_OWEUPC && p->p_flag & P_PROFIL) {
-		prticks = p->p_stats->p_prof.pr_ticks;
-		p->p_stats->p_prof.pr_ticks = 0;
-	}
 	mtx_unlock_spin(&sched_lock);
 	/*
 	 * XXXKSE While the fact that we owe a user profiling
@@ -196,8 +191,11 @@ ast(struct trapframe *framep)
 	if (td->td_ucred != p->p_ucred)
 		cred_update_thread(td);
-	if (flags & TDF_OWEUPC && p->p_flag & P_PROFIL)
-		addupc_task(td, p->p_stats->p_prof.pr_addr, prticks);
+	if (flags & TDF_OWEUPC && p->p_flag & P_PROFIL) {
+		addupc_task(td, p->p_stats->p_prof.pr_addr,
+		    p->p_stats->p_prof.pr_ticks);
+		p->p_stats->p_prof.pr_ticks = 0;
+	}
 	if (sflag & PS_ALRMPEND) {
 		PROC_LOCK(p);
 		psignal(p, SIGVTALRM);

sys/sys/resourcevar.h

@@ -43,25 +43,31 @@
 /*
  * Kernel per-process accounting / statistics
  * (not necessarily resident except when running).
+ *
+ * Locking key:
+ *      b - created at fork, never changes
+ *      c - locked by proc mtx
+ *      j - locked by sched_lock mtx
+ *      k - only accessed by curthread
  */
 struct pstats {
 #define	pstat_startzero	p_ru
-	struct	rusage p_ru;		/* stats for this proc */
-	struct	rusage p_cru;		/* sum of stats for reaped children */
-	struct	itimerval p_timer[3];	/* virtual-time timers */
+	struct	rusage p_ru;		/* Stats for this process. */
+	struct	rusage p_cru;		/* Stats for reaped children. */
+	struct	itimerval p_timer[3];	/* (j) Virtual-time timers. */
 #define	pstat_endzero	pstat_startcopy
 #define	pstat_startcopy	p_prof
-	struct uprof {			/* profile arguments */
-		caddr_t	pr_base;	/* buffer base */
-		u_long	pr_size;	/* buffer size */
-		u_long	pr_off;		/* pc offset */
-		u_long	pr_scale;	/* pc scaling */
-		u_long	pr_addr;	/* temp storage for addr until AST */
-		u_int	pr_ticks;	/* temp storage for ticks until AST */
+	struct uprof {			/* Profile arguments. */
+		caddr_t	pr_base;	/* (c + j) Buffer base. */
+		u_long	pr_size;	/* (c + j) Buffer size. */
+		u_long	pr_off;		/* (c + j) PC offset. */
+		u_long	pr_scale;	/* (c + j) PC scaling. */
+		u_long	pr_addr;	/* (k) Temporary addr until AST. */
+		u_int	pr_ticks;	/* (k) Temporary ticks until AST. */
 	} p_prof;
 #define	pstat_endcopy	p_start
-	struct timeval p_start;		/* starting time */
+	struct timeval p_start;		/* (b) Starting time. */
 };
 #ifdef _KERNEL
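
For reference, pr_base/pr_size/pr_off/pr_scale describe a histogram of 16-bit counters over a text range: a sampled pc is shifted by pr_off, multiplied by pr_scale treated as a 16.16 fixed-point fraction, and the result is used as a byte offset into the pr_size-byte buffer at pr_base, which is roughly what the kernel's PC_TO_INDEX() macro computes. A userland rendition of that mapping is sketched below; bucket_of() and the alignment detail are assumptions made for the illustration, not quoted from the header.

/*
 * Sketch of the pc -> sample-bucket mapping implied by struct uprof.
 * Approximation only; buckets are assumed to be 16-bit counters.
 */
#include <stdint.h>
#include <stdio.h>

struct uprof_demo {
	uint16_t	*pr_base;	/* sample buffer */
	unsigned long	 pr_size;	/* buffer size, in bytes */
	unsigned long	 pr_off;	/* lowest profiled pc */
	unsigned long	 pr_scale;	/* 16.16 fixed-point scale */
};

/* Return a pointer to the bucket for pc, or NULL if pc is out of range. */
static uint16_t *
bucket_of(struct uprof_demo *p, unsigned long pc)
{
	unsigned long off;

	if (pc < p->pr_off)
		return (NULL);
	off = (unsigned long)(((uint64_t)(pc - p->pr_off) *
	    p->pr_scale) >> 16) & ~1UL;		/* keep u_short alignment */
	if (off >= p->pr_size)
		return (NULL);
	return ((uint16_t *)((char *)p->pr_base + off));
}

int
main(void)
{
	static uint16_t buf[128];
	struct uprof_demo p = {
		.pr_base = buf,
		.pr_size = sizeof(buf),
		.pr_off = 0x1000,
		.pr_scale = 0x8000,	/* 0.5: one bucket per 4 bytes of text */
	};
	uint16_t *b = bucket_of(&p, 0x1008);

	if (b != NULL) {
		(*b)++;
		printf("bucket %ld now %u\n", (long)(b - buf), *b);
	}
	return (0);
}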