- Move clock synchronization into a separate clock lock so the global
  scheduler lock is not involved.  sched_lock still protects the sched_clock
  call.  Another patch will remedy this.

Contributed by:	Attilio Rao <attilio@FreeBSD.org>
Tested by:	kris, jeff
Author:	Jeff Roberson
Date:	2007-05-20 22:11:50 +00:00
Commit:	8b98fec903 (parent 0ad5e7f326)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=169803
4 changed files with 21 additions and 15 deletions
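
The change is a lock-decomposition pattern: the per-tick statistics (cp_time[], profprocs, and the profiling window in struct uprof) only need to stay consistent with one another, so they get a dedicated spin lock instead of piggybacking on the scheduler lock. The sketch below is not kernel code; it is a minimal userland C analogue with hypothetical names, using pthread mutexes in place of mtx(9) spin locks, to show the before/after shape of statclock(): the counters move under the new lock, the scheduler lock is taken only for the scheduler's own accounting, and the two locks are held one after the other, never nested.

#include <pthread.h>
#include <stdio.h>

enum { CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE, CPUSTATES };

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;	/* hot, global */
static pthread_mutex_t time_lock = PTHREAD_MUTEX_INITIALIZER;	/* stats only */
static long cp_time[CPUSTATES];		/* shared tick counters */

/* Before: the tick handler did all of its bookkeeping under sched_lock. */
static void
statclock_before(int state)
{
	pthread_mutex_lock(&sched_lock);
	cp_time[state]++;
	/* ... per-thread scheduler accounting (sched_clock()) ... */
	pthread_mutex_unlock(&sched_lock);
}

/* After: counters take only time_lock; sched_lock covers just the scheduler. */
static void
statclock_after(int state)
{
	pthread_mutex_lock(&time_lock);
	cp_time[state]++;
	pthread_mutex_unlock(&time_lock);

	pthread_mutex_lock(&sched_lock);
	/* ... per-thread scheduler accounting (sched_clock()) ... */
	pthread_mutex_unlock(&sched_lock);
}

int
main(void)
{
	statclock_before(CP_USER);
	statclock_after(CP_SYS);
	printf("user %ld sys %ld\n", cp_time[CP_USER], cp_time[CP_SYS]);
	return (0);
}

The payoff is that the clock interrupt now holds the scheduler's hottest lock only for sched_clock(), which is what the kern_clock.c hunks below do with time_lock and mtx_lock_spin_flags().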

sys/kern/kern_clock.c

@@ -83,6 +83,9 @@ SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
 /* Some of these don't belong here, but it's easiest to concentrate them. */
 long cp_time[CPUSTATES];
 
+/* Spin-lock protecting profiling statistics. */
+struct mtx time_lock;
+
 static int
 sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
 {
@@ -172,6 +175,7 @@ initclocks(dummy)
 	 * code do its bit.
 	 */
 	cpu_initclocks();
+	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
 
 	/*
 	 * Compute profhz/stathz, and fix profhz if needed.
@@ -349,20 +353,15 @@ startprofclock(p)
 	register struct proc *p;
 {
 
-	/*
-	 * XXX; Right now sched_lock protects statclock(), but perhaps
-	 * it should be protected later on by a time_lock, which would
-	 * cover psdiv, etc. as well.
-	 */
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	if (p->p_flag & P_STOPPROF)
 		return;
 	if ((p->p_flag & P_PROFIL) == 0) {
-		mtx_lock_spin(&sched_lock);
 		p->p_flag |= P_PROFIL;
+		mtx_lock_spin(&time_lock);
 		if (++profprocs == 1)
 			cpu_startprofclock();
-		mtx_unlock_spin(&sched_lock);
+		mtx_unlock_spin(&time_lock);
 	}
 }
 
@@ -385,11 +384,11 @@ stopprofclock(p)
 		}
 		if ((p->p_flag & P_PROFIL) == 0)
 			return;
-		mtx_lock_spin(&sched_lock);
 		p->p_flag &= ~P_PROFIL;
+		mtx_lock_spin(&time_lock);
 		if (--profprocs == 0)
 			cpu_stopprofclock();
-		mtx_unlock_spin(&sched_lock);
+		mtx_unlock_spin(&time_lock);
 	}
 }
 
@@ -412,7 +411,6 @@ statclock(int usermode)
 	td = curthread;
 	p = td->td_proc;
 
-	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
 	if (usermode) {
 		/*
 		 * Charge the time as appropriate.
@@ -422,6 +420,7 @@ statclock(int usermode)
 			thread_statclock(1);
 #endif
 		td->td_uticks++;
+		mtx_lock_spin_flags(&time_lock, MTX_QUIET);
 		if (p->p_nice > NZERO)
 			cp_time[CP_NICE]++;
 		else
@@ -442,6 +441,7 @@ statclock(int usermode)
 		if ((td->td_pflags & TDP_ITHREAD) ||
 		    td->td_intr_nesting_level >= 2) {
 			td->td_iticks++;
+			mtx_lock_spin_flags(&time_lock, MTX_QUIET);
 			cp_time[CP_INTR]++;
 		} else {
 #ifdef KSE
@@ -450,15 +450,18 @@ statclock(int usermode)
 #endif
 			td->td_pticks++;
 			td->td_sticks++;
+			mtx_lock_spin_flags(&time_lock, MTX_QUIET);
 			if (!TD_IS_IDLETHREAD(td))
 				cp_time[CP_SYS]++;
 			else
 				cp_time[CP_IDLE]++;
 		}
 	}
+	mtx_unlock_spin_flags(&time_lock, MTX_QUIET);
 	CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
 	    td, td->td_proc->p_comm, td->td_priority, (stathz)?stathz:hz);
 
+	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
 	sched_clock(td);
 
 	/* Update resource usage integrals and maximums. */
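
startprofclock() and stopprofclock() above keep profprocs, a count of processes currently being profiled; the hardware profiling clock runs whenever that count is nonzero. The count is all that time_lock needs to cover here, since the P_PROFIL flag itself is still serialized by the process lock (note the PROC_LOCK_ASSERT in context). A hedged userland sketch of that reference-count pattern, again with hypothetical names and a pthread mutex standing in for the spin lock:

#include <pthread.h>

static pthread_mutex_t time_lock = PTHREAD_MUTEX_INITIALIZER;
static int profprocs;			/* number of profiled processes */

static void cpu_startprofclock(void) { /* arm the profiling timer */ }
static void cpu_stopprofclock(void) { /* disarm the profiling timer */ }

static void
startprofclock(void)
{
	pthread_mutex_lock(&time_lock);
	if (++profprocs == 1)		/* 0 -> 1: start the clock */
		cpu_startprofclock();
	pthread_mutex_unlock(&time_lock);
}

static void
stopprofclock(void)
{
	pthread_mutex_lock(&time_lock);
	if (--profprocs == 0)		/* 1 -> 0: stop the clock */
		cpu_stopprofclock();
	pthread_mutex_unlock(&time_lock);
}

int
main(void)
{
	startprofclock();
	stopprofclock();
	return (0);
}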

sys/kern/subr_prof.c

@@ -423,12 +423,12 @@ profil(td, uap)
 	}
 	PROC_LOCK(p);
 	upp = &td->td_proc->p_stats->p_prof;
-	mtx_lock_spin(&sched_lock);
+	mtx_lock_spin(&time_lock);
 	upp->pr_off = uap->offset;
 	upp->pr_scale = uap->scale;
 	upp->pr_base = uap->samples;
 	upp->pr_size = uap->size;
-	mtx_unlock_spin(&sched_lock);
+	mtx_unlock_spin(&time_lock);
 	startprofclock(p);
 	PROC_UNLOCK(p);
 
@@ -468,15 +468,15 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
 	if (ticks == 0)
 		return;
 	prof = &td->td_proc->p_stats->p_prof;
-	mtx_lock_spin(&sched_lock);
+	mtx_lock_spin(&time_lock);
 	if (pc < prof->pr_off ||
 	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
-		mtx_unlock_spin(&sched_lock);
+		mtx_unlock_spin(&time_lock);
 		return;			/* out of range; ignore */
 	}
 
 	addr = prof->pr_base + i;
-	mtx_unlock_spin(&sched_lock);
+	mtx_unlock_spin(&time_lock);
 	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
 		td->td_profil_addr = pc;
 		td->td_profil_ticks = ticks;
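
The subr_prof.c hunks put both sides of the profiling window under the same lock: profil() writes pr_off, pr_scale, pr_base and pr_size while holding time_lock, and addupc_intr() validates the sampled pc and computes the target address under that lock, dropping it before the user-memory update (fuswintr()/suswintr()), which may fail and be retried later. A rough userland analogue, with hypothetical names and the pr_scale arithmetic of PC_TO_INDEX() left out:

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

struct uprof {				/* simplified profiling window */
	unsigned short *pr_base;	/* sample buffer */
	size_t pr_size;			/* number of buckets */
	uintptr_t pr_off;		/* lowest profiled pc */
};

static pthread_mutex_t time_lock = PTHREAD_MUTEX_INITIALIZER;
static struct uprof prof;

/* Writer: the profil()-style call installs a new window under the lock. */
static void
set_profile(unsigned short *base, size_t size, uintptr_t off)
{
	pthread_mutex_lock(&time_lock);
	prof.pr_base = base;
	prof.pr_size = size;
	prof.pr_off = off;
	pthread_mutex_unlock(&time_lock);
}

/* Reader: the sampling path snapshots what it needs, then unlocks. */
static void
addupc(uintptr_t pc, unsigned int ticks)
{
	unsigned short *addr;
	size_t i;

	pthread_mutex_lock(&time_lock);
	if (prof.pr_base == NULL || pc < prof.pr_off ||
	    (i = pc - prof.pr_off) >= prof.pr_size) {
		pthread_mutex_unlock(&time_lock);
		return;			/* out of range; ignore */
	}
	addr = prof.pr_base + i;
	pthread_mutex_unlock(&time_lock);
	addr[0] += ticks;		/* slow update done without the lock */
}

int
main(void)
{
	static unsigned short buf[16];

	set_profile(buf, 16, 0x1000);
	addupc(0x1004, 1);
	return (0);
}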

sys/kern/subr_witness.c

@@ -410,6 +410,7 @@ static struct witness_order_list_entry order_lists[] = {
 	{ "callout", &lock_class_mtx_spin },
 	{ "entropy harvest mutex", &lock_class_mtx_spin },
 	{ "syscons video lock", &lock_class_mtx_spin },
+	{ "time lock", &lock_class_mtx_spin },
 	/*
 	 * leaf locks
 	 */

sys/sys/systm.h

@@ -71,6 +71,8 @@
 extern int bootverbose;		/* nonzero to print verbose messages */
 extern int maxusers;		/* system tune hint */
 
+extern struct mtx time_lock;	/* time lock for profiling */
+
 #ifdef	INVARIANTS		/* The option is always available */
 #define	KASSERT(exp,msg) do {						\
 	if (__predict_false(!(exp)))					\