 - Implement a new mechanism for resetting lock profiling.  We now
   guarantee that all cpus have acknowledged the cleared enable flag by
   scheduling the resetting thread on each cpu in succession.  Since all
   lock profiling happens within a critical section this guarantees that
   all cpus have left lock profiling before we clear the data structures.
 - Assert that the per-thread queue of locks that lock profiling is aware
   of is clear on thread exit.  There were several cases where this was
   not true that slowed lock profiling and leaked information.
 - Remove all objects from all lists before clearing any per-cpu
   information in reset.  Lock profiling objects can migrate between
   per-cpu caches and previously these migrated objects could be zero'd
   before they'd been removed.

Discussed with:	attilio
Sponsored by:	Nokia
jeff 2009-03-15 06:41:47 +00:00
parent 1799153106
commit ee1ec823f6
3 changed files with 111 additions and 25 deletions
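The core of the change is the reset handshake described in the first item: every profiling update runs inside a critical section, so a thread that has been scheduled on a cpu is proof that the cpu is no longer touching profiler state.  A minimal sketch of that quiescence pattern, mirroring the lock_prof_idle() routine added in the diff below (the function name here is only a placeholder):

static void
prof_quiesce(void)
{
	struct thread *td;
	int cpu;

	td = curthread;
	thread_lock(td);
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		/* sched_bind() only returns once we are running on 'cpu'. */
		sched_bind(td, cpu);
	}
	sched_unbind(td);
	thread_unlock(td);
	/* Every cpu we visited has left its critical section; it is now safe to zero the stats. */
}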


@@ -306,6 +306,8 @@ thread_alloc(void)
 void
 thread_free(struct thread *td)
 {
+
+	lock_profile_thread_exit(td);
 	if (td->td_cpuset)
 		cpuset_rel(td->td_cpuset);
 	td->td_cpuset = NULL;
@@ -439,6 +441,7 @@ thread_wait(struct proc *p)
 	/* Wait for any remaining threads to exit cpu_throw(). */
 	while (p->p_exitthreads)
 		sched_relinquish(curthread);
+	lock_profile_thread_exit(td);
 	cpuset_rel(td->td_cpuset);
 	td->td_cpuset = NULL;
 	cpu_thread_clean(td);


@@ -46,9 +46,11 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/lock_profile.h>
 #include <sys/malloc.h>
+#include <sys/mutex.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
@@ -186,7 +188,8 @@ struct lock_prof_cpu {
 struct lock_prof_cpu *lp_cpu[MAXCPU];
 
-int lock_prof_enable = 0;
+volatile int lock_prof_enable = 0;
+static volatile int lock_prof_resetting;
 
 /* SWAG: sbuf size = avg stat. line size * number of locks */
 #define LPROF_SBUF_SIZE 256 * 400
@@ -239,25 +242,77 @@ lock_prof_init(void *arg)
 }
 SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
 
+/*
+ * To be certain that lock profiling has idled on all cpus before we
+ * reset, we schedule the resetting thread on all active cpus.  Since
+ * all operations happen within critical sections we can be sure that
+ * it is safe to zero the profiling structures.
+ */
+static void
+lock_prof_idle(void)
+{
+	struct thread *td;
+	int cpu;
+
+	td = curthread;
+	thread_lock(td);
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		if (CPU_ABSENT(cpu))
+			continue;
+		sched_bind(td, cpu);
+	}
+	sched_unbind(td);
+	thread_unlock(td);
+}
+
+static void
+lock_prof_reset_wait(void)
+{
+
+	/*
+	 * Spin relinquishing our cpu so that lock_prof_idle may
+	 * run on it.
+	 */
+	while (lock_prof_resetting)
+		sched_relinquish(curthread);
+}
+
 static void
 lock_prof_reset(void)
 {
 	struct lock_prof_cpu *lpc;
 	int enabled, i, cpu;
 
+	/*
+	 * We not only race with acquiring and releasing locks but also
+	 * thread exit.  To be certain that threads exit without valid head
+	 * pointers they must see resetting set before enabled is cleared.
+	 * Otherwise a lock may not be removed from a per-thread list due
+	 * to disabled being set but not wait for reset() to remove it below.
+	 */
+	atomic_store_rel_int(&lock_prof_resetting, 1);
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
-	pause("lpreset", hz / 10);
+	lock_prof_idle();
+	/*
+	 * Some objects may have migrated between CPUs.  Clear all links
+	 * before we zero the structures.  Some items may still be linked
+	 * into per-thread lists as well.
+	 */
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		lpc = lp_cpu[cpu];
 		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
 			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
 			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
 		}
+	}
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+		lpc = lp_cpu[cpu];
 		bzero(lpc, sizeof(*lpc));
 		lock_prof_init_type(&lpc->lpc_types[0]);
 		lock_prof_init_type(&lpc->lpc_types[1]);
 	}
+	atomic_store_rel_int(&lock_prof_resetting, 0);
 	lock_prof_enable = enabled;
 }
@@ -351,7 +406,7 @@ dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
 	    "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
 	enabled = lock_prof_enable;
 	lock_prof_enable = 0;
-	pause("lpreset", hz / 10);
+	lock_prof_idle();
 	t = ticks;
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (lp_cpu[cpu] == NULL)
@@ -461,16 +516,13 @@ lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
 		if (l->lpo_obj == lo && l->lpo_file == file &&
 		    l->lpo_line == line)
 			return (l);
-	critical_enter();
 	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
 	l = LIST_FIRST(&type->lpt_lpoalloc);
 	if (l == NULL) {
 		lock_prof_rejected++;
-		critical_exit();
 		return (NULL);
 	}
 	LIST_REMOVE(l, lpo_link);
-	critical_exit();
 	l->lpo_obj = lo;
 	l->lpo_file = file;
 	l->lpo_line = line;
@@ -497,18 +549,49 @@ lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 	if (spin && lock_prof_skipspin == 1)
 		return;
+	critical_enter();
+	/* Recheck enabled now that we're in a critical section. */
+	if (lock_prof_enable == 0)
+		goto out;
 	l = lock_profile_object_lookup(lo, spin, file, line);
 	if (l == NULL)
-		return;
+		goto out;
 	l->lpo_cnt++;
 	if (++l->lpo_ref > 1)
-		return;
+		goto out;
 	l->lpo_contest_locking = contested;
 	l->lpo_acqtime = nanoseconds();
 	if (waittime && (l->lpo_acqtime > waittime))
 		l->lpo_waittime = l->lpo_acqtime - waittime;
 	else
 		l->lpo_waittime = 0;
+out:
+	critical_exit();
+}
+
+void
+lock_profile_thread_exit(struct thread *td)
+{
+#ifdef INVARIANTS
+	struct lock_profile_object *l;
+
+	MPASS(curthread->td_critnest == 0);
+#endif
+	/*
+	 * If lock profiling was disabled we have to wait for reset to
+	 * clear our pointers before we can exit safely.
+	 */
+	lock_prof_reset_wait();
+#ifdef INVARIANTS
+	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
+		printf("thread still holds lock acquired at %s:%d\n",
+		    l->lpo_file, l->lpo_line);
+	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
+		printf("thread still holds lock acquired at %s:%d\n",
+		    l->lpo_file, l->lpo_line);
+#endif
+	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
+	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
 }
 
 void
@@ -521,11 +604,20 @@ lock_profile_release_lock(struct lock_object *lo)
 	struct lpohead *head;
 	int spin;
 
-	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
+	if (lo->lo_flags & LO_NOPROFILE)
 		return;
 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 	head = &curthread->td_lprof[spin];
+	if (LIST_FIRST(head) == NULL)
+		return;
 	critical_enter();
+	/* Recheck enabled now that we're in a critical section. */
+	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
+		goto out;
+	/*
+	 * If lock profiling is not enabled we still want to remove the
+	 * lpo from our queue.
+	 */
 	LIST_FOREACH(l, head, lpo_link)
 		if (l->lpo_obj == lo)
 			break;


@@ -43,11 +43,13 @@ LIST_HEAD(lpohead, lock_profile_object);
 u_int64_t nanoseconds(void);
 #endif
 
-extern int lock_prof_enable;
+extern volatile int lock_prof_enable;
 
 void lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
     uint64_t waittime, const char *file, int line);
 void lock_profile_release_lock(struct lock_object *lo);
+void lock_profile_thread_exit(struct thread *td);
 
 static inline void
 lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested,
@@ -61,21 +63,10 @@ lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested,
 
 #else /* !LOCK_PROFILING */
 
-static inline void
-lock_profile_release_lock(struct lock_object *lo)
-{
-}
-
-static inline void
-lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested, uint64_t *waittime)
-{
-}
-
-static inline void
-lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime,
-    const char *file, int line)
-{
-}
+#define	lock_profile_release_lock(lo)
+#define	lock_profile_obtain_lock_failed(lo, contested, waittime)
+#define	lock_profile_obtain_lock_success(lo, contested, waittime, file, line)
+#define	lock_profile_thread_exit(td)
 
 #endif /* !LOCK_PROFILING */
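For reference, the hooks declared in this header are driven from a lock implementation's acquire and release paths roughly as sketched below.  my_lock_try(), my_lock_spin() and my_lock_drop() are hypothetical stand-ins for a real primitive's fast path, contention path and release; only the lock_profile_*() calls and their argument order come from the header above.

static void
my_lock_acquire(struct lock_object *lo, const char *file, int line)
{
	int contested = 0;
	uint64_t waittime = 0;

	while (!my_lock_try(lo)) {
		/* Note the contention and remember when the wait began. */
		lock_profile_obtain_lock_failed(lo, &contested, &waittime);
		my_lock_spin(lo);
	}
	/* Start charging hold time (plus any wait time accumulated above). */
	lock_profile_obtain_lock_success(lo, contested, waittime, file, line);
}

static void
my_lock_release(struct lock_object *lo)
{
	/* Tally hold/wait statistics and unlink the lpo from td_lprof. */
	lock_profile_release_lock(lo);
	my_lock_drop(lo);
}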