From 6a467cc5e1f673a700f6229e63e9a98ed65264c8 Mon Sep 17 00:00:00 2001
From: Mateusz Guzik
Date: Sun, 23 May 2021 17:25:42 +0200
Subject: [PATCH] lockprof: pass lock type as an argument instead of reading
 the spin flag

---
 sys/kern/kern_lock.c   | 10 +++++-----
 sys/kern/kern_mutex.c  | 20 +++++++++++---------
 sys/kern/kern_rwlock.c |  8 ++++----
 sys/kern/kern_sx.c     |  8 ++++----
 sys/kern/sched_4bsd.c  |  8 ++++----
 sys/kern/subr_lock.c   | 30 +++++++++++++++++++++++-------
 sys/sys/lock_profile.h | 16 ++++++++--------
 sys/sys/lockstat.h     | 28 ++++++++++++++++++++++------
 sys/sys/mutex.h        |  6 +++---
 9 files changed, 84 insertions(+), 50 deletions(-)

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 5cefcf7a597b..bec49f29d162 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -639,7 +639,7 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 #ifdef HWPMC_HOOKS
                 PMC_SOFT_CALL( , , lock, failed);
 #endif
-                lock_profile_obtain_lock_failed(&lk->lock_object,
+                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                     &contested, &waittime);
 
                 /*
@@ -852,7 +852,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 #ifdef HWPMC_HOOKS
                 PMC_SOFT_CALL( , , lock, failed);
 #endif
-                lock_profile_obtain_lock_failed(&lk->lock_object,
+                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                     &contested, &waittime);
 
                 /*
@@ -1442,7 +1442,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 #ifdef HWPMC_HOOKS
                 PMC_SOFT_CALL( , , lock, failed);
 #endif
-                lock_profile_obtain_lock_failed(&lk->lock_object,
+                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                     &contested, &waittime);
 
                 /*
@@ -1589,7 +1589,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 
         if (error == 0) {
                 lock_profile_obtain_lock_success(&lk->lock_object,
-                    contested, waittime, file, line);
+                    false, contested, waittime, file, line);
                 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
                     lk->lk_recurse, file, line);
                 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
@@ -1635,7 +1635,7 @@ _lockmgr_disown(struct lock *lk, const char *file, int line)
          */
         if (LK_HOLDER(lk->lk_lock) != tid)
                 return;
-        lock_profile_release_lock(&lk->lock_object);
+        lock_profile_release_lock(&lk->lock_object, false);
         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 0c384281f711..d9db69e2ac09 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -344,7 +344,7 @@ __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
         if (!_mtx_obtain_lock_fetch(m, &v, tid))
                 _mtx_lock_spin(m, v, opts, file, line);
         else
-                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
+                LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
                     m, 0, 0, file, line);
 #else
         __mtx_lock_spin(m, curthread, opts, file, line);
@@ -565,7 +565,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 #ifdef HWPMC_HOOKS
         PMC_SOFT_CALL( , , lock, failed);
 #endif
-        lock_profile_obtain_lock_failed(&m->lock_object,
+        lock_profile_obtain_lock_failed(&m->lock_object, false,
             &contested, &waittime);
         if (LOCK_LOG_TEST(&m->lock_object, opts))
                 CTR4(KTR_LOCK,
@@ -756,7 +756,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
 #ifdef HWPMC_HOOKS
         PMC_SOFT_CALL( , , lock, failed);
 #endif
-        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
+        lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);
 
         for (;;) {
                 if (v == MTX_UNOWNED) {
@@ -792,7 +792,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
         LOCKSTAT_RECORD1(spin__spin, m, spin_time);
 out_lockstat:
 #endif
-        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
+        LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
             contested, waittime, file, line);
 }
 #endif /* SMP */
@@ -912,7 +912,7 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
                         continue;
                 }
                 MPASS(v != tid);
-                lock_profile_obtain_lock_failed(&m->lock_object,
+                lock_profile_obtain_lock_failed(&m->lock_object, true,
                     &contested, &waittime);
                 /* Give interrupts a chance while we spin. */
                 spinlock_exit();
@@ -945,7 +945,7 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
 #ifdef KDTRACE_HOOKS
         spin_time += lockstat_nsecs(&m->lock_object);
 #endif
-        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested,
+        LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
             waittime, file, line);
 #ifdef KDTRACE_HOOKS
         if (lda.spin_cnt != 0)
@@ -1183,12 +1183,14 @@ _mtx_destroy(volatile uintptr_t *c)
                 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
 
                 /* Perform the non-mtx related part of mtx_unlock_spin(). */
-                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
+                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
+                        lock_profile_release_lock(&m->lock_object, true);
                         spinlock_exit();
-                else
+                } else {
                         TD_LOCKS_DEC(curthread);
+                        lock_profile_release_lock(&m->lock_object, false);
+                }
 
-                lock_profile_release_lock(&m->lock_object);
                 /* Tell witness this isn't locked to make it happy. */
                 WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
                     __LINE__);
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index cf1af0ee7af9..f1c18f952f24 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -481,7 +481,7 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
 #ifdef HWPMC_HOOKS
         PMC_SOFT_CALL( , , lock, failed);
 #endif
-        lock_profile_obtain_lock_failed(&rw->lock_object,
+        lock_profile_obtain_lock_failed(&rw->lock_object, false,
             &contested, &waittime);
 
         for (;;) {
@@ -681,7 +681,7 @@ __rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
             !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG)))
                 __rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
         else
-                lock_profile_obtain_lock_success(&rw->lock_object, 0, 0,
+                lock_profile_obtain_lock_success(&rw->lock_object, false, 0, 0,
                     file, line);
 
         LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
@@ -856,7 +856,7 @@ _rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
             !__rw_runlock_try(rw, td, &v)))
                 __rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);
         else
-                lock_profile_release_lock(&rw->lock_object);
+                lock_profile_release_lock(&rw->lock_object, false);
 
         TD_LOCKS_DEC(curthread);
 }
@@ -975,7 +975,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 #ifdef HWPMC_HOOKS
         PMC_SOFT_CALL( , , lock, failed);
 #endif
-        lock_profile_obtain_lock_failed(&rw->lock_object,
+        lock_profile_obtain_lock_failed(&rw->lock_object, false,
             &contested, &waittime);
 
         for (;;) {
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 3296bf50a290..ad1c1a0e8813 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -648,7 +648,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 #ifdef HWPMC_HOOKS
         PMC_SOFT_CALL( , , lock, failed);
 #endif
-        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+        lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
             &waittime);
 #ifndef INVARIANTS
@@ -1069,7 +1069,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 #ifdef HWPMC_HOOKS
         PMC_SOFT_CALL( , , lock, failed);
 #endif
-        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+        lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
             &waittime);
 #ifndef INVARIANTS
@@ -1272,7 +1272,7 @@ _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
             !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
                 error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
         else
-                lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
+                lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0,
                     file, line);
         if (error == 0) {
                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
@@ -1379,7 +1379,7 @@ _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
             !_sx_sunlock_try(sx, td, &x)))
                 _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
         else
-                lock_profile_release_lock(&sx->lock_object);
+                lock_profile_release_lock(&sx->lock_object, false);
 
         TD_LOCKS_DEC(curthread);
 }
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 7ce9bd81704f..7e6123cdcf24 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1053,7 +1053,7 @@ sched_switch(struct thread *td, int flags)
                 SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
 
                 /* I feel sleepy */
-                lock_profile_release_lock(&sched_lock.lock_object);
+                lock_profile_release_lock(&sched_lock.lock_object, true);
 #ifdef KDTRACE_HOOKS
                 /*
                  * If DTrace has set the active vtime enum to anything
@@ -1065,7 +1065,7 @@ sched_switch(struct thread *td, int flags)
 #endif
 
                 cpu_switch(td, newtd, tmtx);
-                lock_profile_obtain_lock_success(&sched_lock.lock_object,
+                lock_profile_obtain_lock_success(&sched_lock.lock_object, true,
                     0, 0, __FILE__, __LINE__);
                 /*
                  * Where am I? What year is it?
@@ -1676,7 +1676,7 @@ sched_throw(struct thread *td)
                 PCPU_SET(switchtime, cpu_ticks());
                 PCPU_SET(switchticks, ticks);
         } else {
-                lock_profile_release_lock(&sched_lock.lock_object);
+                lock_profile_release_lock(&sched_lock.lock_object, true);
                 MPASS(td->td_lock == &sched_lock);
                 td->td_lastcpu = td->td_oncpu;
                 td->td_oncpu = NOCPU;
@@ -1696,7 +1696,7 @@ sched_fork_exit(struct thread *td)
          */
         td->td_oncpu = PCPU_GET(cpuid);
         sched_lock.mtx_lock = (uintptr_t)td;
-        lock_profile_obtain_lock_success(&sched_lock.lock_object,
+        lock_profile_obtain_lock_success(&sched_lock.lock_object, true,
             0, 0, __FILE__, __LINE__);
         THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index 9c433f683bac..ca2c6ad32f00 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -58,6 +58,12 @@ __FBSDID("$FreeBSD$");
 
 #include
 
+/*
+ * Uncomment to validate that spin argument to acquire/release routines matches
+ * the flag in the lock
+ */
+//#define LOCK_PROFILING_DEBUG_SPIN
+
 SDT_PROVIDER_DEFINE(lock);
 SDT_PROBE_DEFINE1(lock, , , starvation, "u_int");
@@ -594,11 +600,17 @@ lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
 }
 
 void
-lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
-    uint64_t waittime, const char *file, int line)
+lock_profile_obtain_lock_success(struct lock_object *lo, bool spin,
+    int contested, uint64_t waittime, const char *file, int line)
 {
         struct lock_profile_object *l;
-        int spin;
+
+#ifdef LOCK_PROFILING_DEBUG_SPIN
+        bool is_spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK);
+        if ((spin && !is_spin) || (!spin && is_spin))
+                printf("%s: lock %s spin mismatch (arg %d, flag %d)\n", __func__,
+                    lo->lo_name, spin, is_spin);
+#endif
 
         if (SCHEDULER_STOPPED())
                 return;
@@ -608,7 +620,6 @@ lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
                 return;
         if (lock_contested_only && !contested)
                 return;
-        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
         if (spin && lock_prof_skipspin == 1)
                 return;
         critical_enter();
@@ -661,20 +672,25 @@ lock_profile_thread_exit(struct thread *td)
 }
 
 void
-lock_profile_release_lock(struct lock_object *lo)
+lock_profile_release_lock(struct lock_object *lo, bool spin)
 {
         struct lock_profile_object *l;
         struct lock_prof_type *type;
         struct lock_prof *lp;
         uint64_t curtime, holdtime;
         struct lpohead *head;
-        int spin;
+
+#ifdef LOCK_PROFILING_DEBUG_SPIN
+        bool is_spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK);
+        if ((spin && !is_spin) || (!spin && is_spin))
+                printf("%s: lock %s spin mismatch (arg %d, flag %d)\n", __func__,
+                    lo->lo_name, spin, is_spin);
+#endif
 
         if (SCHEDULER_STOPPED())
                 return;
         if (lo->lo_flags & LO_NOPROFILE)
                 return;
-        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
         head = &curthread->td_lprof[spin];
         if (LIST_FIRST(head) == NULL)
                 return;
diff --git a/sys/sys/lock_profile.h b/sys/sys/lock_profile.h
index 2ace6ef56983..de1a95779254 100644
--- a/sys/sys/lock_profile.h
+++ b/sys/sys/lock_profile.h
@@ -46,14 +46,14 @@ u_int64_t nanoseconds(void);
 
 extern volatile int lock_prof_enable;
 
-void lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
-    uint64_t waittime, const char *file, int line);
-void lock_profile_release_lock(struct lock_object *lo);
+void lock_profile_obtain_lock_success(struct lock_object *lo, bool spin,
+    int contested, uint64_t waittime, const char *file, int line);
+void lock_profile_release_lock(struct lock_object *lo, bool spin);
 void lock_profile_thread_exit(struct thread *td);
 
 static inline void
-lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested,
-    uint64_t *waittime)
+lock_profile_obtain_lock_failed(struct lock_object *lo, bool spin,
+    int *contested, uint64_t *waittime)
 {
         if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE) || *contested)
                 return;
@@ -63,9 +63,9 @@ lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested,
 
 #else /* !LOCK_PROFILING */
 
-#define lock_profile_release_lock(lo) (void)0
-#define lock_profile_obtain_lock_failed(lo, contested, waittime) (void)0
-#define lock_profile_obtain_lock_success(lo, contested, waittime, file, line) (void)0
+#define lock_profile_release_lock(lo, spin) (void)0
+#define lock_profile_obtain_lock_failed(lo, spin, contested, waittime) (void)0
+#define lock_profile_obtain_lock_success(lo, spin, contested, waittime, file, line) (void)0
 #define lock_profile_thread_exit(td) (void)0
 
 #endif /* !LOCK_PROFILING */
diff --git a/sys/sys/lockstat.h b/sys/sys/lockstat.h
index 6a5f79a2f152..76bd97dbafa5 100644
--- a/sys/sys/lockstat.h
+++ b/sys/sys/lockstat.h
@@ -97,22 +97,32 @@ extern volatile bool lockstat_enabled;
         SDT_PROBE5(lockstat, , , probe, lp, arg1, arg2, arg3, arg4)
 
 #define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \
-        lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \
+        lock_profile_obtain_lock_success(&(lp)->lock_object, false, c, wt, f, l); \
+        LOCKSTAT_RECORD0(probe, lp); \
+} while (0)
+
+#define LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \
+        lock_profile_obtain_lock_success(&(lp)->lock_object, true, c, wt, f, l); \
         LOCKSTAT_RECORD0(probe, lp); \
 } while (0)
 
 #define LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(probe, lp, c, wt, f, l, a) do { \
-        lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \
+        lock_profile_obtain_lock_success(&(lp)->lock_object, false, c, wt, f, l); \
         LOCKSTAT_RECORD1(probe, lp, a); \
 } while (0)
 
 #define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do { \
-        lock_profile_release_lock(&(lp)->lock_object); \
+        lock_profile_release_lock(&(lp)->lock_object, false); \
+        LOCKSTAT_RECORD0(probe, lp); \
+} while (0)
+
+#define LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(probe, lp) do { \
+        lock_profile_release_lock(&(lp)->lock_object, true); \
         LOCKSTAT_RECORD0(probe, lp); \
 } while (0)
 
 #define LOCKSTAT_PROFILE_RELEASE_RWLOCK(probe, lp, a) do { \
-        lock_profile_release_lock(&(lp)->lock_object); \
+        lock_profile_release_lock(&(lp)->lock_object, false); \
         LOCKSTAT_RECORD1(probe, lp, a); \
 } while (0)
@@ -130,13 +140,19 @@ uint64_t lockstat_nsecs(struct lock_object *);
 #define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4)
 
 #define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) \
-        lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l)
+        lock_profile_obtain_lock_success(&(lp)->lock_object, false, c, wt, f, l)
+
+#define LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) \
+        lock_profile_obtain_lock_success(&(lp)->lock_object, true, c, wt, f, l)
 
 #define LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(probe, lp, c, wt, f, l, a) \
         LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l)
 
 #define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) \
-        lock_profile_release_lock(&(lp)->lock_object)
+        lock_profile_release_lock(&(lp)->lock_object, false)
+
+#define LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(probe, lp) \
+        lock_profile_release_lock(&(lp)->lock_object, true)
 
 #define LOCKSTAT_PROFILE_RELEASE_RWLOCK(probe, lp, a) \
         LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp)
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 35257ce97038..f35cdd7413a6 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -270,7 +270,7 @@ void _thread_lock(struct thread *);
                 spinlock_exit(); \
                 _ret = 0; \
         } else { \
-                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \
+                LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, \
                     mp, 0, 0, file, line); \
                 _ret = 1; \
         } \
@@ -328,7 +328,7 @@ void _thread_lock(struct thread *);
         if (mtx_recursed((mp))) \
                 (mp)->mtx_recurse--; \
         else { \
-                LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \
+                LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp); \
                 _mtx_release_lock_quick((mp)); \
         } \
         spinlock_exit(); \
@@ -338,7 +338,7 @@ void _thread_lock(struct thread *);
         if (mtx_recursed((mp))) \
                 (mp)->mtx_recurse--; \
         else { \
-                LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \
+                LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp); \
                 (mp)->mtx_lock = MTX_UNOWNED; \
         } \
         spinlock_exit(); \
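
Illustrative note (not part of the patch): after this change the lock_profile_* routines and the LOCKSTAT_PROFILE_* macros no longer derive the lock type from LC_SPINLOCK at runtime; every caller states it explicitly, either by passing the new bool spin argument or by using the new *_SPIN_* macro variants. Below is a minimal sketch of a caller written against the new prototypes in sys/sys/lock_profile.h; the functions my_lock_acquire() and my_lock_release() and the surrounding logic are hypothetical, only the lock_profile_* signatures come from the patch above.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>

/* Hypothetical sleep-lock implementation; a spin lock would pass true. */
static void
my_lock_acquire(struct lock_object *lo)
{
        int contested = 0;
        uint64_t waittime = 0;

        /* Contended path: record the miss, stating this is not a spin lock. */
        lock_profile_obtain_lock_failed(lo, false, &contested, &waittime);

        /* ... acquire the lock here ... */

        /* Report the acquisition with the same spin/sleep classification. */
        lock_profile_obtain_lock_success(lo, false, contested, waittime,
            __FILE__, __LINE__);
}

static void
my_lock_release(struct lock_object *lo)
{

        /* The spin argument selects the per-thread td_lprof[] bucket. */
        lock_profile_release_lock(lo, false);
}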