From 2cba8dd301342c1c31f6dbf496e8351fa8359584 Mon Sep 17 00:00:00 2001
From: John Baldwin <jhb@FreeBSD.org>
Date: Tue, 4 Nov 2014 16:35:56 +0000
Subject: [PATCH] Add a new thread state "spinning" to schedgraph and add
 tracepoints at the start and stop of spinning waits in lock primitives.

---
 sys/kern/kern_lock.c      | 20 ++++++++++++++++++++
 sys/kern/kern_mutex.c     | 11 +++++++++++
 sys/kern/kern_rwlock.c    | 21 +++++++++++++++++++++
 sys/kern/kern_sx.c        | 17 +++++++++++++++++
 tools/sched/schedgraph.py |  1 +
 5 files changed, 70 insertions(+)

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d0e0bdd8cd5c..3fc151db298b 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -582,6 +582,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, lk, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 
 			/*
 			 * If we are holding also an interlock drop it
@@ -597,11 +600,16 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) ==
 			    (uintptr_t)owner && TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
 		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 		    spintries < alk_retries) {
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
 				flags &= ~LK_INTERLOCK;
@@ -619,6 +627,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
@@ -814,6 +824,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, lk, owner);
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 
 			/*
 			 * If we are holding also an interlock drop it
@@ -829,6 +842,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) ==
 			    (uintptr_t)owner && TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
@@ -838,6 +853,9 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			    !atomic_cmpset_ptr(&lk->lk_lock, x,
 			    x | LK_EXCLUSIVE_SPINNERS))
 				continue;
+			KTR_STATE1(KTR_SCHED, "thread",
+			    sched_tdname(td), "spinning",
+			    "lockname:\"%s\"", lk->lock_object.lo_name);
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
 				flags &= ~LK_INTERLOCK;
@@ -854,6 +872,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread",
+			    sched_tdname(td), "running");
 			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index d0b47e5374b8..467ae1cf5cfb 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -433,6 +433,10 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, m, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname((struct thread *)tid),
+				    "spinning", "lockname:\"%s\"",
+				    m->lock_object.lo_name);
 				while (mtx_owner(m) == owner &&
 				    TD_IS_RUNNING(owner)) {
 					cpu_spinwait();
@@ -440,6 +444,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 					spin_cnt++;
 #endif
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname((struct thread *)tid),
+				    "running");
 				continue;
 			}
 		}
@@ -576,6 +583,8 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
+	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
+	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
 
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
@@ -601,6 +610,8 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
+	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
+	    "running");
 
 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
 	    contested, waittime, (file), (line));
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 4cd5a96614bd..c0906c03b4d5 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -420,6 +421,9 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, rw, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "spinning",
+				    "lockname:\"%s\"", rw->lock_object.lo_name);
 				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
 				    owner && TD_IS_RUNNING(owner)) {
 					cpu_spinwait();
@@ -427,10 +431,15 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
 					spin_cnt++;
 #endif
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "running");
 				continue;
 			}
 		} else if (spintries < rowner_retries) {
 			spintries++;
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    rw->lock_object.lo_name);
 			for (i = 0; i < rowner_loops; i++) {
 				v = rw->rw_lock;
 				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
@@ -440,6 +449,8 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
 #ifdef KDTRACE_HOOKS
 			spin_cnt += rowner_loops - i;
 #endif
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 			if (i != rowner_loops)
 				continue;
 		}
@@ -759,6 +770,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 			if (LOCK_LOG_TEST(&rw->lock_object, 0))
 				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
 				    __func__, rw, owner);
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    rw->lock_object.lo_name);
 			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
 			    TD_IS_RUNNING(owner)) {
 				cpu_spinwait();
@@ -766,6 +780,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 				spin_cnt++;
 #endif
 			}
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 			continue;
 		}
 		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
@@ -777,11 +793,16 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
 				}
 			}
 			spintries++;
+			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "spinning", "lockname:\"%s\"",
+			    rw->lock_object.lo_name);
 			for (i = 0; i < rowner_loops; i++) {
 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
 					break;
 				cpu_spinwait();
 			}
+			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+			    "running");
 #ifdef KDTRACE_HOOKS
 			spin_cnt += rowner_loops - i;
 #endif
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 8154b20c494e..6bd276077309 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/sleepqueue.h>
 #include <sys/sx.h>
 #include <sys/sysctl.h>
@@ -555,6 +556,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 					CTR3(KTR_LOCK,
 					    "%s: spinning on %p held by %p",
 					    __func__, sx, owner);
+					KTR_STATE1(KTR_SCHED, "thread",
+					    sched_tdname(curthread), "spinning",
+					    "lockname:\"%s\"",
+					    sx->lock_object.lo_name);
 					GIANT_SAVE();
 					while (SX_OWNER(sx->sx_lock) == x &&
 					    TD_IS_RUNNING(owner)) {
@@ -563,9 +568,14 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 						spin_cnt++;
 #endif
 					}
+					KTR_STATE0(KTR_SCHED, "thread",
+					    sched_tdname(curthread), "running");
 					continue;
 				}
 			} else if (SX_SHARERS(x) && spintries < asx_retries) {
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "spinning",
+				    "lockname:\"%s\"", sx->lock_object.lo_name);
 				GIANT_SAVE();
 				spintries++;
 				for (i = 0; i < asx_loops; i++) {
@@ -582,6 +592,8 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 					spin_cnt++;
 #endif
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "running");
 				if (i != asx_loops)
 					continue;
 			}
@@ -844,6 +856,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 				CTR3(KTR_LOCK,
 				    "%s: spinning on %p held by %p",
 				    __func__, sx, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "spinning",
+				    "lockname:\"%s\"", sx->lock_object.lo_name);
 				GIANT_SAVE();
 				while (SX_OWNER(sx->sx_lock) == x &&
 				    TD_IS_RUNNING(owner)) {
@@ -852,6 +867,8 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 #endif
 					cpu_spinwait();
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(curthread), "running");
 				continue;
 			}
 		}
diff --git a/tools/sched/schedgraph.py b/tools/sched/schedgraph.py
index f1ce3c61a3ea..f0552cacdcc5 100644
--- a/tools/sched/schedgraph.py
+++ b/tools/sched/schedgraph.py
@@ -70,6 +70,7 @@
 	("count", "red"),
 	("running", "green"),
 	("idle", "grey"),
+	("spinning", "red"),
 	("yielding", "yellow"),
 	("swapped", "violet"),
 	("suspended", "purple"),
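
Every spin site above applies the same bracketing pattern: emit a KTR_SCHED
"spinning" state event just before an adaptive busy-wait begins and a
"running" event once it ends, so schedgraph.py can draw the wait as a
distinct thread state. The sketch below distills that pattern into one
place; example_adaptive_spin() and its arguments are hypothetical, and its
wait loop is simplified (the real primitives also recheck the lock word on
each iteration), but the KTR_STATE1()/KTR_STATE0() calls match the ones the
patch adds:

/*
 * Minimal sketch of the tracepoint pattern, assuming a hypothetical
 * caller that already knows the lock's lock_object and owning thread.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <machine/cpu.h>

static void
example_adaptive_spin(struct lock_object *lo, struct thread *owner)
{

	/* Start of the busy-wait: schedgraph will show "spinning". */
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
	    "spinning", "lockname:\"%s\"", lo->lo_name);
	/* Simplified loop; the real sites also recheck the lock word. */
	while (TD_IS_RUNNING(owner))
		cpu_spinwait();
	/* End of the busy-wait: back to the "running" state. */
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
	    "running");
}

To see the new state, capture a trace from a kernel built with KTR support
and KTR_SCHED enabled in the trace mask, dump it with ktrdump(8), and load
the dump in tools/sched/schedgraph.py; per the color table above, the new
"spinning" state is drawn in red, the same color as "count".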