Commit 10/14 of sched_lock decomposition.
 - Add new spinlocks to support thread_lock() and adjust ordering.

Tested by:	kris, current@
Tested on:	i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with:	kris, attilio, kmacy, jhb, julian, bde (small parts each)
parent 07a61420ff
commit bd43e47156
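
For context, a minimal sketch of the pattern this series moves call sites
toward: code that used to serialize on the global sched_lock instead takes
the per-thread lock through thread_lock()/thread_unlock(), which resolve to
whichever spinlock currently protects the thread.  The function below is
hypothetical and not part of this commit; only thread_lock(),
thread_unlock(), and td_priority are real kernel names.

/*
 * Hypothetical illustration (not from this diff): updating a thread's
 * priority under the decomposed locking.  thread_lock() acquires the
 * spinlock that currently covers td, replacing the old pattern of
 * mtx_lock_spin(&sched_lock).
 */
static void
example_set_priority(struct thread *td, u_char pri)
{

	thread_lock(td);		/* was: mtx_lock_spin(&sched_lock) */
	td->td_priority = pri;
	thread_unlock(td);		/* was: mtx_unlock_spin(&sched_lock) */
}
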
sys/kern/subr_witness.c
@@ -404,9 +404,12 @@ static struct witness_order_list_entry order_lists[] = {
 #ifdef HWPMC_HOOKS
 	{ "pmc-per-proc", &lock_class_mtx_spin },
 #endif
+	{ "process slock", &lock_class_mtx_spin },
 	{ "sleepq chain", &lock_class_mtx_spin },
-	{ "sched lock", &lock_class_mtx_spin },
+	{ "umtx lock", &lock_class_mtx_spin },
 	{ "turnstile chain", &lock_class_mtx_spin },
+	{ "turnstile lock", &lock_class_mtx_spin },
+	{ "sched lock", &lock_class_mtx_spin },
 	{ "td_contested", &lock_class_mtx_spin },
 	{ "callout", &lock_class_mtx_spin },
 	{ "entropy harvest mutex", &lock_class_mtx_spin },
@@ -429,7 +432,8 @@ static struct witness_order_list_entry order_lists[] = {
 #endif
 	{ "clk", &lock_class_mtx_spin },
 	{ "mutex profiling lock", &lock_class_mtx_spin },
-	{ "kse zombie lock", &lock_class_mtx_spin },
+	{ "kse lock", &lock_class_mtx_spin },
+	{ "zombie lock", &lock_class_mtx_spin },
 	{ "ALD Queue", &lock_class_mtx_spin },
 #ifdef __ia64__
 	{ "MCA spin lock", &lock_class_mtx_spin },
@@ -446,6 +450,7 @@ static struct witness_order_list_entry order_lists[] = {
 #ifdef HWPMC_HOOKS
 	{ "pmc-leaf", &lock_class_mtx_spin },
 #endif
+	{ "blocked lock", &lock_class_mtx_spin },
 	{ NULL, NULL },
 	{ NULL, NULL }
 };
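
For readers new to this table: within each { NULL, NULL }-terminated group,
WITNESS records that earlier entries may be held while acquiring later ones,
so the insertions above teach it, for example, that "sleepq chain" and
"turnstile chain" order before the new per-thread "sched lock".  A rough
sketch of how the table is walked, loosely modeled on witness_initialize()
(enroll() and itismychild() are the real internal helpers, but this loop is
simplified):

/*
 * Simplified sketch of how order_lists[] is consumed.  Consecutive
 * entries in a group become parent/child ordering constraints; a
 * { NULL, NULL } entry ends a group, and a second one ends the table.
 */
static void
order_list_sketch(const struct witness_order_list_entry *order)
{
	struct witness *parent, *child;

	for (; order->w_name != NULL; order++) {
		parent = enroll(order->w_name, order->w_class);
		for (order++; order->w_name != NULL; order++) {
			child = enroll(order->w_name, order->w_class);
			itismychild(parent, child);	/* parent first */
			parent = child;
		}
	}
}
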
@@ -1961,10 +1966,10 @@ witness_list(struct thread *td)
 	 * td->td_oncpu to get the list of spinlocks for this thread
 	 * and "fix" this.
 	 *
-	 * That still wouldn't really fix this unless we locked sched_lock
-	 * or stopped the other CPU to make sure it wasn't changing the list
-	 * out from under us.  It is probably best to just not try to handle
-	 * threads on other CPU's for now.
+	 * That still wouldn't really fix this unless we locked the scheduler
+	 * lock or stopped the other CPU to make sure it wasn't changing the
+	 * list out from under us.  It is probably best to just not try to
+	 * handle threads on other CPU's for now.
 	 */
 	if (td == curthread && PCPU_GET(spinlocks) != NULL)
 		witness_list_locks(PCPU_PTR(spinlocks));
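
The rewritten comment reflects that there is no longer a single scheduler
lock one could take to freeze a remote thread: td_lock now points at
whichever container lock owns the thread at the moment (a run queue, sleep
queue, turnstile, or the transient "blocked lock" added above) and can
change at any instant.  A simplified model of the retry loop thread_lock()
uses to chase that pointer (the real version lives in kern_mutex.c and adds
spin and contention handling):

/*
 * Simplified model of thread_lock().  Because td->td_lock can be
 * switched to a different spinlock while we wait, the snapshot must
 * be re-checked after the acquire; on a mismatch the thread has
 * migrated and we retry against its new lock.
 */
static void
thread_lock_sketch(struct thread *td)
{
	struct mtx *m;

	for (;;) {
		m = td->td_lock;	/* snapshot current container lock */
		mtx_lock_spin(m);
		if (m == td->td_lock)	/* td still behind this lock? */
			break;
		mtx_unlock_spin(m);	/* migrated; chase the new lock */
	}
}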