From e30df26e7bb63838e770a4e4f0b51e32b5ac13d4 Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Wed, 27 Jun 2012 03:45:25 +0000
Subject: [PATCH] Add new pmap layer locks to the predefined lock order.
 Change the names of a few existing VM locks to follow a consistent naming
 scheme.

---
 sys/amd64/amd64/pmap.c     |  6 +++---
 sys/i386/i386/pmap.c       |  2 +-
 sys/kern/subr_witness.c    | 13 ++++++++-----
 sys/sparc64/sparc64/pmap.c |  5 +++--
 sys/vm/vm_map.c            |  4 ++--
 sys/vm/vm_page.c           | 11 ++++-------
 6 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 2da212c68fa4..0f302d4d6049 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -642,7 +642,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	/*
 	 * Initialize the global pv list lock.
 	 */
-	rw_init(&pvh_global_lock, "pvh global");
+	rw_init(&pvh_global_lock, "pmap pv global");
 
 	/*
 	 * Reserve some special page table entries/VA space for temporary
@@ -810,13 +810,13 @@ pmap_init(void)
 	/*
 	 * Initialize the pv chunk list mutex.
 	 */
-	mtx_init(&pv_chunks_mutex, "pv chunk list", NULL, MTX_DEF);
+	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
 
 	/*
 	 * Initialize the pool of pv list locks.
 	 */
 	for (i = 0; i < NPV_LIST_LOCKS; i++)
-		rw_init(&pv_list_locks[i], "pv list");
+		rw_init(&pv_list_locks[i], "pmap pv list");
 
 	/*
 	 * Calculate the size of the pv head table for superpages.
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d02f00e9fc40..2929789d31f1 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -409,7 +409,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 	/*
 	 * Initialize the global pv list lock.
 	 */
-	rw_init(&pvh_global_lock, "pvh global");
+	rw_init(&pvh_global_lock, "pmap pv global");
 
 	LIST_INIT(&allpmaps);
 
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index f14be503cef3..aeaa3d28a4c5 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -593,19 +593,22 @@ static struct witness_order_list_entry order_lists[] = {
 	/*
 	 * CDEV
 	 */
-	{ "system map", &lock_class_mtx_sleep },
-	{ "vm page queue mutex", &lock_class_mtx_sleep },
+	{ "vm map (system)", &lock_class_mtx_sleep },
+	{ "vm page queue", &lock_class_mtx_sleep },
 	{ "vnode interlock", &lock_class_mtx_sleep },
 	{ "cdev", &lock_class_mtx_sleep },
 	{ NULL, NULL },
 	/*
 	 * VM
-	 *
 	 */
+	{ "vm map (user)", &lock_class_sx },
 	{ "vm object", &lock_class_mtx_sleep },
-	{ "page lock", &lock_class_mtx_sleep },
-	{ "vm page queue mutex", &lock_class_mtx_sleep },
+	{ "vm page", &lock_class_mtx_sleep },
+	{ "vm page queue", &lock_class_mtx_sleep },
+	{ "pmap pv global", &lock_class_rw },
 	{ "pmap", &lock_class_mtx_sleep },
+	{ "pmap pv list", &lock_class_rw },
+	{ "vm page free queue", &lock_class_mtx_sleep },
 	{ NULL, NULL },
 	/*
 	 * kqueue/VFS interaction
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index e83f768a170d..6d9e43dd3eea 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -673,9 +673,10 @@ pmap_bootstrap(u_int cpu_impl)
 	CPU_FILL(&pm->pm_active);
 
 	/*
-	 * Initialize the global tte list lock.
+	 * Initialize the global tte list lock, which is more commonly
+	 * known as the pmap pv global lock.
 	 */
-	rw_init(&tte_list_global_lock, "tte list global");
+	rw_init(&tte_list_global_lock, "pmap pv global");
 
 	/*
 	 * Flush all non-locked TLB entries possibly left over by the
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 5cb1fef1e569..aefed47745a4 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -241,8 +241,8 @@ vm_map_zinit(void *mem, int size, int flags)
 
 	map = (vm_map_t)mem;
 	map->nentries = 0;
 	map->size = 0;
-	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
-	sx_init(&map->lock, "user map");
+	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
+	sx_init(&map->lock, "vm map (user)");
 	return (0);
 }
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c1a7e40d2eae..496fce9fcf7a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -292,16 +292,13 @@ vm_page_startup(vm_offset_t vaddr)
 	end = phys_avail[biggestone+1];
 
 	/*
-	 * Initialize the locks.
+	 * Initialize the page and queue locks.
 	 */
-	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
+	mtx_init(&vm_page_queue_mtx, "vm page queue", NULL, MTX_DEF |
 	    MTX_RECURSE);
-	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
-	    MTX_DEF);
-
-	/* Setup page locks. */
+	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
-		mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF);
+		mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);
 
 	/*
 	 * Initialize the queue headers for the hold queue, the active queue,