Rename SI_SUB_MUTEX to SI_SUB_MTX_POOL to make the name at all accurate.

While doing this, move it earlier in the sysinit boot process so that the
VM system can use it.

After that, the VM system is able to use sx locks instead of lockmgr
locks.  To accomplish this, some of the more questionable uses of the
locks (such as testing whether a lock is owned, and allowing
shared+exclusive recursion) have been removed, and simpler logic is
used throughout, so the locking should also be easier to understand.

This has been tested on my laptop for months and has not shown any
problems on SMP systems either, so it appears quite safe.  One more
user of lockmgr down, many more to go :)
commit 0e0af8ecda
parent 419fe413e5
Brian Feldman, 2002-03-13 23:48:08 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=92246
8 changed files with 99 additions and 88 deletions


@@ -111,5 +111,5 @@ mtx_pool_unlock(void *ptr)
 	mtx_unlock(_mtx_pool_find(ptr));
 }
-SYSINIT(mtxpooli, SI_SUB_MUTEX, SI_ORDER_FIRST, mtx_pool_setup, NULL)
+SYSINIT(mtxpooli, SI_SUB_MTX_POOL, SI_ORDER_FIRST, mtx_pool_setup, NULL)


@@ -113,11 +113,11 @@ enum sysinit_sub_id {
 	SI_SUB_TUNABLES		= 0x0700000,	/* establish tunable values */
 	SI_SUB_CONSOLE		= 0x0800000,	/* console*/
 	SI_SUB_COPYRIGHT	= 0x0800001,	/* first use of console*/
+	SI_SUB_MTX_POOL		= 0x0900000,	/* mutex pool */
 	SI_SUB_VM		= 0x1000000,	/* virtual memory system init*/
 	SI_SUB_KMEM		= 0x1800000,	/* kernel memory*/
 	SI_SUB_KVM_RSRC		= 0x1A00000,	/* kvm operational limits*/
 	SI_SUB_WITNESS		= 0x1A80000,	/* witness initialization */
-	SI_SUB_MUTEX		= 0x1AC0000,	/* mutex pool */
 	SI_SUB_LOCK		= 0x1B00000,	/* lockmgr locks */
 	SI_SUB_EVENTHANDLER	= 0x1C00000,	/* eventhandler init */
 	SI_SUB_KLD		= 0x2000000,	/* KLD and module setup */
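
[Editor's note: SYSINITs run in ascending SI_SUB_* order, so moving the
mutex pool from 0x1AC0000 (after witness) up to 0x0900000, ahead of
SI_SUB_VM at 0x1000000, is what lets VM startup code depend on it.  A
hypothetical consumer, to illustrate the ordering guarantee
(example_vm_init and its SYSINIT are invented for illustration):

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    /*
     * Hypothetical: because SI_SUB_VM > SI_SUB_MTX_POOL, this runs
     * strictly after mtx_pool_setup(), so pool mutexes are usable here.
     */
    static void
    example_vm_init(void *arg)
    {
        mtx_pool_lock(arg);     /* lock the pool mutex hashed from arg */
        mtx_pool_unlock(arg);
    }
    SYSINIT(examplevm, SI_SUB_VM, SI_ORDER_ANY, example_vm_init, NULL)
]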


@@ -127,6 +127,8 @@ static __inline void
 unlock_map(struct faultstate *fs)
 {
 	if (fs->lookup_still_valid) {
+		if (fs->lookup_still_valid == 2)
+			vm_map_lock_downgrade(fs->map);
 		vm_map_lookup_done(fs->map, fs->entry);
 		fs->lookup_still_valid = FALSE;
 	}
@@ -282,7 +284,7 @@ RetryFault:;
 			fs.first_pindex, fs.first_pindex + 1);
 	}

-	fs.lookup_still_valid = TRUE;
+	fs.lookup_still_valid = 1;

 	if (wired)
 		fault_type = prot;
@@ -657,10 +659,10 @@ RetryFault:;
 			 * grab the lock if we need to
 			 */
 			(fs.lookup_still_valid ||
-			lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curthread) == 0)
+			vm_map_try_lock(fs.map) == 0)
 			) {
-				fs.lookup_still_valid = 1;
+				if (fs.lookup_still_valid == 0)
+					fs.lookup_still_valid = 2;
 				/*
 				 * get rid of the unnecessary page
 				 */
@@ -764,7 +766,7 @@ RetryFault:;
 		unlock_and_deallocate(&fs);
 		return (result);
 	}
-	fs.lookup_still_valid = TRUE;
+	fs.lookup_still_valid = 1;

 	if ((retry_object != fs.first_object) ||
 	    (retry_pindex != fs.first_pindex)) {
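
[Editor's note: taken together, these vm_fault.c hunks turn
lookup_still_valid from a boolean into a small three-state protocol:
0 means the lookup has been released, 1 means the shared lock from the
original vm_map_lookup() is still held, and 2 means the lookup was
revalidated by taking the map exclusively with vm_map_try_lock().  A
sketch of the resulting unlock path (the comments are mine; the logic
mirrors the unlock_map() hunk above):

    static __inline void
    unlock_map_sketch(struct faultstate *fs)
    {
        if (fs->lookup_still_valid != 0) {
            /*
             * State 2: the map is held exclusively, but
             * vm_map_lookup_done() releases a read lock, so
             * downgrade before handing the lookup back.
             */
            if (fs->lookup_still_valid == 2)
                vm_map_lock_downgrade(fs->map);
            vm_map_lookup_done(fs->map, fs->entry);
            fs->lookup_still_valid = 0;
        }
    }
]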


@@ -573,9 +573,7 @@ int action;
 				 * data structures there is a
 				 * possible deadlock.
 				 */
-				if (lockmgr(&vm->vm_map.lock,
-				    LK_EXCLUSIVE | LK_NOWAIT,
-				    NULL, curthread)) {
+				if (vm_map_try_lock(&vm->vm_map)) {
 					vmspace_free(vm);
 					PROC_UNLOCK(p);
 					goto nextproc;
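
[Editor's note: vm_map_try_lock() (defined in the vm_map.c hunk below)
returns 0 on success and EWOULDBLOCK otherwise, so callers that must not
sleep, like this swapout path and the pageout daemon further down, simply
skip a contested map.  A hypothetical caller following that convention
(maybe_clean_map is invented; it assumes <vm/vm_map.h> for vm_map_t):

    #include <sys/errno.h>

    /* Hypothetical: do exclusive work on a map only if it is uncontested. */
    static int
    maybe_clean_map(vm_map_t map)
    {
        if (vm_map_try_lock(map) != 0)
            return (EWOULDBLOCK);   /* contested; the caller moves on */
        /* ... work under the exclusive lock ... */
        vm_map_unlock(map);
        return (0);
    }
]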


@@ -277,73 +277,61 @@ vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
 }

 void
-vm_map_lock(vm_map_t map)
+_vm_map_lock(vm_map_t map, const char *file, int line)
 {
 	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
-	if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
-		panic("vm_map_lock: failed to get lock");
+	_sx_xlock(&map->lock, file, line);
 	map->timestamp++;
 }

-void
-vm_map_unlock(vm_map_t map)
+int
+_vm_map_try_lock(vm_map_t map, const char *file, int line)
 {
-	vm_map_printf("locking map LK_RELEASE: %p\n", map);
-	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
+	vm_map_printf("trying to lock map LK_EXCLUSIVE: %p\n", map);
+	if (_sx_try_xlock(&map->lock, file, line)) {
+		map->timestamp++;
+		return (0);
+	}
+	return (EWOULDBLOCK);
 }

 void
-vm_map_lock_read(vm_map_t map)
+_vm_map_unlock(vm_map_t map, const char *file, int line)
 {
+	vm_map_printf("locking map LK_RELEASE: %p\n", map);
+	_sx_xunlock(&map->lock, file, line);
+}
+
+void
+_vm_map_lock_read(vm_map_t map, const char *file, int line)
+{
 	vm_map_printf("locking map LK_SHARED: %p\n", map);
-	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
+	_sx_slock(&map->lock, file, line);
 }

 void
-vm_map_unlock_read(vm_map_t map)
+_vm_map_unlock_read(vm_map_t map, const char *file, int line)
 {
 	vm_map_printf("locking map LK_RELEASE: %p\n", map);
-	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
-}
-
-static __inline__ int
-_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
-	int error;
-
-	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
-	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
-	if (error == 0)
-		map->timestamp++;
-	return error;
+	_sx_sunlock(&map->lock, file, line);
 }

 int
-vm_map_lock_upgrade(vm_map_t map)
+_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
 {
-	return (_vm_map_lock_upgrade(map, curthread));
+	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
+	if (_sx_try_upgrade(&map->lock, file, line)) {
+		map->timestamp++;
+		return (0);
+	}
+	return (EWOULDBLOCK);
 }

 void
-vm_map_lock_downgrade(vm_map_t map)
+_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
 {
 	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
-	lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
-}
-
-void
-vm_map_set_recursive(vm_map_t map)
-{
-	mtx_lock((map)->lock.lk_interlock);
-	map->lock.lk_flags |= LK_CANRECURSE;
-	mtx_unlock((map)->lock.lk_interlock);
-}
-
-void
-vm_map_clear_recursive(vm_map_t map)
-{
-	mtx_lock((map)->lock.lk_interlock);
-	map->lock.lk_flags &= ~LK_CANRECURSE;
-	mtx_unlock((map)->lock.lk_interlock);
+	_sx_downgrade(&map->lock, file, line);
 }

 vm_offset_t
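
[Editor's note: the behavioral change that ripples through the rest of this
file is in the upgrade path.  lockmgr's LK_EXCLUPGRADE either succeeded or
released the lock entirely, whereas sx_try_upgrade() never sleeps and
leaves the shared lock held when it fails.  Every upgrade caller therefore
grows an explicit vm_map_unlock_read() before retrying, as the hunks below
show.  The pattern, sketched (upgrade_retry_sketch is invented):

    /* Caller-side retry pattern imposed by the try-only upgrade. */
    static void
    upgrade_retry_sketch(vm_map_t map)
    {
    retry:
        vm_map_lock_read(map);
        /* ... validate state under the shared lock ... */
        if (vm_map_lock_upgrade(map)) {
            /* EWOULDBLOCK: with sx the read lock is still held. */
            vm_map_unlock_read(map);
            goto retry;     /* state may have changed; redo the lookup */
        }
        /* Exclusive from here on. */
        vm_map_unlock(map);
    }
]
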
@@ -417,7 +405,7 @@ vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
 	map->first_free = &map->header;
 	map->hint = &map->header;
 	map->timestamp = 0;
-	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
+	sx_init(&map->lock, "thrd_sleep");
 }

 void
@@ -425,7 +413,7 @@ vm_map_destroy(map)
 	struct vm_map *map;
 {
 	GIANT_REQUIRED;
-	lockdestroy(&map->lock);
+	sx_destroy(&map->lock);
 }

 /*
@@ -1482,17 +1470,13 @@ vm_map_user_pageable(
 			eend = entry->end;

 			/* First we need to allow map modifications */
-			vm_map_set_recursive(map);
 			vm_map_lock_downgrade(map);
-			map->timestamp++;

 			rv = vm_fault_user_wire(map, entry->start, entry->end);
 			if (rv) {
+				vm_map_lock(map);
 				entry->wired_count--;
 				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
-				vm_map_clear_recursive(map);
 				vm_map_unlock(map);

 				/*
@@ -1506,8 +1490,14 @@ vm_map_user_pageable(
 				return rv;
 			}

-			vm_map_clear_recursive(map);
-			if (vm_map_lock_upgrade(map)) {
+			/*
+			 * XXX- This is only okay because we have the
+			 * Giant lock.  If the VM system were to be
+			 * reentrant, we'd know that we really can't
+			 * do this.  Still, this behavior is no worse
+			 * than the old recursion...
+			 */
+			if (vm_map_try_lock(map)) {
 				vm_map_lock(map);
 				if (vm_map_lookup_entry(map, estart, &entry)
 				    == FALSE) {
@@ -1745,13 +1735,13 @@ vm_map_pageable(
 		entry = entry->next;
 	}

-	if (vm_map_pmap(map) == kernel_pmap) {
-		vm_map_lock(map);
-	}
 	if (rv) {
-		vm_map_unlock(map);
+		if (vm_map_pmap(map) != kernel_pmap)
+			vm_map_unlock_read(map);
 		(void) vm_map_pageable(map, start, failed, TRUE);
 		return (rv);
+	} else if (vm_map_pmap(map) == kernel_pmap) {
+		vm_map_lock(map);
 	}

 	/*
 	 * An exclusive lock on the map is needed in order to call
@@ -1760,6 +1750,7 @@ vm_map_pageable(
 	 */
 	if (vm_map_pmap(map) != kernel_pmap &&
 	    vm_map_lock_upgrade(map)) {
+		vm_map_unlock_read(map);
 		vm_map_lock(map);
 		if (vm_map_lookup_entry(map, start, &start_entry) ==
 		    FALSE) {
@@ -2531,8 +2522,10 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
 	 * might have intended by limiting the stack size.
 	 */
 	if (grow_amount > stack_entry->start - end) {
-		if (vm_map_lock_upgrade(map))
+		if (vm_map_lock_upgrade(map)) {
+			vm_map_unlock_read(map);
 			goto Retry;
+		}

 		stack_entry->avail_ssize = stack_entry->start - end;
@@ -2562,8 +2555,10 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
 		    ctob(vm->vm_ssize);
 	}

-	if (vm_map_lock_upgrade(map))
+	if (vm_map_lock_upgrade(map)) {
+		vm_map_unlock_read(map);
 		goto Retry;
+	}

 	/* Get the preliminary new entry start value */
 	addr = stack_entry->start - grow_amount;
@@ -2782,8 +2777,10 @@ RetryLookup:;
 		 * -- one just moved from the map to the new
 		 * object.
 		 */
-		if (vm_map_lock_upgrade(map))
+		if (vm_map_lock_upgrade(map)) {
+			vm_map_unlock_read(map);
 			goto RetryLookup;
+		}
 		vm_object_shadow(
 		    &entry->object.vm_object,
 		    &entry->offset,
@@ -2804,8 +2801,10 @@ RetryLookup:;
 	 */
 	if (entry->object.vm_object == NULL &&
 	    !map->system_map) {
-		if (vm_map_lock_upgrade(map))
+		if (vm_map_lock_upgrade(map)) {
+			vm_map_unlock_read(map);
 			goto RetryLookup;
+		}
 		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
 		    atop(entry->end - entry->start));
 		entry->offset = 0;
@@ -3038,7 +3037,10 @@ vm_uiomove(
 		pmap_remove (map->pmap, uaddr, tend);

 		vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
-		vm_map_lock_upgrade(map);
+		if (vm_map_lock_upgrade(map)) {
+			vm_map_unlock_read(map);
+			vm_map_lock(map);
+		}

 		if (entry == &map->header) {
 			map->first_free = &map->header;


@@ -70,7 +70,8 @@
 #ifndef	_VM_MAP_
 #define	_VM_MAP_

-#include <sys/lockmgr.h>
+#include <sys/lock.h>
+#include <sys/sx.h>

 #ifdef MAP_LOCK_DIAGNOSTIC
 #include <sys/systm.h>
@@ -152,7 +153,7 @@ struct vm_map_entry {
  */
 struct vm_map {
 	struct vm_map_entry header;	/* List of entries */
-	struct lock lock;		/* Lock for map data */
+	struct sx lock;			/* Lock for map data */
 	int nentries;			/* Number of entries */
 	vm_size_t size;			/* virtual size */
 	u_char system_map;		/* Am I a system map? */
@@ -214,14 +215,23 @@ void vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior);
 } while (0)
 #endif

-void vm_map_lock(vm_map_t map);
-void vm_map_unlock(vm_map_t map);
-void vm_map_lock_read(vm_map_t map);
-void vm_map_unlock_read(vm_map_t map);
-int vm_map_lock_upgrade(vm_map_t map);
-void vm_map_lock_downgrade(vm_map_t map);
-void vm_map_set_recursive(vm_map_t map);
-void vm_map_clear_recursive(vm_map_t map);
+void _vm_map_lock(vm_map_t map, const char *file, int line);
+int _vm_map_try_lock(vm_map_t map, const char *file, int line);
+void _vm_map_unlock(vm_map_t map, const char *file, int line);
+void _vm_map_lock_read(vm_map_t map, const char *file, int line);
+void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
+int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
+void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
+#define	vm_map_lock(map)	_vm_map_lock(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_try_lock(map)	_vm_map_try_lock(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_unlock(map)	_vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_lock_read(map)	_vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_unlock_read(map)	_vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_lock_upgrade(map)	_vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
+#define	vm_map_lock_downgrade(map)	_vm_map_lock_downgrade(map, LOCK_FILE, \
+					    LOCK_LINE)

 vm_offset_t vm_map_min(vm_map_t map);
 vm_offset_t vm_map_max(vm_map_t map);
 struct pmap *vm_map_pmap(vm_map_t map);
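
[Editor's note: the underscore-plus-macro arrangement copies the convention
mtx(9) and sx(9) themselves use: the macro captures the caller's location
via LOCK_FILE/LOCK_LINE (from <sys/lock.h>; they compile to
__FILE__/__LINE__ on debugging kernels and to constants otherwise), so
witness and lock diagnostics point at the real call site rather than at
vm_map.c.  Reduced to its essentials (example_lock is hypothetical, not
part of the commit):

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/sx.h>

    void	_example_lock(struct sx *sxp, const char *file, int line);
    #define	example_lock(sxp)	_example_lock(sxp, LOCK_FILE, LOCK_LINE)

    void
    _example_lock(struct sx *sxp, const char *file, int line)
    {
        /* Forward the caller's file/line to the underscored sx entry. */
        _sx_xlock(sxp, file, line);
    }
]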


@@ -547,9 +547,8 @@ vm_pageout_map_deactivate_pages(map, desired)
 	int nothingwired;

 	GIANT_REQUIRED;
-	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curthread)) {
+	if (vm_map_try_lock(map))
 		return;
-	}

 	bigobj = NULL;
 	nothingwired = TRUE;


@@ -411,14 +411,14 @@ _zget(vm_zone_t z)
 	 * map.
 	 */
 	mtx_unlock(&z->zmtx);
-	if (lockstatus(&kernel_map->lock, NULL)) {
-		item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
+	item = (void *)kmem_alloc(kernel_map, nbytes);
+	if (item != NULL) {
+		atomic_add_int(&zone_kern_pages, z->zalloc);
+	} else {
+		item = (void *)kmem_malloc(kmem_map, nbytes,
+		    M_WAITOK);
 		if (item != NULL)
 			atomic_add_int(&zone_kmem_pages, z->zalloc);
-	} else {
-		item = (void *) kmem_alloc(kernel_map, nbytes);
-		if (item != NULL)
-			atomic_add_int(&zone_kern_pages, z->zalloc);
 	}
 	if (item != NULL) {
 		bzero(item, nbytes);
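
[Editor's note: the deleted lockstatus() call is one of the "questionable
uses" the commit message mentions: the zone allocator peeked at whether
kernel_map's lock happened to be held in order to pick an allocation path,
and sx locks deliberately offer no such ownership query.  The replacement
is a fixed preference order with a fallback, distilled below
(zone_page_alloc_sketch is hypothetical; kmem_alloc and kmem_malloc are
the kernel allocators used in the hunk above):

    /* Hypothetical distillation of the new policy in _zget(). */
    static void *
    zone_page_alloc_sketch(vm_size_t nbytes)
    {
        void *item;

        item = (void *)kmem_alloc(kernel_map, nbytes);  /* try kernel_map */
        if (item == NULL)       /* fall back to kmem_map */
            item = (void *)kmem_malloc(kmem_map, nbytes, M_WAITOK);
        return (item);
    }
]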