Back out the modification of vm_map locks from lockmgr to sx locks. The
best path forward now is likely to change the lockmgr locks to simple
sleep mutexes, then see if any extra contention it generates is greater
than the removed overhead of managing local locking state information,
cost of extra calls into lockmgr, etc.

Additionally, making the vm_map lock a mutex and respecting it properly
will put us much closer to not needing Giant magic in vm.
This commit is contained in:
Brian Feldman 2002-03-18 15:08:09 +00:00
parent 24fb82a7ad
commit 25adb370be
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=92588
6 changed files with 89 additions and 104 deletions

View File

@@ -111,11 +111,7 @@ struct faultstate {
vm_pindex_t first_pindex;
vm_map_t map;
vm_map_entry_t entry;
enum {
LSV_FALSE, /* the lookup's lock has been dropped */
LSV_TRUE, /* the lookup's lock is still valid */
LSV_UPGRADED /* the lookup's lock is now exclusive */
} lookup_still_valid;
int lookup_still_valid;
struct vnode *vp;
};
@@ -130,11 +126,9 @@ release_page(struct faultstate *fs)
static __inline void
unlock_map(struct faultstate *fs)
{
if (fs->lookup_still_valid != LSV_FALSE) {
if (fs->lookup_still_valid == LSV_UPGRADED)
vm_map_lock_downgrade(fs->map);
if (fs->lookup_still_valid) {
vm_map_lookup_done(fs->map, fs->entry);
fs->lookup_still_valid = LSV_FALSE;
fs->lookup_still_valid = FALSE;
}
}
@@ -288,7 +282,7 @@ RetryFault:;
fs.first_pindex, fs.first_pindex + 1);
}
fs.lookup_still_valid = LSV_TRUE;
fs.lookup_still_valid = TRUE;
if (wired)
fault_type = prot;
@@ -662,11 +656,11 @@ RetryFault:;
/*
* grab the lock if we need to
*/
(fs.lookup_still_valid != LSV_FALSE ||
vm_map_try_lock(fs.map) == 0)
(fs.lookup_still_valid ||
lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curthread) == 0)
) {
if (fs.lookup_still_valid == LSV_FALSE)
fs.lookup_still_valid = LSV_UPGRADED;
fs.lookup_still_valid = 1;
/*
* get rid of the unnecessary page
*/
@@ -721,7 +715,7 @@ RetryFault:;
* We must verify that the maps have not changed since our last
* lookup.
*/
if (fs.lookup_still_valid == LSV_FALSE &&
if (!fs.lookup_still_valid &&
(fs.map->timestamp != map_generation)) {
vm_object_t retry_object;
vm_pindex_t retry_pindex;
@@ -770,7 +764,7 @@ RetryFault:;
unlock_and_deallocate(&fs);
return (result);
}
fs.lookup_still_valid = LSV_TRUE;
fs.lookup_still_valid = TRUE;
if ((retry_object != fs.first_object) ||
(retry_pindex != fs.first_pindex)) {

View File

@@ -560,7 +560,9 @@ int action;
* data structures there is a
* possible deadlock.
*/
if (vm_map_try_lock(&vm->vm_map)) {
if (lockmgr(&vm->vm_map.lock,
LK_EXCLUSIVE | LK_NOWAIT,
NULL, curthread)) {
vmspace_free(vm);
PROC_UNLOCK(p);
goto nextproc;

View File

@@ -277,61 +277,73 @@ vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior)
}
void
_vm_map_lock(vm_map_t map, const char *file, int line)
vm_map_lock(vm_map_t map)
{
vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
_sx_xlock(&map->lock, file, line);
if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
panic("vm_map_lock: failed to get lock");
map->timestamp++;
}
int
_vm_map_try_lock(vm_map_t map, const char *file, int line)
{
vm_map_printf("trying to lock map LK_EXCLUSIVE: %p\n", map);
if (_sx_try_xlock(&map->lock, file, line)) {
map->timestamp++;
return (0);
}
return (EWOULDBLOCK);
}
void
_vm_map_unlock(vm_map_t map, const char *file, int line)
vm_map_unlock(vm_map_t map)
{
vm_map_printf("locking map LK_RELEASE: %p\n", map);
_sx_xunlock(&map->lock, file, line);
lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
}
void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
vm_map_lock_read(vm_map_t map)
{
vm_map_printf("locking map LK_SHARED: %p\n", map);
_sx_slock(&map->lock, file, line);
lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
}
void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
vm_map_unlock_read(vm_map_t map)
{
vm_map_printf("locking map LK_RELEASE: %p\n", map);
_sx_sunlock(&map->lock, file, line);
lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
}
static __inline__ int
_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
int error;
vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
if (error == 0)
map->timestamp++;
return error;
}
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
vm_map_lock_upgrade(vm_map_t map)
{
vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
if (_sx_try_upgrade(&map->lock, file, line)) {
map->timestamp++;
return (0);
}
return (EWOULDBLOCK);
return (_vm_map_lock_upgrade(map, curthread));
}
void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
vm_map_lock_downgrade(vm_map_t map)
{
vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
_sx_downgrade(&map->lock, file, line);
lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
}
void
vm_map_set_recursive(vm_map_t map)
{
mtx_lock((map)->lock.lk_interlock);
map->lock.lk_flags |= LK_CANRECURSE;
mtx_unlock((map)->lock.lk_interlock);
}
void
vm_map_clear_recursive(vm_map_t map)
{
mtx_lock((map)->lock.lk_interlock);
map->lock.lk_flags &= ~LK_CANRECURSE;
mtx_unlock((map)->lock.lk_interlock);
}
vm_offset_t
@@ -405,7 +417,7 @@ vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
map->first_free = &map->header;
map->hint = &map->header;
map->timestamp = 0;
sx_init(&map->lock, "thrd_sleep");
lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
void
@@ -413,7 +425,7 @@ vm_map_destroy(map)
struct vm_map *map;
{
GIANT_REQUIRED;
sx_destroy(&map->lock);
lockdestroy(&map->lock);
}
/*
@@ -1470,13 +1482,17 @@ vm_map_user_pageable(
eend = entry->end;
/* First we need to allow map modifications */
vm_map_set_recursive(map);
vm_map_lock_downgrade(map);
map->timestamp++;
rv = vm_fault_user_wire(map, entry->start, entry->end);
if (rv) {
vm_map_lock(map);
entry->wired_count--;
entry->eflags &= ~MAP_ENTRY_USER_WIRED;
vm_map_clear_recursive(map);
vm_map_unlock(map);
/*
@@ -1490,14 +1506,8 @@ vm_map_user_pageable(
return rv;
}
/*
* XXX- This is only okay because we have the
* Giant lock. If the VM system were to be
* reentrant, we'd know that we really can't
* do this. Still, this behavior is no worse
* than the old recursion...
*/
if (vm_map_try_lock(map)) {
vm_map_clear_recursive(map);
if (vm_map_lock_upgrade(map)) {
vm_map_lock(map);
if (vm_map_lookup_entry(map, estart, &entry)
== FALSE) {
@@ -1735,13 +1745,13 @@ vm_map_pageable(
entry = entry->next;
}
if (vm_map_pmap(map) == kernel_pmap) {
vm_map_lock(map);
}
if (rv) {
if (vm_map_pmap(map) != kernel_pmap)
vm_map_unlock_read(map);
vm_map_unlock(map);
(void) vm_map_pageable(map, start, failed, TRUE);
return (rv);
} else if (vm_map_pmap(map) == kernel_pmap) {
vm_map_lock(map);
}
/*
* An exclusive lock on the map is needed in order to call
@@ -1750,7 +1760,6 @@ vm_map_pageable(
*/
if (vm_map_pmap(map) != kernel_pmap &&
vm_map_lock_upgrade(map)) {
vm_map_unlock_read(map);
vm_map_lock(map);
if (vm_map_lookup_entry(map, start, &start_entry) ==
FALSE) {
@@ -2528,10 +2537,8 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
* might have intended by limiting the stack size.
*/
if (grow_amount > stack_entry->start - end) {
if (vm_map_lock_upgrade(map)) {
vm_map_unlock_read(map);
if (vm_map_lock_upgrade(map))
goto Retry;
}
stack_entry->avail_ssize = stack_entry->start - end;
@@ -2561,10 +2568,8 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
ctob(vm->vm_ssize);
}
if (vm_map_lock_upgrade(map)) {
vm_map_unlock_read(map);
if (vm_map_lock_upgrade(map))
goto Retry;
}
/* Get the preliminary new entry start value */
addr = stack_entry->start - grow_amount;
@@ -2783,10 +2788,8 @@ RetryLookup:;
* -- one just moved from the map to the new
* object.
*/
if (vm_map_lock_upgrade(map)) {
vm_map_unlock_read(map);
if (vm_map_lock_upgrade(map))
goto RetryLookup;
}
vm_object_shadow(
&entry->object.vm_object,
&entry->offset,
@@ -2807,10 +2810,8 @@ RetryLookup:;
*/
if (entry->object.vm_object == NULL &&
!map->system_map) {
if (vm_map_lock_upgrade(map)) {
vm_map_unlock_read(map);
if (vm_map_lock_upgrade(map))
goto RetryLookup;
}
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
atop(entry->end - entry->start));
entry->offset = 0;
@@ -3043,10 +3044,7 @@ vm_uiomove(
pmap_remove (map->pmap, uaddr, tend);
vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
if (vm_map_lock_upgrade(map)) {
vm_map_unlock_read(map);
vm_map_lock(map);
}
vm_map_lock_upgrade(map);
if (entry == &map->header) {
map->first_free = &map->header;

View File

@@ -70,8 +70,7 @@
#ifndef _VM_MAP_
#define _VM_MAP_
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/lockmgr.h>
#ifdef MAP_LOCK_DIAGNOSTIC
#include <sys/systm.h>
@@ -153,7 +152,7 @@ struct vm_map_entry {
*/
struct vm_map {
struct vm_map_entry header; /* List of entries */
struct sx lock; /* Lock for map data */
struct lock lock; /* Lock for map data */
int nentries; /* Number of entries */
vm_size_t size; /* virtual size */
u_char system_map; /* Am I a system map? */
@@ -215,23 +214,14 @@ void vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior);
} while (0)
#endif
void _vm_map_lock(vm_map_t map, const char *file, int line);
int _vm_map_try_lock(vm_map_t map, const char *file, int line);
void _vm_map_unlock(vm_map_t map, const char *file, int line);
void _vm_map_lock_read(vm_map_t map, const char *file, int line);
void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
#define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_try_lock(map) _vm_map_try_lock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
#define vm_map_lock_downgrade(map) _vm_map_lock_downgrade(map, LOCK_FILE, \
LOCK_LINE)
void vm_map_lock(vm_map_t map);
void vm_map_unlock(vm_map_t map);
void vm_map_lock_read(vm_map_t map);
void vm_map_unlock_read(vm_map_t map);
int vm_map_lock_upgrade(vm_map_t map);
void vm_map_lock_downgrade(vm_map_t map);
void vm_map_set_recursive(vm_map_t map);
void vm_map_clear_recursive(vm_map_t map);
vm_offset_t vm_map_min(vm_map_t map);
vm_offset_t vm_map_max(vm_map_t map);
struct pmap *vm_map_pmap(vm_map_t map);

View File

@@ -547,8 +547,9 @@ vm_pageout_map_deactivate_pages(map, desired)
int nothingwired;
GIANT_REQUIRED;
if (vm_map_try_lock(map))
if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curthread)) {
return;
}
bigobj = NULL;
nothingwired = TRUE;

View File

@@ -411,14 +411,14 @@ _zget(vm_zone_t z)
* map.
*/
mtx_unlock(&z->zmtx);
item = (void *)kmem_alloc(kernel_map, nbytes);
if (item != NULL) {
atomic_add_int(&zone_kern_pages, z->zalloc);
} else {
item = (void *)kmem_malloc(kmem_map, nbytes,
M_WAITOK);
if (lockstatus(&kernel_map->lock, NULL)) {
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
if (item != NULL)
atomic_add_int(&zone_kmem_pages, z->zalloc);
} else {
item = (void *) kmem_alloc(kernel_map, nbytes);
if (item != NULL)
atomic_add_int(&zone_kern_pages, z->zalloc);
}
if (item != NULL) {
bzero(item, nbytes);