o Acquire and release Giant around pmap operations in vm_fault_unwire()
   and vm_map_delete().  Assert GIANT_REQUIRED in vm_map_delete()
   only if operating on the kernel_object or the kmem_object.
 o Remove GIANT_REQUIRED from vm_map_remove().
 o Remove the acquisition and release of Giant from munmap().
This commit is contained in:
Alan Cox 2002-05-26 04:54:56 +00:00
parent 24aaa74c83
commit 4b9fdc2bce
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=97294
3 changed files with 5 additions and 7 deletions

View File

@@ -979,12 +979,12 @@ vm_fault_unwire(map, start, end)
vm_map_t map;
vm_offset_t start, end;
{
vm_offset_t va, pa;
pmap_t pmap;
pmap = vm_map_pmap(map);
mtx_lock(&Giant);
/*
* Since the pages are wired down, we must be able to get their
* mappings from the physical map system.
@@ -1003,6 +1003,7 @@ vm_fault_unwire(map, start, end)
*/
pmap_pageable(pmap, start, end, TRUE);
mtx_unlock(&Giant);
}
/*

View File

@@ -2041,8 +2041,6 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
vm_map_entry_t entry;
vm_map_entry_t first_entry;
GIANT_REQUIRED;
/*
* Find the start of the region, and clip it
*/
@@ -2091,8 +2089,10 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
offidxend = offidxstart + count;
if ((object == kernel_object) || (object == kmem_object)) {
GIANT_REQUIRED;
vm_object_page_remove(object, offidxstart, offidxend, FALSE);
} else {
mtx_lock(&Giant);
pmap_remove(map->pmap, s, e);
if (object != NULL &&
object->ref_count != 1 &&
@@ -2108,6 +2108,7 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
object->size = offidxstart;
}
}
mtx_unlock(&Giant);
}
/*
@@ -2133,8 +2134,6 @@ vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
int result, s = 0;
GIANT_REQUIRED;
if (map == kmem_map)
s = splvm();

View File

@@ -615,9 +615,7 @@ munmap(td, uap)
return (EINVAL);
/* returns nothing but KERN_SUCCESS anyway */
mtx_lock(&Giant);
(void) vm_map_remove(map, addr, addr + size);
mtx_unlock(&Giant);
return (0);
}