In cases where a file was resident in memory, mmap(..., PROT_NONE, ...) would actually map the file with read access enabled. According to http://www.opengroup.org/onlinepubs/007904975/functions/mmap.html this is an error. Similarly, an madvise(..., MADV_WILLNEED) would enable read access on a virtual address range that was PROT_NONE.

The solution implemented herein is (1) to pass a vm_prot_t to vm_map_pmap_enter() describing the allowed access and (2) to make vm_map_pmap_enter() responsible for understanding the limitations of pmap_enter_quick().

Submitted by: "Mark W. Krentel" <krentel@dreamscape.com>
PR: kern/64573
This commit is contained in:
parent
50069af197
commit
4da4d293df
@ -877,7 +877,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
|
if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
|
||||||
vm_map_pmap_enter(map, start,
|
vm_map_pmap_enter(map, start, prot,
|
||||||
object, OFF_TO_IDX(offset), end - start,
|
object, OFF_TO_IDX(offset), end - start,
|
||||||
cow & MAP_PREFAULT_PARTIAL);
|
cow & MAP_PREFAULT_PARTIAL);
|
||||||
}
|
}
|
||||||
@ -1243,19 +1243,19 @@ vm_map_submap(
|
|||||||
/*
|
/*
|
||||||
* vm_map_pmap_enter:
|
* vm_map_pmap_enter:
|
||||||
*
|
*
|
||||||
* Preload the mappings for the given object into the specified
|
* Preload read-only mappings for the given object into the specified
|
||||||
* map. This eliminates the soft faults on process startup and
|
* map. This eliminates the soft faults on process startup and
|
||||||
* immediately after an mmap(2).
|
* immediately after an mmap(2).
|
||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
|
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
|
||||||
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
|
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
|
||||||
{
|
{
|
||||||
vm_offset_t tmpidx;
|
vm_offset_t tmpidx;
|
||||||
int psize;
|
int psize;
|
||||||
vm_page_t p, mpte;
|
vm_page_t p, mpte;
|
||||||
|
|
||||||
if (object == NULL)
|
if ((prot & VM_PROT_READ) == 0 || object == NULL)
|
||||||
return;
|
return;
|
||||||
mtx_lock(&Giant);
|
mtx_lock(&Giant);
|
||||||
VM_OBJECT_LOCK(object);
|
VM_OBJECT_LOCK(object);
|
||||||
@ -1547,6 +1547,7 @@ vm_map_madvise(
|
|||||||
if (behav == MADV_WILLNEED) {
|
if (behav == MADV_WILLNEED) {
|
||||||
vm_map_pmap_enter(map,
|
vm_map_pmap_enter(map,
|
||||||
useStart,
|
useStart,
|
||||||
|
current->protection,
|
||||||
current->object.vm_object,
|
current->object.vm_object,
|
||||||
pindex,
|
pindex,
|
||||||
(count << PAGE_SHIFT),
|
(count << PAGE_SHIFT),
|
||||||
|
@ -333,7 +333,7 @@ int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_obje
|
|||||||
vm_pindex_t *, vm_prot_t *, boolean_t *);
|
vm_pindex_t *, vm_prot_t *, boolean_t *);
|
||||||
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
|
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
|
||||||
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
|
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
|
||||||
void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
|
void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
|
||||||
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
|
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
|
||||||
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
|
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
|
||||||
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
|
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
|
||||||
|
Loading…
x
Reference in New Issue
Block a user