Two small changes to vm_map_pmap_enter():
1) Eliminate an unnecessary check for fictitious pages. Specifically, only device-backed objects contain fictitious pages, and the object here is not device-backed. 2) Change the types of "psize" and "tmpidx" to vm_pindex_t in order to prevent possible wraparound with extremely large maps and objects, respectively. Observed by: tegge (last summer)
This commit is contained in:
parent
7f61ad1c2b
commit
8fece8c367
@ -1437,9 +1437,9 @@ void
|
|||||||
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
|
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
|
||||||
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
|
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
|
||||||
{
|
{
|
||||||
vm_offset_t start, tmpidx;
|
vm_offset_t start;
|
||||||
int psize;
|
|
||||||
vm_page_t p, p_start;
|
vm_page_t p, p_start;
|
||||||
|
vm_pindex_t psize, tmpidx;
|
||||||
boolean_t are_queues_locked;
|
boolean_t are_queues_locked;
|
||||||
|
|
||||||
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
|
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
|
||||||
@ -1493,8 +1493,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
|
if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
|
||||||
(p->busy == 0) &&
|
(p->busy == 0)) {
|
||||||
(p->flags & PG_FICTITIOUS) == 0) {
|
|
||||||
if (p_start == NULL) {
|
if (p_start == NULL) {
|
||||||
start = addr + ptoa(tmpidx);
|
start = addr + ptoa(tmpidx);
|
||||||
p_start = p;
|
p_start = p;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user