o Merge vm_fault_wire() and vm_fault_user_wire() by adding a new parameter,
   user_wire.
Alan Cox 2002-07-24 19:47:56 +00:00
parent 1225379557
commit ef594d3186
3 changed files with 11 additions and 56 deletions
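The whole change is one pattern: two nearly identical functions collapse into a single entry point that dispatches on a boolean_t flag. A minimal user-space sketch of that pattern, using stand-in constants and names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in protection bits and fault flags; values are illustrative. */
#define PROT_RD	0x1
#define PROT_WR	0x2
enum fault_flag { CHANGE_WIRING, USER_WIRE };

/*
 * Merged entry point: user_wire selects both the protection requested
 * from the fault handler and the fault flag.  User wiring asks only
 * for read access, so wiring a read-only mapping never forces a
 * write fault.
 */
static int
wire_range(bool user_wire)
{
	int prot = user_wire ? PROT_RD : (PROT_RD | PROT_WR);
	enum fault_flag flag = user_wire ? USER_WIRE : CHANGE_WIRING;

	printf("prot=%#x flag=%d\n", prot, flag);
	return (0);
}

int
main(void)
{
	wire_range(false);	/* the old vm_fault_wire() behavior */
	wire_range(true);	/* the old vm_fault_user_wire() behavior */
	return (0);
}

Callers that previously chose between the two functions now simply forward their boolean, as the vm_map.c hunk at the end of this diff shows.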

sys/vm/vm_extern.h
@@ -76,8 +76,7 @@ int useracc(caddr_t, int, int);
 int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
 void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t);
-int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t);
-int vm_fault_user_wire(vm_map_t, vm_offset_t, vm_offset_t);
+int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
 void vm_forkproc(struct thread *, struct proc *, struct thread *, int);
 void vm_waitproc(struct proc *);
 int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t);

sys/vm/vm_fault.c
@@ -919,30 +919,29 @@ readrest:
  *	Wire down a range of virtual addresses in a map.
  */
 int
-vm_fault_wire(map, start, end)
+vm_fault_wire(map, start, end, user_wire)
 	vm_map_t map;
 	vm_offset_t start, end;
+	boolean_t user_wire;
 {
 	vm_offset_t va;
-	pmap_t pmap;
 	int rv;
-
-	pmap = vm_map_pmap(map);
-
 	/*
 	 * Inform the physical mapping system that the range of addresses may
 	 * not fault, so that page tables and such can be locked down as well.
 	 */
-	pmap_pageable(pmap, start, end, FALSE);
+	pmap_pageable(map->pmap, start, end, FALSE);
 
 	/*
 	 * We simulate a fault to get the page and enter it in the physical
-	 * map.
+	 * map.  For user wiring, we only ask for read access on currently
+	 * read-only sections.
 	 */
 	for (va = start; va < end; va += PAGE_SIZE) {
-		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
-		    VM_FAULT_CHANGE_WIRING);
+		rv = vm_fault(map, va,
+		    user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
+		    user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
 		if (rv) {
 			if (va != start)
 				vm_fault_unwire(map, start, va);
@@ -952,46 +951,6 @@ vm_fault_wire(map, start, end)
 	return (KERN_SUCCESS);
 }
 
-/*
- *	vm_fault_user_wire:
- *
- *	Wire down a range of virtual addresses in a map.  This
- *	is for user mode though, so we only ask for read access
- *	on currently read only sections.
- */
-int
-vm_fault_user_wire(map, start, end)
-	vm_map_t map;
-	vm_offset_t start, end;
-{
-	vm_offset_t va;
-	pmap_t pmap;
-	int rv;
-
-	pmap = vm_map_pmap(map);
-
-	/*
-	 * Inform the physical mapping system that the range of addresses may
-	 * not fault, so that page tables and such can be locked down as well.
-	 */
-	pmap_pageable(pmap, start, end, FALSE);
-
-	/*
-	 * We simulate a fault to get the page and enter it in the physical
-	 * map.
-	 */
-	for (va = start; va < end; va += PAGE_SIZE) {
-		rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
-		if (rv) {
-			if (va != start)
-				vm_fault_unwire(map, start, va);
-			return (rv);
-		}
-	}
-	return (KERN_SUCCESS);
-}
-
 /*
  *	vm_fault_unwire:
  *
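Both the merged function and the one deleted here share the same failure protocol: wire page by page, and on the first failure unwire only the pages already wired before returning the error. A self-contained sketch of that rollback idiom, with hypothetical stand-ins for the kernel primitives:

#include <stdio.h>

#define PAGE_SIZE	4096UL

/* Hypothetical stand-in for a per-page wire that can fail. */
static int
wire_one(unsigned long va)
{
	return (va >= 3 * PAGE_SIZE ? -1 : 0);	/* fail partway through */
}

/* Hypothetical stand-in for vm_fault_unwire(). */
static void
unwire_range(unsigned long start, unsigned long end)
{
	printf("rolled back [%#lx, %#lx)\n", start, end);
}

/*
 * Mirrors the loop in vm_fault_wire(): on failure, [start, va) is the
 * exact set of pages already wired, so that is what gets undone.
 */
static int
wire_range(unsigned long start, unsigned long end)
{
	unsigned long va;
	int rv;

	for (va = start; va < end; va += PAGE_SIZE) {
		rv = wire_one(va);
		if (rv) {
			if (va != start)
				unwire_range(start, va);
			return (rv);
		}
	}
	return (0);
}

int
main(void)
{
	printf("wire_range returned %d\n", wire_range(0, 8 * PAGE_SIZE));
	return (0);
}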

sys/vm/vm_map.c
@@ -1707,11 +1707,8 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 		 * mark.
 		 */
 		vm_map_unlock(map);
-		if (user_wire)
-			rv = vm_fault_user_wire(map, saved_start,
-			    saved_end);
-		else
-			rv = vm_fault_wire(map, saved_start, saved_end);
+		rv = vm_fault_wire(map, saved_start, saved_end,
+		    user_wire);
 		vm_map_lock(map);
 		if (last_timestamp + 1 != map->timestamp) {
 			/*
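The caller's shape explains the surrounding context lines: vm_fault_wire() can sleep, so vm_map_wire() drops the map lock around it and afterwards compares a saved timestamp against map->timestamp to detect concurrent modification. A hedged sketch of that drop-and-revalidate idiom, assuming (as in vm_map) that every write-lock acquisition bumps the version counter:

#include <pthread.h>
#include <stdio.h>

/* Stand-in map: a lock plus a version counter. */
struct map {
	pthread_mutex_t mtx;
	unsigned timestamp;
};

static void
map_lock(struct map *m)
{
	pthread_mutex_lock(&m->mtx);
	m->timestamp++;		/* every write lock bumps the version */
}

static void
map_unlock(struct map *m)
{
	pthread_mutex_unlock(&m->mtx);
}

static int
slow_wire(void)
{
	return (0);	/* stands in for vm_fault_wire(), which may sleep */
}

static int
wire_locked_range(struct map *m)
{
	unsigned last_timestamp;
	int rv;

	map_lock(m);
	last_timestamp = m->timestamp;
	map_unlock(m);
	rv = slow_wire();	/* runs without the lock held */
	map_lock(m);
	/* Unchanged map: only our own relock bumped the counter. */
	if (last_timestamp + 1 != m->timestamp)
		printf("map changed while unlocked; redo the lookup\n");
	map_unlock(m);
	return (rv);
}

int
main(void)
{
	struct map m = { PTHREAD_MUTEX_INITIALIZER, 0 };
	return (wire_locked_range(&m));
}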