Simplify both the invocation and the implementation of vm_fault() for wiring pages.

(Note: Claims made in the comments about the handling of breakpoints in
wired pages have been false for roughly a decade.  This and another bug
involving breakpoints will be fixed in coming changes.)

Reviewed by:	kib
This commit is contained in:
Alan Cox 2009-11-18 18:05:54 +00:00
parent 12036bce1d
commit 2db65ab46e
4 changed files with 16 additions and 37 deletions

View File

@ -58,7 +58,7 @@ int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t, void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
vm_ooffset_t *); vm_ooffset_t *);
void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t); void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t); int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int); int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
void vm_waitproc(struct proc *); void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t); int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);

View File

@ -185,7 +185,7 @@ unlock_and_deallocate(struct faultstate *fs)
* default objects are zero-fill, there is no real pager. * default objects are zero-fill, there is no real pager.
*/ */
#define TRYPAGER (fs.object->type != OBJT_DEFAULT && \ #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \
(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))
/* /*
* vm_fault: * vm_fault:
@ -238,31 +238,15 @@ RetryFault:;
result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry, result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
&fs.first_object, &fs.first_pindex, &prot, &wired); &fs.first_object, &fs.first_pindex, &prot, &wired);
if (result != KERN_SUCCESS) { if (result != KERN_SUCCESS) {
if (result != KERN_PROTECTION_FAILURE || if (growstack && result == KERN_INVALID_ADDRESS &&
(fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) { map != kernel_map) {
if (growstack && result == KERN_INVALID_ADDRESS && result = vm_map_growstack(curproc, vaddr);
map != kernel_map && curproc != NULL) { if (result != KERN_SUCCESS)
result = vm_map_growstack(curproc, vaddr); return (KERN_FAILURE);
if (result != KERN_SUCCESS) growstack = FALSE;
return (KERN_FAILURE); goto RetryFault;
growstack = FALSE;
goto RetryFault;
}
return (result);
} }
return (result);
/*
* If we are user-wiring a r/w segment, and it is COW, then
* we need to do the COW operation. Note that we don't COW
* currently RO sections now, because it is NOT desirable
* to COW .text. We simply keep .text from ever being COW'ed
* and take the heat that one cannot debug wired .text sections.
*/
result = vm_map_lookup(&fs.map, vaddr,
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
&fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
if (result != KERN_SUCCESS)
return (result);
} }
map_generation = fs.map->timestamp; map_generation = fs.map->timestamp;
@ -919,9 +903,8 @@ vnode_locked:
* won't find it (yet). * won't find it (yet).
*/ */
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired); pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry); vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
}
VM_OBJECT_LOCK(fs.object); VM_OBJECT_LOCK(fs.object);
vm_page_lock_queues(); vm_page_lock_queues();
vm_page_flag_set(fs.m, PG_REFERENCED); vm_page_flag_set(fs.m, PG_REFERENCED);
@ -930,7 +913,7 @@ vnode_locked:
* If the page is not wired down, then put it where the pageout daemon * If the page is not wired down, then put it where the pageout daemon
* can find it. * can find it.
*/ */
if (fault_flags & VM_FAULT_WIRE_MASK) { if (fault_flags & VM_FAULT_CHANGE_WIRING) {
if (wired) if (wired)
vm_page_wire(fs.m); vm_page_wire(fs.m);
else else
@ -1048,7 +1031,7 @@ vm_fault_quick(caddr_t v, int prot)
*/ */
int int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
boolean_t user_wire, boolean_t fictitious) boolean_t fictitious)
{ {
vm_offset_t va; vm_offset_t va;
int rv; int rv;
@ -1059,9 +1042,7 @@ vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
* read-only sections. * read-only sections.
*/ */
for (va = start; va < end; va += PAGE_SIZE) { for (va = start; va < end; va += PAGE_SIZE) {
rv = vm_fault(map, va, rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
if (rv) { if (rv) {
if (va != start) if (va != start)
vm_fault_unwire(map, start, va, fictitious); vm_fault_unwire(map, start, va, fictitious);

View File

@ -2381,7 +2381,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
*/ */
vm_map_unlock(map); vm_map_unlock(map);
rv = vm_fault_wire(map, saved_start, saved_end, rv = vm_fault_wire(map, saved_start, saved_end,
user_wire, fictitious); fictitious);
vm_map_lock(map); vm_map_lock(map);
if (last_timestamp + 1 != map->timestamp) { if (last_timestamp + 1 != map->timestamp) {
/* /*
@ -3563,7 +3563,7 @@ RetryLookup:;
else else
prot = entry->protection; prot = entry->protection;
fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
if ((fault_type & prot) != fault_type) { if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
vm_map_unlock_read(map); vm_map_unlock_read(map);
return (KERN_PROTECTION_FAILURE); return (KERN_PROTECTION_FAILURE);
} }

View File

@ -319,8 +319,6 @@ long vmspace_wired_count(struct vmspace *vmspace);
*/ */
#define VM_FAULT_NORMAL 0 /* Nothing special */ #define VM_FAULT_NORMAL 0 /* Nothing special */
#define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */ #define VM_FAULT_CHANGE_WIRING 1 /* Change the wiring as appropriate */
#define VM_FAULT_USER_WIRE 2 /* Likewise, but for user purposes */
#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
#define VM_FAULT_DIRTY 8 /* Dirty the page */ #define VM_FAULT_DIRTY 8 /* Dirty the page */
/* /*