Do not pretend that vm_fault(9) supports unwiring the address.  Rename
the VM_FAULT_CHANGE_WIRING flag to VM_FAULT_WIRE.  Assert that the flag
is only passed when faulting on a wired map entry.  Remove the
vm_page_unwire() call, which should never be reachable.

Since the VM_FAULT_WIRE flag implies a wired map entry, the TRYPAGER()
macro reduces to testing whether fs.object has a default pager.  Inline
the check.

Suggested and reviewed by:	alc
Tested by:	pho (previous version)
MFC after:	1 week
commit 6a875bf929
parent 2736982e9b
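Before the diff itself, a minimal userspace sketch of the reasoning behind the TRYPAGER() simplification may help: once VM_FAULT_WIRE is only passed for wired map entries, the wiring clause of the old macro is always true and only the default-pager test remains.  This is not the kernel code; the helpers trypager_old()/trypager_new() and the reduced obj_type enum are hypothetical stand-ins, while the flag values mirror vm_map.h and the conditions mirror the hunks below.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VM_FAULT_NORMAL	0	/* Nothing special */
#define VM_FAULT_WIRE	1	/* Wire the mapped page */
#define VM_FAULT_DIRTY	2	/* Dirty the page; use w/VM_PROT_COPY */

enum obj_type { OBJT_DEFAULT, OBJT_VNODE };	/* reduced model of object types */

/* Old TRYPAGER: object has a real pager AND (not a wiring fault OR entry is wired). */
static bool
trypager_old(enum obj_type type, int fault_flags, bool wired)
{
	return (type != OBJT_DEFAULT &&
	    ((fault_flags & VM_FAULT_WIRE) == 0 || wired));
}

/* Inlined check after the change: only the default-pager test remains. */
static bool
trypager_new(enum obj_type type)
{
	return (type != OBJT_DEFAULT);
}

int
main(void)
{
	int fault_flags = VM_FAULT_WIRE;	/* as passed by the vm_map_wire() call site below */
	bool wired = true;			/* ...which only faults on wired entries */

	/* The contract the commit asserts: VM_FAULT_WIRE implies a wired entry. */
	assert((fault_flags & VM_FAULT_WIRE) == 0 || wired);

	/* Given that implication, the old and new checks always agree. */
	printf("old=%d new=%d\n",
	    trypager_old(OBJT_VNODE, fault_flags, wired),
	    trypager_new(OBJT_VNODE));
	return (0);
}

The assert() models the KASSERTs added in vm_fault(); the old vm_page_unwire() branch has no counterpart because the flag can no longer request unwiring.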
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -193,7 +193,7 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
 	VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
-	    (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) ||
+	    (fault_flags & VM_FAULT_WIRE) == 0) ||
 	    (fault_flags & VM_FAULT_DIRTY) != 0;
 
 	if (set_wd)
@@ -243,15 +243,6 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
 			vm_pager_page_unswapped(m);
 	}
 
-/*
- * TRYPAGER - used by vm_fault to calculate whether the pager for the
- * current object *might* contain the page.
- *
- * default objects are zero-fill, there is no real pager.
- */
-#define TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
-			((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))
-
 /*
  * vm_fault:
  *
@@ -361,9 +352,12 @@ RetryFault:;
 
 	if (wired)
 		fault_type = prot | (fault_type & VM_PROT_COPY);
+	else
+		KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
+		    ("!wired && VM_FAULT_WIRE"));
 
 	if (fs.vp == NULL /* avoid locked vnode leak */ &&
-	    (fault_flags & (VM_FAULT_CHANGE_WIRING | VM_FAULT_DIRTY)) == 0 &&
+	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 &&
 	    /* avoid calling vm_object_set_writeable_dirty() */
 	    ((prot & VM_PROT_WRITE) == 0 ||
 	    (fs.first_object->type != OBJT_VNODE &&
@@ -509,10 +503,12 @@ fast_failed:
 		}
 
 		/*
-		 * Page is not resident, If this is the search termination
+		 * Page is not resident.  If this is the search termination
 		 * or the pager might contain the page, allocate a new page.
+		 * Default objects are zero-fill, there is no real pager.
 		 */
-		if (TRYPAGER || fs.object == fs.first_object) {
+		if (fs.object->type != OBJT_DEFAULT ||
+		    fs.object == fs.first_object) {
 			if (fs.pindex >= fs.object->size) {
 				unlock_and_deallocate(&fs);
 				return (KERN_PROTECTION_FAILURE);
@@ -556,9 +552,10 @@ readrest:
 		 *
 		 * Attempt to fault-in the page if there is a chance that the
 		 * pager has it, and potentially fault in additional pages
-		 * at the same time.
+		 * at the same time.  For default objects simply provide
+		 * zero-filled pages.
 		 */
-		if (TRYPAGER) {
+		if (fs.object->type != OBJT_DEFAULT) {
 			int rv;
 			u_char behavior = vm_map_entry_behavior(fs.entry);
 
@@ -873,7 +870,7 @@ vnode_locked:
 				pmap_copy_page(fs.m, fs.first_m);
 				fs.first_m->valid = VM_PAGE_BITS_ALL;
 				if (wired && (fault_flags &
-				    VM_FAULT_CHANGE_WIRING) == 0) {
+				    VM_FAULT_WIRE) == 0) {
 					vm_page_lock(fs.first_m);
 					vm_page_wire(fs.first_m);
 					vm_page_unlock(fs.first_m);
@@ -994,7 +991,7 @@ vnode_locked:
 	 */
 	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
 	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
-	if (faultcount != 1 && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0 &&
+	if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
 	    wired == 0)
 		vm_fault_prefault(&fs, vaddr, faultcount, reqpage);
 	VM_OBJECT_WLOCK(fs.object);
@@ -1004,11 +1001,9 @@ vnode_locked:
 	 * If the page is not wired down, then put it where the pageout daemon
 	 * can find it.
 	 */
-	if (fault_flags & VM_FAULT_CHANGE_WIRING) {
-		if (wired)
-			vm_page_wire(fs.m);
-		else
-			vm_page_unwire(fs.m, PQ_ACTIVE);
+	if ((fault_flags & VM_FAULT_WIRE) != 0) {
+		KASSERT(wired, ("VM_FAULT_WIRE && !wired"));
+		vm_page_wire(fs.m);
 	} else
 		vm_page_activate(fs.m);
 	if (m_hold != NULL) {
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2591,7 +2591,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 				 * it into the physical map.
 				 */
 				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
-				    VM_FAULT_CHANGE_WIRING)) != KERN_SUCCESS)
+				    VM_FAULT_WIRE)) != KERN_SUCCESS)
 					break;
 			} while ((faddr += PAGE_SIZE) < saved_end);
 			vm_map_lock(map);
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -326,9 +326,9 @@ long vmspace_resident_count(struct vmspace *vmspace);
 /*
  * vm_fault option flags
  */
-#define VM_FAULT_NORMAL 0		/* Nothing special */
-#define VM_FAULT_CHANGE_WIRING 1	/* Change the wiring as appropriate */
-#define VM_FAULT_DIRTY 2		/* Dirty the page; use w/VM_PROT_COPY */
+#define	VM_FAULT_NORMAL	0	/* Nothing special */
+#define	VM_FAULT_WIRE	1	/* Wire the mapped page */
+#define	VM_FAULT_DIRTY	2	/* Dirty the page; use w/VM_PROT_COPY */
 
 /*
  * Initially, mappings are slightly sequential.  The maximum window size must