Simplify vm_fault(). The introduction of the new page lock renders pointless a bit of cleverness by vm_fault() that avoided repeatedly releasing and reacquiring the page queues lock.

Reviewed by:	kib, kmacy
This commit is contained in:
Alan Cox 2010-05-02 20:24:25 +00:00
parent 5a8336816e
commit f623e55269

View File

@ -215,7 +215,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
{
vm_prot_t prot;
int is_first_object_locked, result;
boolean_t are_queues_locked, growstack, wired;
boolean_t growstack, wired;
int map_generation;
vm_object_t next_object;
vm_page_t marray[VM_FAULT_READ];
@ -478,7 +478,6 @@ RetryFault:;
else
firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;
are_queues_locked = FALSE;
/*
* note: partially valid pages cannot be
* included in the lookahead - NFS piecemeal
@ -495,17 +494,11 @@ RetryFault:;
if (mt->busy ||
(mt->oflags & VPO_BUSY))
continue;
if (!are_queues_locked) {
are_queues_locked = TRUE;
vm_page_lock(mt);
vm_page_lock_queues();
} else {
vm_page_unlock_queues();
vm_page_lock(mt);
vm_page_lock_queues();
}
vm_page_lock(mt);
vm_page_lock_queues();
if (mt->hold_count ||
mt->wire_count) {
vm_page_unlock_queues();
vm_page_unlock(mt);
continue;
}
@ -515,10 +508,9 @@ RetryFault:;
} else {
vm_page_cache(mt);
}
vm_page_unlock_queues();
vm_page_unlock(mt);
}
if (are_queues_locked)
vm_page_unlock_queues();
ahead += behind;
behind = 0;
}