vm_fault: Move nera into faultstate

This makes it easier to factor out pieces of vm_fault().  No functional
change intended.

Obtained from:	jeff (object_concurrency patches)
Reviewed by:	kib
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D33016
commit 45c09a74d6
parent d9ed1dcc5c
Author: Mark Johnston
Date:   2021-11-24 13:42:32 -05:00
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
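The change follows a common refactoring pattern: a local variable that must be threaded through helper calls is absorbed into the per-fault state structure that the helpers already receive. A minimal, compilable sketch of the idea, with invented names (the real struct faultstate in sys/vm/vm_fault.c carries many more fields):

#include <stdio.h>

/*
 * Hypothetical, pared-down fault state; a stand-in for the kernel's
 * struct faultstate.
 */
struct faultstate {
    int nera;    /* readahead window; -1 means "not yet computed" */
};

/*
 * A helper factored out of the fault handler takes only the state
 * pointer and can read or update nera without an extra parameter.
 */
static int
getpages(struct faultstate *fs)
{
    if (fs->nera == -1)
        fs->nera = 8;    /* stand-in for vm_fault_readahead() */
    return (fs->nera);
}

int
main(void)
{
    struct faultstate fs;

    fs.nera = -1;    /* mirrors the new "fs.nera = -1" in vm_fault() */
    printf("readahead window: %d\n", getpages(&fs));
    return (0);
}

Because the value now travels with the state, a helper factored out of vm_fault() can consult or update it without growing its parameter list, which is what the vm_fault_getpages() change below relies on.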

@@ -125,9 +125,12 @@ struct faultstate {
     vm_prot_t fault_type;
     vm_prot_t prot;
     int fault_flags;
-    boolean_t wired;
 
     /* Control state. */
     struct timeval oom_start_time;
     bool oom_started;
+    boolean_t wired;
+    int nera;
 
     /* Page reference for cow. */
     vm_page_t m_cow;
@@ -1184,7 +1187,7 @@ vm_fault_allocate(struct faultstate *fs)
  * pages at the same time.
  */
 static int
-vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
+vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
 {
     vm_offset_t e_end, e_start;
     int ahead, behind, cluster_offset, rv;
@@ -1202,6 +1205,20 @@ vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
     e_end = fs->entry->end;
     behavior = vm_map_entry_behavior(fs->entry);
 
+    /*
+     * If the pager for the current object might have
+     * the page, then determine the number of additional
+     * pages to read and potentially reprioritize
+     * previously read pages for earlier reclamation.
+     * These operations should only be performed once per
+     * page fault. Even if the current pager doesn't
+     * have the page, the number of additional pages to
+     * read will apply to subsequent objects in the
+     * shadow chain.
+     */
+    if (fs->nera == -1 && !P_KILLED(curproc))
+        fs->nera = vm_fault_readahead(fs);
+
     /*
      * Release the map lock before locking the vnode or
      * sleeping in the pager. (If the current object has
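The hunk above relocates the one-shot readahead computation into vm_fault_getpages() itself, with -1 serving as a "not yet computed" sentinel so the work happens at most once per fault even though the function may run again for each backing object in the shadow chain. A hedged sketch of that compute-once idiom, using invented stand-ins for vm_fault_readahead() and P_KILLED(curproc):

#include <stdbool.h>

struct state {
    int nera;    /* -1 until the readahead window is computed */
};

/* Invented stand-ins; not the kernel interfaces. */
static int compute_readahead(struct state *st) { (void)st; return (8); }
static bool process_killed(void) { return (false); }

static void
getpages(struct state *st)
{
    /*
     * First call for this fault: nera is still -1, so compute the
     * window; later calls for backing objects reuse the cached value.
     */
    if (st->nera == -1 && !process_killed())
        st->nera = compute_readahead(st);
}

int
main(void)
{
    struct state st = { -1 };

    getpages(&st);    /* computes the window */
    getpages(&st);    /* reuses it; no recomputation */
    return (st.nera == 8 ? 0 : 1);
}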
@@ -1221,15 +1238,15 @@ vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
      * Page in the requested page and hint the pager,
      * that it may bring up surrounding pages.
      */
-    if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
+    if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
         P_KILLED(curproc)) {
         behind = 0;
         ahead = 0;
     } else {
         /* Is this a sequential fault? */
-        if (nera > 0) {
+        if (fs->nera > 0) {
             behind = 0;
-            ahead = nera;
+            ahead = fs->nera;
         } else {
             /*
              * Request a cluster of pages that is
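For reference, the branch above sizes the paging window from fs->nera: no history or random access reads nothing extra, a detected sequential pattern reads ahead only, and the remaining case requests a cluster around the faulting page. A simplified, compilable sketch with an invented cluster size (the kernel derives the real cluster from the map-entry bounds and pager limits):

#include <stdbool.h>
#include <stdio.h>

static void
size_window(int nera, bool random_access, int *behindp, int *aheadp)
{
    if (nera == -1 || random_access) {
        /* No history, or random access: fetch only the faulting page. */
        *behindp = 0;
        *aheadp = 0;
    } else if (nera > 0) {
        /* Sequential pattern detected: read ahead, nothing behind. */
        *behindp = 0;
        *aheadp = nera;
    } else {
        /* Otherwise request a small cluster (size invented here). */
        *behindp = 4;
        *aheadp = 4;
    }
}

int
main(void)
{
    int ahead, behind;

    size_window(8, false, &behind, &ahead);
    printf("behind=%d ahead=%d\n", behind, ahead);
    return (0);
}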
@@ -1261,8 +1278,14 @@ vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
      * outside the range of the pager, clean up and return
      * an error.
      */
-    if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
+    if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
+        VM_OBJECT_WLOCK(fs->object);
+        fault_page_free(&fs->m);
+        unlock_and_deallocate(fs);
         return (KERN_OUT_OF_BOUNDS);
+    }
+    KASSERT(rv == VM_PAGER_FAIL,
+        ("%s: unexpected pager error %d", __func__, rv));
     return (KERN_NOT_RECEIVER);
 }
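The second half of this hunk moves error cleanup into the helper: on VM_PAGER_ERROR or VM_PAGER_BAD it now frees the page and drops its references itself before returning KERN_OUT_OF_BOUNDS, and the new KASSERT documents that VM_PAGER_FAIL is the only other status expected here. A sketch of that "clean up where the error is detected" shape, with simplified status codes and a trivial model of the lock and page:

#include <assert.h>

enum pager_status { PAGER_OK, PAGER_FAIL, PAGER_ERROR, PAGER_BAD };
enum kern_status { KS_SUCCESS, KS_NOT_RECEIVER, KS_OUT_OF_BOUNDS };

struct state {
    int locked;       /* stand-in for the object lock */
    int have_page;    /* stand-in for the held page */
};

/* Models fault_page_free() plus unlock_and_deallocate(). */
static void
cleanup(struct state *st)
{
    st->have_page = 0;
    st->locked = 0;
}

static enum kern_status
getpages(struct state *st, enum pager_status rv)
{
    if (rv == PAGER_OK)
        return (KS_SUCCESS);
    if (rv == PAGER_ERROR || rv == PAGER_BAD) {
        /* Clean up at the point of detection; callers just return. */
        cleanup(st);
        return (KS_OUT_OF_BOUNDS);
    }
    /* Document the only remaining case, as the new KASSERT does. */
    assert(rv == PAGER_FAIL);
    return (KS_NOT_RECEIVER);
}

int
main(void)
{
    struct state st = { 1, 1 };

    return (getpages(&st, PAGER_ERROR) == KS_OUT_OF_BOUNDS ? 0 : 1);
}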
@@ -1307,7 +1330,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 {
     struct faultstate fs;
     int ahead, behind, faultcount;
-    int nera, result, rv;
+    int result, rv;
     bool dead, hardfault;
 
     VM_CNT_INC(v_vm_faults);
@@ -1322,8 +1345,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     fs.map = map;
     fs.lookup_still_valid = false;
     fs.oom_started = false;
+    fs.nera = -1;
     faultcount = 0;
-    nera = -1;
     hardfault = false;
 
 RetryFault:
@@ -1490,21 +1513,7 @@ RetryFault:
          */
         VM_OBJECT_WUNLOCK(fs.object);
 
-        /*
-         * If the pager for the current object might have
-         * the page, then determine the number of additional
-         * pages to read and potentially reprioritize
-         * previously read pages for earlier reclamation.
-         * These operations should only be performed once per
-         * page fault. Even if the current pager doesn't
-         * have the page, the number of additional pages to
-         * read will apply to subsequent objects in the
-         * shadow chain.
-         */
-        if (nera == -1 && !P_KILLED(curproc))
-            nera = vm_fault_readahead(&fs);
-
-        rv = vm_fault_getpages(&fs, nera, &behind, &ahead);
+        rv = vm_fault_getpages(&fs, &behind, &ahead);
         if (rv == KERN_SUCCESS) {
             faultcount = behind + 1 + ahead;
             hardfault = true;
@@ -1512,12 +1521,9 @@ RetryFault:
         }
         if (rv == KERN_RESOURCE_SHORTAGE)
             goto RetryFault;
-        VM_OBJECT_WLOCK(fs.object);
-        if (rv == KERN_OUT_OF_BOUNDS) {
-            fault_page_free(&fs.m);
-            unlock_and_deallocate(&fs);
+        if (rv == KERN_OUT_OF_BOUNDS)
             return (rv);
-        }
+        VM_OBJECT_WLOCK(fs.object);
     }
 
     /*
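With cleanup folded into vm_fault_getpages(), the caller's error path in the final hunk collapses to an early return taken before the object lock is reacquired. A compilable sketch of the caller's new shape, with the lock and status codes modelled trivially:

#include <stdio.h>

enum { KS_SUCCESS, KS_OUT_OF_BOUNDS };

struct state {
    int locked;    /* stand-in for the object lock */
};

/* Stand-in for vm_fault_getpages(); pretend the pager went out of bounds. */
static int
getpages(struct state *st)
{
    (void)st;
    return (KS_OUT_OF_BOUNDS);
}

static int
fault(struct state *st)
{
    int rv;

    rv = getpages(st);
    if (rv == KS_OUT_OF_BOUNDS)    /* helper already cleaned up */
        return (rv);
    st->locked = 1;                /* retake the lock only on success */
    return (KS_SUCCESS);
}

int
main(void)
{
    struct state st = { 0 };

    printf("fault() -> %d\n", fault(&st));
    return (0);
}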