Make prefaulting more aggressive on hard faults.  Previously, we would
only map a fraction of the pages that were fetched by
vm_pager_get_pages() from secondary storage.  Now, we map them all in
order to avoid future soft faults.  This effect is most evident when a
memory-mapped file is accessed sequentially.  Previously, there were
6 soft faults for every hard fault.  Now, these soft faults are
eliminated.

Sponsored by:	EMC / Isilon Storage Division
commit 63281952f0
parent b318fc466d
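
(Aside, not part of the commit.) The soft/hard fault claim above can be checked from userland: getrusage(2) reports minor (soft) and major (hard) fault counts. The sketch below is a minimal test, assuming a placeholder file "testfile" that is not already resident in the page cache; before this change, a sequential pass like this showed several minor faults per major fault, and afterward the minor faults should largely disappear.

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rusage ru0, ru1;
	struct stat sb;
	volatile char sink;
	char *p;
	long pagesize;
	off_t off;
	int fd;

	pagesize = sysconf(_SC_PAGESIZE);
	if ((fd = open("testfile", O_RDONLY)) == -1)	/* placeholder path */
		err(1, "open");
	if (fstat(fd, &sb) == -1)
		err(1, "fstat");
	if ((p = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0)) ==
	    MAP_FAILED)
		err(1, "mmap");

	getrusage(RUSAGE_SELF, &ru0);
	/* Touch one byte per page, sequentially, to fault each page in. */
	for (off = 0; off < sb.st_size; off += pagesize)
		sink = p[off];
	getrusage(RUSAGE_SELF, &ru1);

	printf("minor (soft) faults: %ld\n", ru1.ru_minflt - ru0.ru_minflt);
	printf("major (hard) faults: %ld\n", ru1.ru_majflt - ru0.ru_majflt);
	return (0);
}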
@@ -104,17 +104,8 @@ __FBSDID("$FreeBSD$");
 
 #define PFBAK 4
 #define PFFOR 4
-#define PAGEORDER_SIZE (PFBAK+PFFOR)
-
-static int prefault_pageorder[] = {
-	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
-	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
-	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
-	-4 * PAGE_SIZE, 4 * PAGE_SIZE
-};
 
 static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
-static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
 
 #define	VM_FAULT_READ_BEHIND	8
 #define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)
@@ -136,6 +127,8 @@ struct faultstate {
 };
 
 static void vm_fault_cache_behind(const struct faultstate *fs, int distance);
+static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
+    int faultcount, int reqpage);
 
 static inline void
 release_page(struct faultstate *fs)
@@ -905,7 +898,7 @@ RetryFault:;
	 */
	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
	if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
-		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
+		vm_fault_prefault(&fs, vaddr, faultcount, reqpage);
	VM_OBJECT_WLOCK(fs.object);
	vm_page_lock(fs.m);
 
@@ -1000,31 +993,49 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
  * of mmap time.
  */
 static void
-vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
+vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
+    int faultcount, int reqpage)
 {
-	int i;
+	pmap_t pmap;
+	vm_map_entry_t entry;
+	vm_object_t backing_object, lobject;
 	vm_offset_t addr, starta;
 	vm_pindex_t pindex;
 	vm_page_t m;
-	vm_object_t object;
+	int backward, forward, i;
 
+	pmap = fs->map->pmap;
 	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
 		return;
 
-	object = entry->object.vm_object;
+	if (faultcount > 0) {
+		backward = reqpage;
+		forward = faultcount - reqpage - 1;
+	} else {
+		backward = PFBAK;
+		forward = PFFOR;
+	}
+	entry = fs->entry;
 
-	starta = addra - PFBAK * PAGE_SIZE;
+	starta = addra - backward * PAGE_SIZE;
 	if (starta < entry->start) {
 		starta = entry->start;
 	} else if (starta > addra) {
 		starta = 0;
 	}
 
-	for (i = 0; i < PAGEORDER_SIZE; i++) {
-		vm_object_t backing_object, lobject;
-
-		addr = addra + prefault_pageorder[i];
-		if (addr > addra + (PFFOR * PAGE_SIZE))
+	/*
+	 * Generate the sequence of virtual addresses that are candidates for
+	 * prefaulting in an outward spiral from the faulting virtual address,
+	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
+	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
+	 * If the candidate address doesn't have a backing physical page, then
+	 * the loop immediately terminates.
+	 */
+	for (i = 0; i < 2 * imax(backward, forward); i++) {
+		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
+		    PAGE_SIZE);
+		if (addr > addra + forward * PAGE_SIZE)
 			addr = 0;
 
 		if (addr < starta || addr >= entry->end)
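
(Aside, not part of the diff.) The arithmetic in the new loop is easiest to see by running it in isolation. The sketch below uses the hypothetical values faultcount = 8 and reqpage = 3, i.e. the pager returned three pages behind and four ahead of the requested page, so backward = 3 and forward = 4. The index expression then visits offsets -1, +1, -2, +2, ... pages from the faulting address; candidates past either bound are skipped, standing in for the kernel's "addr = 0" and the starta/entry->end checks.

#include <stdio.h>

#define	PAGE_SIZE	4096	/* assumed page size for illustration */
#define	PFBAK		4
#define	PFFOR		4

static int
imax(int a, int b)
{
	return (a > b ? a : b);
}

int
main(void)
{
	long addr, addra, starta;
	int backward, forward, faultcount, i, reqpage;

	addra = 0x100000;	/* hypothetical faulting address */
	faultcount = 8;		/* hypothetical pager results */
	reqpage = 3;

	if (faultcount > 0) {
		backward = reqpage;
		forward = faultcount - reqpage - 1;
	} else {
		backward = PFBAK;
		forward = PFFOR;
	}
	starta = addra - backward * PAGE_SIZE;

	/* Same spiral as the kernel loop: -1, +1, -2, +2, ... pages. */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE || addr < starta) {
			printf("i=%d: out of range, skipped\n", i);
			continue;
		}
		printf("i=%d: addra %+ld page(s)\n", i,
		    (addr - addra) / PAGE_SIZE);
	}
	return (0);
}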
@@ -1034,7 +1045,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 			continue;
 
 		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
-		lobject = object;
+		lobject = entry->object.vm_object;
 		VM_OBJECT_RLOCK(lobject);
 		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
 		    lobject->type == OBJT_DEFAULT &&
@@ -1046,9 +1057,6 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 			VM_OBJECT_RUNLOCK(lobject);
 			lobject = backing_object;
 		}
-		/*
-		 * give-up when a page is not in memory
-		 */
 		if (m == NULL) {
 			VM_OBJECT_RUNLOCK(lobject);
 			break;
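
(Aside, not part of the diff.) The last two hunks concern how a candidate address is translated to a page index and looked up across the object's backing (shadow) chain: pindex is computed from the map entry's start and offset, and at each level the index is biased by that level's backing_object_offset. The toy model below, with hypothetical types and none of the kernel's locking or OBJT_DEFAULT checks, shows the index arithmetic only.

#include <stdint.h>
#include <stdio.h>

#define	PAGE_SHIFT	12

/* Toy object: holds at most one resident page, named by its index. */
struct toy_object {
	struct toy_object *backing_object;
	uint64_t backing_object_offset;	/* byte offset into the backer */
	uint64_t resident_pindex;
	int resident;
};

static int
toy_page_lookup(struct toy_object *obj, uint64_t pindex)
{
	return (obj->resident && obj->resident_pindex == pindex);
}

int
main(void)
{
	/* A shadow object backed at byte offset 0x4000 (4 pages). */
	struct toy_object backer = { NULL, 0, 7, 1 };
	struct toy_object shadow = { &backer, 0x4000, 0, 0 };
	struct toy_object *lobject;
	uint64_t addr = 0x103000, start = 0x100000, offset = 0;
	uint64_t pindex;

	/* Same translation as the kernel: byte delta to page index. */
	pindex = ((addr - start) + offset) >> PAGE_SHIFT;	/* = 3 */
	lobject = &shadow;
	while (!toy_page_lookup(lobject, pindex) &&
	    lobject->backing_object != NULL) {
		/* Bias the index into the next object down the chain. */
		pindex += lobject->backing_object_offset >> PAGE_SHIFT;
		lobject = lobject->backing_object;
	}
	if (toy_page_lookup(lobject, pindex))
		printf("found page at pindex %llu\n",	/* 3 + 4 = 7 */
		    (unsigned long long)pindex);
	else
		printf("not resident; the prefault loop would stop here\n");
	return (0);
}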