Migrate pmap_prefault() into the machine-independent virtual memory layer.

A small helper function pmap_is_prefaultable() is added.  This function
encapsulates the few lines of pmap_prefault() that actually vary from
machine to machine.  Note: pmap_is_prefaultable() and pmap_mincore() have
much in common.  Going forward, it's worth considering their merger.
This commit is contained in:
Alan Cox 2003-10-03 22:46:53 +00:00
parent 87002f0dc1
commit 566526a957
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=120722
10 changed files with 221 additions and 424 deletions

View File

@ -2092,105 +2092,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
("pmap_object_init_pt: non-device object"));
}
/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4			/* pages probed behind the faulting address */
#define PFFOR 4			/* pages probed ahead of the faulting address */
#define PAGEORDER_SIZE (PFBAK+PFFOR)

/* Byte offsets from the faulting address, nearest pages probed first. */
static int pmap_prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	/* Only prefault into the current process's address space. */
	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		/* The unsigned subtraction above wrapped below zero. */
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;
		pt_entry_t *pte;

		addr = addra + pmap_prefault_pageorder[i];
		/* A negative offset near 0 wraps huge; force it out of range. */
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		/* Skip addresses whose upper-level page tables are absent. */
		if (!pmap_pte_v(pmap_lev1pte(pmap, addr))
		    || !pmap_pte_v(pmap_lev2pte(pmap, addr)))
			continue;

		/* Skip addresses that are already mapped. */
		pte = vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		/*
		 * Walk the backing-object chain, locking hand-over-hand,
		 * until the page is found or the chain ends.  Stop early on
		 * a non-page-aligned backing offset.
		 */
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		VM_OBJECT_UNLOCK(lobject);
		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;
		vm_page_lock_queues();
		/* Only enter fully valid, unbusied, non-fictitious pages. */
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			/*
			 * Busy the page so it cannot change underneath us
			 * while the page queues lock is dropped across
			 * pmap_enter_quick().
			 */
			vm_page_busy(m);
			vm_page_unlock_queues();
			mpte = pmap_enter_quick(pmap, addr, m, mpte);
			vm_page_lock_queues();
			vm_page_wakeup(m);
		}
		vm_page_unlock_queues();
	}
}
/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
@ -2556,6 +2457,26 @@ pmap_is_modified(vm_page_t m)
return 0;
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault: both intermediate page-table levels must be valid
 *	and the final PTE must still be empty.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	if (pmap_pte_v(pmap_lev1pte(pmap, addr)) &&
	    pmap_pte_v(pmap_lev2pte(pmap, addr)) &&
	    *vtopte(addr) == 0)
		return (TRUE);
	return (FALSE);
}
/*
* Clear the modify bits on the specified physical page.
*/

View File

@ -2123,106 +2123,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
}
}
/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4			/* pages probed behind the faulting address */
#define PFFOR 4			/* pages probed ahead of the faulting address */
#define PAGEORDER_SIZE (PFBAK+PFFOR)

/* Byte offsets from the faulting address, nearest pages probed first. */
static int pmap_prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;
	pd_entry_t *pde;

	/* Only prefault into the current process's address space. */
	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		/* The unsigned subtraction above wrapped below zero. */
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;
		pt_entry_t *pte;

		addr = addra + pmap_prefault_pageorder[i];
		/* A negative offset near 0 wraps huge; force it out of range. */
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		/* Skip addresses whose page-directory entry is absent. */
		pde = pmap_pde(pmap, addr);
		if (pde == NULL || (*pde & PG_V) == 0)
			continue;

		/*
		 * NOTE(review): the sense of this test appears inverted
		 * relative to the alpha/i386 versions, which skip addresses
		 * that are ALREADY mapped (*pte != 0).  As written, this
		 * skips unmapped addresses instead — verify.
		 */
		pte = vtopte(addr);
		if ((*pte & PG_V) == 0)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		/*
		 * Walk the backing-object chain, locking hand-over-hand,
		 * until the page is found or the chain ends.  Stop early on
		 * a non-page-aligned backing offset.
		 */
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		VM_OBJECT_UNLOCK(lobject);
		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;
		vm_page_lock_queues();
		/* Only enter fully valid, unbusied, non-fictitious pages. */
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			/*
			 * Busy the page so it cannot change underneath us
			 * while the page queues lock is dropped across
			 * pmap_enter_quick().
			 */
			vm_page_busy(m);
			vm_page_unlock_queues();
			mpte = pmap_enter_quick(pmap, addr, m, mpte);
			vm_page_lock_queues();
			vm_page_wakeup(m);
		}
		vm_page_unlock_queues();
	}
}
/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
@ -2597,6 +2497,27 @@ pmap_is_modified(vm_page_t m)
return (FALSE);
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault: its page-directory entry must be valid (so vtopte()
 *	is safe to dereference) and its page-table entry must not yet be
 *	valid, i.e. nothing is currently mapped at the address.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pd_entry_t *pde;
	pt_entry_t *pte;

	pde = pmap_pde(pmap, addr);
	if (pde == NULL || (*pde & PG_V) == 0)
		return (FALSE);
	pte = vtopte(addr);
	/*
	 * Fix: an address is prefaultable only when it is NOT already
	 * mapped.  The previous test returned FALSE for an empty PTE and
	 * TRUE for a valid one — inverted relative to the alpha, i386,
	 * and ia64 implementations, which all reject addresses whose PTE
	 * is already valid.
	 */
	if ((*pte & PG_V) != 0)
		return (FALSE);
	return (TRUE);
}
/*
* Clear the given bit in each of the given page's ptes.
*/

View File

@ -2191,104 +2191,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
}
}
/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4			/* pages probed behind the faulting address */
#define PFFOR 4			/* pages probed ahead of the faulting address */
#define PAGEORDER_SIZE (PFBAK+PFFOR)

/* Byte offsets from the faulting address, nearest pages probed first. */
static int pmap_prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	/* Only prefault into the current process's address space. */
	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		/* The unsigned subtraction above wrapped below zero. */
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;
		pt_entry_t *pte;

		addr = addra + pmap_prefault_pageorder[i];
		/* A negative offset near 0 wraps huge; force it out of range. */
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		/* Skip addresses whose page-directory entry is empty. */
		if ((*pmap_pde(pmap, addr)) == 0)
			continue;

		/* Skip addresses that are already mapped. */
		pte = vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		/*
		 * Walk the backing-object chain, locking hand-over-hand,
		 * until the page is found or the chain ends.  Stop early on
		 * a non-page-aligned backing offset.
		 */
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		VM_OBJECT_UNLOCK(lobject);
		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;
		vm_page_lock_queues();
		/* Only enter fully valid, unbusied, non-fictitious pages. */
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			/*
			 * Busy the page so it cannot change underneath us
			 * while the page queues lock is dropped across
			 * pmap_enter_quick().
			 */
			vm_page_busy(m);
			vm_page_unlock_queues();
			mpte = pmap_enter_quick(pmap, addr, m, mpte);
			vm_page_lock_queues();
			vm_page_wakeup(m);
		}
		vm_page_unlock_queues();
	}
}
/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
@ -2763,6 +2665,25 @@ pmap_is_modified(vm_page_t m)
return (FALSE);
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault: its page-directory entry must be populated and its
 *	page-table entry must still be empty.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	if (*pmap_pde(pmap, addr) == 0)
		return (FALSE);
	return (*vtopte(addr) == 0 ? TRUE : FALSE);
}
/*
* Clear the given bit in each of the given page's ptes.
*/

View File

@ -1735,101 +1735,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
("pmap_object_init_pt: non-device object"));
}
/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4			/* pages probed behind the faulting address */
#define PFFOR 4			/* pages probed ahead of the faulting address */
#define PAGEORDER_SIZE (PFBAK+PFFOR)

/* Byte offsets from the faulting address, nearest pages probed first. */
static int pmap_prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	/* Only prefault into the current process's address space. */
	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		/* The unsigned subtraction above wrapped below zero. */
		starta = 0;
	}

	/* NOTE(review): mpte is set but never consumed on this platform. */
	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;
		struct ia64_lpte *pte;

		addr = addra + pmap_prefault_pageorder[i];
		/* A negative offset near 0 wraps huge; force it out of range. */
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		/* Skip addresses that already have a present VHPT entry. */
		pte = pmap_find_vhpt(addr);
		if (pte && pte->pte_p)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		/*
		 * Walk the backing-object chain, locking hand-over-hand,
		 * until the page is found or the chain ends.  Stop early on
		 * a non-page-aligned backing offset.
		 */
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		VM_OBJECT_UNLOCK(lobject);
		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;
		vm_page_lock_queues();
		/* Only enter fully valid, unbusied, non-fictitious pages. */
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			/*
			 * Busy the page so it cannot change underneath us
			 * while the page queues lock is dropped across
			 * pmap_enter_quick().
			 */
			vm_page_busy(m);
			vm_page_unlock_queues();
			pmap_enter_quick(pmap, addr, m, NULL);
			vm_page_lock_queues();
			vm_page_wakeup(m);
		}
		vm_page_unlock_queues();
	}
}
/*
* Routine: pmap_change_wiring
* Function: Change the wiring attribute for a map/virtual-address
@ -2147,6 +2052,23 @@ pmap_is_modified(vm_page_t m)
return 0;
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.  An address qualifies unless the VHPT already holds
 *	a present entry for it.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	struct ia64_lpte *lpte;

	lpte = pmap_find_vhpt(addr);
	return ((lpte == NULL || !lpte->pte_p) ? TRUE : FALSE);
}
/*
* Clear the modify bits on the specified physical page.
*/

View File

@ -1117,6 +1117,19 @@ pmap_is_modified(vm_page_t m)
return (pmap_query_bit(m, PTE_CHG));
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 *
 *	Always FALSE on this pmap: prefaulting is not implemented, so the
 *	machine-independent fault handler never clusters mappings here.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}
void
pmap_clear_reference(vm_page_t m)
{
@ -1424,14 +1437,6 @@ pmap_pinit2(pmap_t pmap)
/* XXX: Remove this stub when no longer called */
}
/*
 * Stub: prefaulting is not implemented on this pmap.  Only assert that
 * the pmap is the current process's pmap or the kernel pmap.
 */
void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}
/*
* Set the physical protection on the specified range of this map as requested.
*/

View File

@ -1117,6 +1117,19 @@ pmap_is_modified(vm_page_t m)
return (pmap_query_bit(m, PTE_CHG));
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 *
 *	Always FALSE on this pmap: prefaulting is not implemented, so the
 *	machine-independent fault handler never clusters mappings here.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}
void
pmap_clear_reference(vm_page_t m)
{
@ -1424,14 +1437,6 @@ pmap_pinit2(pmap_t pmap)
/* XXX: Remove this stub when no longer called */
}
/*
 * Stub: prefaulting is not implemented on this pmap.  Only assert that
 * the pmap is the current process's pmap or the kernel pmap.
 */
void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}
/*
* Set the physical protection on the specified range of this map as requested.
*/

View File

@ -1117,6 +1117,19 @@ pmap_is_modified(vm_page_t m)
return (pmap_query_bit(m, PTE_CHG));
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 *
 *	Always FALSE on this pmap: prefaulting is not implemented, so the
 *	machine-independent fault handler never clusters mappings here.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}
void
pmap_clear_reference(vm_page_t m)
{
@ -1424,14 +1437,6 @@ pmap_pinit2(pmap_t pmap)
/* XXX: Remove this stub when no longer called */
}
/*
 * Stub: prefaulting is not implemented on this pmap.  Only assert that
 * the pmap is the current process's pmap or the kernel pmap.
 */
void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}
/*
* Set the physical protection on the specified range of this map as requested.
*/

View File

@ -1374,12 +1374,6 @@ pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
("pmap_object_init_pt: non-device object"));
}
/* Stub: prefaulting is not implemented on this pmap. */
void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{

	/* XXX */
}
/*
* Change the wiring attribute for a map/virtual-address pair.
* The mapping must already exist in the pmap.
@ -1724,6 +1718,19 @@ pmap_is_modified(vm_page_t m)
return (FALSE);
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 *
 *	Always FALSE on this pmap: prefaulting is not implemented, so the
 *	machine-independent fault handler never clusters mappings here.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}
void
pmap_clear_modify(vm_page_t m)
{

View File

@ -111,6 +111,7 @@ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
void pmap_growkernel(vm_offset_t);
void pmap_init(vm_paddr_t, vm_paddr_t);
boolean_t pmap_is_modified(vm_page_t m);
boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
boolean_t pmap_ts_referenced(vm_page_t m);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
@ -130,7 +131,6 @@ void pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_activate(struct thread *td);
vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size);

View File

@ -97,7 +97,19 @@ __FBSDID("$FreeBSD$");
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
/*
 * vm_fault_prefault() probes PFBAK pages behind and PFFOR pages ahead
 * of the faulting address.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

/* Byte offsets from the faulting address, nearest pages probed first. */
static int prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);

#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
@ -889,7 +901,7 @@ RetryFault:;
}
pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
pmap_prefault(fs.map->pmap, vaddr, fs.entry);
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
}
vm_page_lock_queues();
vm_page_flag_clear(fs.m, PG_ZERO);
@ -928,6 +940,84 @@ RetryFault:;
return (KERN_SUCCESS);
}
/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	/* Only prefault into the current process's address space. */
	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		/* The unsigned subtraction above wrapped below zero. */
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;

		addr = addra + prefault_pageorder[i];
		/* A negative offset near 0 wraps huge; force it out of range. */
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		/* Let the pmap decide whether this address can be prefaulted. */
		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		/*
		 * Walk the backing-object chain, locking hand-over-hand,
		 * until the page is found or the chain ends.  Stop early on
		 * a non-page-aligned backing offset.
		 */
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		VM_OBJECT_UNLOCK(lobject);
		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;
		vm_page_lock_queues();
		/* Only enter fully valid, unbusied, non-fictitious pages. */
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			/*
			 * Busy the page so it cannot change underneath us
			 * while the page queues lock is dropped across
			 * pmap_enter_quick().
			 */
			vm_page_busy(m);
			vm_page_unlock_queues();
			mpte = pmap_enter_quick(pmap, addr, m, mpte);
			vm_page_lock_queues();
			vm_page_wakeup(m);
		}
		vm_page_unlock_queues();
	}
}
/*
* vm_fault_quick:
*