Allow the fault code to use additional clustering info from both
bmap and the swap pager.  Improved fault clustering performance.
John Dyson 1995-09-04 04:44:26 +00:00
parent 2f78014449
commit 170db9c63a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=10556
3 changed files with 114 additions and 50 deletions
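
In outline: each pager's haspage routine now reports how many pages sit contiguously on backing store before and after the requested page, and the fault handler clamps its read-behind/read-ahead window to that cluster instead of guessing. A minimal sketch of the clamping step, with illustrative names (clamp_to_cluster is not a function in this commit):

/*
 * Sketch only.  cbehind/cahead are the pager-reported cluster sizes
 * (as returned through vm_pager_has_page); rbehind/rahead are the
 * fault handler's desired window.
 */
static void
clamp_to_cluster(int *rbehind, int *rahead, int cbehind, int cahead)
{
	if (*rbehind > cbehind)
		*rbehind = cbehind;	/* nothing contiguous further back */
	if (*rahead > cahead)
		*rahead = cahead;	/* nothing contiguous further ahead */
}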

sys/vm/swap_pager.c

@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.42 1995/07/13 08:48:15 davidg Exp $
+ * $Id: swap_pager.c,v 1.43 1995/07/16 13:28:34 davidg Exp $
*/
/*
@@ -708,6 +708,7 @@ swap_pager_haspage(object, offset, before, after)
{
register sw_blk_t swb;
int ix;
+int gix;
if (before != NULL)
*before = 0;
@@ -718,10 +719,35 @@ swap_pager_haspage(object, offset, before, after)
return (FALSE);
}
swb = &object->un_pager.swp.swp_blocks[ix];
-ix = (offset % (SWB_NPAGES * PAGE_SIZE)) / PAGE_SIZE;
+gix = offset / PAGE_SIZE;
+ix = gix % SWB_NPAGES;
if (swb->swb_block[ix] != SWB_EMPTY) {
-if (swb->swb_valid & (1 << ix))
+if (swb->swb_valid & (1 << ix)) {
+int tix;
+if (before) {
+for(tix = ix - 1; tix >= 0; --tix) {
+if ((swb->swb_block[tix] -
+(ix - tix) * (PAGE_SIZE/DEV_BSIZE)) !=
+swb->swb_block[ix])
+break;
+(*before)++;
+}
+}
+if (after) {
+for(tix = ix + 1; tix < SWB_NPAGES; tix++) {
+if ((swb->swb_block[tix] +
+(tix - ix) * (PAGE_SIZE/DEV_BSIZE)) !=
+swb->swb_block[ix])
+break;
+(*after)++;
+}
+}
+return TRUE;
+}
}
return (FALSE);
}
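
The two loops above walk outward from the faulted page's slot in the swap block: swb_block[i] holds the swap device block backing page i, so neighboring pages belong to one cluster exactly when their device blocks differ by (PAGE_SIZE/DEV_BSIZE) per page of distance. A sketch of that predicate, assuming the same swb_block layout (swap_pages_contiguous is illustrative, not part of the commit):

/*
 * Sketch: pages i and j (i < j) of one swap block are contiguous on
 * the swap device iff their block numbers differ by exactly
 * (j - i) * (PAGE_SIZE / DEV_BSIZE).  Illustrative helper only.
 */
static int
swap_pages_contiguous(daddr_t *swb_block, int i, int j)
{
	return (swb_block[j] - swb_block[i] ==
	    (daddr_t)(j - i) * (PAGE_SIZE / DEV_BSIZE));
}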

sys/vm/vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.26 1995/07/13 08:48:20 davidg Exp $
+ * $Id: vm_fault.c,v 1.27 1995/09/03 20:40:41 dyson Exp $
*/
/*
@@ -883,7 +883,7 @@ vm_fault_page_lookup(object, offset, rtobject, rtoffset, rtm)
*rtoffset = offset;
return 1;
}
-if (!object->backing_object)
+if (!object->backing_object || (object == *rtobject))
return 0;
else {
offset += object->backing_object_offset;
@@ -928,6 +928,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
vm_offset_t offsetdiff;
int rahead;
int treqpage;
+int cbehind, cahead;
object = m->object;
offset = m->offset;
@@ -938,9 +939,19 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
* if the requested page is not available, then give up now
*/
-if (!vm_pager_has_page(object, object->paging_offset + offset, NULL, NULL))
+if (!vm_pager_has_page(object,
+object->paging_offset + offset, &cbehind, &cahead))
return 0;
+if (object->backing_object == NULL) {
+if (raheada > cahead) {
+raheada = cahead;
+}
+if (rbehind > cbehind) {
+rbehind = cbehind;
+}
+}
/*
* try to do any readahead that we might have free pages for.
*/
@@ -969,6 +980,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
rbehind = offset / NBPG;
startoffset = offset - rbehind * NBPG;
while (toffset >= startoffset) {
+rtobject = object;
if (!vm_fault_page_lookup(first_object, toffset - offsetdiff, &rtobject, &rtoffset, &rtm) ||
rtm != 0 || rtobject != object) {
startoffset = toffset + NBPG;
@@ -989,6 +1001,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
toffset = offset + NBPG;
endoffset = offset + (rahead + 1) * NBPG;
while (toffset < object->size && toffset < endoffset) {
+rtobject = object;
if (!vm_fault_page_lookup(first_object, toffset - offsetdiff, &rtobject, &rtoffset, &rtm) ||
rtm != 0 || rtobject != object) {
break;
@@ -1010,10 +1023,14 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
* get our pages and don't block for them
*/
for (i = 0; i < size; i++) {
-if (i != treqpage)
+if (i != treqpage) {
rtm = vm_page_alloc(object, startoffset + i * NBPG, VM_ALLOC_NORMAL);
-else
+if (rtm == NULL)
+break;
+} else {
rtm = m;
+}
marray[i] = rtm;
}
@@ -1026,7 +1043,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
* if we could not get our block of pages, then free the
* readahead/readbehind pages.
*/
-if (i < size) {
+if (i < treqpage) {
for (i = 0; i < size; i++) {
if (i != treqpage && marray[i])
FREE_PAGE(marray[i]);
@@ -1035,6 +1052,8 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
marray[0] = m;
return 1;
}
+size = i;
*reqpage = treqpage;
return size;
}
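
The last two hunks adjust the failure path to match the new non-blocking allocation: if vm_page_alloc() fails before the loop reaches the requested page (i < treqpage), the partial cluster is useless and everything but the faulted page is freed; if it fails afterwards, the cluster is simply trimmed to the i pages that were obtained. Restated as a sketch of the same logic (same names as the diff):

/* Sketch of the failure policy above. */
if (i < treqpage) {
	/* The cluster does not even cover the faulted page: drop it. */
	for (i = 0; i < size; i++)
		if (i != treqpage && marray[i])
			FREE_PAGE(marray[i]);
	marray[0] = m;
	return 1;		/* fall back to the single page */
}
size = i;			/* keep only what was actually allocated */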

sys/vm/vnode_pager.c

@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.44 1995/07/13 08:48:47 davidg Exp $
+ * $Id: vnode_pager.c,v 1.45 1995/09/04 00:21:16 dyson Exp $
*/
/*
@@ -76,6 +76,9 @@ struct pagerops vnodepagerops = {
NULL
};
+static int vnode_pager_leaf_getpages();
+static int vnode_pager_leaf_putpages();
/*
* Allocate (or lookup) pager for a vnode.
* Handle is a vnode pointer.
@@ -186,7 +189,10 @@ vnode_pager_haspage(object, offset, before, after)
struct vnode *vp = object->handle;
daddr_t bn;
int err, run;
-daddr_t startblock, reqblock;
+daddr_t reqblock;
+int poff;
+int bsize = vp->v_mount->mnt_stat.f_iosize;
+int pagesperblock;
/*
* If filesystem no longer mounted or offset beyond end of file we do
@@ -195,43 +201,21 @@ vnode_pager_haspage(object, offset, before, after)
if ((vp->v_mount == NULL) || (offset >= object->un_pager.vnp.vnp_size))
return FALSE;
-startblock = reqblock = offset / vp->v_mount->mnt_stat.f_iosize;
-if (startblock > PFCLUSTER_BEHIND)
-startblock -= PFCLUSTER_BEHIND;
-else
-startblock = 0;;
-if (before != NULL) {
-/*
-* Loop looking for a contiguous chunk that includes the
-* requested page.
-*/
-while (TRUE) {
-err = VOP_BMAP(vp, startblock, (struct vnode **) 0, &bn, &run, NULL);
-if (err || bn == -1) {
-if (startblock < reqblock) {
-startblock++;
-continue;
-}
-*before = 0;
-if (after != NULL)
-*after = 0;
-return err ? TRUE : FALSE;
-}
-if ((startblock + run) < reqblock) {
-startblock += run + 1;
-continue;
-}
-*before = reqblock - startblock;
-if (after != NULL)
-*after = run;
-return TRUE;
-}
-}
-err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn, after, NULL);
+pagesperblock = bsize / PAGE_SIZE;
+reqblock = offset / bsize;
+err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
+after, before);
if (err)
return TRUE;
+poff = (offset - (reqblock * bsize)) / PAGE_SIZE;
+if (before) {
+*before *= pagesperblock;
+*before += poff;
+}
+if (after) {
+*after *= pagesperblock;
+*after += (pagesperblock - (poff + 1));
+}
return ((long) bn < 0 ? FALSE : TRUE);
}
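
With VOP_BMAP now reporting contiguous runs in filesystem blocks, converting to page counts is just scaling by pagesperblock and adjusting for the page's position within its own block. For example, with bsize = 8192 and PAGE_SIZE = 4096 (pagesperblock = 2), the second page of a block (poff = 1) with one contiguous block following gives *after = 1 * 2 + (2 - 2) = 2. A sketch of the arithmetic under those assumptions (run_to_pages is illustrative only):

/* Sketch: convert VOP_BMAP block runs into page counts. */
static void
run_to_pages(int before_blocks, int after_blocks, int pagesperblock,
    int poff, int *before, int *after)
{
	/* whole blocks behind, plus the earlier pages of this block */
	*before = before_blocks * pagesperblock + poff;
	/* whole blocks ahead, plus the later pages of this block */
	*after = after_blocks * pagesperblock + (pagesperblock - (poff + 1));
}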
@@ -475,8 +459,7 @@ vnode_pager_input_smlfs(object, m)
if (error)
break;
-vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize);
-vm_page_set_valid(m, (i * bsize) & (PAGE_SIZE-1), bsize);
+vm_page_set_validclean(m, (i * bsize) & (PAGE_SIZE-1), bsize);
} else {
vm_page_set_clean(m, (i * bsize) & (PAGE_SIZE-1), bsize);
bzero((caddr_t) kva + i * bsize, bsize);
@@ -553,12 +536,30 @@ vnode_pager_input_old(object, m)
/*
* generic vnode pager input routine
*/
+int
+vnode_pager_getpages(object, m, count, reqpage)
+vm_object_t object;
+vm_page_t *m;
+int count;
+int reqpage;
+{
+int rtval;
+struct vnode *vp;
+vp = object->handle;
+rtval = VOP_GETPAGES(vp, m, count, reqpage);
+if (rtval == EOPNOTSUPP)
+return vnode_pager_leaf_getpages(object, m, count, reqpage);
+else
+return rtval;
+}
+static int
+vnode_pager_leaf_getpages(object, m, count, reqpage)
vm_object_t object;
vm_page_t *m;
int count;
int reqpage;
{
vm_offset_t kva, foff;
int i, size, bsize, first, firstaddr;
@@ -772,11 +773,29 @@ vnode_pager_getpages(object, m, count, reqpage)
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
+int
+vnode_pager_putpages(object, m, count, sync, rtvals)
+vm_object_t object;
+vm_page_t *m;
+int count;
+boolean_t sync;
+int *rtvals;
+{
+int rtval;
+struct vnode *vp;
+vp = object->handle;
+rtval = VOP_PUTPAGES(vp, m, count, sync, rtvals);
+if (rtval == EOPNOTSUPP)
+return vnode_pager_leaf_putpages(object, m, count, sync, rtvals);
+else
+return rtval;
+}
/*
* generic vnode pager output routine
*/
-int
-vnode_pager_putpages(object, m, count, sync, rtvals)
+static int
+vnode_pager_leaf_putpages(object, m, count, sync, rtvals)
vm_object_t object;
vm_page_t *m;
int count;