o Enhance the vm_pager_free_nonreq() function:
  - Allow the function to be called with the vm object lock held.
  - Allow specifying a reqpage that doesn't match any page in the region,
    meaning that all pages are freed.
o Use the new function in a couple more places in the vnode pager.

Reviewed by:	alc, kib
Sponsored by:	Netflix
Sponsored by:	Nginx, Inc.
Author: Gleb Smirnoff
Date:   2015-03-17 19:19:19 +00:00
Parent: 8f75a87acd
Commit: 4d6481a4c9
4 changed files with 27 additions and 29 deletions
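
For illustration only (not part of the commit), the sketch below shows how a pager
getpages error path could call the enhanced vm_pager_free_nonreq() under the new
conventions. The function name example_getpages_error, the surrounding error
handling, and the include list are assumptions; only the vm_pager_free_nonreq()
signature and semantics come from the diff itself.

/*
 * Illustrative sketch, not part of the commit: a hypothetical getpages
 * error path using the enhanced vm_pager_free_nonreq().
 */
#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static int
example_getpages_error(vm_object_t object, vm_page_t ma[], int reqpage,
    int npages)
{

        /*
         * Object lock not held: pass FALSE.  The function takes the
         * object write lock itself, and only if it actually frees a page.
         */
        vm_pager_free_nonreq(object, ma, reqpage, npages, FALSE);

        /*
         * If the caller already holds the object write lock, pass TRUE
         * instead and keep the lock across the call:
         *
         *      VM_OBJECT_WLOCK(object);
         *      vm_pager_free_nonreq(object, ma, reqpage, npages, TRUE);
         *      VM_OBJECT_WUNLOCK(object);
         *
         * Passing a reqpage outside [0, npages), e.g. -1, matches no
         * element of ma[], so every page in the array is freed.
         */
        return (VM_PAGER_ERROR);
}

The diffs below show the real call sites: FALSE on the unlocked NFS and vnode
pager error paths, and TRUE where vnode_pager_generic_getpages() already holds
the object lock.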

sys/fs/nfsclient/nfs_clbio.c

@@ -140,7 +140,8 @@ ncl_getpages(struct vop_getpages_args *ap)
          * can only occur at the file EOF.
          */
         if (pages[ap->a_reqpage]->valid != 0) {
-                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
+                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages,
+                    FALSE);
                 return (VM_PAGER_OK);
         }
@@ -172,7 +173,8 @@ ncl_getpages(struct vop_getpages_args *ap)
         if (error && (uio.uio_resid == count)) {
                 ncl_printf("nfs_getpages: error %d\n", error);
-                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
+                vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages,
+                    FALSE);
                 return (VM_PAGER_ERROR);
         }

sys/vm/vm_pager.c

@@ -283,29 +283,35 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
 }
 
 /*
- * Free the non-requested pages from the given array.
+ * Free the non-requested pages from the given array.  To remove all pages,
+ * caller should provide out of range reqpage number.
  */
 void
 vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
-    int npages)
+    int npages, boolean_t object_locked)
 {
+        enum { UNLOCKED, CALLER_LOCKED, INTERNALLY_LOCKED } locked;
         int i;
-        boolean_t object_locked;
 
-        VM_OBJECT_ASSERT_UNLOCKED(object);
-        object_locked = FALSE;
+        if (object_locked) {
+                VM_OBJECT_ASSERT_WLOCKED(object);
+                locked = CALLER_LOCKED;
+        } else {
+                VM_OBJECT_ASSERT_UNLOCKED(object);
+                locked = UNLOCKED;
+        }
         for (i = 0; i < npages; ++i) {
                 if (i != reqpage) {
-                        if (!object_locked) {
+                        if (locked == UNLOCKED) {
                                 VM_OBJECT_WLOCK(object);
-                                object_locked = TRUE;
+                                locked = INTERNALLY_LOCKED;
                         }
                         vm_page_lock(ma[i]);
                         vm_page_free(ma[i]);
                         vm_page_unlock(ma[i]);
                 }
         }
-        if (object_locked)
+        if (locked == INTERNALLY_LOCKED)
                 VM_OBJECT_WUNLOCK(object);
 }

sys/vm/vm_pager.h

@@ -113,7 +113,7 @@ static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
 void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
-    int npages);
+    int npages, boolean_t object_locked);
 
 /*
  * vm_page_get_pages:

sys/vm/vnode_pager.c

@@ -732,7 +732,7 @@ vnode_pager_local_getpages0(struct vnode *vp, vm_page_t *m, int bytecount,
          */
         if (mreq->valid != 0) {
                 vm_pager_free_nonreq(mreq->object, m, reqpage,
-                    round_page(bytecount) / PAGE_SIZE);
+                    round_page(bytecount) / PAGE_SIZE, FALSE);
                 if (iodone != NULL)
                         iodone(arg, m, reqpage, 0);
                 return (VM_PAGER_OK);
@@ -806,7 +806,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
                 return (error);
         } else if (error != 0) {
                 relpbuf(bp, freecnt);
-                vm_pager_free_nonreq(object, m, reqpage, count);
+                vm_pager_free_nonreq(object, m, reqpage, count, FALSE);
                 return (VM_PAGER_ERROR);
 
                 /*
@@ -817,7 +817,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
         } else if ((PAGE_SIZE / bsize) > 1 &&
             (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
                 relpbuf(bp, freecnt);
-                vm_pager_free_nonreq(object, m, reqpage, count);
+                vm_pager_free_nonreq(object, m, reqpage, count, FALSE);
                 PCPU_INC(cnt.v_vnodein);
                 PCPU_INC(cnt.v_vnodepgsin);
                 return vnode_pager_input_smlfs(object, m[reqpage]);
@@ -836,7 +836,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
          */
         if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
                 relpbuf(bp, freecnt);
-                vm_pager_free_nonreq(object, m, reqpage, count);
+                vm_pager_free_nonreq(object, m, reqpage, count, FALSE);
                 return (VM_PAGER_OK);
         } else if (reqblock == -1) {
                 relpbuf(bp, freecnt);
@@ -845,12 +845,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
                     ("vnode_pager_generic_getpages: page %p is dirty", m));
                 VM_OBJECT_WLOCK(object);
                 m[reqpage]->valid = VM_PAGE_BITS_ALL;
-                for (i = 0; i < count; i++)
-                        if (i != reqpage) {
-                                vm_page_lock(m[i]);
-                                vm_page_free(m[i]);
-                                vm_page_unlock(m[i]);
-                        }
+                vm_pager_free_nonreq(object, m, reqpage, count, TRUE);
                 VM_OBJECT_WUNLOCK(object);
                 return (VM_PAGER_OK);
         } else if (m[reqpage]->valid != 0) {
@@ -871,14 +866,9 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
                 if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
                     &runpg) != 0) {
                         relpbuf(bp, freecnt);
-                        VM_OBJECT_WLOCK(object);
-                        for (; i < count; i++)
-                                if (i != reqpage) {
-                                        vm_page_lock(m[i]);
-                                        vm_page_free(m[i]);
-                                        vm_page_unlock(m[i]);
-                                }
-                        VM_OBJECT_WUNLOCK(object);
+                        /* The requested page may be out of range. */
+                        vm_pager_free_nonreq(object, m + i, reqpage - i,
+                            count - i, FALSE);
                         return (VM_PAGER_ERROR);
                 }
                 if (firstaddr == -1) {