diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index dc106ee53707..4c8c2dc62322 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -128,24 +128,21 @@ ncl_getpages(struct vop_getpages_args *ap)
 
 	npages = btoc(count);
 
+	/*
+	 * Since the caller has busied the requested page, that page's valid
+	 * field will not be changed by other threads.
+	 */
+	vm_page_assert_xbusied(pages[ap->a_reqpage]);
+
 	/*
 	 * If the requested page is partially valid, just return it and
 	 * allow the pager to zero-out the blanks.  Partially valid pages
 	 * can only occur at the file EOF.
 	 */
-	VM_OBJECT_WLOCK(object);
 	if (pages[ap->a_reqpage]->valid != 0) {
-		for (i = 0; i < npages; ++i) {
-			if (i != ap->a_reqpage) {
-				vm_page_lock(pages[i]);
-				vm_page_free(pages[i]);
-				vm_page_unlock(pages[i]);
-			}
-		}
-		VM_OBJECT_WUNLOCK(object);
-		return (0);
+		vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
+		return (VM_PAGER_OK);
 	}
-	VM_OBJECT_WUNLOCK(object);
 
 	/*
 	 * We use only the kva address for the buffer, but this is extremely
@@ -175,15 +172,7 @@ ncl_getpages(struct vop_getpages_args *ap)
 
 	if (error && (uio.uio_resid == count)) {
 		ncl_printf("nfs_getpages: error %d\n", error);
-		VM_OBJECT_WLOCK(object);
-		for (i = 0; i < npages; ++i) {
-			if (i != ap->a_reqpage) {
-				vm_page_lock(pages[i]);
-				vm_page_free(pages[i]);
-				vm_page_unlock(pages[i]);
-			}
-		}
-		VM_OBJECT_WUNLOCK(object);
+		vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
 		return (VM_PAGER_ERROR);
 	}
 
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 1f07b4696c9c..1a5b15639d18 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -122,24 +122,21 @@ nfs_getpages(struct vop_getpages_args *ap)
 
 	npages = btoc(count);
 
+	/*
+	 * Since the caller has busied the requested page, that page's valid
+	 * field will not be changed by other threads.
+	 */
+	vm_page_assert_xbusied(pages[ap->a_reqpage]);
+
 	/*
 	 * If the requested page is partially valid, just return it and
 	 * allow the pager to zero-out the blanks.  Partially valid pages
 	 * can only occur at the file EOF.
 	 */
-	VM_OBJECT_WLOCK(object);
 	if (pages[ap->a_reqpage]->valid != 0) {
-		for (i = 0; i < npages; ++i) {
-			if (i != ap->a_reqpage) {
-				vm_page_lock(pages[i]);
-				vm_page_free(pages[i]);
-				vm_page_unlock(pages[i]);
-			}
-		}
-		VM_OBJECT_WUNLOCK(object);
-		return (0);
+		vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
+		return (VM_PAGER_OK);
 	}
-	VM_OBJECT_WUNLOCK(object);
 
 	/*
 	 * We use only the kva address for the buffer, but this is extremely
@@ -169,15 +166,7 @@ nfs_getpages(struct vop_getpages_args *ap)
 
 	if (error && (uio.uio_resid == count)) {
 		nfs_printf("nfs_getpages: error %d\n", error);
-		VM_OBJECT_WLOCK(object);
-		for (i = 0; i < npages; ++i) {
-			if (i != ap->a_reqpage) {
-				vm_page_lock(pages[i]);
-				vm_page_free(pages[i]);
-				vm_page_unlock(pages[i]);
-			}
-		}
-		VM_OBJECT_WUNLOCK(object);
+		vm_pager_free_nonreq(object, pages, ap->a_reqpage, npages);
 		return (VM_PAGER_ERROR);
 	}
 
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index c7d038b090ec..65193c3d31a7 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -281,6 +281,33 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
 	return (object);
 }
 
+/*
+ * Free the non-requested pages from the given array.
+ */
+void
+vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
+    int npages)
+{
+	int i;
+	boolean_t object_locked;
+
+	VM_OBJECT_ASSERT_UNLOCKED(object);
+	object_locked = FALSE;
+	for (i = 0; i < npages; ++i) {
+		if (i != reqpage) {
+			if (!object_locked) {
+				VM_OBJECT_WLOCK(object);
+				object_locked = TRUE;
+			}
+			vm_page_lock(ma[i]);
+			vm_page_free(ma[i]);
+			vm_page_unlock(ma[i]);
+		}
+	}
+	if (object_locked)
+		VM_OBJECT_WUNLOCK(object);
+}
+
 /*
  * initialize a physical buffer
  */
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index 62265a23d277..e3d292d745af 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -106,6 +106,8 @@ static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
 static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int *);
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
+void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
+    int npages);
 
 /*
  * vm_page_get_pages:
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 072f1970362d..1b0dc7ebb160 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -722,14 +722,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
 		VM_OBJECT_WUNLOCK(object);
 		return (error);
 	} else if (error != 0) {
-		VM_OBJECT_WLOCK(object);
-		for (i = 0; i < count; i++)
-			if (i != reqpage) {
-				vm_page_lock(m[i]);
-				vm_page_free(m[i]);
-				vm_page_unlock(m[i]);
-			}
-		VM_OBJECT_WUNLOCK(object);
+		vm_pager_free_nonreq(object, m, reqpage, count);
 		return (VM_PAGER_ERROR);
 
 	/*
@@ -739,14 +732,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
 	 */
 	} else if ((PAGE_SIZE / bsize) > 1 &&
 	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
-		VM_OBJECT_WLOCK(object);
-		for (i = 0; i < count; i++)
-			if (i != reqpage) {
-				vm_page_lock(m[i]);
-				vm_page_free(m[i]);
-				vm_page_unlock(m[i]);
-			}
-		VM_OBJECT_WUNLOCK(object);
+		vm_pager_free_nonreq(object, m, reqpage, count);
 		PCPU_INC(cnt.v_vnodein);
 		PCPU_INC(cnt.v_vnodepgsin);
 		return vnode_pager_input_smlfs(object, m[reqpage]);
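For context, here is a minimal sketch of how a filesystem getpages routine would use the new helper on its error path, under the same locking assumptions the patch encodes: the VM object lock is not held and the requested page arrives exclusively busied. The function names my_getpages and my_read_pages are placeholders invented for illustration, not part of this change or of any FreeBSD API.

	/*
	 * Hypothetical getpages error path illustrating vm_pager_free_nonreq().
	 * "my_read_pages" stands in for the filesystem's actual read routine.
	 */
	static int
	my_getpages(struct vop_getpages_args *ap)
	{
		vm_object_t object;
		int error, npages;

		object = ap->a_vp->v_object;
		npages = btoc(ap->a_count);

		/* The VM system hands us the requested page exclusively busied. */
		vm_page_assert_xbusied(ap->a_m[ap->a_reqpage]);

		error = my_read_pages(ap->a_vp, ap->a_m, npages, ap->a_reqpage);
		if (error != 0) {
			/*
			 * Free every page except the requested one.  The helper
			 * acquires the object lock itself only if it actually has
			 * pages to free, so it must be entered with the lock dropped.
			 */
			vm_pager_free_nonreq(object, ap->a_m, ap->a_reqpage, npages);
			return (VM_PAGER_ERROR);
		}
		return (VM_PAGER_OK);
	}

The deferred VM_OBJECT_WLOCK inside vm_pager_free_nonreq() is what lets callers such as ncl_getpages() skip the object lock entirely on the common single-page case, where there are no non-requested pages to free.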