From 5b81a204332fac12893fc060fd17fce126f9df29 Mon Sep 17 00:00:00 2001
From: glebius
Date: Wed, 17 Jun 2015 22:44:27 +0000
Subject: [PATCH] o Un-inline vm_pager_get_pages(), vm_pager_get_pages_async().

o Provide an extensive set of assertions for the input array of pages.
o Remove now-duplicate assertions from different pagers.

Sponsored by:	Nginx, Inc.
Sponsored by:	Netflix
---
 .../opensolaris/uts/common/fs/zfs/zfs_vnops.c |  2 -
 sys/fs/nfsclient/nfs_clbio.c                  |  6 --
 sys/vm/swap_pager.c                           |  4 --
 sys/vm/vm_pager.c                             | 72 ++++++++++++++++++-
 sys/vm/vm_pager.h                             | 40 +----------
 5 files changed, 74 insertions(+), 50 deletions(-)

diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 5bd67257b419..1038a8747b43 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -5734,8 +5734,6 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 	object = mreq->object;
 	error = 0;
 
-	KASSERT(vp->v_object == object, ("mismatching object"));
-
 	if (pcount > 1 && zp->z_blksz > PAGESIZE) {
 		startoff = rounddown(IDX_TO_OFF(mreq->pindex), zp->z_blksz);
 		reqstart = OFF_TO_IDX(round_page(startoff));
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index e8de4a54c2fe..53ba7efe418f 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -128,12 +128,6 @@ ncl_getpages(struct vop_getpages_args *ap)
 
 	npages = btoc(count);
 
-	/*
-	 * Since the caller has busied the requested page, that page's valid
-	 * field will not be changed by other threads.
-	 */
-	vm_page_assert_xbusied(pages[ap->a_reqpage]);
-
 	/*
 	 * If the requested page is partially valid, just return it and
 	 * allow the pager to zero-out the blanks.  Partially valid pages
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 5ade2bc452e6..b3ad70ed9484 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1118,10 +1118,6 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 
 	mreq = m[reqpage];
 
-	KASSERT(mreq->object == object,
-	    ("swap_pager_getpages: object mismatch %p/%p",
-	    object, mreq->object));
-
 	/*
 	 * Calculate range to retrieve.  The pages have already been assigned
 	 * their swapblks.  We require a *contiguous* range but we know it to
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index ed9b562fbb56..b4fe8d0b6ecc 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -250,8 +250,78 @@ vm_pager_deallocate(object)
 	(*pagertab[object->type]->pgo_dealloc) (object);
 }
 
+static void
+vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
+{
+#ifdef INVARIANTS
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(count > 0, ("%s: 0 count", __func__));
+	/*
+	 * All pages must be busied, not mapped, not fully valid,
+	 * not dirty, and belong to the proper object.
+	 */
+	for (int i = 0; i < count; i++) {
+		vm_page_assert_xbusied(m[i]);
+		KASSERT(!pmap_page_is_mapped(m[i]),
+		    ("%s: page %p is mapped", __func__, m[i]));
+		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
+		    ("%s: request for a valid page %p", __func__, m[i]));
+		KASSERT(m[i]->dirty == 0,
+		    ("%s: page %p is dirty", __func__, m[i]));
+		KASSERT(m[i]->object == object,
+		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
+	}
+#endif
+}
+
+/*
+ * Page in the pages for the object using its associated pager.
+ * The requested page must be fully valid on successful return.
+ */
+int
+vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
+{
+	int r;
+
+	vm_pager_assert_in(object, m, count);
+
+	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
+	if (r != VM_PAGER_OK)
+		return (r);
+
+	/*
+	 * If the pager has replaced the page, assert that it has
+	 * updated the array.  Also assert that the page is still
+	 * busied.
+	 */
+	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
+	    ("%s: mismatch page %p pindex %ju", __func__,
+	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
+	vm_page_assert_xbusied(m[reqpage]);
+
+	/*
+	 * If the pager didn't fill up the entire page, zero out
+	 * the partially filled data.
+	 */
+	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
+		vm_page_zero_invalid(m[reqpage], TRUE);
+
+	return (VM_PAGER_OK);
+}
+
+int
+vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
+    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
+{
+
+	vm_pager_assert_in(object, m, count);
+
+	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
+	    count, reqpage, iodone, arg));
+}
+
 /*
- * vm_pager_get_pages() - inline, see vm/vm_pager.h
  * vm_pager_put_pages() - inline, see vm/vm_pager.h
  * vm_pager_has_page() - inline, see vm/vm_pager.h
  */
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index 3accc69c94c0..6884729f7667 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -106,49 +106,15 @@ vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t,
     vm_ooffset_t, struct ucred *);
 void vm_pager_bufferinit(void);
 void vm_pager_deallocate(vm_object_t);
-static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
-static inline int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int,
-    int, pgo_getpages_iodone_t, void *);
+int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
+int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int, int,
+    pgo_getpages_iodone_t, void *);
 static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *,
     int *);
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
 void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
     int npages, boolean_t object_locked);
 
-/*
- * vm_page_get_pages:
- *
- *	Retrieve pages from the VM system in order to map them into an object
- *	( or into VM space somewhere ).  If the pagein was successful, we
- *	must fully validate it.
- */
-static __inline int
-vm_pager_get_pages(
-	vm_object_t object,
-	vm_page_t *m,
-	int count,
-	int reqpage
-) {
-	int r;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
-	if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
-		vm_page_zero_invalid(m[reqpage], TRUE);
-	}
-	return (r);
-}
-
-static inline int
-vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
-    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
-{
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
-	    count, reqpage, iodone, arg));
-}
-
 static __inline void
 vm_pager_put_pages(
 	vm_object_t object,
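
Reviewer note (not part of the patch): the sketch below only illustrates the caller-side contract that vm_pager_assert_in() now enforces, namely that the object is write-locked and every page in the array is exclusively busied, unmapped, clean, not fully valid, and owned by the object. It assumes the FreeBSD 10/11-era KPIs vm_page_grab(), VM_OBJECT_WLOCK() and vm_page_xunbusy(); the function name example_pagein_one() is hypothetical and error handling is reduced to a comment.

/* Hypothetical caller, for illustration only (not from the patch). */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static int
example_pagein_one(vm_object_t object, vm_pindex_t pindex, vm_page_t *mp)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_WLOCK(object);

	/*
	 * vm_page_grab() returns the page exclusively busied, which is
	 * one of the conditions vm_pager_assert_in() checks.  A freshly
	 * allocated page is also unmapped and clean; a pre-existing,
	 * partially valid page may need extra work (not shown).
	 */
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
	if (m->valid == VM_PAGE_BITS_ALL) {
		/* Already fully valid: the pager must not be called. */
		vm_page_xunbusy(m);
		VM_OBJECT_WUNLOCK(object);
		*mp = m;
		return (VM_PAGER_OK);
	}

	rv = vm_pager_get_pages(object, &m, 1, 0);
	if (rv != VM_PAGER_OK) {
		/* A real caller would typically free the invalid page. */
		vm_page_xunbusy(m);
		VM_OBJECT_WUNLOCK(object);
		return (rv);
	}

	/*
	 * On success the requested page is fully valid and still
	 * exclusively busied, and 'm' is current even if the pager
	 * replaced the page, since vm_pager_get_pages() asserts that
	 * the array was updated.
	 */
	vm_page_xunbusy(m);
	VM_OBJECT_WUNLOCK(object);
	*mp = m;
	return (VM_PAGER_OK);
}

The early return for a fully valid page matters because, under INVARIANTS, vm_pager_assert_in() now panics on a request for an already-valid page, a case the old inline wrapper silently tolerated.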