o Un-inline vm_pager_get_pages() and vm_pager_get_pages_async().

o Provide an extensive set of assertions for the input array of pages.
o Remove now-duplicate assertions from the individual pagers; a
  caller-side sketch of the resulting contract follows the file
  summary below.

Sponsored by:	Nginx, Inc.
Sponsored by:	Netflix
Gleb Smirnoff 2015-06-17 22:44:27 +00:00
parent c97426f4d7
commit 093ebe1d28
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=284529
5 changed files with 74 additions and 50 deletions
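
To make the new contract concrete, here is a minimal caller-side sketch.
It is hypothetical and not part of this commit: the helper example_pagein()
and its error handling are invented for illustration, while vm_page_grab(),
vm_page_free() and the object-lock macros are the stock FreeBSD primitives
of this era. Every page handed to vm_pager_get_pages() must be
exclusive-busied, unmapped, not fully valid, clean, and belong to the
write-locked object, or the new central assertions fire.

    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/vm_object.h>
    #include <vm/vm_page.h>
    #include <vm/vm_pager.h>

    /*
     * Hypothetical helper: page in the single page at `pindex'.
     * vm_page_grab() returns the page exclusive-busied; a freshly
     * allocated page is also unmapped, invalid and clean, which is
     * exactly what vm_pager_assert_in() now checks for.
     */
    static int
    example_pagein(vm_object_t object, vm_pindex_t pindex, vm_page_t *mp)
    {
            vm_page_t m;
            int rv;

            VM_OBJECT_WLOCK(object);
            m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
            if (m->valid == VM_PAGE_BITS_ALL) {
                    /* Fully valid pages must not be passed to the pager. */
                    *mp = m;
                    VM_OBJECT_WUNLOCK(object);
                    return (VM_PAGER_OK);
            }
            rv = vm_pager_get_pages(object, &m, 1, 0);
            if (rv != VM_PAGER_OK) {
                    /* The requested page is freed by the caller on error. */
                    vm_page_lock(m);
                    vm_page_free(m);
                    vm_page_unlock(m);
                    VM_OBJECT_WUNLOCK(object);
                    return (rv);
            }
            /* Still exclusive-busied and now fully valid; caller unbusies. */
            *mp = m;
            VM_OBJECT_WUNLOCK(object);
            return (VM_PAGER_OK);
    }

On success the new wrapper in vm_pager.c guarantees the requested page is
fully valid and still busied, so the caller can map it and then
vm_page_xunbusy() it.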

@@ -5734,8 +5734,6 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 	object = mreq->object;
 	error = 0;
 
-	KASSERT(vp->v_object == object, ("mismatching object"));
-
 	if (pcount > 1 && zp->z_blksz > PAGESIZE) {
 		startoff = rounddown(IDX_TO_OFF(mreq->pindex), zp->z_blksz);
 		reqstart = OFF_TO_IDX(round_page(startoff));

@@ -128,12 +128,6 @@ ncl_getpages(struct vop_getpages_args *ap)
 	npages = btoc(count);
 
-	/*
-	 * Since the caller has busied the requested page, that page's valid
-	 * field will not be changed by other threads.
-	 */
-	vm_page_assert_xbusied(pages[ap->a_reqpage]);
-
 	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages

@@ -1118,10 +1118,6 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	mreq = m[reqpage];
 
-	KASSERT(mreq->object == object,
-	    ("swap_pager_getpages: object mismatch %p/%p",
-	    object, mreq->object));
-
 	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range but we know it to

@@ -250,8 +250,78 @@ vm_pager_deallocate(object)
 	(*pagertab[object->type]->pgo_dealloc) (object);
 }
 
+static void
+vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
+{
+#ifdef INVARIANTS
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(count > 0, ("%s: 0 count", __func__));
+	/*
+	 * All pages must be busied, not mapped, not fully valid,
+	 * not dirty and belong to the proper object.
+	 */
+	for (int i = 0; i < count; i++) {
+		vm_page_assert_xbusied(m[i]);
+		KASSERT(!pmap_page_is_mapped(m[i]),
+		    ("%s: page %p is mapped", __func__, m[i]));
+		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
+		    ("%s: request for a valid page %p", __func__, m[i]));
+		KASSERT(m[i]->dirty == 0,
+		    ("%s: page %p is dirty", __func__, m[i]));
+		KASSERT(m[i]->object == object,
+		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
+	}
+#endif
+}
+
+/*
+ * Page in the pages for the object using its associated pager.
+ * The requested page must be fully valid on successful return.
+ */
+int
+vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
+{
+	int r;
+
+	vm_pager_assert_in(object, m, count);
+
+	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
+	if (r != VM_PAGER_OK)
+		return (r);
+
+	/*
+	 * If the pager has replaced the page, assert that it has
+	 * updated the array.  Also assert that the page is still
+	 * busied.
+	 */
+	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
+	    ("%s: mismatch page %p pindex %ju", __func__,
+	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
+	vm_page_assert_xbusied(m[reqpage]);
+
+	/*
+	 * If the pager didn't fill the entire page, zero out
+	 * the partially filled data.
+	 */
+	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
+		vm_page_zero_invalid(m[reqpage], TRUE);
+
+	return (VM_PAGER_OK);
+}
+
+int
+vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
+    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
+{
+
+	vm_pager_assert_in(object, m, count);
+
+	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
+	    count, reqpage, iodone, arg));
+}
+
 /*
  * vm_pager_get_pages() - inline, see vm/vm_pager.h
  * vm_pager_put_pages() - inline, see vm/vm_pager.h
  * vm_pager_has_page() - inline, see vm/vm_pager.h
  */
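
The async entry point above only validates the pages and dispatches to the
pager; completion is reported through the pgo_getpages_iodone_t callback
instead of a return value. Below is a minimal, hypothetical completion
handler, assuming the callback signature (void *arg, vm_page_t *m,
int count, int error) declared for this interface; the example_req
structure and the wakeup(9)-based synchronization are illustrative only.

    /* Hypothetical per-request completion state. */
    struct example_req {
            int     done;
            int     error;
    };

    /*
     * Hypothetical pgo_getpages_iodone_t handler.  The pager invokes it
     * from its I/O completion context once the read finishes; it records
     * the status and wakes the thread sleeping on the request.
     */
    static void
    example_getpages_done(void *arg, vm_page_t *m, int count, int error)
    {
            struct example_req *req;

            req = arg;
            req->error = error;
            req->done = 1;
            wakeup(req);
    }

A submitter would then call vm_pager_get_pages_async(object, m, count,
reqpage, example_getpages_done, &req) and sleep on &req until done is set.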

@@ -106,49 +106,15 @@ vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t,
     vm_ooffset_t, struct ucred *);
 void vm_pager_bufferinit(void);
 void vm_pager_deallocate(vm_object_t);
-static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
-static inline int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int,
-    int, pgo_getpages_iodone_t, void *);
+int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
+int vm_pager_get_pages_async(vm_object_t, vm_page_t *, int, int,
+    pgo_getpages_iodone_t, void *);
 static __inline boolean_t vm_pager_has_page(vm_object_t, vm_pindex_t, int *, int *);
 void vm_pager_init(void);
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
 void vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
     int npages, boolean_t object_locked);
 
-/*
- * vm_page_get_pages:
- *
- * Retrieve pages from the VM system in order to map them into an object
- * ( or into VM space somewhere ).  If the pagein was successful, we
- * must fully validate it.
- */
-static __inline int
-vm_pager_get_pages(
-	vm_object_t object,
-	vm_page_t *m,
-	int count,
-	int reqpage
-) {
-	int r;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
-	if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
-		vm_page_zero_invalid(m[reqpage], TRUE);
-	}
-	return (r);
-}
-
-static inline int
-vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
-    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
-{
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
-	    count, reqpage, iodone, arg));
-}
-
 static __inline void
 vm_pager_put_pages(
 	vm_object_t object,