Make the KPI of vm_pager_get_pages() more strict: if a pager changes a page
in the requested array, then it is responsible for the disposition of the
previous page and for updating the entry in the requested array.  Consumers
of the KPI no longer need to re-look up the pages after a call to
vm_pager_get_pages().

Reviewed by:	kib
Sponsored by:	Netflix
Sponsored by:	Nginx, Inc.
commit 093c7f396d
parent a565264d3c
Author:	Gleb Smirnoff
Date:	2015-06-12 11:32:20 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=284310
7 changed files with 26 additions and 56 deletions
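Every changed consumer below converges on the same shape. As an illustrative
sketch (a hypothetical function written for this note, not code from the
commit; the usual kernel headers and a write-locked object are assumed), a
single-page consumer under the stricter contract reads:

	/*
	 * Hypothetical single-page consumer.  The caller holds the object
	 * write lock, as the changed functions below do.
	 */
	static int
	example_get_valid_page(vm_object_t object, vm_pindex_t pindex)
	{
		vm_page_t m;
		int rv;

		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(object, &m, 1, 0);
			/*
			 * On success "m" already points at the page now
			 * resident at pindex: the pager updated the array
			 * entry, so no vm_page_lookup() and no NULL check
			 * are needed anymore.
			 */
			if (rv != VM_PAGER_OK) {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				return (EIO);
			}
		}
		return (0);
	}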

sys/fs/tmpfs/tmpfs_subr.c

@@ -1320,7 +1320,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *node;
 	vm_object_t uobj;
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t idx, newpages, oldpages;
 	off_t oldsize;
 	int base, rv;
@@ -1367,11 +1367,9 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
 				VM_WAIT;
 				VM_OBJECT_WLOCK(uobj);
 				goto retry;
-			} else if (m->valid != VM_PAGE_BITS_ALL) {
-				ma[0] = m;
-				rv = vm_pager_get_pages(uobj, ma, 1, 0);
-				m = vm_page_lookup(uobj, idx);
-			} else
+			} else if (m->valid != VM_PAGE_BITS_ALL)
+				rv = vm_pager_get_pages(uobj, &m, 1, 0);
+			else
 				/* A cached page was reactivated. */
 				rv = VM_PAGER_OK;
 			vm_page_lock(m);

sys/kern/kern_exec.c

@@ -966,13 +966,10 @@ exec_map_first_page(imgp)
 		}
 		initial_pagein = i;
 		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
-		ma[0] = vm_page_lookup(object, 0);
-		if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
-			if (ma[0] != NULL) {
-				vm_page_lock(ma[0]);
-				vm_page_free(ma[0]);
-				vm_page_unlock(ma[0]);
-			}
+		if (rv != VM_PAGER_OK) {
+			vm_page_lock(ma[0]);
+			vm_page_free(ma[0]);
+			vm_page_unlock(ma[0]);
 			VM_OBJECT_WUNLOCK(object);
 			return (EIO);
 		}

sys/kern/uipc_shm.c

@@ -189,14 +189,6 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(obj, &m, 1, 0);
-			m = vm_page_lookup(obj, idx);
-			if (m == NULL) {
-				printf(
-		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
-				    obj, idx, rv);
-				VM_OBJECT_WUNLOCK(obj);
-				return (EIO);
-			}
 			if (rv != VM_PAGER_OK) {
 				printf(
 	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
@@ -423,7 +415,7 @@ static int
 shm_dotruncate(struct shmfd *shmfd, off_t length)
 {
 	vm_object_t object;
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t idx, nobjsize;
 	vm_ooffset_t delta;
 	int base, rv;
@@ -465,12 +457,10 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 				VM_WAIT;
 				VM_OBJECT_WLOCK(object);
 				goto retry;
-			} else if (m->valid != VM_PAGE_BITS_ALL) {
-				ma[0] = m;
-				rv = vm_pager_get_pages(object, ma, 1,
+			} else if (m->valid != VM_PAGE_BITS_ALL)
+				rv = vm_pager_get_pages(object, &m, 1,
 				    0);
-				m = vm_page_lookup(object, idx);
-			} else
+			else
 				/* A cached page was reactivated. */
 				rv = VM_PAGER_OK;
 			vm_page_lock(m);

sys/kern/uipc_syscalls.c

@@ -2026,10 +2026,7 @@ sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
 	if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
 		rv = vm_pager_get_pages(obj, &m, 1, 0);
 		SFSTAT_INC(sf_iocnt);
-		m = vm_page_lookup(obj, pindex);
-		if (m == NULL)
-			error = EIO;
-		else if (rv != VM_PAGER_OK) {
+		if (rv != VM_PAGER_OK) {
 			vm_page_lock(m);
 			vm_page_free(m);
 			vm_page_unlock(m);

sys/vm/vm_fault.c

@@ -679,19 +679,12 @@ RetryFault:;
 			/*
 			 * Found the page. Leave it busy while we play
 			 * with it.
-			 */
-
-			/*
-			 * Relookup in case pager changed page. Pager
+			 *
+			 * Pager could have changed the page. Pager
 			 * is responsible for disposition of old page
 			 * if moved.
 			 */
-			fs.m = vm_page_lookup(fs.object, fs.pindex);
-			if (!fs.m) {
-				unlock_and_deallocate(&fs);
-				goto RetryFault;
-			}
-
+			fs.m = marray[reqpage];
 			hardfault++;
 			break; /* break to PAGE HAS BEEN FOUND */
 		}

sys/vm/vm_glue.c

@@ -230,7 +230,7 @@ vsunlock(void *addr, size_t len)
 static vm_page_t
 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 {
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t pindex;
 	int rv;
 
@@ -238,11 +238,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 	pindex = OFF_TO_IDX(offset);
 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 	if (m->valid != VM_PAGE_BITS_ALL) {
-		ma[0] = m;
-		rv = vm_pager_get_pages(object, ma, 1, 0);
-		m = vm_page_lookup(object, pindex);
-		if (m == NULL)
-			goto out;
+		rv = vm_pager_get_pages(object, &m, 1, 0);
 		if (rv != VM_PAGER_OK) {
 			vm_page_lock(m);
 			vm_page_free(m);
@@ -571,7 +567,7 @@ vm_thread_swapin(struct thread *td)
 {
 	vm_object_t ksobj;
 	vm_page_t ma[KSTACK_MAX_PAGES];
-	int i, j, k, pages, rv;
+	int i, j, pages, rv;
 
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
@@ -593,9 +589,12 @@ vm_thread_swapin(struct thread *td)
 			if (rv != VM_PAGER_OK)
 				panic("vm_thread_swapin: cannot get kstack for proc: %d",
 				    td->td_proc->p_pid);
+			/*
+			 * All pages in the array are in place because the
+			 * pager is always the swap pager, which doesn't
+			 * free or remove wired non-req pages from the object.
+			 */
 			vm_object_pip_wakeup(ksobj);
-			for (k = i; k < j; k++)
-				ma[k] = vm_page_lookup(ksobj, k);
 			vm_page_xunbusy(ma[i]);
 		} else if (vm_page_xbusied(ma[i]))
 			vm_page_xunbusy(ma[i]);

sys/vm/vm_object.c

@@ -2046,7 +2046,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 boolean_t
 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-	vm_page_t m, ma[1];
+	vm_page_t m;
 	vm_pindex_t pindex;
 	int rv;
 
@@ -2054,11 +2054,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 	for (pindex = start; pindex < end; pindex++) {
 		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 		if (m->valid != VM_PAGE_BITS_ALL) {
-			ma[0] = m;
-			rv = vm_pager_get_pages(object, ma, 1, 0);
-			m = vm_page_lookup(object, pindex);
-			if (m == NULL)
-				break;
+			rv = vm_pager_get_pages(object, &m, 1, 0);
 			if (rv != VM_PAGER_OK) {
 				vm_page_lock(m);
 				vm_page_free(m);
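
For the pager side of the contract, a companion sketch (again hypothetical,
not code from this commit): a pager that substitutes a fresh page for a
caller-supplied one must now dispose of the old page itself and publish the
replacement in the caller's array. Only standard vm_page primitives are used;
I/O and allocation-failure handling are omitted:

	/*
	 * Hypothetical fragment of a pager getpages method.  The object
	 * write lock is held; "i" indexes the page being replaced.
	 */
	static void
	example_pager_replace(vm_object_t object, vm_page_t *ma, int i)
	{
		vm_page_t mnew, mold;
		vm_pindex_t pindex;

		mold = ma[i];
		pindex = mold->pindex;

		/* The pager, not the caller, disposes of the old page. */
		vm_page_lock(mold);
		vm_page_free(mold);
		vm_page_unlock(mold);

		/*
		 * Allocate the replacement at the same index (NULL check
		 * omitted in this sketch) and update the caller's array
		 * entry, so the caller can use ma[i] without a re-lookup.
		 */
		mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
		ma[i] = mnew;
	}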