Handle pagein clustering in vm_page_grab_valid() so that it can be used by
exec_map_first_page().  This will also enable pagein clustering for other
interested consumers (tmpfs, md, etc).

Discussed with:	alc
Approved by:	kib
Differential Revision:	https://reviews.freebsd.org/D22731
This commit is contained in:
Jeff Roberson 2019-12-15 02:00:32 +00:00
parent d5dfb2fbc8
commit af00971419
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=355763
2 changed files with 43 additions and 69 deletions

View File

@ -974,9 +974,9 @@ do_execve(struct thread *td, struct image_args *args, struct mac *mac_p)
int
exec_map_first_page(struct image_params *imgp)
{
int rv, i, after, initial_pagein;
vm_page_t ma[VM_INITIAL_PAGEIN];
vm_object_t object;
vm_page_t m;
int error;
if (imgp->firstpage != NULL)
exec_unmap_first_page(imgp);
@ -988,68 +988,14 @@ exec_map_first_page(struct image_params *imgp)
#if VM_NRESERVLEVEL > 0
vm_object_color(object, 0);
#endif
retry:
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
VM_ALLOC_WIRED);
if (!vm_page_all_valid(ma[0])) {
if (vm_page_busy_acquire(ma[0], VM_ALLOC_WAITFAIL) == 0) {
vm_page_unwire_noq(ma[0]);
goto retry;
}
if (vm_page_all_valid(ma[0])) {
vm_page_xunbusy(ma[0]);
goto out;
}
if (!vm_pager_has_page(object, 0, NULL, &after)) {
if (vm_page_unwire_noq(ma[0]))
vm_page_free(ma[0]);
else
vm_page_xunbusy(ma[0]);
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
initial_pagein = min(after, VM_INITIAL_PAGEIN);
KASSERT(initial_pagein <= object->size,
("%s: initial_pagein %d object->size %ju",
__func__, initial_pagein, (uintmax_t )object->size));
for (i = 1; i < initial_pagein; i++) {
if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
if (ma[i]->valid)
break;
if (!vm_page_tryxbusy(ma[i]))
break;
} else {
ma[i] = vm_page_alloc(object, i,
VM_ALLOC_NORMAL);
if (ma[i] == NULL)
break;
}
}
initial_pagein = i;
rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
if (rv != VM_PAGER_OK) {
if (vm_page_unwire_noq(ma[0]))
vm_page_free(ma[0]);
else
vm_page_xunbusy(ma[0]);
for (i = 1; i < initial_pagein; i++) {
if (!vm_page_wired(ma[i]))
vm_page_free(ma[i]);
else
vm_page_xunbusy(ma[i]);
}
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
vm_page_xunbusy(ma[0]);
for (i = 1; i < initial_pagein; i++)
vm_page_readahead_finish(ma[i]);
}
out:
error = vm_page_grab_valid(&m, object, 0,
VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
VM_OBJECT_WUNLOCK(object);
imgp->firstpage = sf_buf_alloc(ma[0], 0);
if (error != VM_PAGER_OK)
return (EIO);
imgp->firstpage = sf_buf_alloc(m, 0);
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
return (0);

View File

@ -4333,15 +4333,18 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
/*
* Grab a page and make it valid, paging in if necessary. Pages missing from
* their pager are zero filled and validated.
* their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied
* and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
* in simultaneously. Additional pages will be left on a paging queue but
* will neither be wired nor busy regardless of allocflags.
*/
int
vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
vm_page_t ma[VM_INITIAL_PAGEIN];
bool sleep, xbusy;
int pflags;
int rv;
int after, i, pflags, rv;
KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
(allocflags & VM_ALLOC_IGN_SBUSY) != 0,
@ -4400,15 +4403,40 @@ vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int al
vm_page_assert_xbusied(m);
MPASS(xbusy);
if (vm_pager_has_page(object, pindex, NULL, NULL)) {
rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
if (vm_pager_has_page(object, pindex, NULL, &after)) {
after = MIN(after, VM_INITIAL_PAGEIN);
after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
after = MAX(after, 1);
ma[0] = m;
for (i = 1; i < after; i++) {
if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
break;
} else {
ma[i] = vm_page_alloc(object, m->pindex + i,
VM_ALLOC_NORMAL);
if (ma[i] == NULL)
break;
}
}
after = i;
rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
/* Pager may have replaced a page. */
m = ma[0];
if (rv != VM_PAGER_OK) {
if (allocflags & VM_ALLOC_WIRED)
if ((allocflags & VM_ALLOC_WIRED) != 0)
vm_page_unwire_noq(m);
vm_page_free(m);
for (i = 0; i < after; i++) {
if (!vm_page_wired(ma[i]))
vm_page_free(ma[i]);
else
vm_page_xunbusy(ma[i]);
}
*mp = NULL;
return (rv);
}
for (i = 1; i < after; i++)
vm_page_readahead_finish(ma[i]);
MPASS(vm_page_all_valid(m));
} else {
vm_page_zero_invalid(m, TRUE);