Wire pages in vm_page_grab() when appropriate.

uiomove_object_page() and exec_map_first_page() previously wired a
page after having grabbed it.  Ask vm_page_grab() to perform the wiring
instead: this removes some redundant code and is cheaper when the
requested page is not resident, since the page allocator can be asked
to initialize the page as wired, whereas a separate vm_page_wire()
call requires the page lock.
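
In outline, the change swaps a grab-then-wire sequence for a single
grab call (a minimal sketch of the pattern only; obj and idx stand in
for each caller's object and pindex):

	/* Before: wire in a separate step, which requires the page lock. */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	vm_page_lock(m);
	vm_page_wire(m);
	vm_page_unlock(m);

	/* After: have the allocator return the page already wired. */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED);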

In vm_imgact_hold_page(), use vm_page_unwire_noq() instead of
vm_page_unwire(PQ_NONE).  The latter ensures that the page is dequeued
before returning, but this is unnecessary since vm_page_free() will
trigger a batched dequeue of the page.
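
With the page wired at grab time, the error paths in all three
functions take the same shape (a sketch, using m for the caller's
page):

	vm_page_lock(m);
	vm_page_unwire_noq(m);	/* drop the wiring taken at grab time */
	vm_page_free(m);	/* dequeues the page in a batch, if needed */
	vm_page_unlock(m);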

Reviewed by:	alc, kib
Tested by:	pho (part of a larger patch)
MFC after:	1 week
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21440
commit b5d239cb97 (parent d8deeff04d)
Author:	Mark Johnston
Date:	2019-08-28 16:08:06 +00:00

3 changed files with 9 additions and 9 deletions

--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c

@@ -972,11 +972,13 @@ exec_map_first_page(struct image_params *imgp)
 #if VM_NRESERVLEVEL > 0
 	vm_object_color(object, 0);
 #endif
-	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_WIRED);
 	if (ma[0]->valid != VM_PAGE_BITS_ALL) {
 		vm_page_xbusy(ma[0]);
 		if (!vm_pager_has_page(object, 0, NULL, &after)) {
 			vm_page_lock(ma[0]);
+			vm_page_unwire_noq(ma[0]);
 			vm_page_free(ma[0]);
 			vm_page_unlock(ma[0]);
 			VM_OBJECT_WUNLOCK(object);
@@ -1004,6 +1006,8 @@ exec_map_first_page(struct image_params *imgp)
 		if (rv != VM_PAGER_OK) {
 			for (i = 0; i < initial_pagein; i++) {
 				vm_page_lock(ma[i]);
+				if (i == 0)
+					vm_page_unwire_noq(ma[i]);
 				vm_page_free(ma[i]);
 				vm_page_unlock(ma[i]);
 			}
@@ -1014,9 +1018,6 @@ exec_map_first_page(struct image_params *imgp)
 		for (i = 1; i < initial_pagein; i++)
 			vm_page_readahead_finish(ma[i]);
 	}
-	vm_page_lock(ma[0]);
-	vm_page_wire(ma[0]);
-	vm_page_unlock(ma[0]);
 	VM_OBJECT_WUNLOCK(object);
 	imgp->firstpage = sf_buf_alloc(ma[0], 0);

--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c

@@ -188,7 +188,8 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
 	 * type object.
 	 */
-	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_WIRED);
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		vm_page_xbusy(m);
 		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
@@ -198,6 +199,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 			    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
 				    obj, idx, m->valid, rv);
 				vm_page_lock(m);
+				vm_page_unwire_noq(m);
 				vm_page_free(m);
 				vm_page_unlock(m);
 				VM_OBJECT_WUNLOCK(obj);
@@ -207,9 +209,6 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
 			vm_page_zero_invalid(m, TRUE);
 		vm_page_xunbusy(m);
 	}
-	vm_page_lock(m);
-	vm_page_wire(m);
-	vm_page_unlock(m);
 	VM_OBJECT_WUNLOCK(obj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
 	if (uio->uio_rw == UIO_WRITE && error == 0) {

--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c

@@ -230,7 +230,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
 		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 		if (rv != VM_PAGER_OK) {
 			vm_page_lock(m);
-			vm_page_unwire(m, PQ_NONE);
+			vm_page_unwire_noq(m);
 			vm_page_free(m);
 			vm_page_unlock(m);
 			m = NULL;