Add vm_page_alloc_after().

This is a variant of vm_page_alloc() which accepts an additional parameter:
the page in the object with the largest index smaller than the requested
index. vm_page_alloc() finds this page using a lookup in the object's radix
tree, but in some cases its identity is already known to the caller, allowing
the lookup to be elided.
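
For illustration only (not part of the change), a minimal sketch of the
calling pattern; "obj" and "idx" are placeholder variables, and error
handling is elided:

	vm_page_t m, mpred;

	VM_OBJECT_WLOCK(obj);
	/* vm_page_alloc() performs the radix lookup internally. */
	m = vm_page_alloc(obj, idx, VM_ALLOC_NORMAL);
	/*
	 * Here the predecessor of idx + 1 is already known: it is the
	 * page just allocated (assuming idx + 1 is not resident), so
	 * the lookup can be elided.
	 */
	mpred = m;
	m = vm_page_alloc_after(obj, idx + 1, VM_ALLOC_NORMAL, mpred);
	VM_OBJECT_WUNLOCK(obj);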

Modify kmem_back() and vm_page_grab_pages() to use vm_page_alloc_after().
vm_page_alloc() is converted into a trivial wrapper around
vm_page_alloc_after().
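
A condensed sketch of the kmem_back() conversion below, with placeholder
variables "obj", "start" and "npages": one radix lookup establishes the
predecessor before the loop, and each newly allocated page becomes the
hint for the next index.

	vm_page_t m, mpred;
	vm_pindex_t pi;

	VM_OBJECT_WLOCK(obj);
	/* Assumes no page in [start, start + npages) is resident. */
	mpred = vm_radix_lookup_le(&obj->rtree, start);
	for (pi = start; pi < start + npages; pi++, mpred = m) {
		m = vm_page_alloc_after(obj, pi, VM_ALLOC_NORMAL, mpred);
		if (m == NULL)
			break;	/* A real caller would wait or unwind. */
	}
	VM_OBJECT_WUNLOCK(obj);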

Suggested by:	alc
Reviewed by:	alc, kib
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D11984
Author:	Mark Johnston
Date:	2017-08-15 16:39:49 +00:00
Commit:	33fff5d536
Parent:	69b14f7acd
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=322547

3 changed files with 42 additions and 21 deletions

--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c

@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
+#include <vm/vm_radix.h>
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
@@ -332,7 +333,7 @@ int
 kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
 {
 	vm_offset_t offset, i;
-	vm_page_t m;
+	vm_page_t m, mpred;
 	int pflags;
 
 	KASSERT(object == kmem_object || object == kernel_object,
@@ -341,10 +342,13 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
 
-	VM_OBJECT_WLOCK(object);
-	for (i = 0; i < size; i += PAGE_SIZE) {
+	i = 0;
 retry:
-		m = vm_page_alloc(object, atop(offset + i), pflags);
+	VM_OBJECT_WLOCK(object);
+	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
+	for (; i < size; i += PAGE_SIZE, mpred = m) {
+		m = vm_page_alloc_after(object, atop(offset + i), pflags,
+		    mpred);
 
 		/*
 		 * Ran out of space, free everything up and return. Don't need
@@ -355,7 +359,6 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
 			VM_OBJECT_WUNLOCK(object);
 			if ((flags & M_NOWAIT) == 0) {
 				VM_WAIT;
-				VM_OBJECT_WLOCK(object);
 				goto retry;
 			}
 			kmem_unback(object, addr, i);

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c

@@ -1540,15 +1540,32 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
-	vm_page_t m, mpred;
+
+	return (vm_page_alloc_after(object, pindex, req, object != NULL ?
+	    vm_radix_lookup_le(&object->rtree, pindex) : NULL));
+}
+
+/*
+ * Allocate a page in the specified object with the given page index.  To
+ * optimize insertion of the page into the object, the caller must also specify
+ * the resident page in the object with the largest index smaller than the
+ * given page index, or NULL if no such page exists.
+ */
+vm_page_t
+vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req,
+    vm_page_t mpred)
+{
+	vm_page_t m;
 	int flags, req_class;
 
-	mpred = NULL;	/* XXX: pacify gcc */
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
-	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", object, req));
+	    ("inconsistent object(%p)/req(%x)", object, req));
+	KASSERT(mpred == NULL || mpred->pindex < pindex,
+	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
+	    (uintmax_t)pindex));
 
 	if (object != NULL)
 		VM_OBJECT_ASSERT_WLOCKED(object);
@@ -1560,12 +1577,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
-	if (object != NULL) {
-		mpred = vm_radix_lookup_le(&object->rtree, pindex);
-		KASSERT(mpred == NULL || mpred->pindex != pindex,
-		    ("vm_page_alloc: pindex already allocated"));
-	}
-
 	/*
 	 * Allocate a page if the number of free pages exceeds the minimum
 	 * for the request class.
@@ -1612,7 +1623,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	/*
 	 * At this point we had better have found a good page.
 	 */
-	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
+	KASSERT(m != NULL, ("missing page"));
 	vm_phys_freecnt_adj(m, -1);
 	mtx_unlock(&vm_page_queue_free_mtx);
 	vm_page_alloc_check(m);
@@ -3185,7 +3196,7 @@ int
 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count)
 {
-	vm_page_t m;
+	vm_page_t m, mpred;
 	int i;
 	bool sleep;
 
@@ -3202,7 +3213,12 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
 		return (0);
 	i = 0;
 retrylookup:
-	m = vm_page_lookup(object, pindex + i);
+	m = vm_radix_lookup_le(&object->rtree, pindex + i);
+	if (m == NULL || m->pindex != pindex + i) {
+		mpred = m;
+		m = NULL;
+	} else
+		mpred = TAILQ_PREV(m, pglist, listq);
 	for (; i < count; i++) {
 		if (m != NULL) {
 			sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
@@ -3234,8 +3250,9 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
 			if ((allocflags & VM_ALLOC_SBUSY) != 0)
 				vm_page_sbusy(m);
 		} else {
-			m = vm_page_alloc(object, pindex + i, (allocflags &
-			    ~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
+			m = vm_page_alloc_after(object, pindex + i,
+			    (allocflags & ~VM_ALLOC_IGN_SBUSY) |
+			    VM_ALLOC_COUNT(count - i), mpred);
 			if (m == NULL) {
 				if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 					break;
@@ -3250,7 +3267,7 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
 			pmap_zero_page(m);
 			m->valid = VM_PAGE_BITS_ALL;
 		}
-		ma[i] = m;
+		ma[i] = mpred = m;
 		m = vm_page_next(m);
 	}
 	return (i);

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h

@@ -464,7 +464,8 @@ void vm_page_free_zero(vm_page_t m);
 void vm_page_activate (vm_page_t);
 void vm_page_advise(vm_page_t m, int advice);
-vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);