Refactor vm_page_alloc()'s interactions with vm_reserv_alloc_page() and
vm_page_insert() so that (1) vm_radix_lookup_le() is never called while the
free page queues lock is held and (2) vm_radix_lookup_le() is called at most
once.  This change reduces the average time that the free page queues lock
is held by vm_page_alloc() as well as vm_page_alloc()'s average overall
running time.

Sponsored by:	EMC / Isilon Storage Division
Author:	Alan Cox
Date:	2013-05-12 16:50:18 +00:00
Commit:	404eb1b3fd (parent a164074fc4)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=250577

3 changed files with 63 additions and 25 deletions
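
The shape of the change, reduced to a sketch (the function and lock names are
the real ones from the diffs below, but the control flow is simplified for
illustration and is not the literal vm_page_alloc() body):

	/* Before: each callee performed its own radix lookup, and the one
	 * inside vm_reserv_alloc_page() ran with the free page queues lock
	 * held. */
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_reserv_alloc_page(object, pindex);	/* lookup #1, under the lock */
	mtx_unlock(&vm_page_queue_free_mtx);
	vm_page_insert(m, object, pindex);		/* lookup #2 */

	/* After: a single lookup, performed before the lock is acquired,
	 * with the result threaded through both callees. */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_reserv_alloc_page(object, pindex, mpred);
	mtx_unlock(&vm_page_queue_free_mtx);
	vm_page_insert_after(m, object, pindex, mpred);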

sys/vm/vm_page.c

@@ -161,6 +161,8 @@ static struct vnode *vm_page_alloc_init(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(int queue, vm_page_t m);
 static void vm_page_init_fakepg(void *dummy);
+static void vm_page_insert_after(vm_page_t m, vm_object_t object,
+    vm_pindex_t pindex, vm_page_t mpred);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
 
@@ -809,11 +811,44 @@ vm_page_dirty_KBI(vm_page_t m)
 void
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
-	vm_page_t neighbor;
+	vm_page_t mpred;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	if (m->object != NULL)
 		panic("vm_page_insert: page already inserted");
+	mpred = vm_radix_lookup_le(&object->rtree, pindex);
+	vm_page_insert_after(m, object, pindex, mpred);
+}
+
+/*
+ *	vm_page_insert_after:
+ *
+ *	Inserts the page "m" into the specified object at offset "pindex".
+ *
+ *	The page "mpred" must immediately precede the offset "pindex" within
+ *	the specified object.
+ *
+ *	The object must be locked.
+ */
+static void
+vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
+    vm_page_t mpred)
+{
+	vm_page_t msucc;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(m->object == NULL,
+	    ("vm_page_insert_after: page already inserted"));
+	if (mpred != NULL) {
+		KASSERT(mpred->object == object ||
+		    (mpred->flags & PG_SLAB) != 0,
+		    ("vm_page_insert_after: object doesn't contain mpred"));
+		KASSERT(mpred->pindex < pindex,
+		    ("vm_page_insert_after: mpred doesn't precede pindex"));
+		msucc = TAILQ_NEXT(mpred, listq);
+	} else
+		msucc = TAILQ_FIRST(&object->memq);
+	if (msucc != NULL)
+		KASSERT(msucc->pindex > pindex,
+		    ("vm_page_insert_after: msucc doesn't succeed pindex"));
 
 	/*
 	 *	Record the object/offset pair in this page
@@ -824,18 +859,10 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
-	if (object->resident_page_count == 0) {
-		TAILQ_INSERT_TAIL(&object->memq, m, listq);
-	} else {
-		neighbor = vm_radix_lookup_le(&object->rtree, pindex);
-		if (neighbor != NULL) {
-			KASSERT(pindex > neighbor->pindex,
-			    ("vm_page_insert: offset %ju less than %ju",
-			    (uintmax_t)pindex, (uintmax_t)neighbor->pindex));
-			TAILQ_INSERT_AFTER(&object->memq, neighbor, m, listq);
-		} else
-			TAILQ_INSERT_HEAD(&object->memq, m, listq);
-	}
+	if (mpred != NULL)
+		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
+	else
+		TAILQ_INSERT_HEAD(&object->memq, m, listq);
 	vm_radix_insert(&object->rtree, m);
 
 	/*
@@ -1179,7 +1206,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
 	struct vnode *vp = NULL;
 	vm_object_t m_object;
-	vm_page_t m;
+	vm_page_t m, mpred;
 	int flags, req_class;
 
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
@@ -1195,6 +1222,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
+	if (object != NULL) {
+		mpred = vm_radix_lookup_le(&object->rtree, pindex);
+		KASSERT(mpred == NULL || mpred->pindex != pindex,
+		    ("vm_page_alloc: pindex already allocated"));
+	}
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
@@ -1225,8 +1257,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 			return (NULL);
 #if VM_NRESERVLEVEL > 0
 		} else if (object == NULL || (object->flags & (OBJ_COLORED |
-		    OBJ_FICTITIOUS)) != OBJ_COLORED ||
-		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
+		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
+		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
 #else
 		} else {
 #endif
@@ -1320,7 +1352,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
 		    (object->flags & OBJ_FICTITIOUS) == 0)
 			pmap_page_set_memattr(m, object->memattr);
-		vm_page_insert(m, object, pindex);
+		vm_page_insert_after(m, object, pindex, mpred);
 	} else
 		m->pindex = pindex;
 

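For readers unfamiliar with the queue(3) macros, the following minimal
userspace sketch shows why a caller-supplied predecessor makes the ordered
list insertion O(1). It mirrors the mpred handling in vm_page_insert_after()
above, but struct page, page_insert_after(), and the other names here are
illustrative stand-ins, not kernel code:

	#include <sys/queue.h>
	#include <stdio.h>

	struct page {
		unsigned long pindex;		/* offset within the object */
		TAILQ_ENTRY(page) listq;	/* linkage, as in struct vm_page */
	};

	TAILQ_HEAD(pglist, page);

	/* With the predecessor already known, no lookup is needed. */
	static void
	page_insert_after(struct pglist *memq, struct page *m, struct page *mpred)
	{
		if (mpred != NULL)
			TAILQ_INSERT_AFTER(memq, mpred, m, listq);
		else
			TAILQ_INSERT_HEAD(memq, m, listq);
	}

	int
	main(void)
	{
		struct pglist memq = TAILQ_HEAD_INITIALIZER(memq);
		struct page a = { .pindex = 1 }, b = { .pindex = 2 }, c = { .pindex = 3 };
		struct page *p;

		TAILQ_INSERT_TAIL(&memq, &a, listq);
		TAILQ_INSERT_TAIL(&memq, &c, listq);
		page_insert_after(&memq, &b, &a);	/* caller already knows a precedes b */
		TAILQ_FOREACH(p, &memq, listq)
			printf("%lu\n", p->pindex);	/* prints 1, then 2, then 3 */
		return (0);
	}
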
sys/vm/vm_reserv.c

@@ -476,12 +476,15 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
 /*
  * Allocates a page from an existing or newly-created reservation.
  *
+ * The page "mpred" must immediately precede the offset "pindex" within the
+ * specified object.
+ *
  * The object and free page queue must be locked.
  */
 vm_page_t
-vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
+vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
 {
-	vm_page_t m, mpred, msucc;
+	vm_page_t m, msucc;
 	vm_pindex_t first, leftcap, rightcap;
 	vm_reserv_t rv;
 
@@ -498,10 +501,12 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
 	/*
 	 * Look for an existing reservation.
 	 */
-	mpred = vm_radix_lookup_le(&object->rtree, pindex);
 	if (mpred != NULL) {
+		KASSERT(mpred->object == object ||
+		    (mpred->flags & PG_SLAB) != 0,
+		    ("vm_reserv_alloc_page: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < pindex,
-		    ("vm_reserv_alloc_page: pindex already allocated"));
+		    ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
 		rv = vm_reserv_from_page(mpred);
 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
 			goto found;
@@ -510,7 +515,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
 		msucc = TAILQ_FIRST(&object->memq);
 	if (msucc != NULL) {
 		KASSERT(msucc->pindex > pindex,
-		    ("vm_reserv_alloc_page: pindex already allocated"));
+		    ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
 		rv = vm_reserv_from_page(msucc);
 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
 			goto found;

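Condensed, the existing-reservation probe that vm_reserv_alloc_page() now
performs with the caller-supplied mpred reads as follows; this is a sketch
that omits the assertions and the new-reservation path shown in full above:

	if (mpred != NULL) {
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;	/* pindex lies in mpred's reservation */
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;	/* pindex lies in msucc's reservation */
	}
	/* Otherwise, try to create a new reservation containing pindex. */
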
sys/vm/vm_reserv.h

@@ -48,7 +48,8 @@
 vm_page_t	vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
 		    u_long npages, vm_paddr_t low, vm_paddr_t high,
 		    u_long alignment, vm_paddr_t boundary);
-vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex);
+vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
+		    vm_page_t mpred);
 void		vm_reserv_break_all(vm_object_t object);
 boolean_t	vm_reserv_free_page(vm_page_t m);
 void		vm_reserv_init(void);