Most allocation of pages to objects proceeds from lower to higher
indices. Consequently, vm_page_insert() should use vm_radix_lookup_le()
instead of vm_radix_lookup_ge(). Here's why: in the expected case,
vm_radix_lookup_le() quickly finds a page less than the specified key
in the same radix node. In contrast, vm_radix_lookup_ge() is expected
to return NULL, but to conclude that it must examine every slot in the
radix tree that is greater than the key.

Prior to this change, the average cost of a vm_page_insert() call on my
test machine was 992 cycles; after it, the average cost is only 532
cycles, a reduction of 46%.

Reviewed by:	attilio
Sponsored by:	EMC / Isilon Storage Division
commit 9e48bd7ba9
parent b346e448af
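
Before the diff, a toy cost model may help make the claimed asymmetry
concrete. The sketch below is ordinary userland C, not the kernel's
vm_radix code: a single radix "node" is modeled as a flat slot array,
and lookup cost is counted as slots examined. With keys inserted in
ascending order, the predecessor (_le) search stops at the adjacent
filled slot, while the successor (_ge) search must sweep the whole
empty tail of the node before it can return NULL.

	/*
	 * Toy cost model only -- a stand-in for one radix node, not the
	 * kernel's vm_radix implementation.  Cost = slots examined.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define CAP 64			/* slots per toy node */

	static bool slot[CAP];

	/* Slots examined finding the greatest filled slot <= key. */
	static int
	cost_le(int key)
	{
		int steps = 0;

		for (int i = key; i >= 0; i--) {
			steps++;
			if (slot[i])
				break;
		}
		return (steps);
	}

	/* Slots examined finding the least filled slot >= key. */
	static int
	cost_ge(int key)
	{
		int steps = 0;

		for (int i = key; i < CAP; i++) {
			steps++;
			if (slot[i])
				break;
		}
		return (steps);
	}

	int
	main(void)
	{
		long le = 0, ge = 0;

		slot[0] = true;		/* first page already resident */
		for (int key = 1; key < CAP; key++) {	/* ascending keys */
			le += cost_le(key);
			ge += cost_ge(key);
			slot[key] = true;
		}
		printf("avg slots examined: _le %.1f, _ge %.1f\n",
		    (double)le / (CAP - 1), (double)ge / (CAP - 1));
		return (0);
	}

On this 64-slot toy node the program reports an average of 2.0 slots
examined for the _le search versus 32.0 for the _ge search. The real
tree walks multiple levels, but the shape of the asymmetry is the same
one the commit message measures in cycles.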
@@ -827,14 +827,14 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 	if (object->resident_page_count == 0) {
 		TAILQ_INSERT_TAIL(&object->memq, m, listq);
 	} else {
-		neighbor = vm_radix_lookup_ge(&object->rtree, pindex);
+		neighbor = vm_radix_lookup_le(&object->rtree, pindex);
 		if (neighbor != NULL) {
-			KASSERT(pindex < neighbor->pindex,
-			    ("vm_page_insert: offset %ju not minor than %ju",
+			KASSERT(pindex > neighbor->pindex,
+			    ("vm_page_insert: offset %ju less than %ju",
 			    (uintmax_t)pindex, (uintmax_t)neighbor->pindex));
-			TAILQ_INSERT_BEFORE(neighbor, m, listq);
+			TAILQ_INSERT_AFTER(&object->memq, neighbor, m, listq);
 		} else
-			TAILQ_INSERT_TAIL(&object->memq, m, listq);
+			TAILQ_INSERT_HEAD(&object->memq, m, listq);
 	}
 	vm_radix_insert(&object->rtree, m);
 
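
To experiment with the list half of the change outside the kernel, here
is a minimal sketch of the rewritten insertion pattern, assuming only
<sys/queue.h> (shipped on the BSDs and macOS; libbsd provides it
elsewhere). The page struct and the linear lookup_le() helper are
hypothetical stand-ins for vm_page_t and vm_radix_lookup_le(), kept
only so the example is self-contained.

	/*
	 * Userland sketch of the post-change insertion logic.  The
	 * linear lookup_le() below is an illustrative stand-in for
	 * vm_radix_lookup_le(); it is not how the kernel finds the
	 * neighbor.
	 */
	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct page {
		unsigned long pindex;		/* offset in the object */
		TAILQ_ENTRY(page) listq;	/* linkage on memq */
	};

	TAILQ_HEAD(pglist, page);

	/* Greatest page with pindex <= key, or NULL. */
	static struct page *
	lookup_le(struct pglist *memq, unsigned long key)
	{
		struct page *p, *best = NULL;

		TAILQ_FOREACH(p, memq, listq)
			if (p->pindex <= key &&
			    (best == NULL || p->pindex > best->pindex))
				best = p;
		return (best);
	}

	/* The rewritten pattern: insert after the predecessor. */
	static void
	page_insert(struct pglist *memq, struct page *m)
	{
		struct page *neighbor;

		if (TAILQ_EMPTY(memq)) {
			TAILQ_INSERT_TAIL(memq, m, listq);
		} else {
			neighbor = lookup_le(memq, m->pindex);
			if (neighbor != NULL)
				TAILQ_INSERT_AFTER(memq, neighbor, m, listq);
			else
				TAILQ_INSERT_HEAD(memq, m, listq);
		}
	}

	int
	main(void)
	{
		struct pglist memq = TAILQ_HEAD_INITIALIZER(memq);
		unsigned long keys[] = { 0, 1, 2, 5, 3 };  /* mostly ascending */
		struct page *p;

		for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
			p = malloc(sizeof(*p));
			p->pindex = keys[i];
			page_insert(&memq, p);
		}
		TAILQ_FOREACH(p, &memq, listq)
			printf("%lu ", p->pindex);	/* prints: 0 1 2 3 5 */
		printf("\n");
		return (0);
	}

Inserting after the predecessor returned by the _le lookup keeps memq
sorted by pindex exactly as the old TAILQ_INSERT_BEFORE on the _ge
neighbor did; only the direction of the neighbor search changes.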