Batch atomic updates to the number of active, inactive, and laundry
pages by vm_object_terminate_pages().  For example, for a "buildworld"
workload, this batching reduces vm_object_terminate_pages()'s average
execution time by 12%.  (The total savings were about 11.7 billion
processor cycles.)

Reviewed by:	kib
MFC after:	1 week
Author:	Alan Cox
Date:	2017-10-19 04:13:47 +00:00
Commit:	4074d642d2
Parent:	12accff186

@@ -715,6 +715,7 @@ vm_object_terminate_pages(vm_object_t object)
 	vm_page_t p, p_next;
 	struct mtx *mtx, *mtx1;
 	struct vm_pagequeue *pq, *pq1;
+	int dequeued;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
@@ -739,6 +740,7 @@ vm_object_terminate_pages(vm_object_t object)
 				if (mtx != NULL)
 					mtx_unlock(mtx);
 				if (pq != NULL) {
+					vm_pagequeue_cnt_add(pq, dequeued);
 					vm_pagequeue_unlock(pq);
 					pq = NULL;
 				}
@@ -756,19 +758,27 @@ vm_object_terminate_pages(vm_object_t object)
 			    "page %p is not queued", p));
 			pq1 = vm_page_pagequeue(p);
 			if (pq != pq1) {
-				if (pq != NULL)
+				if (pq != NULL) {
+					vm_pagequeue_cnt_add(pq, dequeued);
 					vm_pagequeue_unlock(pq);
+				}
 				pq = pq1;
 				vm_pagequeue_lock(pq);
+				dequeued = 0;
 			}
 			p->queue = PQ_NONE;
 			TAILQ_REMOVE(&pq->pq_pl, p, plinks.q);
+			dequeued--;
 		}
 		if (vm_page_free_prep(p, true))
 			continue;
 unlist:
 		TAILQ_REMOVE(&object->memq, p, listq);
 	}
-	if (pq != NULL)
+	if (pq != NULL) {
+		vm_pagequeue_cnt_add(pq, dequeued);
 		vm_pagequeue_unlock(pq);
+	}
 	if (mtx != NULL)
 		mtx_unlock(mtx);
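
Below is a minimal, self-contained userspace sketch of the batching
pattern this change applies; the struct, the helper, and the seven-page
input are hypothetical stand-ins, not the kernel's vm_pagequeue code.
Without batching, the loop would issue one atomic counter update per
dequeued page.  Here a local "dequeued" counter accumulates the change,
and a single atomic add flushes it whenever the loop switches queues or
finishes, just as the diff does around vm_pagequeue_unlock().

#include <stdatomic.h>
#include <stdio.h>

struct pagequeue {
	atomic_int pq_cnt;		/* pages currently on this queue */
};

/* Apply a whole batch of queue-count changes with one atomic op. */
static void
pagequeue_cnt_add(struct pagequeue *pq, int delta)
{

	atomic_fetch_add(&pq->pq_cnt, delta);
}

int
main(void)
{
	struct pagequeue queues[2] = {
		{ .pq_cnt = 4 }, { .pq_cnt = 3 }
	};
	int page_queue[7] = { 0, 0, 1, 1, 1, 0, 0 };	/* queue of each page */
	struct pagequeue *pq, *pq1;
	int dequeued;

	pq = NULL;
	dequeued = 0;
	for (int i = 0; i < 7; i++) {
		pq1 = &queues[page_queue[i]];
		if (pq != pq1) {
			/* Switching queues: flush the batched count. */
			if (pq != NULL)
				pagequeue_cnt_add(pq, dequeued);
			pq = pq1;
			dequeued = 0;
		}
		/* ... remove the page from pq's list here ... */
		dequeued--;	/* batched instead of one atomic per page */
	}
	if (pq != NULL)
		pagequeue_cnt_add(pq, dequeued);	/* final flush */

	/* Both counters reach zero: prints "queue 0: 0, queue 1: 0". */
	printf("queue 0: %d, queue 1: %d\n",
	    atomic_load(&queues[0].pq_cnt), atomic_load(&queues[1].pq_cnt));
	return (0);
}

With the input above, the seven removals are flushed with three atomic
adds (one per run of same-queue pages) instead of seven.  As in the
diff, the counter only ever decreases, since vm_object_terminate_pages()
only removes pages from queues, so each flushed delta is zero or
negative.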