Implement a mechanism within vm_phys_alloc_contig() to defer all necessary
calls to vdrop() until after the free page queues lock is released.  This
eliminates repeatedly releasing and reacquiring the free page queues lock
each time the last cached page is reclaimed from a vnode-backed object.
This commit is contained in:
Alan Cox 2009-06-21 20:29:14 +00:00
parent 046d6c762f
commit ef327c3ee7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=194607

View File

@@ -594,7 +594,7 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
struct vm_phys_seg *seg;
vm_object_t m_object;
vm_paddr_t pa, pa_last, size;
vm_page_t m, m_ret;
vm_page_t deferred_vdrop_list, m, m_ret;
int flind, i, oind, order, pind;
size = npages << PAGE_SHIFT;
@@ -604,6 +604,7 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
("vm_phys_alloc_contig: alignment must be a power of 2"));
KASSERT((boundary & (boundary - 1)) == 0,
("vm_phys_alloc_contig: boundary must be a power of 2"));
deferred_vdrop_list = NULL;
/* Compute the queue that is the best fit for npages. */
for (order = 0; (1 << order) < npages; order++);
mtx_lock(&vm_page_queue_free_mtx);
@@ -697,10 +698,23 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
("vm_phys_alloc_contig: page %p is busy", m));
KASSERT(m->dirty == 0,
("vm_phys_alloc_contig: page %p is dirty", m));
m_object = m->object;
if ((m->flags & PG_CACHED) != 0) {
m->valid = 0;
m_object = m->object;
vm_page_cache_remove(m);
if (m_object->type == OBJT_VNODE &&
m_object->cache == NULL) {
/*
* Enqueue the vnode for deferred vdrop().
*
* Unmanaged pages don't use "pageq", so it
* can be safely abused to construct a short-
* lived queue of vnodes.
*/
m->pageq.tqe_prev = m_object->handle;
m->pageq.tqe_next = deferred_vdrop_list;
deferred_vdrop_list = m;
}
} else {
KASSERT(VM_PAGE_IS_FREE(m),
("vm_phys_alloc_contig: page %p is not free", m));
@@ -714,13 +728,6 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
m->flags = PG_UNMANAGED | (m->flags & PG_ZERO);
m->oflags = 0;
/* Unmanaged pages don't use "act_count". */
if (m_object != NULL &&
m_object->type == OBJT_VNODE &&
m_object->cache == NULL) {
mtx_unlock(&vm_page_queue_free_mtx);
vdrop(m_object->handle);
mtx_lock(&vm_page_queue_free_mtx);
}
}
for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
m = &m_ret[i];
@@ -730,6 +737,10 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
vm_phys_free_pages(m, 0);
}
mtx_unlock(&vm_page_queue_free_mtx);
while (deferred_vdrop_list != NULL) {
vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
}
return (m_ret);
}