Have vm_page_{deactivate,launder}() requeue already-queued pages.

In many cases the page is not enqueued, so the change will have no
effect. However, the change is needed to support an optimization in
the fault handler, and in some cases (sendfile, the buffer cache) it
was being emulated by the caller anyway.

Reviewed by:	alc
Tested by:	pho
MFC after:	2 weeks
X-Differential Revision: https://reviews.freebsd.org/D14625
commit 3394e82adc
parent 12a9f83baf
Author: markj
Date:   2018-03-18 16:40:56 +00:00

4 changed files with 18 additions and 23 deletions
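Before this change, a caller that wanted "move to the inactive queue, or
requeue if already there" had to open-code the second case, as the sendfile
and buffer-cache hunks below show. A condensed before/after of that caller
pattern (assuming the page lock is held, which both paths require):

	/* Before: the requeue case was emulated by the caller. */
	if (m->queue != PQ_INACTIVE)
		vm_page_deactivate(m);	/* move to the inactive queue */
	else
		vm_page_requeue(m);	/* already inactive: move to the tail */

	/* After: vm_page_deactivate() requeues already-inactive pages itself. */
	vm_page_deactivate(m);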

--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c

@@ -167,10 +167,8 @@ sendfile_free_page(vm_page_t pg, bool nocache)
 					vm_page_deactivate_noreuse(pg);
 				else if (pg->queue == PQ_ACTIVE)
 					vm_page_reference(pg);
-				else if (pg->queue != PQ_INACTIVE)
-					vm_page_deactivate(pg);
 				else
-					vm_page_requeue(pg);
+					vm_page_deactivate(pg);
 			}
 		}
 	}

--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c

@@ -2936,10 +2936,8 @@ vfs_vmio_unwire(struct buf *bp, vm_page_t m)
 				vm_page_deactivate_noreuse(m);
 			else if (m->queue == PQ_ACTIVE)
 				vm_page_reference(m);
-			else if (m->queue != PQ_INACTIVE)
-				vm_page_deactivate(m);
 			else
-				vm_page_requeue(m);
+				vm_page_deactivate(m);
 		}
 	}
 	vm_page_unlock(m);

--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c

@@ -1379,7 +1379,8 @@ vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
 				 * active queue.
 				 */
 				vm_page_lock(m);
-				vm_page_deactivate(m);
+				if (!vm_page_inactive(m))
+					vm_page_deactivate(m);
 				vm_page_unlock(m);
 			}
 		}

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c

@@ -3363,7 +3363,8 @@ vm_page_unwire_noq(vm_page_t m)
 }
 
 /*
- * Move the specified page to the inactive queue.
+ * Move the specified page to the inactive queue, or requeue the page if it is
+ * already in the inactive queue.
  *
  * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive
  * queue.  However, setting "noreuse" to TRUE will accelerate the specified
@@ -3381,15 +3382,10 @@ _vm_page_deactivate(vm_page_t m, boolean_t noreuse)
 
 	vm_page_assert_locked(m);
 
-	/*
-	 * Ignore if the page is already inactive, unless it is unlikely to be
-	 * reactivated.
-	 */
-	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
-		return;
 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 		pq = &vm_pagequeue_domain(m)->vmd_pagequeues[PQ_INACTIVE];
 		/* Avoid multiple acquisitions of the inactive queue lock. */
+		queue = m->queue;
 		if (queue == PQ_INACTIVE) {
 			vm_pagequeue_lock(pq);
 			vm_page_dequeue_locked(m);
@@ -3411,7 +3407,8 @@ _vm_page_deactivate(vm_page_t m, boolean_t noreuse)
 }
 
 /*
- * Move the specified page to the inactive queue.
+ * Move the specified page to the inactive queue, or requeue the page if it is
+ * already in the inactive queue.
  *
  * The page must be locked.
  */
@@ -3438,19 +3435,20 @@ vm_page_deactivate_noreuse(vm_page_t m)
 
 /*
  * vm_page_launder
  *
- * 	Put a page in the laundry.
+ * 	Put a page in the laundry, or requeue it if it is already there.
  */
 void
 vm_page_launder(vm_page_t m)
 {
-	int queue;
 
 	vm_page_assert_locked(m);
-	if ((queue = m->queue) != PQ_LAUNDRY && m->wire_count == 0 &&
-	    (m->oflags & VPO_UNMANAGED) == 0) {
-		if (queue != PQ_NONE)
-			vm_page_dequeue(m);
-		vm_page_enqueue(PQ_LAUNDRY, m);
+	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
+		if (m->queue == PQ_LAUNDRY)
+			vm_page_requeue(m);
+		else {
+			vm_page_remque(m);
+			vm_page_enqueue(PQ_LAUNDRY, m);
+		}
 	}
 }
@@ -3540,7 +3538,7 @@ vm_page_advise(vm_page_t m, int advice)
 	 */
 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
-	else
+	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
 
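After the change, vm_page_deactivate() and vm_page_launder() share the same
shape. A simplified sketch of that common pattern (page_move_to_tail_queue()
is a hypothetical helper for illustration only; _vm_page_deactivate()
additionally handles the "noreuse" case and batches inactive-queue lock
acquisitions):

	/*
	 * Hypothetical helper showing the shared logic: requeue if the page
	 * is already in the target queue, otherwise move it there.
	 */
	static void
	page_move_to_tail_queue(vm_page_t m, uint8_t queue)
	{

		vm_page_assert_locked(m);
		if (m->wire_count != 0 || (m->oflags & VPO_UNMANAGED) != 0)
			return;		/* wired/unmanaged pages are not queued */
		if (m->queue == queue)
			vm_page_requeue(m);	/* move to the queue tail */
		else {
			vm_page_remque(m);	/* leave the current queue, if any */
			vm_page_enqueue(queue, m);
		}
	}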