Get rid of vm_pageout_page_queued().

vm_page_queue(), added in r333256, generalizes vm_pageout_page_queued(),
so use it instead.  No functional change intended.

Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D15402
This commit is contained in:
Mark Johnston 2018-05-13 13:00:59 +00:00
parent 0f13d146a0
commit 36f8fe9bbb

View File

@@ -251,22 +251,6 @@ vm_pageout_end_scan(struct scan_state *ss)
VM_CNT_ADD(v_pdpages, ss->scanned);
}
/*
 * Ensure that the page has not been dequeued after a pageout batch was
 * collected. See vm_page_dequeue_complete().
 *
 * Returns true only if the page still appears to belong to "queue":
 * the PGA_DEQUEUE aflag must be clear and m->queue must equal "queue".
 * The acquire fence orders the aflags load before the m->queue load;
 * NOTE(review): presumably this pairs with a release-ordered store in
 * vm_page_dequeue_complete() -- confirm against that function.
 */
static inline bool
vm_pageout_page_queued(vm_page_t m, int queue)
{
/* Caller must hold the page lock (asserted, not acquired here). */
vm_page_assert_locked(m);
/* A set PGA_DEQUEUE means a dequeue is pending or done: not queued. */
if ((m->aflags & PGA_DEQUEUE) != 0)
return (false);
atomic_thread_fence_acq();
return (m->queue == queue);
}
/*
* Add a small number of queued pages to a batch queue for later processing
* without the corresponding queue lock held. The caller must have enqueued a
@@ -274,10 +258,10 @@ vm_pageout_page_queued(vm_page_t m, int queue)
* physically dequeued if the caller so requests. Otherwise, the returned
* batch may contain marker pages, and it is up to the caller to handle them.
*
* When processing the batch queue, vm_pageout_page_queued() must be used to
* determine whether the page was logically dequeued by another thread. Once
* this check is performed, the page lock guarantees that the page will not be
* disassociated from the queue.
* When processing the batch queue, vm_page_queue() must be used to
* determine whether the page has been logically dequeued by another thread.
* Once this check is performed, the page lock guarantees that the page will
* not be disassociated from the queue.
*/
static __always_inline void
vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
@@ -751,7 +735,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
* The page may have been disassociated from the queue
* while locks were dropped.
*/
if (!vm_pageout_page_queued(m, queue))
if (vm_page_queue(m) != queue)
continue;
/*
@@ -1262,7 +1246,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
* The page may have been disassociated from the queue
* while locks were dropped.
*/
if (!vm_pageout_page_queued(m, PQ_INACTIVE)) {
if (vm_page_queue(m) != PQ_INACTIVE) {
addl_page_shortage++;
continue;
}
@@ -1542,7 +1526,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
* The page may have been disassociated from the queue
* while locks were dropped.
*/
if (!vm_pageout_page_queued(m, PQ_ACTIVE))
if (vm_page_queue(m) != PQ_ACTIVE)
continue;
/*