Ensure that deactivated pages that are not expected to be reused are
reclaimed in FIFO order by the pagedaemon.  Previously we would enqueue
such pages at the head of the inactive queue, yielding a LIFO reclaim
order.

Reviewed by:	alc
MFC after:	2 weeks
Sponsored by:	EMC / Isilon Storage Division
Mark Johnston 2015-11-08 01:36:18 +00:00
parent 1c01e4f876
commit 7e78597f04
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=290529
3 changed files with 16 additions and 9 deletions
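The queue trick behind this change can be shown outside the kernel. The sketch below is a standalone illustration using sys/queue.h; the names page, pageq, inacthead and deactivate() are hypothetical stand-ins for the kernel's vm_page/vm_pagequeue structures, not the real API. A sentinel marker sits at the head of the inactive queue; no-reuse pages are inserted immediately before the marker, so they precede all LRU pages yet keep FIFO order among themselves, while ordinary deactivations still go to the tail.

/* Standalone sketch of marker-guided FIFO insertion (hypothetical names). */
#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	TAILQ_ENTRY(page) q;
	int id;
	bool marker;
};

TAILQ_HEAD(pageq, page);

static struct pageq inactq = TAILQ_HEAD_INITIALIZER(inactq);
static struct page inacthead = { .id = -1, .marker = true };

/* Mimics _vm_page_deactivate(): no-reuse pages go just before the marker. */
static void
deactivate(struct page *p, bool noreuse)
{
	if (noreuse)
		TAILQ_INSERT_BEFORE(&inacthead, p, q);
	else
		TAILQ_INSERT_TAIL(&inactq, p, q);
}

int
main(void)
{
	struct page pages[4] = {{ .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 }};
	struct page *p;

	/* The marker anchors the head, as vm_pageout_worker() does below. */
	TAILQ_INSERT_HEAD(&inactq, &inacthead, q);

	deactivate(&pages[0], false);	/* LRU page, goes to the tail */
	deactivate(&pages[1], true);	/* no-reuse */
	deactivate(&pages[2], true);	/* no-reuse, reclaimed after page 1 */
	deactivate(&pages[3], false);	/* LRU page */

	/* Reclaim order is front to back. */
	TAILQ_FOREACH(p, &inactq, q)
		if (!p->marker)
			printf("%d ", p->id);
	printf("\n");
	return (0);
}

Compiled and run, this prints "1 2 0 3": the two no-reuse pages come out first, in the order they were queued (FIFO), ahead of both LRU pages.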

sys/vm/vm_page.c

@@ -2536,14 +2536,16 @@ vm_page_unwire(vm_page_t m, uint8_t queue)
  * processes.  This optimization causes one-time-use metadata to be
  * reused more quickly.
  *
- * Normally athead is 0 resulting in LRU operation.  athead is set
- * to 1 if we want this page to be 'as if it were placed in the cache',
- * except without unmapping it from the process address space.
+ * Normally noreuse is FALSE, resulting in LRU operation.  noreuse is set
+ * to TRUE if we want this page to be 'as if it were placed in the cache',
+ * except without unmapping it from the process address space.  In
+ * practice this is implemented by inserting the page at the head of the
+ * queue, using a marker page to guide FIFO insertion ordering.
  *
  * The page must be locked.
  */
 static inline void
-_vm_page_deactivate(vm_page_t m, int athead)
+_vm_page_deactivate(vm_page_t m, boolean_t noreuse)
 {
 	struct vm_pagequeue *pq;
 	int queue;
@@ -2554,7 +2556,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
 	 * Ignore if the page is already inactive, unless it is unlikely to be
 	 * reactivated.
 	 */
-	if ((queue = m->queue) == PQ_INACTIVE && !athead)
+	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
 		return;
 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
@@ -2569,8 +2571,9 @@ _vm_page_deactivate(vm_page_t m, int athead)
 			vm_pagequeue_lock(pq);
 		}
 		m->queue = PQ_INACTIVE;
-		if (athead)
-			TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
+		if (noreuse)
+			TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead,
+			    m, plinks.q);
 		else
 			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 		vm_pagequeue_cnt_inc(pq);
@@ -2587,7 +2590,7 @@ void
 vm_page_deactivate(vm_page_t m)
 {
 
-	_vm_page_deactivate(m, 0);
+	_vm_page_deactivate(m, FALSE);
 }
 
 /*
@@ -2600,7 +2603,7 @@ void
 vm_page_deactivate_noreuse(vm_page_t m)
 {
 
-	_vm_page_deactivate(m, 1);
+	_vm_page_deactivate(m, TRUE);
 }
 
 /*

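As a usage note, a hypothetical caller of the new wrapper (not part of this commit) might look like the sketch below; the page lock requirement comes from the comment on _vm_page_deactivate() above.

/*
 * Hypothetical caller (illustration only): drop a page backing
 * one-time-use data so it is reclaimed ahead of, and in FIFO order
 * relative to, other deactivated pages.
 */
static void
discard_oneuse_page(vm_page_t m)
{
	vm_page_lock(m);		/* _vm_page_deactivate() requires the page lock */
	vm_page_deactivate_noreuse(m);
	vm_page_unlock(m);
}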
sys/vm/vm_page.h

@@ -229,6 +229,7 @@ struct vm_domain {
 	int vmd_pass;			/* local pagedaemon pass */
 	int vmd_last_active_scan;
 	struct vm_page vmd_marker;	/* marker for pagedaemon private use */
+	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
 };
 
 extern struct vm_domain vm_dom[MAXMEMDOM];

sys/vm/vm_pageout.c

@@ -1630,6 +1630,9 @@ vm_pageout_worker(void *arg)
 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
 	domain->vmd_last_active_scan = ticks;
 	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
+	vm_pageout_init_marker(&domain->vmd_inacthead, PQ_INACTIVE);
+	TAILQ_INSERT_HEAD(&domain->vmd_pagequeues[PQ_INACTIVE].pq_pl,
+	    &domain->vmd_inacthead, plinks.q);
 
 	/*
 	 * The pageout daemon worker is never done, so loop forever.
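
One consequence of parking vmd_inacthead on the inactive queue permanently is that queue scans must step over it. The pagedaemon already skips marker pages via the PG_MARKER flag; a simplified sketch of that pattern (the real loop in vm_pageout_scan() does much more, and this code is not part of this diff) looks like:

/*
 * Simplified sketch of how a pagedaemon-style scan steps over marker
 * pages such as vmd_inacthead.
 */
static void
scan_inactive(struct vm_pagequeue *pq)
{
	vm_page_t m, next;

	for (m = TAILQ_FIRST(&pq->pq_pl); m != NULL; m = next) {
		next = TAILQ_NEXT(m, plinks.q);
		if ((m->flags & PG_MARKER) != 0)
			continue;	/* skip vmd_inacthead and scan markers */
		/* ... examine and possibly reclaim the page ... */
	}
}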