Remove weighted page handling from vm_page_advise().
This was added in r51337 as part of the implementation of
madvise(MADV_DONTNEED). Its objective was to ensure that the page daemon
would eventually reclaim other unreferenced pages (i.e., unreferenced
pages not touched by madvise()) from the active queue.

Now that the page daemon performs steady scanning of the active page
queue, this weighted handling is unnecessary. Instead, always "cache"
clean pages by moving them to the head of the inactive page queue. This
simplifies the implementation of vm_page_advise() and eliminates the
fragmentation that resulted from the distribution of pages among
multiple queues.

Suggested by:	alc
Reviewed by:	alc
Sponsored by:	EMC / Isilon Storage Division
Differential Revision:	https://reviews.freebsd.org/D3401
parent: bd81e07d27
commit: c25fabea97
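In effect, a clean page touched by madvise(MADV_DONTNEED) now always ends
up at the head of the inactive queue. A minimal userspace sketch of why
head insertion makes such a page the next reclaim candidate: the page
daemon consumes the inactive queue from its head. This is not kernel
code; the struct page and queue below are hypothetical stand-ins using
the same <sys/queue.h> TAILQ macros the kernel uses.

#include <stdio.h>
#include <sys/queue.h>

struct page {
	const char *name;
	TAILQ_ENTRY(page) plinks;	/* queue linkage */
};

int
main(void)
{
	TAILQ_HEAD(, page) inactive = TAILQ_HEAD_INITIALIZER(inactive);
	struct page lru_page = { .name = "page deactivated normally" };
	struct page advised_page = { .name = "clean MADV_DONTNEED page" };
	struct page *p;

	/* Ordinary deactivation enqueues at the tail: a full LRU trip. */
	TAILQ_INSERT_TAIL(&inactive, &lru_page, plinks);
	/* vm_page_advise() now enqueues clean pages at the head. */
	TAILQ_INSERT_HEAD(&inactive, &advised_page, plinks);

	/* The page daemon scans from the head, so the advised page is
	 * examined (and reclaimed) first. */
	TAILQ_FOREACH(p, &inactive, plinks)
		printf("scan: %s\n", p->name);
	return (0);
}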
--- a/sys/sys/pcpu.h
+++ b/sys/sys/pcpu.h
@@ -162,7 +162,7 @@ struct pcpu {
 	long		pc_cp_time[CPUSTATES];	/* statclock ticks */
 	struct device	*pc_device;
 	void		*pc_netisr;		/* netisr SWI cookie */
-	int		pc_dnweight;		/* vm_page_dontneed() */
+	int		pc_unused1;		/* unused field */
 	int		pc_domain;		/* Memory domain. */
 	struct rm_queue	pc_rm_queue;		/* rmlock list of trackers */
 	uintptr_t	pc_dynamic;		/* Dynamic per-cpu data area */
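Note that the pc_dnweight slot is renamed rather than deleted, presumably
so that the offsets of the struct pcpu members that follow (and with them
the kernel binary interface) remain unchanged.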
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2542,19 +2542,26 @@ _vm_page_deactivate(vm_page_t m, int athead)
 	struct vm_pagequeue *pq;
 	int queue;
 
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 
 	/*
-	 * Ignore if already inactive.
+	 * Ignore if the page is already inactive, unless it is unlikely to be
+	 * reactivated.
 	 */
-	if ((queue = m->queue) == PQ_INACTIVE)
+	if ((queue = m->queue) == PQ_INACTIVE && !athead)
 		return;
 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
-		if (queue != PQ_NONE)
-			vm_page_dequeue(m);
-		m->flags &= ~PG_WINATCFLS;
 		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
-		vm_pagequeue_lock(pq);
+		/* Avoid multiple acquisitions of the inactive queue lock. */
+		if (queue == PQ_INACTIVE) {
+			vm_pagequeue_lock(pq);
+			vm_page_dequeue_locked(m);
+		} else {
+			if (queue != PQ_NONE)
+				vm_page_dequeue(m);
+			m->flags &= ~PG_WINATCFLS;
+			vm_pagequeue_lock(pq);
+		}
 		m->queue = PQ_INACTIVE;
 		if (athead)
 			TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
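Two things change in _vm_page_deactivate() here. First, the new !athead
clause means a page that is already in the inactive queue is no longer
ignored when head insertion is requested, which is what lets
vm_page_advise() move an already-inactive page to the front of the queue.
Second, when the page is already inactive, the queue lock is taken once
and the page removed with vm_page_dequeue_locked(), instead of paying for
the lock acquisition inside vm_page_dequeue() and then a second
vm_pagequeue_lock() for the reinsertion.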
@@ -2729,34 +2736,18 @@ vm_page_cache(vm_page_t m)
 /*
  *	vm_page_advise
  *
- *	Cache, deactivate, or do nothing as appropriate.  This routine
- *	is used by madvise().
- *
- *	Generally speaking we want to move the page into the cache so
- *	it gets reused quickly.  However, this can result in a silly syndrome
- *	due to the page recycling too quickly.  Small objects will not be
- *	fully cached.  On the other hand, if we move the page to the inactive
- *	queue we wind up with a problem whereby very large objects
- *	unnecessarily blow away our inactive and cache queues.
- *
- *	The solution is to move the pages based on a fixed weighting.  We
- *	either leave them alone, deactivate them, or move them to the cache,
- *	where moving them to the cache has the highest weighting.
- *	By forcing some pages into other queues we eventually force the
- *	system to balance the queues, potentially recovering other unrelated
- *	space from active.  The idea is to not force this to happen too
- *	often.
+ *	Deactivate or do nothing, as appropriate.  This routine is used
+ *	by madvise() and vop_stdadvise().
  *
  *	The object and page must be locked.
  */
 void
 vm_page_advise(vm_page_t m, int advice)
 {
-	int dnw, head;
 
 	vm_page_assert_locked(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (advice == MADV_FREE) {
+	if (advice == MADV_FREE)
 		/*
 		 * Mark the page clean.  This will allow the page to be freed
 		 * up by the system.  However, such pages are often reused
@@ -2767,24 +2758,12 @@ vm_page_advise(vm_page_t m, int advice)
 		 * nor do we try to put it in the cache (which would cause a
 		 * page fault on reuse).
 		 *
-		 * But we do make the page is freeable as we can without
+		 * But we do make the page as freeable as we can without
 		 * actually taking the step of unmapping it.
 		 */
 		m->dirty = 0;
-		m->act_count = 0;
-	} else if (advice != MADV_DONTNEED)
+	else if (advice != MADV_DONTNEED)
 		return;
-	dnw = PCPU_GET(dnweight);
-	PCPU_INC(dnweight);
-
-	/*
-	 * Occasionally leave the page alone.
-	 */
-	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
-		if (m->act_count >= ACT_INIT)
-			--m->act_count;
-		return;
-	}
 
 	/*
 	 * Clear any references to the page.  Otherwise, the page daemon will
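As a usage-level aside, the MADV_FREE contract this hunk preserves (mark
the page clean, do not unmap it) is visible from userspace: the kernel may
discard the contents at any point after the call, so a later read returns
either the old bytes or zeroes. A minimal sketch, assuming a platform that
defines MADV_FREE (FreeBSD, or Linux 4.5 and later):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	memset(p, 'A', len);
	/* Tell the VM the contents are disposable; no unmapping occurs. */
	if (madvise(p, len, MADV_FREE) != 0)
		return (1);
	/* Prints 65 ('A') if the page was not yet reclaimed, 0 if it was. */
	printf("first byte after MADV_FREE: %d\n", p[0]);
	return (0);
}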
@@ -2795,20 +2774,12 @@ vm_page_advise(vm_page_t m, int advice)
 	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
 		vm_page_dirty(m);
 
-	if (m->dirty || (dnw & 0x0070) == 0) {
-		/*
-		 * Deactivate the page 3 times out of 32.
-		 */
-		head = 0;
-	} else {
-		/*
-		 * Cache the page 28 times out of every 32.  Note that
-		 * the page is deactivated instead of cached, but placed
-		 * at the head of the queue instead of the tail.
-		 */
-		head = 1;
-	}
-	_vm_page_deactivate(m, head);
+	/*
+	 * Place clean pages at the head of the inactive queue rather than the
+	 * tail, thus defeating the queue's LRU operation and ensuring that the
+	 * page will be reused quickly.
+	 */
+	_vm_page_deactivate(m, m->dirty == 0);
 }
 
 /*
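For reference, the removed weighting hinged on the per-CPU counter dnw and
the masks 0x01F0 and 0x0070, which partition the counter's 512 relevant
values into the "1, 3, and 28 out of every 32" cases named in the deleted
comments. A small standalone program (an illustration, not FreeBSD code)
reproduces those ratios for a clean page that is not already in the
inactive queue (the m->dirty and PQ_INACTIVE short-circuits in the deleted
code are ignored here):

#include <stdio.h>

int
main(void)
{
	int dnw, leave = 0, deactivate = 0, cache = 0;

	/* Bits 4-8 of dnw decide the outcome, so 512 values cover it. */
	for (dnw = 0; dnw < 512; dnw++) {
		if ((dnw & 0x01F0) == 0)
			leave++;	/* page left alone */
		else if ((dnw & 0x0070) == 0)
			deactivate++;	/* tail of the inactive queue */
		else
			cache++;	/* "cached": head of the queue */
	}
	/* Prints 16, 48, and 448 of 512: 1, 3, and 28 out of every 32. */
	printf("leave %d, deactivate %d, cache %d (of 512)\n",
	    leave, deactivate, cache);
	return (0);
}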