The intention of r254304 was to scan the active queue continuously.
However, I've observed the active queue scan stopping when there are
frequent free page shortages and the inactive queue is steadily refilled
by other mechanisms, such as the sequential access heuristic in
vm_fault() or madvise(2).  To remedy this problem, record the time of
the last active queue scan, and always scan a number of pages
proportional to the time since the last scan, regardless of whether
that last scan was a timeout-triggered ("pass == 0") or
free-page-shortage-triggered ("pass > 0") scan.  Also, on a
timeout-triggered scan, allow a full scan of the active queue when the
system is short of inactive pages.

Reviewed by:	kib
MFC after:	6 weeks
Sponsored by:	EMC / Isilon Storage Division
commit 22cf98d1f3 (parent 613ab60283)
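As a rough illustration of the new policy, the sketch below (ordinary
user-space C, not kernel code) computes the scan target the way the
patched vm_pageout_scan() does: the number of active pages to visit is
proportional to the ticks elapsed since the last scan, scaled so the
whole queue is covered about once per vm_pageout_update_period seconds.
Only the formula mirrors the diff; every numeric value is invented for
the example.

    /*
     * Minimal user-space sketch of the min_scan computation from the
     * diff below.  hz, the queue length, and the tick values are
     * made-up sample numbers, not kernel state.
     */
    #include <stdio.h>

    int
    main(void)
    {
            int hz = 1000;                          /* clock ticks per second */
            int vm_pageout_update_period = 600;     /* cover the queue once per 600s */
            long pq_cnt = 250000;                   /* pages on the active queue */
            int last_active_scan = 5000;            /* "ticks" at the previous scan */
            int scan_tick = 8000;                   /* "ticks" now: 3 seconds later */
            long min_scan;

            if (vm_pageout_update_period != 0) {
                    /* pages owed = queue length * elapsed ticks / full period */
                    min_scan = pq_cnt;
                    min_scan *= scan_tick - last_active_scan;
                    min_scan /= (long)hz * vm_pageout_update_period;
            } else
                    min_scan = 0;

            /* 250000 * 3000 / 600000 = 1250 pages owed for 3 seconds. */
            printf("min_scan = %ld\n", min_scan);
            return (0);
    }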
@@ -227,6 +227,7 @@ struct vm_domain {
 	long vmd_segs;	/* bitmask of the segments */
 	boolean_t vmd_oom;
 	int vmd_pass;	/* local pagedaemon pass */
+	int vmd_last_active_scan;
 	struct vm_page vmd_marker; /* marker for pagedaemon private use */
 };
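The only change here is the new vmd_last_active_scan field, a raw
snapshot of the kernel's ticks counter taken when the active queue is
scanned.  As a hypothetical aside, not part of the commit: a
snapshot-and-subtract scheme like this keeps working across counter
wraparound because the subtraction is effectively modular.  The toy
below shows the property with unsigned arithmetic (ticks itself is a
signed int, but the same modular reasoning is what makes the delta
usable):

    /*
     * Toy demonstration (not from the commit): a wrapped counter still
     * yields the correct elapsed interval under modular subtraction.
     */
    #include <limits.h>
    #include <stdio.h>

    int
    main(void)
    {
            unsigned int last = UINT_MAX - 100;     /* just before the wrap */
            unsigned int now = last + 250;          /* 250 ticks later, wrapped */

            printf("elapsed = %u\n", now - last);   /* prints 250 */
            return (0);
    }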
@@ -1028,9 +1028,10 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	vm_page_t m, next;
 	struct vm_pagequeue *pq;
 	vm_object_t object;
+	long min_scan;
 	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
 	int vnodes_skipped = 0;
-	int maxlaunder;
+	int maxlaunder, scan_tick, scanned;
 	boolean_t queues_locked;
 
 	/*
@@ -1351,34 +1352,37 @@ relock_queues:
 	 * If we're just idle polling attempt to visit every
 	 * active page within 'update_period' seconds.
 	 */
-	if (pass == 0 && vm_pageout_update_period != 0) {
-		maxscan /= vm_pageout_update_period;
-		page_shortage = maxscan;
-	}
+	scan_tick = ticks;
+	if (vm_pageout_update_period != 0) {
+		min_scan = pq->pq_cnt;
+		min_scan *= scan_tick - vmd->vmd_last_active_scan;
+		min_scan /= hz * vm_pageout_update_period;
+	} else
+		min_scan = 0;
+	if (min_scan > 0 || (page_shortage > 0 && maxscan > 0))
+		vmd->vmd_last_active_scan = scan_tick;
 
 	/*
-	 * Scan the active queue for things we can deactivate. We nominally
-	 * track the per-page activity counter and use it to locate
-	 * deactivation candidates.
+	 * Scan the active queue for pages that can be deactivated.  Update
+	 * the per-page activity counter and use it to identify deactivation
+	 * candidates.
 	 */
-	m = TAILQ_FIRST(&pq->pq_pl);
-	while (m != NULL && maxscan-- > 0 && page_shortage > 0) {
-
+	for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
+	    min_scan || (page_shortage > 0 && scanned < maxscan)); m = next,
+	    scanned++) {
 		KASSERT(m->queue == PQ_ACTIVE,
 		    ("vm_pageout_scan: page %p isn't active", m));
-
 		next = TAILQ_NEXT(m, plinks.q);
-		if ((m->flags & PG_MARKER) != 0) {
-			m = next;
+		if ((m->flags & PG_MARKER) != 0)
 			continue;
-		}
 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
 		    ("Fictitious page %p cannot be in active queue", m));
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("Unmanaged page %p cannot be in active queue", m));
 		if (!vm_pageout_page_lock(m, &next)) {
 			vm_page_unlock(m);
-			m = next;
 			continue;
 		}
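The loop's new termination condition does double duty: it guarantees at
least min_scan pages are visited (the continuous, time-proportional
part), and it keeps scanning up to maxscan pages while a page shortage
persists (the demand-driven part).  Here is a self-contained simulation
of just that condition, with stand-in values and a fake "every third
page is deactivatable" rule; it assumes nothing about the kernel's data
structures:

    /*
     * Illustrative simulation of the new loop bound: scan at least
     * min_scan pages, and up to maxscan while a shortage remains.
     */
    #include <stdio.h>

    int
    main(void)
    {
            long min_scan = 4;      /* pages owed to the periodic scan */
            int maxscan = 10;       /* pages on the active queue */
            int page_shortage = 2;  /* inactive pages still needed */
            int scanned;

            for (scanned = 0; scanned < min_scan ||
                (page_shortage > 0 && scanned < maxscan); scanned++) {
                    /* Pretend every third page can be deactivated. */
                    if (scanned % 3 == 0 && page_shortage > 0)
                            page_shortage--;
            }

            /* Shortage clears at page 3, but 4 pages are scanned anyway. */
            printf("scanned %d pages, shortage now %d\n", scanned,
                page_shortage);
            return (0);
    }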
||||
@@ -1430,7 +1434,6 @@ relock_queues:
 		} else
 			vm_page_requeue_locked(m);
 		vm_page_unlock(m);
-		m = next;
 	}
 	vm_pagequeue_unlock(pq);
 #if !defined(NO_SWAPPING)
@@ -1620,6 +1623,7 @@ vm_pageout_worker(void *arg)
 	 */
 
 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
+	domain->vmd_last_active_scan = ticks;
 	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
 
 	/*
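Seeding vmd_last_active_scan when the pagedaemon worker starts is what
keeps the first pass sane: with a zero or stale baseline, the
elapsed-tick delta would be enormous and min_scan would exceed the
queue length, forcing an immediate full scan instead of work spread
over the update period.  A hypothetical back-of-the-envelope check,
with invented numbers:

    /*
     * Hypothetical check: a stale baseline of 0 after ~1 hour of
     * uptime (hz = 1000) demands more pages than the queue holds.
     */
    #include <stdio.h>

    int
    main(void)
    {
            long long pq_cnt = 250000, ticks_now = 3600000;
            long long hz = 1000, update_period = 600;
            long long min_scan = pq_cnt * (ticks_now - 0) /
                (hz * update_period);

            /* 250000 * 3600000 / 600000 = 1500000 > 250000 pages. */
            printf("min_scan = %lld (queue holds %lld)\n", min_scan,
                pq_cnt);
            return (0);
    }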