Do not requeue a held page or a page for which locking failed; just leave
them alone.  Process the act_count updates for the held pages in the
vm_pageout loop over the inactive queue, instead of refusing to do
anything with such pages.

Clarify the intent of the addl_page_shortage counter and change its use
for pages which are not processed in the loop according to the
description.

Reviewed by:	alc
MFC after:	2 weeks
commit 311e34e260
parent 77db9ed99e
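The hunks below interleave the old and new code. Condensed from them, here is a minimal sketch of the per-page policy in the inactive-queue loop after this change. It is illustrative only: the surrounding loop, the act_count/reference handling between the busy and hold checks, and the relock_queues target are elided, and all identifiers are taken from the diff itself.

        /*
         * Page or object lock failure means the page was removed from
         * the queue or moved within it; it is not "stuck", so it is
         * neither requeued nor counted in addl_page_shortage.
         */
        if (!vm_pageout_page_lock(m, &next)) {
                vm_page_unlock(m);
                continue;
        }
        object = m->object;
        if (!VM_OBJECT_TRYLOCK(object) &&
            !vm_pageout_fallback_object_lock(m, &next)) {
                vm_page_unlock(m);
                VM_OBJECT_UNLOCK(object);
                continue;
        }

        /* Busy pages stay at the queue front and count as stuck. */
        if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
                vm_page_unlock(m);
                VM_OBJECT_UNLOCK(object);
                addl_page_shortage++;
                continue;
        }

        /* ... act_count updates for the page happen here (elided) ... */

        /*
         * Held pages are stuck as well: count them, but leave them
         * where they are instead of requeueing.
         */
        if (m->hold_count != 0) {
                vm_page_unlock(m);
                VM_OBJECT_UNLOCK(object);
                addl_page_shortage++;
                goto relock_queues;
        }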
@@ -889,6 +889,12 @@ vm_pageout_scan(int pass)
 	 */
 	uma_reclaim();
 
+	/*
+	 * The addl_page_shortage is the number of temporarily
+	 * stuck pages in the inactive queue.  In other words, the
+	 * number of pages from cnt.v_inactive_count that should be
+	 * discounted in setting the target for the active queue scan.
+	 */
 	addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);
 
 	/*
@@ -945,38 +951,31 @@ vm_pageout_scan(int pass)
 		    ("Unmanaged page %p cannot be in inactive queue", m));
 
 		/*
-		 * Lock the page.
+		 * The page or object lock acquisitions fail if the
+		 * page was removed from the queue or moved to a
+		 * different position within the queue.  In either
+		 * case, addl_page_shortage should not be incremented.
 		 */
 		if (!vm_pageout_page_lock(m, &next)) {
 			vm_page_unlock(m);
-			addl_page_shortage++;
 			continue;
 		}
-
-		/*
-		 * A held page may be undergoing I/O, so skip it.
-		 */
-		if (m->hold_count) {
-			vm_page_unlock(m);
-			vm_page_requeue(m);
-			addl_page_shortage++;
-			continue;
-		}
-
-		/*
-		 * Don't mess with busy pages, keep in the front of the
-		 * queue, most likely are being paged out.
-		 */
 		object = m->object;
 		if (!VM_OBJECT_TRYLOCK(object) &&
-		    (!vm_pageout_fallback_object_lock(m, &next) ||
-		    m->hold_count != 0)) {
-			VM_OBJECT_UNLOCK(object);
+		    !vm_pageout_fallback_object_lock(m, &next)) {
 			vm_page_unlock(m);
-			addl_page_shortage++;
+			VM_OBJECT_UNLOCK(object);
 			continue;
 		}
-		if (m->busy || (m->oflags & VPO_BUSY)) {
+
+		/*
+		 * Don't mess with busy pages, keep them at at the
+		 * front of the queue, most likely they are being
+		 * paged out.  Increment addl_page_shortage for busy
+		 * pages, because they may leave the inactive queue
+		 * shortly after page scan is finished.
+		 */
+		if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
 			vm_page_unlock(m);
 			VM_OBJECT_UNLOCK(object);
 			addl_page_shortage++;
@@ -1036,6 +1035,21 @@ vm_pageout_scan(int pass)
 			goto relock_queues;
 		}
 
+		if (m->hold_count != 0) {
+			vm_page_unlock(m);
+			VM_OBJECT_UNLOCK(object);
+
+			/*
+			 * Held pages are essentially stuck in the
+			 * queue.  So, they ought to be discounted
+			 * from cnt.v_inactive_count.  See the
+			 * calculation of the page_shortage for the
+			 * loop over the active queue below.
+			 */
+			addl_page_shortage++;
+			goto relock_queues;
+		}
+
 		/*
 		 * If the upper level VM system does not believe that the page
 		 * is fully dirty, but it is mapped for write access, then we
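The new comments refer to the page_shortage calculation for the active-queue scan, which lies outside these hunks. For context, a hedged sketch of how that target is formed in vm_pageout_scan() of this era follows; the exact expression is not part of this diff, so treat the arithmetic as an approximation rather than the verbatim source.

        /*
         * Approximate shape of the active-queue target computation (not
         * shown in the hunks above).  Stuck inactive pages counted in
         * addl_page_shortage are effectively subtracted from
         * cnt.v_inactive_count, which raises the number of pages the
         * active-queue scan tries to deactivate.
         */
        page_shortage = vm_paging_target() +
            cnt.v_inactive_target - cnt.v_inactive_count;
        page_shortage += addl_page_shortage;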