Invalid pages do not need an update of the activation count, nor can
they be dirty.  Move the handling of invalid pages earlier in the
inactive scan.

Remove some code duplication in the scan by introducing the
'drop_page' label, which centralizes unlocking of the object and the
page.
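
For illustration, a minimal, self-contained sketch of the pattern behind the
'drop_page' label: every early-out path that previously repeated the page and
object unlock now jumps to a single cleanup label.  All names below
(struct page, scan_one_page(), page_unlock(), obj_unlock()) are hypothetical
stand-ins, not the real vm_page/vm_object API; the actual change is in the
hunks further down.

	#include <stdbool.h>
	#include <stdio.h>

	struct obj  { int locked; };
	struct page { int locked; struct obj *object; int valid; int hold_count; };

	static void page_unlock(struct page *p) { p->locked = 0; }
	static void obj_unlock(struct obj *o)   { o->locked = 0; }

	/* Scan one locked page; returns true if an early-out path handled it. */
	static bool
	scan_one_page(struct page *m)
	{
		bool done = true;

		/* Invalid pages are freed before any reference/dirty accounting. */
		if (m->valid == 0 && m->hold_count == 0) {
			printf("free invalid page\n");
			goto drop_page;
		}
		/* Held pages stay in the queue; just account for them. */
		if (m->hold_count != 0) {
			printf("skip held page\n");
			goto drop_page;
		}
		/* ... activation, laundering and freeing logic would go here ... */
		done = false;
	drop_page:
		/* Centralized unlock, standing in for the new 'drop_page' label. */
		page_unlock(m);
		obj_unlock(m->object);
		return (done);
	}

	int
	main(void)
	{
		struct obj o = { .locked = 1 };
		struct page m = { .locked = 1, .object = &o, .valid = 0, .hold_count = 0 };

		(void)scan_one_page(&m);
		return (0);
	}

With the unlocks centralized, the new invalid-page early exit costs a single
goto instead of another copy of the unlock sequence.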

Suggested and reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Author:	Konstantin Belousov
Date:	2015-06-14 20:23:41 +00:00
Parent:	b96e9390db
Commit:	776f729c86


@@ -1158,6 +1158,17 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 		vm_pagequeue_unlock(pq);
 		queues_locked = FALSE;
 
+		/*
+		 * Invalid pages can be easily freed. They cannot be
+		 * mapped, vm_page_free() asserts this.
+		 */
+		if (m->valid == 0 && m->hold_count == 0) {
+			vm_page_free(m);
+			PCPU_INC(cnt.v_dfree);
+			--page_shortage;
+			goto drop_page;
+		}
+
 		/*
 		 * We bump the activation count if the page has been
 		 * referenced while in the inactive queue. This makes
@@ -1192,15 +1203,10 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 				queues_locked = TRUE;
 				vm_page_requeue_locked(m);
 			}
-			VM_OBJECT_WUNLOCK(object);
-			vm_page_unlock(m);
-			goto relock_queues;
+			goto drop_page;
 		}
 
 		if (m->hold_count != 0) {
-			vm_page_unlock(m);
-			VM_OBJECT_WUNLOCK(object);
-
 			/*
 			 * Held pages are essentially stuck in the
 			 * queue. So, they ought to be discounted
@@ -1209,7 +1215,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 			 * loop over the active queue below.
 			 */
 			addl_page_shortage++;
-			goto relock_queues;
+			goto drop_page;
 		}
 
 		/*
@@ -1224,14 +1230,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 		if (m->dirty == 0 && object->ref_count != 0)
 			pmap_remove_all(m);
 
-		if (m->valid == 0) {
-			/*
-			 * Invalid pages can be easily freed
-			 */
-			vm_page_free(m);
-			PCPU_INC(cnt.v_dfree);
-			--page_shortage;
-		} else if (m->dirty == 0) {
+		if (m->dirty == 0) {
 			/*
 			 * Clean pages can be freed.
 			 */
@@ -1305,6 +1304,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 			vm_page_lock_assert(m, MA_NOTOWNED);
 			goto relock_queues;
 		}
+drop_page:
 		vm_page_unlock(m);
 		VM_OBJECT_WUNLOCK(object);
 relock_queues: