Revise the interface between vm_object_madvise() and vm_page_dontneed() so
that pointless calls to pmap_is_modified() can be easily avoided when
performing madvise(..., MADV_FREE).

Sponsored by:	EMC / Isilon Storage Division
alc 2013-06-10 01:48:21 +00:00
parent ac0545a5fe
commit 53ffec3a56
3 changed files with 30 additions and 31 deletions
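
For context, a minimal userland sketch (an illustration only, not part of this
change) of the madvise(2) request that ultimately reaches vm_object_madvise()
with MADV_FREE:

#include <sys/mman.h>

#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len;
	char *p;

	len = 16 * (size_t)getpagesize();
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/* Dirty the pages so the VM system has modified state to discard. */
	memset(p, 0xa5, len);

	/*
	 * Tell the kernel the contents are no longer needed; this is the
	 * request that vm_object_madvise() handles for MADV_FREE.
	 */
	if (madvise(p, len, MADV_FREE) == -1)
		err(1, "madvise");

	return (0);
}

Such regions are typically reused almost immediately by malloc(3), which is
why the comments in the diff below keep the page mapped and merely mark it
clean rather than unmapping or caching it.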

sys/vm/vm_object.c

@@ -1175,28 +1175,8 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
 		}
 		if (advise == MADV_WILLNEED) {
 			vm_page_activate(m);
-		} else if (advise == MADV_DONTNEED) {
-			vm_page_dontneed(m);
-		} else if (advise == MADV_FREE) {
-			/*
-			 * Mark the page clean. This will allow the page
-			 * to be freed up by the system. However, such pages
-			 * are often reused quickly by malloc()/free()
-			 * so we do not do anything that would cause
-			 * a page fault if we can help it.
-			 *
-			 * Specifically, we do not try to actually free
-			 * the page now nor do we try to put it in the
-			 * cache (which would cause a page fault on reuse).
-			 *
-			 * But we do make the page is freeable as we
-			 * can without actually taking the step of unmapping
-			 * it.
-			 */
-			pmap_clear_modify(m);
-			m->dirty = 0;
-			m->act_count = 0;
-			vm_page_dontneed(m);
+		} else {
+			vm_page_advise(m, advise);
 		}
 		vm_page_unlock(m);
 		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)

sys/vm/vm_page.c

@@ -93,6 +93,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/kernel.h>
 #include <sys/limits.h>
 #include <sys/malloc.h>
+#include <sys/mman.h>
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
@@ -2238,15 +2239,15 @@ vm_page_cache(vm_page_t m)
 }
 
 /*
- *	vm_page_dontneed
+ *	vm_page_advise
  *
  *	Cache, deactivate, or do nothing as appropriate. This routine
- *	is typically used by madvise() MADV_DONTNEED.
+ *	is used by madvise().
  *
  *	Generally speaking we want to move the page into the cache so
  *	it gets reused quickly. However, this can result in a silly syndrome
  *	due to the page recycling too quickly. Small objects will not be
- *	fully cached. On the otherhand, if we move the page to the inactive
+ *	fully cached. On the other hand, if we move the page to the inactive
  *	queue we wind up with a problem whereby very large objects
  *	unnecessarily blow away our inactive and cache queues.
  *
@@ -2261,13 +2262,31 @@ vm_page_cache(vm_page_t m)
  *	The object and page must be locked.
  */
 void
-vm_page_dontneed(vm_page_t m)
+vm_page_advise(vm_page_t m, int advice)
 {
-	int dnw;
-	int head;
+	int dnw, head;
 
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
+	if (advice == MADV_FREE) {
+		/*
+		 * Mark the page clean. This will allow the page to be freed
+		 * up by the system. However, such pages are often reused
+		 * quickly by malloc() so we do not do anything that would
+		 * cause a page fault if we can help it.
+		 *
+		 * Specifically, we do not try to actually free the page now
+		 * nor do we try to put it in the cache (which would cause a
+		 * page fault on reuse).
+		 *
+		 * But we do make the page is freeable as we can without
+		 * actually taking the step of unmapping it.
+		 */
+		pmap_clear_modify(m);
+		m->dirty = 0;
+		m->act_count = 0;
+	} else if (advice != MADV_DONTNEED)
+		return;
 	dnw = PCPU_GET(dnweight);
 	PCPU_INC(dnweight);
@@ -2294,7 +2313,7 @@ vm_page_dontneed(vm_page_t m)
 	pmap_clear_reference(m);
 	vm_page_aflag_clear(m, PGA_REFERENCED);
 
-	if (m->dirty == 0 && pmap_is_modified(m))
+	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
 		vm_page_dirty(m);
 
 	if (m->dirty || (dnw & 0x0070) == 0) {
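
The changed line above is where the commit message cashes out: once the advice
is passed down, the dirty check can be skipped entirely for MADV_FREE, whose
pages were just explicitly marked clean. A stand-alone model of that guard (my
own sketch with stubbed names, not kernel code), showing that short-circuit
evaluation never reaches the pmap query in the MADV_FREE case:

#include <sys/mman.h>

#include <stdio.h>

static int pmap_queries;	/* counts calls to the stub below */

/* Hypothetical stand-in for the real pmap_is_modified(). */
static int
pmap_is_modified_stub(void)
{

	pmap_queries++;
	return (0);
}

/* Models the dirty check added above; returns the resulting dirty flag. */
static int
advise_model(int advice, int dirty)
{

	if (advice == MADV_FREE)
		dirty = 0;	/* mirrors pmap_clear_modify() / m->dirty = 0 */
	if (advice != MADV_FREE && dirty == 0 && pmap_is_modified_stub())
		dirty = 1;	/* mirrors vm_page_dirty(m) */
	return (dirty);
}

int
main(void)
{

	(void)advise_model(MADV_DONTNEED, 0);	/* clean page: pmap is queried */
	(void)advise_model(MADV_FREE, 1);	/* guard short-circuits the query */
	printf("pmap queried %d time(s)\n", pmap_queries);	/* prints 1 */
	return (0);
}

Before this change, vm_object_madvise() cleared the page itself and then
called vm_page_dontneed(), which had no way of knowing that and performed the
pmap_is_modified() check anyway.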

sys/vm/vm_page.h

@@ -370,6 +370,7 @@ void vm_page_free_zero(vm_page_t m);
 void vm_page_wakeup(vm_page_t m);
 
 void vm_page_activate (vm_page_t);
+void vm_page_advise(vm_page_t m, int advice);
 vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
@@ -381,7 +382,6 @@ void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
 void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
 int vm_page_try_to_cache (vm_page_t);
 int vm_page_try_to_free (vm_page_t);
-void vm_page_dontneed(vm_page_t);
 void vm_page_deactivate (vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_locked(vm_page_t m);