Don't remove the virtual-to-physical mapping when an sf_buf is freed.
Instead, allow the mapping to persist, but add the sf_buf to a free list.
If a later sendfile(2) or zero-copy send resends the same physical page,
perhaps with the same or different contents, then the mapping overhead
is avoided and the sf_buf is simply removed from the free list.

In other words, the i386 sf_buf implementation now behaves as a cache
of virtual-to-physical translations using an LRU replacement policy on
inactive sf_bufs.  This is similar in concept to a part of the
http://www.cs.princeton.edu/~yruan/debox/ patch, but much simpler in
implementation.

Note: none of this is required on alpha, amd64, or ia64.  They now use
their direct virtual-to-physical mapping to avoid any ephemeral mapping
overheads in their sf_buf implementations.
commit d821fb3e55
parent 93908284d5
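To make the policy described in the log message concrete, here is a
minimal userspace sketch of the same idea, assuming nothing from the
kernel sources: the hash table answers "is this page already mapped?",
inactive bufs sit on a tail queue in least-recently-freed order, and the
expensive pmap work is simulated by a counter.  The names (struct cbuf,
cache_get(), cache_put()) are hypothetical.

#include <sys/queue.h>
#include <stdio.h>

#define NBUFS		4
#define NHASH		8
#define HASH(page)	((page) & (NHASH - 1))

struct cbuf {
	LIST_ENTRY(cbuf)  hash_entry;	/* chain of bufs with a live mapping */
	TAILQ_ENTRY(cbuf) free_entry;	/* inactive bufs, LRU order */
	int page;			/* "physical page" this buf maps */
	int ref_count;
};

static LIST_HEAD(, cbuf) hash_table[NHASH];
static TAILQ_HEAD(, cbuf) freelist = TAILQ_HEAD_INITIALIZER(freelist);
static struct cbuf bufs[NBUFS];
static int mappings_created;		/* stand-in for the pmap cost */

static void
cache_init(void)
{
	int i;

	for (i = 0; i < NBUFS; i++) {
		bufs[i].page = -1;	/* maps nothing yet */
		TAILQ_INSERT_TAIL(&freelist, &bufs[i], free_entry);
	}
}

static struct cbuf *
cache_get(int page)
{
	struct cbuf *cb;

	/* Hit: the translation survives from an earlier use. */
	LIST_FOREACH(cb, &hash_table[HASH(page)], hash_entry) {
		if (cb->page == page) {
			if (cb->ref_count == 0)
				TAILQ_REMOVE(&freelist, cb, free_entry);
			cb->ref_count++;
			return (cb);
		}
	}

	/* Miss: evict the least recently freed buf and remap it. */
	if ((cb = TAILQ_FIRST(&freelist)) == NULL)
		return (NULL);		/* the kernel would msleep() here */
	TAILQ_REMOVE(&freelist, cb, free_entry);
	if (cb->page != -1)
		LIST_REMOVE(cb, hash_entry);	/* drop the stale hash entry */
	LIST_INSERT_HEAD(&hash_table[HASH(page)], cb, hash_entry);
	cb->page = page;
	cb->ref_count = 1;
	mappings_created++;		/* the expensive step being cached */
	return (cb);
}

static void
cache_put(struct cbuf *cb)
{
	/* Keep the mapping; only make the buf reclaimable. */
	if (--cb->ref_count == 0)
		TAILQ_INSERT_TAIL(&freelist, cb, free_entry);
}

int
main(void)
{
	struct cbuf *cb;

	cache_init();
	cb = cache_get(7);		/* miss: one mapping made */
	cache_put(cb);
	cb = cache_get(7);		/* hit: no new mapping */
	cache_put(cb);
	printf("mappings created: %d\n", mappings_created);	/* prints 1 */
	return (0);
}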
@@ -110,7 +110,7 @@ static u_long sf_buf_hashmask;
 
 #define SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)
 
-static struct sf_head sf_buf_freelist;
+static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
 static u_int sf_buf_alloc_want;
 
 /*
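The data-structure change in this hunk is what makes the LRU policy
cheap: a sys/queue.h LIST supports O(1) insertion only at the head,
while a TAILQ also supports O(1) insertion at the tail, so a freed buf
can be appended as most-recently-used while allocation evicts from the
head.  As a reminder of the two declaration forms (illustrative tag
names):

#include <sys/queue.h>

struct sf_buf;

LIST_HEAD(sf_head, sf_buf);	/* hash chains: head insertion suffices */
TAILQ_HEAD(sf_tq, sf_buf);	/* LRU free list: needs tail insertion */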
@@ -583,13 +583,13 @@ sf_buf_init(void *arg)
 	int i;
 
 	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
-	LIST_INIT(&sf_buf_freelist);
+	TAILQ_INIT(&sf_buf_freelist);
 	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
 	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
 	    M_NOWAIT | M_ZERO);
 	for (i = 0; i < nsfbufs; i++) {
 		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
-		LIST_INSERT_HEAD(&sf_buf_freelist, &sf_bufs[i], list_entry);
+		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
 	}
 	sf_buf_alloc_want = 0;
 	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
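Because every sf_buf owns a fixed, contiguous page of KVA (kva =
sf_base + i * PAGE_SIZE), translating a kernel virtual address back to
its sf_buf is pure arithmetic.  The implementation of this era used a
macro along these lines; the following is a hedged reconstruction, not
part of the hunks shown:

/* Hypothetical reconstruction: recover the sf_buf backing a kva. */
#define dtosf(x)	(&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])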
@@ -609,11 +609,13 @@ sf_buf_alloc(struct vm_page *m)
 	mtx_lock(&sf_buf_lock);
 	LIST_FOREACH(sf, hash_list, list_entry) {
 		if (sf->m == m) {
+			if (sf->ref_count == 0)
+				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
 			sf->ref_count++;
 			goto done;
 		}
 	}
-	while ((sf = LIST_FIRST(&sf_buf_freelist)) == NULL) {
+	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
 		sf_buf_alloc_want++;
 		error = msleep(&sf_buf_freelist, &sf_buf_lock, PVM|PCATCH,
 		    "sfbufa", 0);
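The sleep loop above pairs with the wakeup_one() call in sf_buf_free()
further down: a thread that finds the free list empty advertises itself
in sf_buf_alloc_want and sleeps on the address of the free list,
re-checking the list after every wakeup because msleep(9) drops and
reacquires sf_buf_lock.  A condensed sketch of both halves of the
handshake; the sf_buf_alloc_want decrement falls between the hunks
shown, so its exact placement here is an assumption:

	/* Consumer (sf_buf_alloc): wait for a buf, honoring signals. */
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		sf_buf_alloc_want++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    PVM | PCATCH, "sfbufa", 0);
		sf_buf_alloc_want--;	/* assumed placement, see above */
		if (error != 0)
			goto done;	/* interrupted by a signal */
	}

	/* Producer (sf_buf_free): wake one waiter per freed buf. */
	if (sf_buf_alloc_want > 0)
		wakeup_one(&sf_buf_freelist);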
@@ -625,7 +627,9 @@ sf_buf_alloc(struct vm_page *m)
 		if (error)
 			goto done;
 	}
-	LIST_REMOVE(sf, list_entry);
+	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
+	if (sf->m != NULL)
+		LIST_REMOVE(sf, list_entry);
 	LIST_INSERT_HEAD(hash_list, sf, list_entry);
 	sf->ref_count = 1;
 	sf->m = m;
@@ -649,10 +653,7 @@ sf_buf_free(void *addr, void *args)
 	m = sf->m;
 	sf->ref_count--;
 	if (sf->ref_count == 0) {
-		pmap_qremove((vm_offset_t)addr, 1);
-		sf->m = NULL;
-		LIST_REMOVE(sf, list_entry);
-		LIST_INSERT_HEAD(&sf_buf_freelist, sf, list_entry);
+		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
 		if (sf_buf_alloc_want > 0)
 			wakeup_one(&sf_buf_freelist);
 	}
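This hunk is the heart of the change: previously the last release tore
down the translation with pmap_qremove() and unhashed the buf, while
the new code leaves both intact and merely appends the buf to the tail
of the free list as most recently used.  Establishing a translation is
therefore only needed on a cache miss in sf_buf_alloc(); that call sits
outside the hunks shown, so the following line is a hedged sketch of it
rather than a quotation from the diff:

	/* On a cache miss, after sf->m = m (assumed placement): */
	pmap_qenter(sf->kva, &sf->m, 1);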
@@ -35,6 +35,7 @@ struct vm_page;
 
 struct sf_buf {
 	LIST_ENTRY(sf_buf) list_entry;	/* list of buffers */
+	TAILQ_ENTRY(sf_buf) free_entry;	/* list of buffers */
 	struct vm_page *m;		/* currently mapped page */
 	vm_offset_t kva;		/* va of mapping */
 	int ref_count;			/* usage of this mapping */
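The struct now threads each buf onto two containers at once: list_entry
keeps it on its hash chain even while inactive, which is what lets a
later sf_buf_alloc() of the same page hit, and free_entry strings
inactive bufs into the LRU free list.  A hedged usage sketch from a
caller's point of view, with copyout() standing in for whatever the
consumer actually does with the mapping (pg and uaddr are assumed to be
a vm_page_t and a user address):

	struct sf_buf *sf;
	int error;

	if ((sf = sf_buf_alloc(pg)) == NULL)	/* may sleep on "sfbufa" */
		return (EINTR);			/* interrupted while waiting */
	error = copyout((void *)sf->kva, uaddr, PAGE_SIZE);
	sf_buf_free((void *)sf->kva, NULL);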