MFp4 alc_popmap
Change the way that reservations keep track of which pages are in use. Instead of using the page's PG_CACHED and PG_FREE flags, maintain a bit vector within the reservation. This approach has a couple of benefits. First, it makes breaking reservations much cheaper because there are fewer cache misses to identify the unused pages. Second, it is a prerequisite for supporting two or more reservation sizes.
parent 058e24d34b
commit ec17932242
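To make the new bookkeeping concrete, here is a small user-space sketch of the idea (the struct, constant values, and helper names below are illustrative stand-ins, not the kernel's vm_reserv interfaces): each reservation carries one bit per page, populate/depopulate flip a bit and adjust the count, and an ffsl()-style scan over whole popmap words locates free pages without touching each vm_page structure, which is where the cache-miss savings when breaking a reservation come from.

/*
 * Minimal sketch of per-reservation population-map bookkeeping.
 * Illustrative only: "struct reserv", NPAGES, populate(), depopulate(),
 * and first_free() are stand-ins for the kernel's vm_reserv code.
 */
#include <limits.h>
#include <stdio.h>
#include <strings.h>	/* ffsl(3) */

typedef unsigned long popmap_t;

#define NPAGES		512	/* pages per reservation (512 on amd64) */
#define NBPOPMAP	(CHAR_BIT * sizeof(popmap_t))
#define NPOPMAP		((NPAGES + NBPOPMAP - 1) / NBPOPMAP)

struct reserv {
	int		popcnt;			/* # of pages in use */
	popmap_t	popmap[NPOPMAP];	/* bit vector of used pages */
};

/* Mark page "index" in use, as vm_reserv_populate(rv, index) now does. */
static void
populate(struct reserv *rv, int index)
{
	rv->popmap[index / NBPOPMAP] |= 1UL << (index % NBPOPMAP);
	rv->popcnt++;
}

/* Mark page "index" unused, as vm_reserv_depopulate(rv, index) now does. */
static void
depopulate(struct reserv *rv, int index)
{
	rv->popmap[index / NBPOPMAP] &= ~(1UL << (index % NBPOPMAP));
	rv->popcnt--;
}

/*
 * Find the first unused page at or after "start" by scanning whole popmap
 * words with ffsl(~word), the same trick the patch uses to find runs of
 * free pages cheaply.  Returns -1 if every page is in use.
 */
static int
first_free(struct reserv *rv, int start)
{
	popmap_t w;
	int i, lo;

	i = start / NBPOPMAP;
	/* Treat bits below "start" in the first word as if they were set. */
	w = rv->popmap[i] | ((1UL << (start % NBPOPMAP)) - 1);
	for (;;) {
		lo = ffsl(~w);
		if (lo != 0)
			return (NBPOPMAP * i + lo - 1);
		if (++i == NPOPMAP)
			return (-1);
		w = rv->popmap[i];
	}
}

int
main(void)
{
	struct reserv rv = { 0 };

	populate(&rv, 0);
	populate(&rv, 1);
	populate(&rv, 70);
	depopulate(&rv, 1);
	printf("pages in use: %d, first free page: %d\n",
	    rv.popcnt, first_free(&rv, 0));	/* prints 2 and 1 */
	return (0);
}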
@@ -1,6 +1,6 @@
/*-
* Copyright (c) 2002-2006 Rice University
* Copyright (c) 2007-2008 Alan L. Cox <alc@cs.rice.edu>
* Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
* All rights reserved.
*
* This software was developed for the FreeBSD Project by Alan L. Cox,
@@ -92,6 +92,21 @@ __FBSDID("$FreeBSD$");
#define VM_RESERV_INDEX(object, pindex) \
(((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))

/*
* The size of a population map entry
*/
typedef u_long popmap_t;

/*
* The number of bits in a population map entry
*/
#define NBPOPMAP (NBBY * sizeof(popmap_t))

/*
* The number of population map entries in a reservation
*/
#define NPOPMAP howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)

/*
* The reservation structure
*
@@ -114,6 +129,7 @@ struct vm_reserv {
vm_page_t pages; /* first page of a superpage */
int popcnt; /* # of pages in use */
char inpartpopq;
popmap_t popmap[NPOPMAP]; /* bit vector of used pages */
};

/*
@@ -170,11 +186,12 @@ static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
&vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void vm_reserv_depopulate(vm_reserv_t rv);
static void vm_reserv_break(vm_reserv_t rv, vm_page_t m);
static void vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t vm_reserv_from_page(vm_page_t m);
static boolean_t vm_reserv_has_pindex(vm_reserv_t rv,
vm_pindex_t pindex);
static void vm_reserv_populate(vm_reserv_t rv);
static void vm_reserv_populate(vm_reserv_t rv, int index);
static void vm_reserv_reclaim(vm_reserv_t rv);

/*
@@ -212,13 +229,13 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
/*
* Reduces the given reservation's population count. If the population count
* becomes zero, the reservation is destroyed. Additionally, moves the
* reservation to the tail of the partially-populated reservations queue if the
* reservation to the tail of the partially-populated reservation queue if the
* population count is non-zero.
*
* The free page queue lock must be held.
*/
static void
vm_reserv_depopulate(vm_reserv_t rv)
vm_reserv_depopulate(vm_reserv_t rv, int index)
{

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
@@ -230,6 +247,7 @@ vm_reserv_depopulate(vm_reserv_t rv)
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
}
clrbit(rv->popmap, index);
rv->popcnt--;
if (rv->popcnt == 0) {
LIST_REMOVE(rv, objq);
@@ -270,7 +288,7 @@ vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
* The free page queue must be locked.
*/
static void
vm_reserv_populate(vm_reserv_t rv)
vm_reserv_populate(vm_reserv_t rv, int index)
{

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
@@ -282,6 +300,7 @@ vm_reserv_populate(vm_reserv_t rv)
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
}
setbit(rv->popmap, index);
rv->popcnt++;
if (rv->popcnt < VM_LEVEL_0_NPAGES) {
rv->inpartpopq = TRUE;
@@ -437,9 +456,13 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
KASSERT(!rv->inpartpopq,
("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
rv));
for (i = 0; i < NPOPMAP; i++)
KASSERT(rv->popmap[i] == 0,
("vm_reserv_alloc_contig: reserv %p's popmap is corrupted",
rv));
n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
for (i = 0; i < n; i++)
vm_reserv_populate(rv);
vm_reserv_populate(rv, index + i);
npages -= n;
if (m_ret == NULL) {
m_ret = &rv->pages[index];
@@ -466,10 +489,10 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
return (NULL);
/* Handle vm_page_rename(m, new_object, ...). */
for (i = 0; i < npages; i++)
if ((rv->pages[index + i].flags & (PG_CACHED | PG_FREE)) == 0)
if (isset(rv->popmap, index + i))
return (NULL);
for (i = 0; i < npages; i++)
vm_reserv_populate(rv);
vm_reserv_populate(rv, index + i);
return (m);
}

@@ -487,6 +510,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
vm_page_t m, msucc;
vm_pindex_t first, leftcap, rightcap;
vm_reserv_t rv;
int i, index;

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -575,21 +599,104 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
KASSERT(!rv->inpartpopq,
("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
vm_reserv_populate(rv);
return (&rv->pages[VM_RESERV_INDEX(object, pindex)]);
for (i = 0; i < NPOPMAP; i++)
KASSERT(rv->popmap[i] == 0,
("vm_reserv_alloc_page: reserv %p's popmap is corrupted",
rv));
index = VM_RESERV_INDEX(object, pindex);
vm_reserv_populate(rv, index);
return (&rv->pages[index]);

/*
* Found a matching reservation.
*/
found:
m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
index = VM_RESERV_INDEX(object, pindex);
m = &rv->pages[index];
/* Handle vm_page_rename(m, new_object, ...). */
if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
if (isset(rv->popmap, index))
return (NULL);
vm_reserv_populate(rv);
vm_reserv_populate(rv, index);
return (m);
}

/*
* Breaks the given reservation. Except for the specified cached or free
* page, all cached and free pages in the reservation are returned to the
* physical memory allocator. The reservation's population count and map are
* reset to their initial state.
*
* The given reservation must not be in the partially-populated reservation
* queue. The free page queue lock must be held.
*/
static void
vm_reserv_break(vm_reserv_t rv, vm_page_t m)
{
int begin_zeroes, hi, i, lo;

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
KASSERT(rv->object != NULL,
("vm_reserv_break: reserv %p is free", rv));
KASSERT(!rv->inpartpopq,
("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
LIST_REMOVE(rv, objq);
rv->object = NULL;
if (m != NULL) {
/*
* Since the reservation is being broken, there is no harm in
* abusing the population map to stop "m" from being returned
* to the physical memory allocator.
*/
i = m - rv->pages;
KASSERT(isclr(rv->popmap, i),
("vm_reserv_break: reserv %p's popmap is corrupted", rv));
setbit(rv->popmap, i);
rv->popcnt++;
}
i = hi = 0;
do {
/* Find the next 0 bit. Any previous 0 bits are < "hi". */
lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
if (lo == 0) {
/* Redundantly clears bits < "hi". */
rv->popmap[i] = 0;
rv->popcnt -= NBPOPMAP - hi;
while (++i < NPOPMAP) {
lo = ffsl(~rv->popmap[i]);
if (lo == 0) {
rv->popmap[i] = 0;
rv->popcnt -= NBPOPMAP;
} else
break;
}
if (i == NPOPMAP)
break;
hi = 0;
}
KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
/* Convert from ffsl() to ordinary bit numbering. */
lo--;
if (lo > 0) {
/* Redundantly clears bits < "hi". */
rv->popmap[i] &= ~((1UL << lo) - 1);
rv->popcnt -= lo - hi;
}
begin_zeroes = NBPOPMAP * i + lo;
/* Find the next 1 bit. */
do
hi = ffsl(rv->popmap[i]);
while (hi == 0 && ++i < NPOPMAP);
if (i != NPOPMAP)
/* Convert from ffsl() to ordinary bit numbering. */
hi--;
vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
hi - begin_zeroes);
} while (i < NPOPMAP);
KASSERT(rv->popcnt == 0,
("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
vm_reserv_broken++;
}

/*
* Breaks all reservations belonging to the given object.
*/
@@ -597,7 +704,6 @@ void
vm_reserv_break_all(vm_object_t object)
{
vm_reserv_t rv;
int i;

mtx_lock(&vm_page_queue_free_mtx);
while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
@@ -607,18 +713,7 @@ vm_reserv_break_all(vm_object_t object)
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
}
LIST_REMOVE(rv, objq);
rv->object = NULL;
for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
vm_phys_free_pages(&rv->pages[i], 0);
else
rv->popcnt--;
}
KASSERT(rv->popcnt == 0,
("vm_reserv_break_all: reserv %p's popcnt is corrupted",
rv));
vm_reserv_broken++;
vm_reserv_break(rv, NULL);
}
mtx_unlock(&vm_page_queue_free_mtx);
}
@@ -641,7 +736,7 @@ vm_reserv_free_page(vm_page_t m)
if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
VM_LEVEL_0_ORDER);
vm_reserv_depopulate(rv);
vm_reserv_depopulate(rv, m - rv->pages);
return (TRUE);
}

@@ -699,43 +794,26 @@ boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
vm_reserv_t rv;
int i, m_index;
int index;

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
rv = vm_reserv_from_page(m);
if (rv->object == NULL)
return (FALSE);
KASSERT((m->flags & PG_CACHED) != 0,
("vm_reserv_uncache_page: page %p is not cached", m));
("vm_reserv_reactivate_page: page %p is not cached", m));
if (m->object == rv->object &&
m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex))
vm_reserv_populate(rv);
m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
m->pindex)))
vm_reserv_populate(rv, index);
else {
KASSERT(rv->inpartpopq,
("vm_reserv_uncache_page: reserv %p's inpartpopq is FALSE",
("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
rv));
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
LIST_REMOVE(rv, objq);
rv->object = NULL;
/* Don't vm_phys_free_pages(m, 0). */
m_index = m - rv->pages;
for (i = 0; i < m_index; i++) {
if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
vm_phys_free_pages(&rv->pages[i], 0);
else
rv->popcnt--;
}
for (i++; i < VM_LEVEL_0_NPAGES; i++) {
if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
vm_phys_free_pages(&rv->pages[i], 0);
else
rv->popcnt--;
}
KASSERT(rv->popcnt == 0,
("vm_reserv_uncache_page: reserv %p's popcnt is corrupted",
rv));
vm_reserv_broken++;
/* Don't release "m" to the physical memory allocator. */
vm_reserv_break(rv, m);
}
return (TRUE);
}
@@ -749,25 +827,13 @@ vm_reserv_reactivate_page(vm_page_t m)
static void
vm_reserv_reclaim(vm_reserv_t rv)
{
int i;

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
KASSERT(rv->inpartpopq,
("vm_reserv_reclaim: reserv %p's inpartpopq is corrupted", rv));
("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
rv->inpartpopq = FALSE;
KASSERT(rv->object != NULL,
("vm_reserv_reclaim: reserv %p is free", rv));
LIST_REMOVE(rv, objq);
rv->object = NULL;
for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
vm_phys_free_pages(&rv->pages[i], 0);
else
rv->popcnt--;
}
KASSERT(rv->popcnt == 0,
("vm_reserv_reclaim: reserv %p's popcnt is corrupted", rv));
vm_reserv_break(rv, NULL);
vm_reserv_reclaimed++;
}

@@ -804,9 +870,9 @@ boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary)
{
vm_paddr_t pa, pa_length, size;
vm_paddr_t pa, size;
vm_reserv_t rv;
int i;
int hi, i, lo, next_free;

mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
if (npages > VM_LEVEL_0_NPAGES - 1)
@@ -815,30 +881,61 @@ vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
if (pa + PAGE_SIZE - size < low) {
/* this entire reservation is too low; go to next */
/* This entire reservation is too low; go to next. */
continue;
}
pa_length = 0;
for (i = 0; i < VM_LEVEL_0_NPAGES; i++)
if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) {
pa_length += PAGE_SIZE;
if (pa_length == PAGE_SIZE) {
pa = VM_PAGE_TO_PHYS(&rv->pages[i]);
if (pa + size > high) {
/* skip to next reservation */
break;
} else if (pa < low ||
(pa & (alignment - 1)) != 0 ||
((pa ^ (pa + size - 1)) &
~(boundary - 1)) != 0)
pa_length = 0;
}
if (pa_length >= size) {
pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
if (pa + size > high) {
/* This entire reservation is too high; go to next. */
continue;
}
if (pa < low) {
/* Start the search for free pages at "low". */
i = (low - pa) / NBPOPMAP;
hi = (low - pa) % NBPOPMAP;
} else
i = hi = 0;
do {
/* Find the next free page. */
lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
while (lo == 0 && ++i < NPOPMAP)
lo = ffsl(~rv->popmap[i]);
if (i == NPOPMAP)
break;
/* Convert from ffsl() to ordinary bit numbering. */
lo--;
next_free = NBPOPMAP * i + lo;
pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
KASSERT(pa >= low,
("vm_reserv_reclaim_contig: pa is too low"));
if (pa + size > high) {
/* The rest of this reservation is too high. */
break;
} else if ((pa & (alignment - 1)) != 0 ||
((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
/* Continue with this reservation. */
hi = lo;
continue;
}
/* Find the next used page. */
hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
while (hi == 0 && ++i < NPOPMAP) {
if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
size) {
vm_reserv_reclaim(rv);
return (TRUE);
}
} else
pa_length = 0;
hi = ffsl(rv->popmap[i]);
}
/* Convert from ffsl() to ordinary bit numbering. */
if (i != NPOPMAP)
hi--;
if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
size) {
vm_reserv_reclaim(rv);
return (TRUE);
}
} while (i < NPOPMAP);
}
return (FALSE);
}