Eliminate vm_phys_bootstrap_alloc().  It was a failed attempt at
eliminating duplicated code in the various pmap implementations.

Micro-optimize vm_phys_free_pages(): don't compute the page's physical
address unless buddy coalescing is possible, and toggle the buddy bit in
place instead of maintaining a separate "pa_buddy".

Introduce vm_phys_free_contig().  It is a fast routine for freeing an
arbitrary number of physically contiguous pages.  In particular, it
doesn't require the number of pages to be a power of two.
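
For illustration, the decomposition strategy can be sketched in userland
C.  In the sketch below, the PAGE_SHIFT and VM_NFREEORDER values are
assumptions, printf() stands in for the call to vm_phys_free_pages(),
and ffsl_()/flsl_() are portable substitutes for the kernel's
ffsl()/flsl():

#include <stdio.h>

#define	PAGE_SHIFT	12	/* assumed 4KB pages */
#define	VM_NFREEORDER	13	/* assumed free-list order count */

/* Portable substitutes for the kernel's ffsl()/flsl(). */
static int
ffsl_(unsigned long long x)
{
	int i;

	if (x == 0)
		return (0);
	for (i = 1; (x & 1) == 0; i++)
		x >>= 1;
	return (i);
}

static int
flsl_(unsigned long x)
{
	int i;

	for (i = 0; x != 0; i++)
		x >>= 1;
	return (i);
}

int
main(void)
{
	unsigned long long pa = 0x5000;	/* physical address of the first page */
	unsigned long npages = 13;	/* arbitrary, not a power of two */
	unsigned int n;
	int order;

	/* Free the largest naturally aligned power-of-two runs first. */
	for (;; npages -= n, pa += (unsigned long long)n << PAGE_SHIFT) {
		order = ffsl_(pa >> PAGE_SHIFT) - 1;
		if (order < 0 || order >= VM_NFREEORDER)
			order = VM_NFREEORDER - 1;	/* pa is 0 or highly aligned */
		n = 1u << order;
		if (npages < n)
			break;
		printf("vm_phys_free_pages(%#llx, %d)  /* %u pages */\n",
		    pa, order, n);
	}
	/* Free the residue, largest order first. */
	for (; npages > 0; npages -= n, pa += (unsigned long long)n << PAGE_SHIFT) {
		order = flsl_(npages) - 1;
		n = 1u << order;
		printf("vm_phys_free_pages(%#llx, %d)  /* %u pages */\n",
		    pa, order, n);
	}
	return (0);
}

Freeing 13 pages starting at physical address 0x5000 decomposes into
runs of 1, 2, and 8 pages in the first loop and a residual run of 2 in
the second, so only four calls are made, and each freed run is naturally
aligned for immediate buddy coalescing.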

Use "u_long" instead of "unsigned long".

Bruce Evans (bde@) has convinced me that the "boundary" parameters
to kmem_alloc_contig(), vm_phys_alloc_contig(), and
vm_reserv_reclaim_contig() should be of type "vm_paddr_t" and not
"u_long".  Make this change.
Alan Cox 2011-10-30 05:06:14 +00:00
parent 2bb6453662
commit 5c1f2cc4c2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=226928
6 changed files with 76 additions and 57 deletions

--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c

@@ -335,7 +335,8 @@ contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
 vm_offset_t
 kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
-    vm_paddr_t high, u_long alignment, u_long boundary, vm_memattr_t memattr)
+    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+    vm_memattr_t memattr)
 {
 	vm_offset_t ret;
 	vm_page_t pages;
 

--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h

@@ -44,7 +44,7 @@ vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
 vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
-    vm_paddr_t low, vm_paddr_t high, u_long alignment, u_long boundary,
+    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
 vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_nofault_space(vm_map_t, vm_size_t, int);

--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c

@@ -489,26 +489,6 @@ vm_phys_alloc_freelist_pages(int flind, int pool, int order)
 	return (NULL);
 }
 
-/*
- * Allocate physical memory from phys_avail[].
- */
-vm_paddr_t
-vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
-{
-	vm_paddr_t pa;
-	int i;
-
-	size = round_page(size);
-	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
-		if (phys_avail[i + 1] - phys_avail[i] < size)
-			continue;
-		pa = phys_avail[i];
-		phys_avail[i] += size;
-		return (pa);
-	}
-	panic("vm_phys_bootstrap_alloc");
-}
-
 /*
  * Find the vm_page corresponding to the given physical address.
  */
@@ -554,7 +534,7 @@ vm_phys_free_pages(vm_page_t m, int order)
 {
	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
-	vm_paddr_t pa, pa_buddy;
+	vm_paddr_t pa;
 	vm_page_t m_buddy;
 
 	KASSERT(m->order == VM_NFREEORDER,
@@ -566,25 +546,26 @@ vm_phys_free_pages(vm_page_t m, int order)
 	KASSERT(order < VM_NFREEORDER,
 	    ("vm_phys_free_pages: order %d is out of range", order));
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	pa = VM_PAGE_TO_PHYS(m);
 	seg = &vm_phys_segs[m->segind];
-	while (order < VM_NFREEORDER - 1) {
-		pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
-		if (pa_buddy < seg->start ||
-		    pa_buddy >= seg->end)
-			break;
-		m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
-		if (m_buddy->order != order)
-			break;
-		fl = (*seg->free_queues)[m_buddy->pool];
-		TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
-		fl[m_buddy->order].lcnt--;
-		m_buddy->order = VM_NFREEORDER;
-		if (m_buddy->pool != m->pool)
-			vm_phys_set_pool(m->pool, m_buddy, order);
-		order++;
-		pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
-		m = &seg->first_page[atop(pa - seg->start)];
+	if (order < VM_NFREEORDER - 1) {
+		pa = VM_PAGE_TO_PHYS(m);
+		do {
+			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
+			if (pa < seg->start || pa >= seg->end)
+				break;
+			m_buddy = &seg->first_page[atop(pa - seg->start)];
+			if (m_buddy->order != order)
+				break;
+			fl = (*seg->free_queues)[m_buddy->pool];
+			TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
+			fl[order].lcnt--;
+			m_buddy->order = VM_NFREEORDER;
+			if (m_buddy->pool != m->pool)
+				vm_phys_set_pool(m->pool, m_buddy, order);
+			order++;
+			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
+			m = &seg->first_page[atop(pa - seg->start)];
+		} while (order < VM_NFREEORDER - 1);
 	}
 	m->order = order;
 	fl = (*seg->free_queues)[m->pool];
@@ -592,6 +573,47 @@ vm_phys_free_pages(vm_page_t m, int order)
 	fl[order].lcnt++;
 }
 
+/*
+ * Free a contiguous, arbitrarily sized set of physical pages.
+ *
+ * The free page queues must be locked.
+ */
+void
+vm_phys_free_contig(vm_page_t m, u_long npages)
+{
+	u_int n;
+	int order;
+
+	/*
+	 * Avoid unnecessary coalescing by freeing the pages in the largest
+	 * possible power-of-two-sized subsets.
+	 */
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+	for (;; npages -= n) {
+		/*
+		 * Unsigned "min" is used here so that "order" is assigned
+		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
+		 * or the low-order bits of its physical address are zero
+		 * because the size of a physical address exceeds the size of
+		 * a long.
+		 */
+		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
+		    VM_NFREEORDER - 1);
+		n = 1 << order;
+		if (npages < n)
+			break;
+		vm_phys_free_pages(m, order);
+		m += n;
+	}
+	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
+	for (; npages > 0; npages -= n) {
+		order = flsl(npages) - 1;
+		n = 1 << order;
+		vm_phys_free_pages(m, order);
+		m += n;
+	}
+}
+
 /*
  * Set the pool for a contiguous, power of two-sized set of physical pages.
  */
@@ -728,14 +750,15 @@ vm_phys_zero_pages_idle(void)
  * "alignment" and "boundary" must be a power of two.
  */
 vm_page_t
-vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
-    unsigned long alignment, unsigned long boundary)
+vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
+    u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
 	struct vnode *vp;
 	vm_paddr_t pa, pa_last, size;
 	vm_page_t deferred_vdrop_list, m, m_ret;
+	u_long npages_end;
 	int domain, flind, i, oind, order, pind;
 
 #if VM_NDOMAIN > 1
@@ -848,13 +871,10 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
 			deferred_vdrop_list = m;
 		}
 	}
-	for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
-		m = &m_ret[i];
-		KASSERT(m->order == VM_NFREEORDER,
-		    ("vm_phys_alloc_contig: page %p has unexpected order %d",
-		    m, m->order));
-		vm_phys_free_pages(m, 0);
-	}
+	/* Return excess pages to the free lists. */
+	npages_end = roundup2(npages, 1 << imin(oind, order));
+	if (npages < npages_end)
+		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
 	mtx_unlock(&vm_page_queue_free_mtx);
 	while (deferred_vdrop_list != NULL) {
 		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);

--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h

@@ -50,12 +50,11 @@ struct mem_affinity {
 extern struct mem_affinity *mem_affinity;
 
 void vm_phys_add_page(vm_paddr_t pa);
-vm_page_t vm_phys_alloc_contig(unsigned long npages,
-    vm_paddr_t low, vm_paddr_t high,
-    unsigned long alignment, unsigned long boundary);
+vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
+    u_long alignment, vm_paddr_t boundary);
 vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
 vm_page_t vm_phys_alloc_pages(int pool, int order);
-vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
+void vm_phys_free_contig(vm_page_t m, u_long npages);
 void vm_phys_free_pages(vm_page_t m, int order);
 void vm_phys_init(void);
 void vm_phys_set_pool(int pool, vm_page_t m, int order);

--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c

@@ -628,7 +628,7 @@ vm_reserv_reclaim_inactive(void)
  */
 boolean_t
 vm_reserv_reclaim_contig(vm_paddr_t size, vm_paddr_t low, vm_paddr_t high,
-    unsigned long alignment, unsigned long boundary)
+    u_long alignment, vm_paddr_t boundary)
 {
 	vm_paddr_t pa, pa_length;
 	vm_reserv_t rv;

--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h

@@ -49,8 +49,7 @@ void vm_reserv_init(void);
 int vm_reserv_level_iffullpop(vm_page_t m);
 boolean_t vm_reserv_reactivate_page(vm_page_t m);
 boolean_t vm_reserv_reclaim_contig(vm_paddr_t size, vm_paddr_t low,
-    vm_paddr_t high, unsigned long alignment,
-    unsigned long boundary);
+    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 boolean_t vm_reserv_reclaim_inactive(void);
 void vm_reserv_rename(vm_page_t m, vm_object_t new_object,
     vm_object_t old_object, vm_pindex_t old_object_offset);