vm_phys_enq_range: handle npages==0

By letting vm_phys_enq_range handle the case when npages == 0,
the callers can stop checking it, and the compiler can stop
zero-checking with every call to ffs(). Letting vm_phys_enqueue_contig
call vm_phys_enq_range for part of its work also saves a few
bytes.

The amd64 object code shrinks by 80 bytes.

Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D41154
Doug Moore 2023-08-01 22:12:00 -05:00
parent 77f6be4484
commit 1a7fcf6d51

sys/vm/vm_phys.c

@@ -72,6 +72,8 @@ __FBSDID("$FreeBSD$");
 
 _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
     "Too many physsegs.");
+_Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
+    "vm_paddr_t too big for ffsll, flsll.");
 
 #ifdef NUMA
 struct mem_affinity __read_mostly *mem_affinity;
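The new assertion guards the ffsll()/flsll() calls introduced further down. As a standalone illustration (with a stand-in typedef, not the kernel's types), the same compile-time check looks like this:

#include <assert.h>	/* C11 static_assert */

typedef unsigned long long my_paddr_t;	/* hypothetical stand-in for vm_paddr_t */

/* Fails to compile, rather than silently truncating, if long long is too narrow. */
static_assert(sizeof(long long) >= sizeof(my_paddr_t),
    "my_paddr_t too big for ffsll, flsll.");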
@@ -690,18 +692,16 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
  *
  * The physical page m's buddy must not be free.
  */
-static void
+static vm_page_t
 vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 {
-	u_int n;
 	int order;
 
-	KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
 	KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
 	    ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
 	    ("vm_phys_enq_range: page %p and npages %u are misaligned",
 	    m, npages));
-	do {
+	while (npages > 0) {
 		KASSERT(m->order == VM_NFREEORDER,
 		    ("vm_phys_enq_range: page %p has unexpected order %d",
 		    m, m->order));
@@ -709,10 +709,10 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
 		KASSERT(order < VM_NFREEORDER,
 		    ("vm_phys_enq_range: order %d is out of range", order));
 		vm_freelist_add(fl, m, order, tail);
-		n = 1 << order;
-		m += n;
-		npages -= n;
-	} while (npages > 0);
+		m += 1 << order;
+		npages -= 1 << order;
+	}
+	return (m);
 }
 
 /*
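vm_phys_enq_range() carves [m, m + npages) into buddy blocks whose size is always the lowest set bit of the remaining npages, so the order = ffs(npages) - 1 computed inside a while (npages > 0) loop never sees a zero argument, and the returned pointer now marks the first page past the range. A minimal userland sketch of that loop shape (plain page-frame numbers standing in for vm_page_t):

#include <stdio.h>
#include <strings.h>			/* ffs() */

/* Sketch: print the buddy blocks for a range; npages == 0 is a no-op. */
static unsigned
enq_range_sketch(unsigned pfn, unsigned npages)
{
	int order;

	while (npages > 0) {
		order = ffs(npages) - 1;	/* lowest set bit picks the block size */
		printf("block: pfn %u, %u pages\n", pfn, 1u << order);
		pfn += 1u << order;
		npages -= 1u << order;
	}
	return (pfn);			/* first page frame past the freed range */
}

int
main(void)
{
	enq_range_sketch(10, 0);	/* allowed now: the loop body never runs */
	enq_range_sketch(10, 6);	/* 2 pages at 10, then 4 pages at 12 */
	return (0);
}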
@@ -744,7 +744,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 {
 	struct vm_freelist *alt, *fl;
 	vm_page_t m;
-	int avail, end, flind, freelist, i, need, oind, pind;
+	int avail, end, flind, freelist, i, oind, pind;
 
 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("vm_phys_alloc_npages: domain %d is out of range", domain));
@@ -762,20 +762,18 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 		for (oind = 0; oind < VM_NFREEORDER; oind++) {
 			while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
 				vm_freelist_rem(fl, m, oind);
-				avail = 1 << oind;
-				need = imin(npages - i, avail);
-				for (end = i + need; i < end;)
+				avail = i + (1 << oind);
+				end = imin(npages, avail);
+				while (i < end)
 					ma[i++] = m++;
-				if (need < avail) {
+				if (i == npages) {
 					/*
-					 * Return excess pages to fl.  Its
-					 * order [0, oind) queues are empty.
+					 * Return excess pages to fl.  Its order
+					 * [0, oind) queues are empty.
 					 */
-					vm_phys_enq_range(m, avail - need, fl,
-					    1);
-					return (npages);
-				} else if (i == npages)
+					vm_phys_enq_range(m, avail - i, fl, 1);
 					return (npages);
+				}
 			}
 		}
 		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
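The old code tracked need (pages still wanted from the current block) while the new code clips an absolute end index; the same transformation is applied to the mirror loop in the next hunk. A hypothetical standalone check that the two forms hand the caller the same pages and compute the same excess, including the excess == 0 case that vm_phys_enq_range() now tolerates:

#include <assert.h>
#include <stdio.h>

static int imin(int a, int b) { return (a < b ? a : b); }

int
main(void)
{
	int avail, end, i, need, npages, oind;

	for (npages = 1; npages <= 64; npages++)
		for (oind = 0; oind <= 6; oind++)
			for (i = 0; i < npages; i++) {
				/* Old: count the pages still needed from this block. */
				need = imin(npages - i, 1 << oind);
				/* New: clip the absolute end index instead. */
				avail = i + (1 << oind);
				end = imin(npages, avail);
				assert(end - i == need);		   /* same pages taken */
				assert((1 << oind) - need == avail - end); /* same excess, maybe 0 */
			}
	printf("old and new excess-page arithmetic agree\n");
	return (0);
}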
@@ -785,21 +783,20 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
 				    NULL) {
 					vm_freelist_rem(alt, m, oind);
 					vm_phys_set_pool(pool, m, oind);
-					avail = 1 << oind;
-					need = imin(npages - i, avail);
-					for (end = i + need; i < end;)
+					avail = i + (1 << oind);
+					end = imin(npages, avail);
+					while (i < end)
 						ma[i++] = m++;
-					if (need < avail) {
+					if (i == npages) {
 						/*
 						 * Return excess pages to fl.
 						 * Its order [0, oind) queues
 						 * are empty.
 						 */
-						vm_phys_enq_range(m, avail -
-						    need, fl, 1);
-						return (npages);
-					} else if (i == npages)
+						vm_phys_enq_range(m, avail - i,
+						    fl, 1);
 						return (npages);
+					}
 				}
 			}
 		}
@@ -1146,7 +1143,7 @@ max_order(vm_page_t m)
 	 * because the size of a physical address exceeds the size of
	 * a long.
	 */
-	return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
+	return (min(ffsll(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
 	    VM_NFREEORDER - 1));
 }
 
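ffsl() operates on long, which can be narrower than vm_paddr_t (for example, a 32-bit long with a 64-bit physical address space); ffsll() plus the new _Static_assert closes that gap. A standalone sketch of the failure mode, with a stand-in typedef:

#include <stdio.h>
#include <strings.h>	/* ffsll() lives here on FreeBSD */

typedef unsigned long long my_paddr_t;	/* stand-in for a 64-bit vm_paddr_t */

int
main(void)
{
	my_paddr_t pfn = 1ULL << 40;	/* page frame number above 2^32 */

	/*
	 * ffsll() sees bit 40 and returns 41.  If long were 32 bits,
	 * ffsl() would be handed 0 and return 0, which is why max_order()
	 * switches to ffsll() and the assertion above pins
	 * sizeof(long long) >= sizeof(vm_paddr_t).
	 */
	printf("ffsll: %d\n", ffsll(pfn));
	return (0);
}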
@@ -1162,6 +1159,7 @@ vm_phys_enqueue_contig(vm_page_t m, u_long npages)
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
 	vm_page_t m_end;
+	vm_paddr_t diff, lo;
 	int order;
 
 	/*
@@ -1173,15 +1171,15 @@ vm_phys_enqueue_contig(vm_page_t m, u_long npages)
 	fl = (*seg->free_queues)[m->pool];
 	m_end = m + npages;
 	/* Free blocks of increasing size. */
-	while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
-	    m + (1 << order) <= m_end) {
-		KASSERT(seg == &vm_phys_segs[m->segind],
-		    ("%s: page range [%p,%p) spans multiple segments",
-		    __func__, m_end - npages, m));
-		vm_freelist_add(fl, m, order, 1);
-		m += 1 << order;
+	lo = VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT;
+	if (m < m_end &&
+	    (diff = lo ^ (lo + npages - 1)) != 0) {
+		order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
+		m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo,
+		    fl, 1);
 	}
 	/* Free blocks of maximum size. */
+	order = VM_NFREEORDER - 1;
 	while (m + (1 << order) <= m_end) {
 		KASSERT(seg == &vm_phys_segs[m->segind],
 		    ("%s: page range [%p,%p) spans multiple segments",
@@ -1560,10 +1558,8 @@ vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
 		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
 	}
 	/* Return excess pages to the free lists. */
-	if (&m_run[npages] < m) {
-		fl = (*queues)[VM_FREEPOOL_DEFAULT];
-		vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
-	}
+	fl = (*queues)[VM_FREEPOOL_DEFAULT];
+	vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
 	return (m_run);
 }