vm: alloc pages from reserv before breaking it

Function vm_reserv_reclaim_contig breaks a reservation with enough
free space to satisfy an allocation request and returns the free space
to the buddy allocator. Change the function to allocate the requested
memory from the reservation before breaking it, and to return that
memory to the caller. That avoids a second call to the buddy allocator
and guarantees successful allocation after breaking the reservation,
where currently that success is not guaranteed.

Reviewed by:	alc, kib (previous version)
Differential Revision:	https://reviews.freebsd.org/D33644
Doug Moore 2021-12-24 12:59:16 -06:00
parent 7c0ec66385
commit 0d5fac2872
3 changed files with 24 additions and 21 deletions

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2186,9 +2186,6 @@ vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
 	vm_page_t m_ret;
 
 	vmd = VM_DOMAIN(domain);
-#if VM_NRESERVLEVEL > 0
-again:
-#endif
 	if (!vm_domain_allocate(vmd, req, npages))
 		return (NULL);
 	/*
@@ -2200,18 +2197,19 @@ vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
 	vm_domain_free_unlock(vmd);
 	if (m_ret != NULL)
 		return (m_ret);
-	vm_domain_freecnt_inc(vmd, npages);
 #if VM_NRESERVLEVEL > 0
 	/*
-	 * Try to break a reservation to replenish free page queues
-	 * in a way that allows the allocation to succeed.
+	 * Try to break a reservation to allocate the pages.
 	 */
-	if ((req & VM_ALLOC_NORECLAIM) == 0 &&
-	    vm_reserv_reclaim_contig(domain, npages, low,
-	    high, alignment, boundary))
-		goto again;
+	if ((req & VM_ALLOC_NORECLAIM) == 0) {
+		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
+		    high, alignment, boundary);
+		if (m_ret != NULL)
+			return (m_ret);
+	}
 #endif
-	return (m_ret);
+	vm_domain_freecnt_inc(vmd, npages);
+	return (NULL);
 }
 
 vm_page_t

diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -1312,12 +1312,13 @@ vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
  * contiguous physical memory.  If a satisfactory reservation is found, it is
  * broken.  Returns true if a reservation is broken and false otherwise.
  */
-bool
+vm_page_t
 vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_reserv_queue *queue;
 	vm_paddr_t pa, size;
+	vm_page_t m_ret;
 	vm_reserv_t marker, rv, rvn;
 	int hi, lo, posn, ppn_align, ppn_bound;
 
@@ -1333,7 +1334,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
 	 * no boundary-constrained allocation is possible.
 	 */
 	if (size > boundary)
-		return (false);
+		return (NULL);
 	marker = &vm_rvd[domain].marker;
 	queue = &vm_rvd[domain].partpop;
 	/*
@@ -1386,18 +1387,22 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
 		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
 		    ppn_align, ppn_bound);
 		if (posn >= 0) {
-			pa = VM_PAGE_TO_PHYS(&rv->pages[posn]);
+			vm_reserv_domain_scan_unlock(domain);
+			/* Allocate requested space */
+			rv->popcnt += npages;
+			while (npages-- > 0)
+				popmap_set(rv->popmap, posn + npages);
+			vm_reserv_reclaim(rv);
+			vm_reserv_unlock(rv);
+			m_ret = &rv->pages[posn];
+			pa = VM_PAGE_TO_PHYS(m_ret);
 			KASSERT((pa & (alignment - 1)) == 0,
 			    ("%s: adjusted address does not align to %lx",
 			    __func__, alignment));
 			KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
 			    ("%s: adjusted address spans boundary to %jx",
 			    __func__, (uintmax_t)boundary));
-			vm_reserv_domain_scan_unlock(domain);
-			vm_reserv_reclaim(rv);
-			vm_reserv_unlock(rv);
-			return (true);
+			return (m_ret);
 		}
 		vm_reserv_domain_lock(domain);
 		rvn = TAILQ_NEXT(rv, partpopq);
@@ -1405,7 +1410,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
 	}
 	vm_reserv_domain_unlock(domain);
 	vm_reserv_domain_scan_unlock(domain);
-	return (false);
+	return (NULL);
 }
 
 /*
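The success guarantee comes from the bookkeeping in the hunk above: the
chosen run is marked populated in the reservation's popmap before
vm_reserv_reclaim() breaks the reservation, so only pages still marked
free are released to the buddy allocator, while the marked run stays
with the caller. A standalone model of that bookkeeping follows; the
bitmap, page count, and names are simplified stand-ins for the kernel's
popmap over VM_LEVEL_0_NPAGES pages:

#include <limits.h>
#include <stdio.h>

#define NPAGES	512	/* pages per reservation in this model */
#define BPW	(sizeof(unsigned long) * CHAR_BIT)	/* bits per word */

static unsigned long popmap[NPAGES / BPW];	/* 1 = populated */

static void
popmap_set(unsigned long *map, int i)
{
	map[i / BPW] |= 1UL << (i % BPW);
}

static int
popmap_is_set(unsigned long *map, int i)
{
	return ((map[i / BPW] >> (i % BPW)) & 1);
}

int
main(void)
{
	int i, freed, npages, posn;

	/*
	 * Claim [posn, posn + npages) for the caller, mirroring what the
	 * new code does before calling vm_reserv_reclaim().
	 */
	posn = 100;
	npages = 16;
	while (npages-- > 0)
		popmap_set(popmap, posn + npages);

	/* "Break" the reservation: only clear bits go back to buddy. */
	freed = 0;
	for (i = 0; i < NPAGES; i++)
		if (!popmap_is_set(popmap, i))
			freed++;
	printf("%d pages freed to buddy, %d kept by the caller\n",
	    freed, NPAGES - freed);
	return (0);
}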

diff --git a/sys/vm/vm_reserv.h b/sys/vm/vm_reserv.h
--- a/sys/vm/vm_reserv.h
+++ b/sys/vm/vm_reserv.h
@@ -59,7 +59,7 @@ void vm_reserv_init(void);
 bool		vm_reserv_is_page_free(vm_page_t m);
 int		vm_reserv_level(vm_page_t m);
 int		vm_reserv_level_iffullpop(vm_page_t m);
-bool		vm_reserv_reclaim_contig(int domain, u_long npages,
+vm_page_t	vm_reserv_reclaim_contig(int domain, u_long npages,
 		    vm_paddr_t low, vm_paddr_t high, u_long alignment,
 		    vm_paddr_t boundary);
 bool		vm_reserv_reclaim_inactive(int domain);