[mips] [vm] restore translation of freelist to flind for page allocation
Commit r326346 moved the domain iterators from the physical layer up to the vm_page layer, but in doing so it dropped the translation of a freelist selector to a freelist index (flind) for vm_page_alloc_freelist(). Before that change the function expected a VM_FREELIST_* parameter; after it, it expected a freelist index. On small MIPS WiFi boxes with only a few megabytes of RAM there is only one freelist, VM_FREELIST_LOWMEM (1), and no VM_FREELIST_DEFAULT (0) (see sys/mips/include/vmparam.h), so the only populated freelist is selector 1 with flind 0. This commit first renames flind to freelist in vm_page_alloc_freelist() to make the meaning of the input parameter unambiguous, and then restores the translation in the physical layer so the freelist parameter is handled correctly.

Reported by:	landonf
Reviewed by:	jeff
Differential Revision:	https://reviews.freebsd.org/D13351
commit 0db2102aaa (parent 6f3d4ec84d)
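The mismatch is easiest to see with the freelist layout of such a box. The sketch below is purely illustrative and not code from this commit: the VM_FREELIST_* values mirror sys/mips/include/vmparam.h, while the table contents and the helper are hypothetical stand-ins for the translation that vm_phys.c's vm_freelist_to_flind provides.

/*
 * Illustrative only: on a board whose few megabytes of RAM all sit in
 * the low-memory range, only VM_FREELIST_LOWMEM is populated, so it is
 * assigned flind 0 and VM_FREELIST_DEFAULT gets no flind at all.
 */
#define	VM_NFREELIST		2	/* as in sys/mips/include/vmparam.h */
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_LOWMEM	1

/* -1 marks a freelist selector with no populated physical freelist. */
static const int example_freelist_to_flind[VM_NFREELIST] = {
	[VM_FREELIST_DEFAULT] = -1,	/* no pages in this freelist */
	[VM_FREELIST_LOWMEM] = 0,	/* the only populated list, flind 0 */
};

/* Hypothetical helper: map a VM_FREELIST_* selector to its flind. */
static int
example_freelist_to_flind_lookup(int freelist)
{
	return (example_freelist_to_flind[freelist]);
}

With this layout the selector VM_FREELIST_LOWMEM (1) and its index (0) differ, so a caller that passes the selector where an index is expected asks for a freelist that holds no pages.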
sys/vm/vm_page.c

@@ -2043,7 +2043,7 @@ vm_page_alloc_check(vm_page_t m)
  *	VM_ALLOC_ZERO		prefer a zeroed page
  */
 vm_page_t
-vm_page_alloc_freelist(int flind, int req)
+vm_page_alloc_freelist(int freelist, int req)
 {
 	struct vm_domain_iterator vi;
 	vm_page_t m;
@@ -2056,7 +2056,7 @@ vm_page_alloc_freelist(int flind, int req)
 	while (vm_domain_iterator_run(&vi, &domain) == 0) {
 		if (vm_domain_iterator_isdone(&vi))
 			req |= wait;
-		m = vm_page_alloc_freelist_domain(domain, flind, req);
+		m = vm_page_alloc_freelist_domain(domain, freelist, req);
 		if (m != NULL)
 			break;
 	}
@@ -2066,7 +2066,7 @@ vm_page_alloc_freelist(int flind, int req)
 }
 
 vm_page_t
-vm_page_alloc_freelist_domain(int domain, int flind, int req)
+vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 {
 	vm_page_t m;
 	u_int flags, free_count;
@@ -2090,7 +2090,7 @@ vm_page_alloc_freelist_domain(int domain, int flind, int req)
 	    vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
 	    vm_cnt.v_free_count > 0))
-		m = vm_phys_alloc_freelist_pages(domain, flind,
+		m = vm_phys_alloc_freelist_pages(domain, freelist,
 		    VM_FREEPOOL_DIRECT, 0);
 	if (m == NULL) {
 		if (vm_page_alloc_fail(NULL, req))

sys/vm/vm_phys.c

@@ -603,10 +603,10 @@ vm_page_t
 vm_phys_alloc_pages(int domain, int pool, int order)
 {
 	vm_page_t m;
-	int flind;
+	int freelist;
 
-	for (flind = 0; flind < vm_nfreelists; flind++) {
-		m = vm_phys_alloc_freelist_pages(domain, flind, pool, order);
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+		m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
 		if (m != NULL)
 			return (m);
 	}
@@ -621,16 +621,16 @@ vm_phys_alloc_pages(int domain, int pool, int order)
  *	The free page queues must be locked.
  */
 vm_page_t
-vm_phys_alloc_freelist_pages(int domain, int flind, int pool, int order)
+vm_phys_alloc_freelist_pages(int domain, int freelist, int pool, int order)
 {
 	struct vm_freelist *alt, *fl;
 	vm_page_t m;
-	int oind, pind;
+	int oind, pind, flind;
 
 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("vm_phys_alloc_freelist_pages: domain %d is out of range",
 	    domain));
-	KASSERT(flind < VM_NFREELIST,
+	KASSERT(freelist < VM_NFREELIST,
 	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
-	    flind));
+	    freelist));
 	KASSERT(pool < VM_NFREEPOOL,
@@ -638,6 +638,11 @@ vm_phys_alloc_freelist_pages(int domain, int flind, int pool, int order)
 	KASSERT(order < VM_NFREEORDER,
 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
 
+	flind = vm_freelist_to_flind[freelist];
+	/* Check if freelist is present */
+	if (flind < 0)
+		return (NULL);
+
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	fl = &vm_phys_free_queues[domain][flind][pool][0];
 	for (oind = order; oind < VM_NFREEORDER; oind++) {
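For context, a hedged usage sketch of the renamed interface: after this change a machine-dependent caller passes a VM_FREELIST_* selector and the physical layer translates it to a flind (or fails cleanly if that freelist holds no pages). The wrapper function below is hypothetical and not part of the commit.

/* Hypothetical caller: request a page from the low-memory freelist. */
static vm_page_t
example_alloc_lowmem_page(int req)
{

	/* The selector is translated to a flind inside vm_phys. */
	return (vm_page_alloc_freelist(VM_FREELIST_LOWMEM, req));
}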