Fix boot_pages calculation for machines that don't have UMA_MD_SMALL_ALLOC.
o Call uma_startup1() after initializing kmem, vmem and domains.
o Include the eight VM startup zones in the uma_startup_count() calculation.
o Account for vmem_startup() and vm_map_startup() preallocating pages.
o Account for the extra two allocations done by kmem_init() and vmem_create().
o Hardcode the place of execution of vm_radix_reserve_kva(). Using SYSINIT
  allowed several other SYSINITs to sneak in before it, thus bumping the
  requirement for the amount of boot pages.
parent 330d62831f
commit ae941b1b4e
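For reference, a minimal sketch of the boot-page accounting this change introduces for the !UMA_MD_SMALL_ALLOC case. It only restates, in one place, the arithmetic added to vm_page_startup() in the diff below; boot_pages_estimate() is an illustrative name, not a function added by the commit, and the fragment assumes the same kernel headers the patched file already includes.

/*
 * Illustration only: mirrors the !UMA_MD_SMALL_ALLOC boot_pages
 * calculation from vm_page_startup() below.
 */
static int
boot_pages_estimate(void)
{
	int pages;

	/*
	 * Eight zones exist before UMA is fully functional: vmem,
	 * vmem_btag, VM OBJECT, RADIX NODE, MAP, KMAP ENTRY,
	 * MAP ENTRY, VMSPACE.
	 */
	pages = uma_startup_count(8);

	/* Pages taken by early uma_prealloc() callers. */
	pages += vmem_startup_count();		/* vmem_startup() */
	pages += howmany(MAX_KMAP,		/* vm_map_startup() */
	    UMA_SLAB_SIZE / sizeof(struct vm_map));

	/* One early allocation each from kmem_init() and vmem_create(). */
	pages += 2;

	return (pages);
}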
--- a/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -72,6 +72,9 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_param.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
+#include <vm/uma_int.h>
+
+int vmem_startup_count(void);
 
 #define VMEM_OPTORDER	5
 #define VMEM_OPTVALUE	(1 << VMEM_OPTORDER)
@@ -653,6 +656,16 @@ vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 
 	return (NULL);
 }
+
+/*
+ * How many pages do we need to startup_alloc.
+ */
+int
+vmem_startup_count(void)
+{
+
+	return (howmany(BT_MAXALLOC, UMA_SLAB_SIZE / sizeof(struct vmem_btag)));
+}
 #endif
 
 void
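As a rough worked instance of the return expression above (figures are illustrative and not taken from the commit): with 4 KB UMA slabs, boundary tags of roughly 64 bytes, and BT_MAXALLOC assumed to be 4, the whole boundary-tag reserve fits in a single boot page.

	int tags_per_slab = 4096 / 64;				/* 64 tags per slab */
	int pages = (4 + tags_per_slab - 1) / tags_per_slab;	/* howmany(4, 64) == 1 */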
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -93,6 +93,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_pager.h>
 #include <vm/vm_extern.h>
 
+extern void uma_startup1(void);
+extern void vm_radix_reserve_kva(void);
 
 #if VM_NRESERVLEVEL > 0
 #define KVA_QUANTUM	(1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
@@ -150,7 +152,11 @@ vm_mem_init(dummy)
 	 */
 	vm_set_page_size();
 	virtual_avail = vm_page_startup(virtual_avail);
 
+#ifdef UMA_MD_SMALL_ALLOC
+	/* Announce page availability to UMA. */
+	uma_startup1();
+#endif
 	/*
 	 * Initialize other VM packages
 	 */
@@ -173,6 +179,12 @@ vm_mem_init(dummy)
 	    KVA_QUANTUM);
 	}
 
+#ifndef UMA_MD_SMALL_ALLOC
+	/* Set up radix zone to use noobj_alloc. */
+	vm_radix_reserve_kva();
+	/* Announce page availability to UMA. */
+	uma_startup1();
+#endif
 	kmem_init_zero_region();
 	pmap_init();
 	vm_pager_init();
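Putting the two vm_mem_init() hunks together, the bring-up order on a machine without UMA_MD_SMALL_ALLOC now reads roughly as follows. This is a paraphrase, not a verbatim excerpt; the middle comment stands in for calls that fall outside the hunks shown above.

	vm_set_page_size();
	virtual_avail = vm_page_startup(virtual_avail);
#ifdef UMA_MD_SMALL_ALLOC
	uma_startup1();			/* announce page availability to UMA */
#endif
	/*
	 * vm_object_init(), vm_map_startup(), kmem_init() and the per-domain
	 * arena setup run here; kmem_init() and vmem_create() each make one
	 * early allocation, hence the "boot_pages += 2" in vm_page.c.
	 */
#ifndef UMA_MD_SMALL_ALLOC
	vm_radix_reserve_kva();		/* radix zone switches to noobj_alloc */
	uma_startup1();			/* announce page availability to UMA */
#endif
	kmem_init_zero_region();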
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -112,6 +112,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_param.h>
 #include <vm/vm_domainset.h>
 #include <vm/vm_kern.h>
+#include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
@@ -127,7 +128,7 @@ __FBSDID("$FreeBSD$");
 
 extern int uma_startup_count(int);
 extern void uma_startup(void *, int);
-extern void uma_startup1(void);
+extern int vmem_startup_count(void);
 
 /*
  * Associated with page of user-allocatable memory is a
@@ -501,12 +502,33 @@ vm_page_startup(vm_offset_t vaddr)
 
 	/*
 	 * Allocate memory for use when boot strapping the kernel memory
-	 * allocator.
-	 *
+	 * allocator.  Tell UMA how many zones we are going to create
+	 * before going fully functional.  UMA will add its zones.
+	 */
+#ifdef UMA_MD_SMALL_ALLOC
+	boot_pages = uma_startup_count(0);
+#else
+	/*
+	 * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP,
+	 * KMAP ENTRY, MAP ENTRY, VMSPACE.
+	 */
+	boot_pages = uma_startup_count(8);
+
+	/* vmem_startup() calls uma_prealloc(). */
+	boot_pages += vmem_startup_count();
+	/* vm_map_startup() calls uma_prealloc(). */
+	boot_pages += howmany(MAX_KMAP, UMA_SLAB_SIZE / sizeof(struct vm_map));
+
+	/*
+	 * Before going fully functional kmem_init() does allocation
+	 * from "KMAP ENTRY" and vmem_create() does allocation from "vmem".
+	 */
+	boot_pages += 2;
+#endif
+	/*
 	 * CTFLAG_RDTUN doesn't work during the early boot process, so we must
 	 * manually fetch the value.
 	 */
-	boot_pages = uma_startup_count(0);
 	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
 	new_end = end - (boot_pages * UMA_SLAB_SIZE);
 	new_end = trunc_page(new_end);
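The computed value remains only a default: TUNABLE_INT_FETCH() still lets the loader override it. For example (value chosen arbitrarily), a machine that nevertheless runs out of boot pages could set in /boot/loader.conf:

	vm.boot_pages="64"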
@@ -740,9 +762,6 @@ vm_page_startup(vm_offset_t vaddr)
 	 */
 	domainset_zero();
 
-	/* Announce page availability to UMA. */
-	uma_startup1();
-
 	return (vaddr);
 }
 
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -284,6 +284,7 @@ vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
 #endif
 
 #ifndef UMA_MD_SMALL_ALLOC
+void vm_radix_reserve_kva(void);
 /*
  * Reserve the KVA necessary to satisfy the node allocation.
  * This is mandatory in architectures not supporting direct
@@ -291,8 +292,8 @@ vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
  * every node allocation, resulting into deadlocks for consumers already
  * working with kernel maps.
  */
-static void
-vm_radix_reserve_kva(void *arg __unused)
+void
+vm_radix_reserve_kva(void)
 {
 
 	/*
@@ -304,8 +305,6 @@ vm_radix_reserve_kva(void *arg __unused)
 	    sizeof(struct vm_radix_node))))
 		panic("%s: unable to reserve KVA", __func__);
 }
-SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_THIRD,
-    vm_radix_reserve_kva, NULL);
 #endif
 
 /*