Move kernel vmem arena initialization to vm_kern.c.

This keeps the initialization coupled together with the kmem_* KPI
implementation, which is the main user of these arenas.

No functional change intended.

Reviewed by:	alc
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D17247
This commit is contained in:
Mark Johnston 2018-09-19 19:13:43 +00:00
parent 997fecb5c2
commit 1aed6d48a8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=338806
2 changed files with 83 additions and 83 deletions

View File

@ -98,12 +98,6 @@ extern void uma_startup1(void);
extern void uma_startup2(void);
extern void vm_radix_reserve_kva(void);
#if VM_NRESERVLEVEL > 0
#define KVA_QUANTUM (1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
#else
/* On non-superpage architectures we want large import sizes. */
#define KVA_QUANTUM (PAGE_SIZE * 1024)
#endif
long physmem;
/*
@ -112,58 +106,15 @@ long physmem;
static void vm_mem_init(void *);
SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL);
/*
* Import kva into the kernel arena.
*/
/*
 * Allocate a size-byte range of kernel virtual address space from
 * kernel_map and hand it back through *addrp.  Returns 0 on success
 * or ENOMEM if the map has no room.
 */
static int
kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
vm_offset_t addr;
int result;
/* Imports must be multiples of the arena's import quantum. */
KASSERT((size % KVA_QUANTUM) == 0,
("kva_import: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
/* Start the search at the bottom of the kernel map. */
addr = vm_map_min(kernel_map);
result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
if (result != KERN_SUCCESS)
return (ENOMEM);
*addrp = addr;
return (0);
}
#if VM_NRESERVLEVEL > 0
/*
* Import a superpage from the normal kernel arena into the special
* arena for allocations with different permissions.
*/
/*
 * Import routine for the rwx arena: carve a KVA_QUANTUM-aligned range
 * of the requested size out of the parent arena (passed as the opaque
 * first argument) and return it through *addrp.
 */
static int
kernel_rwx_alloc(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
/* Imports must be multiples of the arena's import quantum. */
KASSERT((size % KVA_QUANTUM) == 0,
("kernel_rwx_alloc: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
/* No constraints beyond quantum alignment; search the full range. */
return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
VMEM_ADDR_MAX, flags, addrp));
}
#endif
/*
* vm_init initializes the virtual memory system.
* This is done only by the first cpu up.
*
* The start and end address of physical memory is passed in.
*/
/* ARGSUSED*/
static void
vm_mem_init(dummy)
void *dummy;
vm_mem_init(void *dummy)
{
int domain;
/*
* Initializes resident memory structures. From here on, all physical
@ -184,39 +135,6 @@ vm_mem_init(dummy)
vm_map_startup();
kmem_init(virtual_avail, virtual_end);
/*
* Initialize the kernel_arena. This can grow on demand.
*/
vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);
#if VM_NRESERVLEVEL > 0
/*
* In an architecture with superpages, maintain a separate arena
* for allocations with permissions that differ from the "standard"
* read/write permissions used for memory in the kernel_arena.
*/
kernel_rwx_arena = vmem_create("kernel rwx arena", 0, 0, PAGE_SIZE,
0, M_WAITOK);
vmem_set_import(kernel_rwx_arena, kernel_rwx_alloc,
(vmem_release_t *)vmem_xfree, kernel_arena, KVA_QUANTUM);
#endif
for (domain = 0; domain < vm_ndomains; domain++) {
vm_dom[domain].vmd_kernel_arena = vmem_create(
"kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_arena,
(vmem_import_t *)vmem_alloc, NULL, kernel_arena,
KVA_QUANTUM);
#if VM_NRESERVLEVEL > 0
vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
"kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
kernel_rwx_alloc, (vmem_release_t *)vmem_xfree,
vm_dom[domain].vmd_kernel_arena, KVA_QUANTUM);
#endif
}
#ifndef UMA_MD_SMALL_ALLOC
/* Set up radix zone to use noobj_alloc. */
vm_radix_reserve_kva();

View File

@ -121,6 +121,13 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#endif
"Max kernel address");
#if VM_NRESERVLEVEL > 0
#define KVA_QUANTUM (1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
#else
/* On non-superpage architectures we want large import sizes. */
#define KVA_QUANTUM (PAGE_SIZE * 1024)
#endif
/*
* kva_alloc:
*
@ -644,6 +651,46 @@ kmem_init_zero_region(void)
zero_region = (const void *)addr;
}
/*
* Import kva into the kernel arena.
*/
/*
 * Allocate a size-byte range of kernel virtual address space from
 * kernel_map and hand it back through *addrp.  Returns 0 on success
 * or ENOMEM if the map has no room.
 */
static int
kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
vm_offset_t addr;
int result;
/* Imports must be multiples of the arena's import quantum. */
KASSERT((size % KVA_QUANTUM) == 0,
("kva_import: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
/* Start the search at the bottom of the kernel map. */
addr = vm_map_min(kernel_map);
result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
if (result != KERN_SUCCESS)
return (ENOMEM);
*addrp = addr;
return (0);
}
#if VM_NRESERVLEVEL > 0
/*
* Import a superpage from the normal kernel arena into the special
* arena for allocations with different permissions.
*/
/*
 * Import routine for the rwx arena: carve a KVA_QUANTUM-aligned range
 * of the requested size out of the parent arena (passed as the opaque
 * first argument) and return it through *addrp.
 */
static int
kernel_rwx_alloc(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
/* Imports must be multiples of the arena's import quantum. */
KASSERT((size % KVA_QUANTUM) == 0,
("kernel_rwx_alloc: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
/* No constraints beyond quantum alignment; search the full range. */
return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
VMEM_ADDR_MAX, flags, addrp));
}
#endif
/*
* kmem_init:
*
@ -651,11 +698,13 @@ kmem_init_zero_region(void)
data, bss, and all space allocated thus far (`bootstrap' data). The
* new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
* `start' as allocated, and the range between `start' and `end' as free.
* Create the kernel vmem arena and its per-domain children.
*/
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
vm_map_t m;
int domain;
m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
m->system_map = 1;
@ -671,6 +720,39 @@ kmem_init(vm_offset_t start, vm_offset_t end)
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
/*
* Initialize the kernel_arena. This can grow on demand.
*/
vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);
#if VM_NRESERVLEVEL > 0
/*
* In an architecture with superpages, maintain a separate arena
* for allocations with permissions that differ from the "standard"
* read/write permissions used for memory in the kernel_arena.
*/
kernel_rwx_arena = vmem_create("kernel rwx arena", 0, 0, PAGE_SIZE,
0, M_WAITOK);
vmem_set_import(kernel_rwx_arena, kernel_rwx_alloc,
(vmem_release_t *)vmem_xfree, kernel_arena, KVA_QUANTUM);
#endif
for (domain = 0; domain < vm_ndomains; domain++) {
vm_dom[domain].vmd_kernel_arena = vmem_create(
"kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_arena,
(vmem_import_t *)vmem_alloc, NULL, kernel_arena,
KVA_QUANTUM);
#if VM_NRESERVLEVEL > 0
vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
"kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
kernel_rwx_alloc, (vmem_release_t *)vmem_xfree,
vm_dom[domain].vmd_kernel_arena, KVA_QUANTUM);
#endif
}
}
/*