Remove the VM map zone.

Today, the zone is only used to allocate a trio of kernel maps: the
kernel map itself, and the exec and pipe submaps.  Maps for user
processes are dynamically allocated but are embedded in the vmspace
structure, which is allocated from its own zone.  Make the
aforementioned kernel maps statically allocated and get rid of the zone.

While here, remove a stale comment above vmspace_alloc() and change the
names of locks initialized in vm_map_init() to match vmspace_zinit().
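
For reference, the shape of the change for one of the three maps (exec_map), condensed from the hunks below; the identifiers are taken from the patch, and the declarations, call site, and error handling are collapsed into one sketch:

	/* Before: exec_map is a pointer, and kmem_suballoc() allocates the
	 * map itself from the VM map zone. */
	vm_map_t exec_map;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);

	/* After: the map is a statically allocated object and kmem_subinit()
	 * initializes it in place, so no zone allocation is needed. */
	struct vm_map exec_map_store;
	#define	exec_map	(&exec_map_store)
	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false);

The kernel_map and pipe_map get the same static-storage treatment via kernel_map_store and pipe_map_store, and vm_map_init() now names its locks "vm map (system)" and "vm map (user)" to match vmspace_zinit().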

Reported by:	alc
Reviewed by:	alc, kib
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D26052
Author:	Mark Johnston
Date:	2020-08-17 13:02:01 +00:00
Parent:	fd6eb8fec7
Commit:	7dd979dfef
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=364302

6 changed files with 39 additions and 94 deletions

@@ -77,8 +77,8 @@ void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
 /* Bootstrapping. */
 void kmem_bootstrap_free(vm_offset_t, vm_size_t);
-vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
-    boolean_t);
+void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+    bool);
 void kmem_init(vm_offset_t, vm_offset_t);
 void kmem_init_zero_region(void);
 void kmeminit(void);

@@ -253,8 +253,8 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 	exec_map_entries = 2 * mp_ncpus + 4;
 #endif
 	exec_map_entry_size = round_page(PATH_MAX + ARG_MAX);
-	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
-	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);
-	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
-	    FALSE);
+	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
+	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false);
+	kmem_subinit(pipe_map, kernel_map, &minaddr, &maxaddr, maxpipekva,
+	    false);
 }

@@ -97,9 +97,9 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
-vm_map_t kernel_map;
-vm_map_t exec_map;
-vm_map_t pipe_map;
+struct vm_map kernel_map_store;
+struct vm_map exec_map_store;
+struct vm_map pipe_map_store;
 const void *zero_region;
 CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
@@ -359,9 +359,9 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
 }
 /*
- *	kmem_suballoc:
+ *	kmem_subinit:
  *
- *	Allocates a map to manage a subrange
+ *	Initializes a map to manage a subrange
  *	of the kernel virtual address space.
  *
  *	Arguments are as follows:
@@ -371,12 +371,11 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
  *	size		Size of range to find
  *	superpage_align	Request that min is superpage aligned
  */
-vm_map_t
-kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
-    vm_size_t size, boolean_t superpage_align)
+void
+kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+    vm_size_t size, bool superpage_align)
 {
 	int ret;
-	vm_map_t result;
 	size = round_page(size);
@@ -385,14 +384,11 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
 	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
 	    MAP_ACC_NO_CHARGE);
 	if (ret != KERN_SUCCESS)
-		panic("kmem_suballoc: bad status return of %d", ret);
+		panic("kmem_subinit: bad status return of %d", ret);
 	*max = *min + size;
-	result = vm_map_create(vm_map_pmap(parent), *min, *max);
-	if (result == NULL)
-		panic("kmem_suballoc: cannot create submap");
-	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
-		panic("kmem_suballoc: unable to change range to submap");
-	return (result);
+	vm_map_init(map, vm_map_pmap(parent), *min, *max);
+	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
+		panic("kmem_subinit: unable to change range to submap");
 }
 /*
@@ -772,12 +768,12 @@ kmem_init(vm_offset_t start, vm_offset_t end)
 	 * that handle vm_page_array allocation can simply adjust virtual_avail
 	 * instead.
 	 */
-	(void)vm_map_insert(m, NULL, 0, (vm_offset_t)vm_page_array,
+	(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
 	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
 	    sizeof(struct vm_page)),
 	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
 #endif
-	vm_map_unlock(m);
+	vm_map_unlock(kernel_map);
 	/*
 	 * Initialize the kernel_arena. This can grow on demand.

@@ -66,9 +66,12 @@
 #define	_VM_VM_KERN_H_
 /* Kernel memory management definitions. */
-extern vm_map_t kernel_map;
-extern vm_map_t exec_map;
-extern vm_map_t pipe_map;
+extern struct vm_map kernel_map_store;
+#define	kernel_map	(&kernel_map_store)
+extern struct vm_map exec_map_store;
+#define	exec_map	(&exec_map_store)
+extern struct vm_map pipe_map_store;
+#define	pipe_map	(&pipe_map_store)
 extern struct vmem *kernel_arena;
 extern struct vmem *kmem_arena;
 extern struct vmem *buffer_arena;

@@ -128,10 +128,8 @@ __FBSDID("$FreeBSD$");
 static struct mtx map_sleep_mtx;
 static uma_zone_t mapentzone;
 static uma_zone_t kmapentzone;
-static uma_zone_t mapzone;
 static uma_zone_t vmspace_zone;
 static int vmspace_zinit(void *mem, int size, int flags);
-static int vm_map_zinit(void *mem, int ize, int flags);
 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
     vm_offset_t max);
 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
@@ -142,7 +140,6 @@ static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
 #ifdef INVARIANTS
-static void vm_map_zdtor(void *mem, int size, void *arg);
 static void vmspace_zdtor(void *mem, int size, void *arg);
 #endif
 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
@@ -198,14 +195,6 @@ void
 vm_map_startup(void)
 {
 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
-	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
-#ifdef INVARIANTS
-	    vm_map_zdtor,
-#else
-	    NULL,
-#endif
-	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
-	uma_prealloc(mapzone, MAX_KMAP);
 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
@@ -224,24 +213,16 @@ static int
 vmspace_zinit(void *mem, int size, int flags)
 {
 	struct vmspace *vm;
-	vm = (struct vmspace *)mem;
-	vm->vm_map.pmap = NULL;
-	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
-	PMAP_LOCK_INIT(vmspace_pmap(vm));
-	return (0);
-}
-static int
-vm_map_zinit(void *mem, int size, int flags)
-{
 	vm_map_t map;
-	map = (vm_map_t)mem;
+	vm = (struct vmspace *)mem;
+	map = &vm->vm_map;
 	memset(map, 0, sizeof(*map));
-	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
+	mtx_init(&map->system_mtx, "vm map (system)", NULL,
+	    MTX_DEF | MTX_DUPOK);
 	sx_init(&map->lock, "vm map (user)");
+	PMAP_LOCK_INIT(vmspace_pmap(vm));
 	return (0);
 }
@@ -252,29 +233,16 @@ vmspace_zdtor(void *mem, int size, void *arg)
 	struct vmspace *vm;
 	vm = (struct vmspace *)mem;
-	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
-}
-static void
-vm_map_zdtor(void *mem, int size, void *arg)
-{
-	vm_map_t map;
-	map = (vm_map_t)mem;
-	KASSERT(map->nentries == 0,
-	    ("map %p nentries == %d on free.",
-	    map, map->nentries));
-	KASSERT(map->size == 0,
-	    ("map %p size == %lu on free.",
-	    map, (unsigned long)map->size));
+	KASSERT(vm->vm_map.nentries == 0,
+	    ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
+	KASSERT(vm->vm_map.size == 0,
+	    ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
 }
 #endif	/* INVARIANTS */
 /*
  * Allocate a vmspace structure, including a vm_map and pmap,
  * and initialize those structures.  The refcnt is set to 1.
- *
- * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
  */
 struct vmspace *
 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
@@ -870,24 +838,6 @@ vmspace_resident_count(struct vmspace *vmspace)
 	return pmap_resident_count(vmspace_pmap(vmspace));
 }
-/*
- *	vm_map_create:
- *
- *	Creates and returns a new empty VM map with
- *	the given physical map structure, and having
- *	the given lower and upper address bounds.
- */
-vm_map_t
-vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
-{
-	vm_map_t result;
-	result = uma_zalloc(mapzone, M_WAITOK);
-	CTR1(KTR_VM, "vm_map_create: %p", result);
-	_vm_map_init(result, pmap, min, max);
-	return (result);
-}
 /*
  * Initialize an existing vm_map structure
  * such as that in the vmspace structure.
@@ -918,8 +868,9 @@ vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
 {
 	_vm_map_init(map, pmap, min, max);
-	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
-	sx_init(&map->lock, "user map");
+	mtx_init(&map->system_mtx, "vm map (system)", NULL,
+	    MTX_DEF | MTX_DUPOK);
+	sx_init(&map->lock, "vm map (user)");
 }
 /*

@@ -351,10 +351,6 @@ bool vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end);
 long vmspace_resident_count(struct vmspace *vmspace);
 #endif	/* _KERNEL */
-/* XXX: number of kernel maps to statically allocate */
-#define	MAX_KMAP	10
 /*
  * Copy-on-write flags for vm_map operations
  */
@@ -459,7 +455,6 @@ vm_map_entry_read_succ(void *token, struct vm_map_entry *const clone,
 #ifdef _KERNEL
 boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
-vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
 int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
 int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
     vm_offset_t, int, vm_prot_t, vm_prot_t, int);