Get rid of the ad-hoc memory allocator for vm_map_entries in favor of a
simple, clean zone-type allocator. This new allocator will also be used
for machine-dependent pmap PV entries.
John Dyson 1997-08-05 00:02:08 +00:00
parent 5efb75076f
commit 3075778b63
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=27899
13 changed files with 84 additions and 169 deletions
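For readers coming to this commit cold: a zone-type allocator keeps a per-type pool of fixed-size objects and recycles freed items through a free list instead of returning them to the general-purpose allocator. Below is a minimal, illustrative user-space sketch of that idea. The names mirror the zinit()/zalloc()/zfree() interface used throughout this diff, but the signatures and internals here are simplified assumptions, not the kernel's vm_zone implementation.

/*
 * Toy user-space model of a zone-type allocator: fixed-size objects
 * recycled through a per-zone free list.  Illustrative only -- not the
 * kernel's vm_zone code; signatures are simplified assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

struct zone_item { struct zone_item *next; };

typedef struct vm_zone {
	struct zone_item *free;		/* LIFO list of released items */
	size_t size;			/* rounded-up item size */
	const char *name;		/* for diagnostics only */
} *vm_zone_t;

static vm_zone_t
zinit(const char *name, size_t size)
{
	vm_zone_t z = malloc(sizeof(*z));

	z->free = NULL;
	/* An item must be at least large enough to hold the free-list link. */
	z->size = size < sizeof(struct zone_item) ?
	    sizeof(struct zone_item) : size;
	z->name = name;
	return (z);
}

static void *
zalloc(vm_zone_t z)
{
	struct zone_item *it = z->free;

	if (it != NULL) {		/* reuse a previously freed item */
		z->free = it->next;
		return (it);
	}
	return (malloc(z->size));	/* grow the zone on demand */
}

static void
zfree(vm_zone_t z, void *item)
{
	struct zone_item *it = item;

	it->next = z->free;		/* push back onto the free list */
	z->free = it;
}

int
main(void)
{
	vm_zone_t zp = zinit("PIPE", 128);
	void *a = zalloc(zp);

	zfree(zp, a);
	/* The second allocation recycles the first item. */
	printf("recycled: %s\n", zalloc(zp) == a ? "yes" : "no");
	return (0);
}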

machdep.c

@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
-* $Id: machdep.c,v 1.252 1997/06/27 18:29:55 fsmp Exp $
+* $Id: machdep.c,v 1.253 1997/07/20 08:37:19 bde Exp $
*/
#include "apm.h"
@@ -341,6 +341,7 @@ cpu_startup(dummy)
(nbuf*BKVASIZE), TRUE);
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size, TRUE);
+pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*ARG_MAX), TRUE);
u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
@@ -359,6 +360,7 @@ cpu_startup(dummy)
bzero(mclrefcnt, mb_map_size / MCLBYTES);
mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
mb_map_size, FALSE);
+mb_map->system_map = 1;
}
/*

sys/conf/files

@@ -416,3 +416,4 @@ vm/vm_pager.c standard
vm/vm_swap.c standard
vm/vm_unix.c standard
vm/vnode_pager.c standard
+vm/vm_zone.c standard

machdep.c

@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
-* $Id: machdep.c,v 1.252 1997/06/27 18:29:55 fsmp Exp $
+* $Id: machdep.c,v 1.253 1997/07/20 08:37:19 bde Exp $
*/
#include "apm.h"
@@ -341,6 +341,7 @@ cpu_startup(dummy)
(nbuf*BKVASIZE), TRUE);
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size, TRUE);
+pager_map->system_map = 1;
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*ARG_MAX), TRUE);
u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
@@ -359,6 +360,7 @@ cpu_startup(dummy)
bzero(mclrefcnt, mb_map_size / MCLBYTES);
mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
mb_map_size, FALSE);
+mb_map->system_map = 1;
}
/*

init_main.c

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* @(#)init_main.c 8.9 (Berkeley) 1/21/94
-* $Id: init_main.c,v 1.65 1997/06/22 16:04:09 peter Exp $
+* $Id: init_main.c,v 1.66 1997/07/10 11:44:42 davidn Exp $
*/
#include "opt_rlimit.h"
@@ -318,6 +318,11 @@ proc0_init(dummy)
*/
sleepinit();
+/*
+* additional VM structures
+*/
+vm_init2();
/*
* Create process 0 (the swapper).
*/

kern_malloc.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
-* $Id: kern_malloc.c,v 1.26 1997/02/22 09:39:07 peter Exp $
+* $Id: kern_malloc.c,v 1.27 1997/06/24 09:41:00 davidg Exp $
*/
#include <sys/param.h>
@@ -41,11 +41,14 @@
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
+#include <sys/lock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
static void kmeminit __P((void *));
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
@@ -402,6 +405,7 @@ kmeminit(dummy)
kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE),
FALSE);
+kmem_map->system_map = 1;
#ifdef KMEMSTATS
for (indx = 0; indx < MINBUCKET + 16; indx++) {
if (1 << indx >= PAGE_SIZE)

sys_pipe.c

@@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
-* $Id: sys_pipe.c,v 1.27 1997/03/24 11:52:26 bde Exp $
+* $Id: sys_pipe.c,v 1.28 1997/04/09 16:53:39 bde Exp $
*/
/*
@@ -80,6 +80,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
+#include <vm/vm_zone.h>
/*
* Use this define if you want to disable *fancy* VM things. Expect an
@@ -144,6 +145,8 @@ static void pipe_clone_write_buffer __P((struct pipe *wpipe));
#endif
static void pipespace __P((struct pipe *cpipe));
+vm_zone_t pipe_zone;
/*
* The pipe system call for the DTYPE_PIPE type of pipes
*/
@@ -162,10 +165,20 @@ pipe(p, uap, retval)
struct pipe *rpipe, *wpipe;
int fd, error;
+if (pipe_zone == NULL)
+pipe_zone = zinit("PIPE", sizeof (struct pipe), 0,
+ZONE_WAIT, 4);
+rpipe = zalloc( pipe_zone);
+/*
rpipe = malloc( sizeof (*rpipe), M_TEMP, M_WAITOK);
+*/
pipeinit(rpipe);
rpipe->pipe_state |= PIPE_DIRECTOK;
+/*
wpipe = malloc( sizeof (*wpipe), M_TEMP, M_WAITOK);
+*/
+wpipe = zalloc( pipe_zone);
pipeinit(wpipe);
wpipe->pipe_state |= PIPE_DIRECTOK;
@@ -1099,6 +1112,9 @@ pipeclose(cpipe)
cpipe->pipe_buffer.size + PAGE_SIZE);
}
#endif
+zfree(pipe_zone, cpipe);
+/*
free(cpipe, M_TEMP);
+*/
}
}
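The pipe changes above show the simplest zone life cycle: create the zone lazily on the first pipe() call, then satisfy every struct pipe allocation and release through zalloc()/zfree(). Here is a sketch of that pattern, building on the toy zinit()/zalloc()/zfree() model near the top of this page; pipe_model, pipe_alloc(), and pipe_release() are invented names for illustration, not kernel identifiers.

/* Builds on the toy vm_zone model sketched above. */
static vm_zone_t pipe_zone_model;

struct pipe_model { int state; char buf[512]; };	/* stand-in for struct pipe */

static struct pipe_model *
pipe_alloc(void)
{
	/* One-time, lazy zone creation on first use. */
	if (pipe_zone_model == NULL)
		pipe_zone_model = zinit("PIPE", sizeof(struct pipe_model));
	return (zalloc(pipe_zone_model));
}

static void
pipe_release(struct pipe_model *p)
{
	zfree(pipe_zone_model, p);	/* recycle instead of free() */
}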

malloc.h

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)malloc.h 8.5 (Berkeley) 5/3/95
-* $Id: malloc.h,v 1.20 1997/02/22 09:45:32 peter Exp $
+* $Id: malloc.h,v 1.21 1997/07/06 02:40:35 dyson Exp $
*/
#ifndef _SYS_MALLOC_H_
@@ -135,7 +135,8 @@
#define M_GEOM_MISC 88 /* geometry misc */
#define M_VFSCONF 89 /* vfsconf structure */
#define M_AIO 90 /* AIO structure(s) */
-#define M_LAST 91 /* Must be last type + 1 */
+#define M_ZONE 91 /* Zone header */
+#define M_LAST 92 /* Must be last type + 1 */
#define INITKMEMNAMES { \
"free", /* 0 M_FREE */ \
@@ -226,6 +227,7 @@
"GEOM misc", /* 88 M_GEOM_MISC */ \
"VFS conf", /* 89 M_VFSCONF */ \
"AIO", /* 90 M_AIO */ \
"ZONE", /* 91 M_ZONE */ \
}
struct kmemstats {

vm_kern.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_kern.c,v 1.37 1997/06/22 15:47:11 peter Exp $
+* $Id: vm_kern.c,v 1.38 1997/08/02 14:33:26 bde Exp $
*/
/*
@@ -443,6 +443,7 @@ kmem_init(start, end)
vm_map_lock(m);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;
+kernel_map->system_map = 1;
(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
/* ... and ending with the completion of the above `insert' */

vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_map.c,v 1.78 1997/06/23 21:51:03 tegge Exp $
+* $Id: vm_map.c,v 1.79 1997/07/27 04:44:12 dyson Exp $
*/
/*
@@ -89,6 +89,7 @@
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/default_pager.h>
+#include <vm/vm_zone.h>
/*
* Virtual memory maps provide for the mapping, protection,
@@ -148,20 +149,17 @@
* maps and requires map entries.
*/
-vm_offset_t kentry_data;
-vm_size_t kentry_data_size;
-static vm_map_entry_t kentry_free;
-static vm_map_t kmap_free;
extern char kstack[];
extern int inmprotect;
-static int kentry_count;
-static vm_offset_t mapvm_start, mapvm, mapvmmax;
-static int mapvmpgcnt;
-static struct vm_map_entry *mappool;
-static int mappoolcnt;
-#define KENTRY_LOW_WATER 128
+static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
+static vm_zone_t mapentzone, kmapentzone, mapzone;
+static struct vm_object kmapentobj, mapentobj, mapobj;
+#define MAP_ENTRY_INIT 128
+struct vm_map_entry map_entry_init[MAX_MAPENT];
+struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
+struct vm_map map_init[MAX_KMAP];
static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
@@ -175,33 +173,15 @@ static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
void
vm_map_startup()
{
-register int i;
-register vm_map_entry_t mep;
-vm_map_t mp;
-/*
-* Static map structures for allocation before initialization of
-* kernel map or kmem map. vm_map_create knows how to deal with them.
-*/
-kmap_free = mp = (vm_map_t) kentry_data;
-i = MAX_KMAP;
-while (--i > 0) {
-mp->header.next = (vm_map_entry_t) (mp + 1);
-mp++;
-}
-mp++->header.next = NULL;
-/*
-* Form a free list of statically allocated kernel map entries with
-* the rest.
-*/
-kentry_free = mep = (vm_map_entry_t) mp;
-kentry_count = i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
-while (--i > 0) {
-mep->next = mep + 1;
-mep++;
-}
-mep->next = NULL;
+mapzone = &mapzone_store;
+_zbootinit(mapzone, "MAP", sizeof (struct vm_map),
+map_init, MAX_KMAP);
+kmapentzone = &kmapentzone_store;
+_zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
+kmap_entry_init, MAX_KMAPENT);
+mapentzone = &mapentzone_store;
+_zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
+map_entry_init, MAX_MAPENT);
}
/*
@@ -216,14 +196,6 @@ vmspace_alloc(min, max, pageable)
{
register struct vmspace *vm;
-if (mapvmpgcnt == 0 && mapvm == 0) {
-mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
-mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
-mapvmpgcnt * PAGE_SIZE);
-mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
-if (!mapvm)
-mapvmpgcnt = 0;
-}
MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
vm_map_init(&vm->vm_map, min, max, pageable);
@@ -233,6 +205,16 @@ vmspace_alloc(min, max, pageable)
return (vm);
}
+void
+vm_init2(void) {
+_zinit(kmapentzone, &kmapentobj,
+NULL, 0, 4096, ZONE_INTERRUPT, 4);
+_zinit(mapentzone, &mapentobj,
+NULL, 0, 0, ZONE_WAIT, 4);
+_zinit(mapzone, &mapobj,
+NULL, 0, 0, ZONE_WAIT, 4);
+}
void
vmspace_free(vm)
register struct vmspace *vm;
@@ -278,15 +260,7 @@ vm_map_create(pmap, min, max, pageable)
{
register vm_map_t result;
-if (kmem_map == NULL) {
-result = kmap_free;
-if (result == NULL)
-panic("vm_map_create: out of maps");
-kmap_free = (vm_map_t) result->header.next;
-} else
-MALLOC(result, vm_map_t, sizeof(struct vm_map),
-M_VMMAP, M_WAITOK);
+result = zalloc(mapzone);
vm_map_init(result, min, max, pageable);
result->pmap = pmap;
return (result);
@@ -308,6 +282,7 @@ vm_map_init(map, min, max, pageable)
map->size = 0;
map->ref_count = 1;
map->is_main_map = TRUE;
+map->system_map = 0;
map->min_offset = min;
map->max_offset = max;
map->entries_pageable = pageable;
@@ -328,20 +303,7 @@ vm_map_entry_dispose(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
-int s;
-if (map == kernel_map || map == kmem_map ||
-map == mb_map || map == pager_map) {
-s = splvm();
-entry->next = kentry_free;
-kentry_free = entry;
-++kentry_count;
-splx(s);
-} else {
-entry->next = mappool;
-mappool = entry;
-++mappoolcnt;
-}
+zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
}
/*
@@ -354,68 +316,7 @@ static vm_map_entry_t
vm_map_entry_create(map)
vm_map_t map;
{
-vm_map_entry_t entry;
-int i;
-int s;
-/*
-* This is a *very* nasty (and sort of incomplete) hack!!!!
-*/
-if (kentry_count < KENTRY_LOW_WATER) {
-s = splvm();
-if (mapvmpgcnt && mapvm) {
-vm_page_t m;
-m = vm_page_alloc(kernel_object,
-OFF_TO_IDX(mapvm - VM_MIN_KERNEL_ADDRESS),
-(map == kmem_map || map == mb_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
-if (m) {
-int newentries;
-newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
-vm_page_wire(m);
-PAGE_WAKEUP(m);
-m->valid = VM_PAGE_BITS_ALL;
-pmap_kenter(mapvm, VM_PAGE_TO_PHYS(m));
-m->flags |= PG_WRITEABLE;
-entry = (vm_map_entry_t) mapvm;
-mapvm += PAGE_SIZE;
---mapvmpgcnt;
-for (i = 0; i < newentries; i++) {
-vm_map_entry_dispose(kernel_map, entry);
-entry++;
-}
-}
-}
-splx(s);
-}
-if (map == kernel_map || map == kmem_map ||
-map == mb_map || map == pager_map) {
-s = splvm();
-entry = kentry_free;
-if (entry) {
-kentry_free = entry->next;
---kentry_count;
-} else {
-panic("vm_map_entry_create: out of map entries for kernel");
-}
-splx(s);
-} else {
-entry = mappool;
-if (entry) {
-mappool = entry->next;
---mappoolcnt;
-} else {
-MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
-M_VMMAPENT, M_WAITOK);
-}
-}
-return (entry);
+return zalloc((map->system_map || !mapentzone) ? kmapentzone : mapentzone);
}
/*
@@ -496,7 +397,7 @@ vm_map_deallocate(map)
vm_map_unlock(map);
-FREE(map, M_VMMAP);
+zfree(mapzone, map);
}
/*
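One detail worth calling out in the vm_map.c changes above: the map and map-entry zones must hand out items before the kernel's dynamic allocator exists, so vm_map_startup() seeds each zone from a static array (map_init[], kmap_entry_init[], map_entry_init[]) with _zbootinit(), and vm_init2(), called from proc0_init() in the init_main.c hunk earlier, later finishes the job with _zinit() so the zones can grow. Below is a self-contained user-space model of that two-stage scheme; zbootinit_model(), zinit_model(), zalloc_model(), and their signatures are invented for illustration and are not the kernel's _zbootinit()/_zinit().

#include <stdlib.h>

struct zitem { struct zitem *next; };

struct zone_model {
	struct zitem *free;	/* free list, seeded from static storage */
	size_t size;		/* item size; assumed >= sizeof(struct zitem) */
	int can_grow;		/* 0 until the second-stage init runs */
};

/* Stage 1: before the VM system is up, seed the zone from a static array. */
static void
zbootinit_model(struct zone_model *z, size_t size, void *items, int nitems)
{
	char *p = items;
	int i;

	z->size = size;
	z->free = NULL;
	z->can_grow = 0;
	for (i = 0; i < nitems; i++, p += size) {
		((struct zitem *)p)->next = z->free;
		z->free = (struct zitem *)p;
	}
}

/* Stage 2: once dynamic allocation works, allow the zone to grow. */
static void
zinit_model(struct zone_model *z)
{
	z->can_grow = 1;
}

static void *
zalloc_model(struct zone_model *z)
{
	struct zitem *it = z->free;

	if (it != NULL) {
		z->free = it->next;
		return (it);
	}
	/* Static items exhausted: growing is only legal after stage 2. */
	return (z->can_grow ? malloc(z->size) : NULL);
}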

vm_map.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_map.h,v 1.25 1997/04/06 02:29:44 dyson Exp $
+* $Id: vm_map.h,v 1.26 1997/04/07 07:16:06 peter Exp $
*/
/*
@@ -131,7 +131,8 @@ struct vm_map {
struct vm_map_entry header; /* List of entries */
int nentries; /* Number of entries */
vm_size_t size; /* virtual size */
-boolean_t is_main_map; /* Am I a main map? */
+unsigned char is_main_map; /* Am I a main map? */
+unsigned char system_map; /* Am I a system map? */
int ref_count; /* Reference count */
struct simplelock ref_lock; /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
@@ -231,6 +232,7 @@ typedef struct {
/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP 10
#define MAX_KMAPENT 128
+#define MAX_MAPENT 128
/*
* Copy-on-write flags for vm_map operations
@@ -279,6 +281,7 @@ void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
void vm_map_madvise __P((vm_map_t, pmap_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
+void vm_init2 __P((void));
#endif
#endif /* _VM_MAP_ */
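The new system_map bit added here is what lets vm_map.c replace the old pointer comparisons (map == kernel_map || map == kmem_map || ...) with a single flag test when choosing between the interrupt-safe kernel-entry zone and the ordinary one. A minimal model of that selection, reusing the toy vm_zone_t from the first sketch on this page; map_model and entry_zone_for() are invented names:

/* Invented model types; not the kernel's. */
struct map_model {
	unsigned char is_main_map;
	unsigned char system_map;	/* set for kernel_map, kmem_map, ... */
};

/*
 * Old scheme: compare the map against each known system map.
 * New scheme: one flag test picks the interrupt-safe entry zone.
 */
static vm_zone_t
entry_zone_for(struct map_model *map, vm_zone_t kmapentzone, vm_zone_t mapentzone)
{
	return (map->system_map ? kmapentzone : mapentzone);
}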

vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_object.c,v 1.93 1997/06/22 03:00:24 dyson Exp $
+* $Id: vm_object.c,v 1.94 1997/06/22 15:47:16 peter Exp $
*/
/*
@@ -92,7 +92,6 @@
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
-static void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
static void vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void vm_object_deactivate_pages __P((vm_object_t));
@@ -142,7 +141,7 @@ static long object_collapses;
static long object_bypasses;
static int next_index;
-static void
+void
_vm_object_allocate(type, size, object)
objtype_t type;
vm_size_t size;

vm_object.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id$
+* $Id: vm_object.h,v 1.35 1997/02/22 09:48:29 peter Exp $
*/
/*
@@ -171,6 +171,7 @@ vm_object_pip_wakeup(vm_object_t object)
}
vm_object_t vm_object_allocate __P((objtype_t, vm_size_t));
+void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
void vm_object_cache_clear __P((void));
boolean_t vm_object_coalesce __P((vm_object_t, vm_pindex_t, vm_size_t, vm_size_t));
void vm_object_collapse __P((vm_object_t));

vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
-* $Id: vm_page.c,v 1.77 1997/03/23 02:44:54 dyson Exp $
+* $Id: vm_page.c,v 1.78 1997/05/01 14:36:01 dyson Exp $
*/
/*
@@ -285,28 +285,6 @@ vm_page_startup(starta, enda, vaddr)
bucket++;
}
-/*
-* round (or truncate) the addresses to our page size.
-*/
-/*
-* Pre-allocate maps and map entries that cannot be dynamically
-* allocated via malloc(). The maps include the kernel_map and
-* kmem_map which must be initialized before malloc() will work
-* (obviously). Also could include pager maps which would be
-* allocated before kmeminit.
-*
-* Allow some kernel map entries... this should be plenty since people
-* shouldn't be cluttering up the kernel map (they should use their
-* own maps).
-*/
-kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
-MAX_KMAPENT * sizeof(struct vm_map_entry);
-kentry_data_size = round_page(kentry_data_size);
-kentry_data = (vm_offset_t) vaddr;
-vaddr += kentry_data_size;
/*
* Validate these zone addresses.
*/