Introduce a new parameter "superpage_align" to kmem_suballoc() that is used to
request superpage alignment for the submap.

Request superpage alignment for the kmem_map.

Pass VMFS_ANY_SPACE instead of TRUE to vm_map_find().  (They are currently
equivalent but VMFS_ANY_SPACE is the new preferred spelling.)

Remove a stale comment from kmem_malloc().
Alan Cox 2008-05-10 21:46:20 +00:00
parent 64982acf50
commit 3202ed7523
4 changed files with 16 additions and 18 deletions
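
As a rough illustration of the new interface, a minimal sketch of a caller follows. The names foo_map, foo_base, foo_limit, foo_size, and foo_map_init are hypothetical and the include list is approximate; only the kmem_suballoc() signature and the meaning of the TRUE/FALSE argument come from the diffs below.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>	/* new kmem_suballoc() prototype */
#include <vm/vm_kern.h>		/* kernel_map */
#include <vm/vm_map.h>		/* struct vm_map, system_map flag */

static vm_map_t foo_map;		/* hypothetical kernel submap */
static vm_offset_t foo_base, foo_limit;

static void
foo_map_init(vm_size_t foo_size)
{
	/*
	 * Carve a submap out of kernel_map.  Passing TRUE as the new
	 * superpage_align argument makes kmem_suballoc() request
	 * VMFS_ALIGNED_SPACE from vm_map_find(), so foo_base starts on
	 * a superpage boundary; FALSE keeps the old VMFS_ANY_SPACE
	 * placement.
	 */
	foo_map = kmem_suballoc(kernel_map, &foo_base, &foo_limit,
	    foo_size, TRUE);
	foo_map->system_map = 1;	/* behaves like other system maps */
}

Only kmem_map opts into alignment in this commit; the clean, buffer, pager, exec, and pipe submaps pass FALSE and keep their previous placement.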

sys/kern/kern_malloc.c

@@ -594,7 +594,7 @@ kmeminit(void *dummy)
 	init_param3(vm_kmem_size / PAGE_SIZE);
 
 	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
-	    vm_kmem_size);
+	    vm_kmem_size, TRUE);
 	kmem_map->system_map = 1;
 
 #ifdef DEBUG_MEMGUARD

sys/vm/vm_extern.h

@@ -63,7 +63,8 @@ void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
 void kmem_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
 void kmem_init(vm_offset_t, vm_offset_t);
 vm_offset_t kmem_malloc(vm_map_t, vm_size_t, boolean_t);
-vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t);
+vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+    boolean_t);
 void swapout_procs(int);
 int useracc(void *, int, int);
 int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);

sys/vm/vm_init.c

@@ -186,16 +186,17 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 		panic("startup: table size inconsistency");
 
 	clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
-	    (nbuf*BKVASIZE) + (nswbuf*MAXPHYS));
+	    nbuf * BKVASIZE + nswbuf * MAXPHYS, FALSE);
 	buffer_map = kmem_suballoc(clean_map, &kmi->buffer_sva,
-	    &kmi->buffer_eva, (nbuf*BKVASIZE));
+	    &kmi->buffer_eva, nbuf * BKVASIZE, FALSE);
 	buffer_map->system_map = 1;
 	pager_map = kmem_suballoc(clean_map, &kmi->pager_sva, &kmi->pager_eva,
-	    (nswbuf*MAXPHYS));
+	    nswbuf * MAXPHYS, FALSE);
 	pager_map->system_map = 1;
 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
-	    (exec_map_entries*(ARG_MAX+(PAGE_SIZE*3))));
-	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva);
+	    exec_map_entries * (ARG_MAX + (PAGE_SIZE * 3)), FALSE);
+	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
+	    FALSE);
 
 	/*
 	 * XXX: Mbuf system machine-specific initializations should

sys/vm/vm_kern.c

@@ -109,8 +109,8 @@ kmem_alloc_nofault(map, size)
 
 	size = round_page(size);
 	addr = vm_map_min(map);
-	result = vm_map_find(map, NULL, 0,
-	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
+	result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
+	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
 	if (result != KERN_SUCCESS) {
 		return (0);
 	}
@@ -221,12 +221,11 @@ kmem_free(map, addr, size)
  *	parent		Map to take range from
  *	min, max	Returned endpoints of map
  *	size		Size of range to find
+ *	superpage_align	Request that min is superpage aligned
  */
 vm_map_t
-kmem_suballoc(parent, min, max, size)
-	vm_map_t parent;
-	vm_offset_t *min, *max;
-	vm_size_t size;
+kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+    vm_size_t size, boolean_t superpage_align)
 {
 	int ret;
 	vm_map_t result;
@@ -234,8 +233,8 @@ kmem_suballoc(parent, min, max, size)
 	size = round_page(size);
 
 	*min = vm_map_min(parent);
-	ret = vm_map_find(parent, NULL, 0,
-	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
+	ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
+	    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
 	if (ret != KERN_SUCCESS)
 		panic("kmem_suballoc: bad status return of %d", ret);
 	*max = *min + size;
@@ -259,9 +258,6 @@ kmem_suballoc(parent, min, max, size)
  * (kmem_object). This, combined with the fact that only malloc uses
  * this routine, ensures that we will never block in map or object waits.
  *
- * Note that this still only works in a uni-processor environment and
- * when called at splhigh().
- *
  * We don't worry about expanding the map (adding entries) since entries
  * for wired maps are statically allocated.
  *