From 925692caa5a9378ceeb964b6604f8345149d36ab Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Mon, 22 Dec 2003 01:01:32 +0000
Subject: [PATCH] - Significantly reduce the number of preallocated pv entries
 in pmap_init(). Such a large preallocation is unnecessary and wastes nearly
 eight megabytes of kernel virtual address space per gigabyte of managed
 physical memory.

- Increase UMA_BOOT_PAGES by two. This enables the removal of
  pmap_pv_allocf(). (Note: this function was only used during
  initialization, specifically, after pmap_init() but before pmap_init2().
  During pmap_init2(), a new allocator is installed.)
---
 sys/amd64/amd64/pmap.c | 15 +--------------
 sys/i386/i386/pmap.c   | 15 +--------------
 sys/vm/uma_int.h       |  2 +-
 3 files changed, 3 insertions(+), 29 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 5dddd7b91abf..7f113ee04960 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -227,7 +227,6 @@ static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
-static void *pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
 
 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
@@ -526,13 +525,6 @@ pmap_bootstrap(firstaddr)
 	invltlb();
 }
 
-static void *
-pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
-{
-	*flags = UMA_SLAB_PRIV;
-	return (void *)kmem_alloc(kernel_map, bytes);
-}
-
 /*
  * Initialize the pmap module.
  * Called by vm_init, to initialize any structures that the pmap
@@ -545,7 +537,6 @@ pmap_init(phys_start, phys_end)
 	vm_paddr_t phys_start, phys_end;
 {
 	int i;
-	int initial_pvs;
 
 	/*
 	 * Allocate memory for random pmap data structures. Includes the
@@ -563,13 +554,9 @@ pmap_init(phys_start, phys_end)
 	/*
 	 * init the pv free list
 	 */
-	initial_pvs = vm_page_array_size;
-	if (initial_pvs < MINPV)
-		initial_pvs = MINPV;
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	uma_zone_set_allocf(pvzone, pmap_pv_allocf);
-	uma_prealloc(pvzone, initial_pvs);
+	uma_prealloc(pvzone, MINPV);
 
 	/*
 	 * Now it is safe to enable pv_table recording.
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 823f6f4851aa..dfaf6999ccf7 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -245,7 +245,6 @@ static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
-static void *pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
 #ifdef PAE
 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
 #endif
@@ -418,13 +417,6 @@ pmap_set_pg(void)
 	}
 }
 
-static void *
-pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
-{
-	*flags = UMA_SLAB_PRIV;
-	return (void *)kmem_alloc(kernel_map, bytes);
-}
-
 #ifdef PAE
 static void *
 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
@@ -446,7 +438,6 @@ pmap_init(phys_start, phys_end)
 	vm_paddr_t phys_start, phys_end;
 {
 	int i;
-	int initial_pvs;
 
 	/*
 	 * Allocate memory for random pmap data structures. Includes the
@@ -464,13 +455,9 @@ pmap_init(phys_start, phys_end)
 	/*
 	 * init the pv free list
 	 */
-	initial_pvs = vm_page_array_size;
-	if (initial_pvs < MINPV)
-		initial_pvs = MINPV;
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	uma_zone_set_allocf(pvzone, pmap_pv_allocf);
-	uma_prealloc(pvzone, initial_pvs);
+	uma_prealloc(pvzone, MINPV);
 
 #ifdef PAE
 	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index b25468bccfd7..140451b282f8 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -103,7 +103,7 @@
 #define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
 #define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits PAGE_MASK */
 
-#define UMA_BOOT_PAGES		30	/* Pages allocated for startup */
+#define UMA_BOOT_PAGES		32	/* Pages allocated for startup */
 
 /* Max waste before going to off page slab management */
 #define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)
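
A back-of-the-envelope check of the "nearly eight megabytes per gigabyte"
figure in the commit message (a sketch only; the 4 KB page size and the
roughly 32-byte struct pv_entry are assumptions typical of i386/amd64 of
this era, not values stated in the patch): the old code preallocated one
pv entry per entry in vm_page_array, i.e. one per managed physical page,
and the standalone program below reproduces the arithmetic.

	#include <stdio.h>

	int
	main(void)
	{
		/*
		 * Hypothetical userland illustration, not kernel code.
		 * Assumes 4 KB pages and a ~32-byte struct pv_entry;
		 * neither value is taken from the patch itself.
		 */
		unsigned long pages_per_gb = (1UL << 30) / 4096; /* 262144 */
		unsigned long pv_entry_size = 32;		 /* bytes */

		printf("preallocated pv KVA per GB: %lu MB\n",
		    pages_per_gb * pv_entry_size >> 20);	 /* prints 8 */
		return (0);
	}

With the preallocation cut to MINPV, the pv zone's remaining early demand
is small enough to be satisfied from UMA's static boot-page pool, which is
why bumping UMA_BOOT_PAGES from 30 to 32 suffices to retire the custom
pmap_pv_allocf() boot-time allocator.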