Fixed sign and overflow bugs that caused the allocation size of the kernel
malloc region (kmem_map) to be wrong and semi-random on systems with more
than 1GB of RAM. This is not a complete fix, but is sufficient for
machines with 4GB or less of memory. A complete fix will require some
changes to the getenv stuff so that 64bit values can be passed around.

NOT FIXED: machines with more than 4GB of RAM (e.g. some large Alphas)
since we're still using ints to hold some of the values.

Reviewed by:	bde
This commit is contained in:
David Greenman 2000-01-28 04:04:58 +00:00
parent bc9ca81f3d
commit 27b8623f21
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=56720

View File

@@ -72,7 +72,7 @@ static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;
static int vm_kmem_size;
static u_int vm_kmem_size;
#ifdef INVARIANTS
/*
@@ -408,9 +408,9 @@ kmeminit(dummy)
void *dummy;
{
register long indx;
int npg;
int mem_size;
int xvm_kmem_size;
u_long npg;
u_long mem_size;
u_long xvm_kmem_size;
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
@@ -450,8 +450,14 @@ kmeminit(dummy)
/* Allow final override from the kernel environment */
TUNABLE_INT_FETCH("kern.vm.kmem.size", xvm_kmem_size, vm_kmem_size);
if (vm_kmem_size > 2 * (cnt.v_page_count * PAGE_SIZE))
vm_kmem_size = 2 * (cnt.v_page_count * PAGE_SIZE);
/*
* Limit kmem virtual size to twice the physical memory.
* This allows for kmem map sparseness, but limits the size
* to something sane. Be careful to not overflow the 32bit
* ints while doing the check.
*/
if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;
npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
/ PAGE_SIZE;