Fix a bug with memguard(9) on 32-bit architectures without a
VM_KMEM_MAX_SIZE.

The code was not taking into account the size of the kernel_map, which
the kmem_map is allocated from, so it could produce a sub-map size too
large to fit.  The simplest solution is to ignore VM_KMEM_MAX entirely
and base the memguard map's size off the kernel_map's size, since this
is always relevant and always smaller.

Found by:	Justin Hibbits
Matthew D Fleming 2012-07-15 20:29:48 +00:00
parent 5e20b91dbe
commit f806cdcf99
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=238502
4 changed files with 17 additions and 17 deletions
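To make the failure mode concrete, here is a minimal standalone C sketch of the before/after sizing math. It is userland code with hypothetical numbers (a 1 GB kernel_map, a 400 MB vm_kmem_size, 2 GB of RAM, divisor 10), not the kernel implementation:

/*
 * Userland sketch of memguard's sizing logic; all numbers hypothetical.
 * Assumes an LP64 host so the 4 GB product fits in an unsigned long.
 */
#include <stdio.h>

#define	PAGE_SIZE	4096UL

int
main(void)
{
	unsigned long parent_size = 1UL << 30;	/* kernel_map KVA: 1 GB */
	unsigned long km_size = 400UL << 20;	/* vm_kmem_size: 400 MB */
	unsigned long km_max = 0;		/* no VM_KMEM_MAX_SIZE set */
	unsigned long mem_pgs = (2UL << 30) / PAGE_SIZE;	/* 2 GB RAM */
	unsigned long mapsize;

	/* Old logic: km_max == 0 always fell through to 2 * physmem. */
	mapsize = km_max / 10;
	if (mapsize == 0 || mapsize / (2 * PAGE_SIZE) > mem_pgs)
		mapsize = mem_pgs * 2 * PAGE_SIZE;	/* 4 GB */
	printf("old: ask for %lu MB from a %lu MB parent map\n",
	    (km_size + mapsize) >> 20, parent_size >> 20);	/* 4496 > 1024 */

	/* New logic: size from the parent map; refuse what cannot fit. */
	mapsize = parent_size / 10;		/* ~102 MB */
	if (mapsize / (2 * PAGE_SIZE) > mem_pgs)
		mapsize = mem_pgs * 2 * PAGE_SIZE;
	if (km_size + mapsize > parent_size)
		mapsize = 0;
	printf("new: ask for %lu MB\n", (km_size + mapsize) >> 20);	/* 502 */
	return (0);
}

With km_max == 0 the old path always requested twice physical memory, which a RAM-rich 32-bit machine cannot carve out of its kernel_map; the new path can never return more than the parent map holds.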

sys/kern/kern_malloc.c

@@ -744,7 +744,7 @@ kmeminit(void *dummy)
 		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
 
 #ifdef DEBUG_MEMGUARD
-	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
+	tmp = memguard_fudge(vm_kmem_size, kernel_map);
 #else
 	tmp = vm_kmem_size;
 #endif

sys/vm/memguard.c

@@ -159,16 +159,18 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, frequency_hits, CTLFLAG_RD,
  * the kmem_map.  The memguard memory will be a submap.
  */
 unsigned long
-memguard_fudge(unsigned long km_size, unsigned long km_max)
+memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
 {
-	u_long mem_pgs = cnt.v_page_count;
+	u_long mem_pgs, parent_size;
 
 	vm_memguard_divisor = 10;
 	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
 
+	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
+	    PAGE_SIZE;
 	/* Pick a conservative value if provided value sucks. */
 	if ((vm_memguard_divisor <= 0) ||
-	    ((km_size / vm_memguard_divisor) == 0))
+	    ((parent_size / vm_memguard_divisor) == 0))
 		vm_memguard_divisor = 10;
 	/*
 	 * Limit consumption of physical pages to
@@ -177,21 +179,19 @@ memguard_fudge(unsigned long km_size, unsigned long km_max)
 	 * This prevents memguard's page promotions from completely
 	 * using up memory, since most malloc(9) calls are sub-page.
 	 */
+	mem_pgs = cnt.v_page_count;
 	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
 	/*
 	 * We want as much KVA as we can take safely.  Use at most our
-	 * allotted fraction of kmem_max.  Limit this to twice the
-	 * physical memory to avoid using too much memory as pagetable
-	 * pages.
+	 * allotted fraction of the parent map's size.  Limit this to
+	 * twice the physical memory to avoid using too much memory as
+	 * pagetable pages (size must be multiple of PAGE_SIZE).
 	 */
-	memguard_mapsize = km_max / vm_memguard_divisor;
-	/* size must be multiple of PAGE_SIZE */
-	memguard_mapsize = round_page(memguard_mapsize);
-	if (memguard_mapsize == 0 ||
-	    memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
+	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
+	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
 		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
-	if (km_max > 0 && km_size + memguard_mapsize > km_max)
-		return (km_max);
+	if (km_size + memguard_mapsize > parent_size)
+		memguard_mapsize = 0;
 	return (km_size + memguard_mapsize);
 }
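A worked example of the new clamps, with hypothetical numbers: a 2 GB parent map on a machine with only 64 MB of RAM (mem_pgs = 16384) gives a candidate of 2 GB / 10, roughly 204 MB; since 204 MB / (2 * PAGE_SIZE) = 26112 > 16384, it is clamped to 16384 * 2 * 4096 bytes = 128 MB. And if even km_size plus the clamped size exceeded parent_size, memguard_mapsize would be zeroed, so the returned total can never exceed what the parent map can hold.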

sys/vm/memguard.h

@@ -35,7 +35,7 @@ struct malloc_type;
 struct vm_map;
 
 #ifdef DEBUG_MEMGUARD
-unsigned long	memguard_fudge(unsigned long, unsigned long);
+unsigned long	memguard_fudge(unsigned long, const struct vm_map *);
 void	memguard_init(struct vm_map *);
 void	*memguard_alloc(unsigned long, int);
 void	*memguard_realloc(void *, unsigned long, struct malloc_type *, int);

sys/vm/vm_map.h

@@ -200,13 +200,13 @@ struct vm_map {
 #ifdef _KERNEL
 static __inline vm_offset_t
-vm_map_max(vm_map_t map)
+vm_map_max(const struct vm_map *map)
 {
 	return (map->max_offset);
 }
 
 static __inline vm_offset_t
-vm_map_min(vm_map_t map)
+vm_map_min(const struct vm_map *map)
 {
 	return (map->min_offset);
 }
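Constifying these accessors is what lets memguard_fudge() take a const struct vm_map *. A self-contained sketch of the same pattern, using a mock two-field struct and a made-up i386-style address range rather than the kernel's definitions:

#include <stdio.h>

/* Mock of the two fields the accessors touch; not the kernel struct. */
struct vm_map {
	unsigned long min_offset;	/* start of the map's KVA range */
	unsigned long max_offset;	/* end of the map's KVA range */
};

static inline unsigned long
vm_map_max(const struct vm_map *map)
{
	return (map->max_offset);
}

static inline unsigned long
vm_map_min(const struct vm_map *map)
{
	return (map->min_offset);
}

int
main(void)
{
	/* A const map can now be measured without casting away const. */
	const struct vm_map kmap = { 0xc0000000UL, 0xffffffffUL };

	printf("size: %lu MB\n",
	    (vm_map_max(&kmap) - vm_map_min(&kmap) + 1) >> 20);	/* 1024 */
	return (0);
}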