    Fix a null-pointer dereference crash when the system is intentionally
    run out of KVM through a mmap()/fork() bomb that allocates hundreds
    of thousands of vm_map_entry structures.

    Add panic to make null-pointer dereference crash a little more verbose.

    Add a new sysctl, vm.max_proc_mmap, which specifies the maximum number
    of mmap()'d spaces (discrete vm_map_entry's in the process).  The value
    defaults to around 9000 for a 128MB machine.  The test is scaled for the
    number of processes sharing a vmspace (aka linux threads).  Setting
    the value to 0 disables the feature.

PR: kern/16573
Approved by: jkh
This commit is contained in:
Matthew Dillon 2000-02-16 21:11:33 +00:00
parent c2ebb466e5
commit 1f6889a1eb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=57263
6 changed files with 55 additions and 7 deletions

View File

@ -253,7 +253,8 @@ was not page-aligned. (See BUGS below.)
.Dv MAP_FIXED
was specified and the
.Fa addr
parameter wasn't available.
parameter wasn't available, or the system has reached the per-process mmap
limit specified in the vm.max_proc_mmap sysctl.
.Dv MAP_ANON
was specified and insufficient memory was available.
.Sh "SEE ALSO"

View File

@ -72,7 +72,8 @@ static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
static char *kmembase;
static char *kmemlimit;
static u_int vm_kmem_size;
u_int vm_kmem_size;
#ifdef INVARIANTS
/*

View File

@ -112,6 +112,7 @@ enum sysinit_sub_id {
SI_SUB_TUNABLES = 0x0700000, /* establish tunable values */
SI_SUB_VM = 0x1000000, /* virtual memory system init*/
SI_SUB_KMEM = 0x1800000, /* kernel memory*/
SI_SUB_KVM_RSRC = 0x1A00000, /* kvm operational limits*/
SI_SUB_CPU = 0x2000000, /* CPU resource(s)*/
SI_SUB_KLD = 0x2100000, /* KLD and module setup */
SI_SUB_INTRINSIC = 0x2200000, /* proc 0*/

View File

@ -75,6 +75,7 @@ extern vm_map_t mb_map;
extern int mb_map_full;
extern vm_map_t clean_map;
extern vm_map_t exec_map;
extern u_int vm_kmem_size;
extern vm_offset_t kernel_vm_end;
/* XXX - elsewhere? */

View File

@ -284,7 +284,13 @@ static vm_map_entry_t
/*
 * Allocate a vm_map_entry for the given map.  System maps (and early
 * boot, before mapentzone is set up) allocate from kmapentzone;
 * everything else from mapentzone.
 *
 * Panic on allocation failure rather than returning NULL, so that a
 * KVM-exhaustion condition is reported verbosely instead of showing up
 * later as an opaque null-pointer dereference.
 */
vm_map_entry_create(map)
	vm_map_t map;
{
	vm_map_entry_t new_entry;

	new_entry = zalloc((map->system_map || !mapentzone) ?
		kmapentzone : mapentzone);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return(new_entry);
}
/*

View File

@ -49,6 +49,7 @@
#include "opt_rlimit.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
@ -60,6 +61,7 @@
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -72,6 +74,7 @@
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
@ -79,6 +82,29 @@ struct sbrk_args {
};
#endif
/* Per-process limit on mmap()'d regions (vm_map_entry count); 0 disables. */
static int max_proc_mmap;
/* Exported read-write as vm.max_proc_mmap so the limit can be tuned at runtime. */
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
/*
 * Set the maximum number of vm_map_entry structures per process. Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits. We want a
 * default that is good enough to prevent the kernel running out of resources
 * if attacked from compromised user account but generous enough such that
 * multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init __P((void *));
/* Runs at SI_SUB_KVM_RSRC, after SI_SUB_KMEM, so vm_kmem_size is initialized. */
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)
/*
 * SYSINIT hook: derive the default per-process mmap limit from the size
 * of the kernel malloc arena.
 */
static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	/* 1/100th of KVM malloc space, expressed in vm_map_entry structures. */
	max_proc_mmap = (vm_kmem_size / sizeof(struct vm_map_entry)) / 100;
}
/* ARGSUSED */
int
sbrk(p, uap)
@ -171,6 +197,7 @@ mmap(p, uap)
int flags, error;
int disablexworkaround;
off_t pos;
struct vmspace *vms = p->p_vmspace;
addr = (vm_offset_t) uap->addr;
size = uap->len;
@ -234,9 +261,9 @@ mmap(p, uap)
* location.
*/
else if (addr == 0 ||
(addr >= round_page((vm_offset_t)p->p_vmspace->vm_taddr) &&
addr < round_page((vm_offset_t)p->p_vmspace->vm_daddr + MAXDSIZ)))
addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr + MAXDSIZ);
(addr >= round_page((vm_offset_t)vms->vm_taddr) &&
addr < round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ)))
addr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
if (flags & MAP_ANON) {
/*
@ -332,7 +359,18 @@ mmap(p, uap)
handle = (void *)vp;
}
}
error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
/*
* Do not allow more than a certain number of vm_map_entry structures
* per process. Scale with the number of rforks sharing the map
* to make the limit reasonable for threads.
*/
if (max_proc_mmap &&
vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
return (ENOMEM);
}
error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
flags, handle, pos);
if (error == 0)
p->p_retval[0] = (register_t) (addr + pageoff);