Make memguard(9) capable of guarding uma(9) allocations.

Gleb Smirnoff 2011-10-12 18:08:28 +00:00
parent ce4903ad98
commit 8d689e042f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=226313
5 changed files with 85 additions and 15 deletions

View File

@@ -458,7 +458,7 @@ malloc(unsigned long size, struct malloc_type *mtp, int flags)
             ("malloc(M_WAITOK) in interrupt context"));
 #ifdef DEBUG_MEMGUARD
-        if (memguard_cmp(mtp, size)) {
+        if (memguard_cmp_mtp(mtp, size)) {
                 va = memguard_alloc(size, flags);
                 if (va != NULL)
                         return (va);

View File

@@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_extern.h>
+#include <vm/uma_int.h>
 #include <vm/memguard.h>
 
 SYSCTL_NODE(_vm, OID_AUTO, memguard, CTLFLAG_RW, NULL, "MemGuard data");
@@ -125,15 +126,17 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_kva, CTLFLAG_RD,
 SYSCTL_ULONG(_vm_memguard, OID_AUTO, fail_pgs, CTLFLAG_RD,
     &memguard_fail_pgs, 0, "MemGuard failures due to lack of pages");
 
-#define MG_GUARD        0x001
-#define MG_ALLLARGE     0x002
-static int memguard_options = MG_GUARD;
+#define MG_GUARD_AROUND         0x001
+#define MG_GUARD_ALLLARGE       0x002
+#define MG_GUARD_NOFREE         0x004
+static int memguard_options = MG_GUARD_AROUND;
 TUNABLE_INT("vm.memguard.options", &memguard_options);
 SYSCTL_INT(_vm_memguard, OID_AUTO, options, CTLFLAG_RW,
     &memguard_options, 0,
     "MemGuard options:\n"
     "\t0x001 - add guard pages around each allocation\n"
-    "\t0x002 - always use MemGuard for allocations over a page");
+    "\t0x002 - always use MemGuard for allocations over a page\n"
+    "\t0x004 - guard uma(9) zones with UMA_ZONE_NOFREE flag");
 
 static u_int memguard_minsize;
 static u_long memguard_minsize_reject;
@@ -282,7 +285,7 @@ memguard_alloc(unsigned long req_size, int flags)
          * value.
          */
         size_v = size_p;
-        do_guard = (memguard_options & MG_GUARD) != 0;
+        do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
         if (do_guard)
                 size_v += 2 * PAGE_SIZE;
 
@@ -429,21 +432,32 @@ memguard_realloc(void *addr, unsigned long size, struct malloc_type *mtp,
         return (newaddr);
 }
 
-int
-memguard_cmp(struct malloc_type *mtp, unsigned long size)
+static int
+memguard_cmp(unsigned long size)
 {
 
         if (size < memguard_minsize) {
                 memguard_minsize_reject++;
                 return (0);
         }
-        if ((memguard_options & MG_ALLLARGE) != 0 && size >= PAGE_SIZE)
+        if ((memguard_options & MG_GUARD_ALLLARGE) != 0 && size >= PAGE_SIZE)
                 return (1);
         if (memguard_frequency > 0 &&
             (random() % 100000) < memguard_frequency) {
                 memguard_frequency_hits++;
                 return (1);
         }
+
+        return (0);
+}
+
+int
+memguard_cmp_mtp(struct malloc_type *mtp, unsigned long size)
+{
+
+        if (memguard_cmp(size))
+                return(1);
+
 #if 1
         /*
          * The safest way of comparsion is to always compare short description
@@ -467,3 +481,21 @@ memguard_cmp(struct malloc_type *mtp, unsigned long size)
         return (0);
 #endif
 }
+
+int
+memguard_cmp_zone(uma_zone_t zone)
+{
+
+        if ((memguard_options & MG_GUARD_NOFREE) == 0 &&
+            zone->uz_flags & UMA_ZONE_NOFREE)
+                return (0);
+
+        if (memguard_cmp(zone->uz_size))
+                return (1);
+
+        /*
+         * The safest way of comparsion is to always compare zone name,
+         * but it is also the slowest way.
+         */
+        return (strcmp(zone->uz_name, vm_memguard_desc) == 0);
+}
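
With memguard_cmp_zone() keyed to vm_memguard_desc, a UMA zone can now be selected for guarding by name, the same way a malloc(9) type is. Below is a minimal userland sketch of how that might be driven at runtime; it assumes the vm.memguard.desc sysctl that backs vm_memguard_desc is writable on the running kernel (vm.memguard.options is registered CTLFLAG_RW in the hunk above), and "mbuf" is only an example zone name, not something taken from the commit:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <string.h>

int
main(void)
{
        char desc[] = "mbuf";           /* hypothetical target zone name */
        int opts = 0x001 | 0x004;       /* guard pages + allow NOFREE zones */

        /* Name the zone (or malloc type) MemGuard should watch. */
        if (sysctlbyname("vm.memguard.desc", NULL, NULL, desc,
            strlen(desc)) == -1)
                err(1, "vm.memguard.desc");
        /* Set the new 0x004 bit so NOFREE zones are eligible too. */
        if (sysctlbyname("vm.memguard.options", NULL, NULL, &opts,
            sizeof(opts)) == -1)
                err(1, "vm.memguard.options");
        return (0);
}

The options value can also be preset from loader.conf, since the diff registers it as a tunable via TUNABLE_INT.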

View File

@@ -40,7 +40,8 @@ void memguard_init(struct vm_map *);
 void *memguard_alloc(unsigned long, int);
 void *memguard_realloc(void *, unsigned long, struct malloc_type *, int);
 void memguard_free(void *);
-int memguard_cmp(struct malloc_type *, unsigned long);
+int memguard_cmp_mtp(struct malloc_type *, unsigned long);
+int memguard_cmp_zone(uma_zone_t);
 int is_memguard_addr(void *);
 #else
 #define memguard_fudge(size, xxx) (size)
@@ -48,7 +49,8 @@ int is_memguard_addr(void *);
 #define memguard_alloc(size, flags) NULL
 #define memguard_realloc(a, s, mtp, f) NULL
 #define memguard_free(addr) do { } while (0)
-#define memguard_cmp(mtp, size) 0
+#define memguard_cmp_mtp(mtp, size) 0
+#define memguard_cmp_zone(zone) 0
 #define is_memguard_addr(addr) 0
 #endif

View File

@@ -255,8 +255,8 @@ int uma_zsecond_add(uma_zone_t zone, uma_zone_t master);
  * physical parameters of the request and may not be provided by the consumer.
  */
 #define UMA_ZONE_INHERIT \
-    (UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_HASH | \
-    UMA_ZONE_REFCNT | UMA_ZONE_VTOSLAB)
+    (UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE | \
+    UMA_ZONE_HASH | UMA_ZONE_REFCNT | UMA_ZONE_VTOSLAB)
 
 /* Definitions for align */
 #define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
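
Adding UMA_ZONE_NOFREE to UMA_ZONE_INHERIT matters for the new NOFREE check in memguard_cmp_zone(): a secondary zone now inherits the flag from the keg it shares with its primary, so MemGuard declines both unless the 0x004 option bit is set. A rough sketch with made-up zone names, using uma_zcreate()/uma_zsecond_create() as described in uma(9); this is an illustration, not code from the commit:

#include <sys/param.h>
#include <vm/uma.h>

/* Hypothetical zones, for illustration only. */
static uma_zone_t foo_zone, foo_big_zone;

static void
foo_zones_create(void)
{
        /* The primary zone's keg is created NOFREE ... */
        foo_zone = uma_zcreate("foo", 256, NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        /*
         * ... and since UMA_ZONE_NOFREE is now part of UMA_ZONE_INHERIT,
         * the secondary zone carries the flag as well, so memguard_cmp_zone()
         * skips both unless vm.memguard.options has 0x004 set.
         */
        foo_big_zone = uma_zsecond_create("foo big", NULL, NULL, NULL, NULL,
            foo_zone);
}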

View File

@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
 #include "opt_ddb.h"
 #include "opt_param.h"
+#include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -88,6 +89,10 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 
+#ifdef DEBUG_MEMGUARD
+#include <vm/memguard.h>
+#endif
+
 /*
  * This is the zone and keg from which all zones are spawned. The idea is that
  * even the zone & keg heads are allocated from the allocator, so we use the
@@ -1978,7 +1983,29 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                     "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
         }
+#ifdef DEBUG_MEMGUARD
+        if (memguard_cmp_zone(zone)) {
+                item = memguard_alloc(zone->uz_size, flags);
+                if (item != NULL) {
+                        /*
+                         * Avoid conflict with the use-after-free
+                         * protecting infrastructure from INVARIANTS.
+                         */
+                        if (zone->uz_init != NULL &&
+                            zone->uz_init != mtrash_init &&
+                            zone->uz_init(item, zone->uz_size, flags) != 0)
+                                return (NULL);
+                        if (zone->uz_ctor != NULL &&
+                            zone->uz_ctor != mtrash_ctor &&
+                            zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
+                                zone->uz_fini(item, zone->uz_size);
+                                return (NULL);
+                        }
+                        return (item);
+                }
+                /* This is unfortunate but should not be fatal. */
+        }
+#endif
 
         /*
          * If possible, allocate from the per-CPU cache. There are two
          * requirements for safe access to the per-CPU cache: (1) the thread
@@ -2544,7 +2571,16 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
         /* uma_zfree(..., NULL) does nothing, to match free(9). */
         if (item == NULL)
                 return;
+#ifdef DEBUG_MEMGUARD
+        if (is_memguard_addr(item)) {
+                if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
+                        zone->uz_dtor(item, zone->uz_size, udata);
+                if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
+                        zone->uz_fini(item, zone->uz_size);
+                memguard_free(item);
+                return;
+        }
+#endif
 
         if (zone->uz_dtor)
                 zone->uz_dtor(item, zone->uz_size, udata);
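
Taken together, the uma_zalloc_arg() and uma_zfree_arg() hooks are transparent to existing consumers: once a zone is selected for guarding, items may come from MemGuard pages instead of slabs, and the free path detects them with is_memguard_addr() and returns them through memguard_free(). A small sketch of the round trip a caller would see, using a hypothetical zone name that is not part of the commit:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

/* Hypothetical zone, assumed to be created elsewhere with uma_zcreate(). */
static uma_zone_t foo_zone;

static void
foo_roundtrip(void)
{
        void *item;

        /* May be satisfied by memguard_alloc() rather than a slab. */
        item = uma_zalloc(foo_zone, M_WAITOK);
        if (item == NULL)       /* possible if a ctor fails on the guarded path */
                return;
        /* ... use item; once freed, any stale access faults immediately ... */
        uma_zfree(foo_zone, item);      /* routed to memguard_free() if guarded */
}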