Remove the temporary alignment check in free().

Implement the following checks on freed memory in the bucket path:
	- Slab membership
	- Alignment
	- Duplicate free

This previously was only done if we skipped the buckets.  This code will slow
down INVARIANTS a bit, but it is SMP safe.  The checks were moved out of the
normal path and into hooks supplied in uma_dbg.
This commit is contained in:
jeff 2002-05-02 02:08:48 +00:00
parent c136e5d442
commit b152d5fbb5
4 changed files with 118 additions and 25 deletions

View File

@@ -208,12 +208,6 @@ free(addr, type)
if (addr == NULL)
return;
if ((u_long)addr & 3) { /* XXX: Jeff: find better value for 3 */
printf("free(9)'ing unaligned pointer %p\n", addr);
Debugger("Don't do that...");
return;
}
size = 0;
mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));

View File

@@ -45,7 +45,6 @@
/*
* TODO:
* - Improve memory usage for large allocations
* - Improve INVARIANTS (0xdeadc0de write out)
* - Investigate cache size adjustments
*/
@@ -81,6 +80,7 @@
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
/*
* This is the zone from which all zones are spawned. The idea is that even
@@ -1321,6 +1321,9 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
("uma_zalloc: Bucket pointer mangled."));
cache->uc_allocs++;
CPU_UNLOCK(zone, cpu);
#ifdef INVARIANTS
uma_dbg_alloc(zone, NULL, item);
#endif
if (zone->uz_ctor)
zone->uz_ctor(item, zone->uz_size, udata);
if (flags & M_ZERO)
@@ -1540,13 +1543,14 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags, uma_bucket_t bucket
while (slab->us_freecount) {
freei = slab->us_firstfree;
slab->us_firstfree = slab->us_freelist[freei];
#ifdef INVARIANTS
slab->us_freelist[freei] = 255;
#endif
slab->us_freecount--;
zone->uz_free--;
item = slab->us_data + (zone->uz_rsize * freei);
slab->us_freecount--;
zone->uz_free--;
#ifdef INVARIANTS
uma_dbg_alloc(zone, slab, item);
#endif
if (bucket == NULL) {
zone->uz_allocs++;
break;
@@ -1616,6 +1620,13 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
if (zone->uz_flags & UMA_ZFLAG_FULL)
goto zfree_internal;
#ifdef INVARIANTS
if (zone->uz_flags & UMA_ZFLAG_MALLOC)
uma_dbg_free(zone, udata, item);
else
uma_dbg_free(zone, NULL, item);
#endif
zfree_restart:
cpu = PCPU_GET(cpuid);
CPU_LOCK(zone, cpu);
@@ -1768,21 +1779,12 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
/* Slab management stuff */
freei = ((unsigned long)item - (unsigned long)slab->us_data)
/ zone->uz_rsize;
#ifdef INVARIANTS
if (((freei * zone->uz_rsize) + slab->us_data) != item)
panic("zone: %s(%p) slab %p freed address %p unaligned.\n",
zone->uz_name, zone, slab, item);
if (freei >= zone->uz_ipers)
panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
if (slab->us_freelist[freei] != 255) {
printf("Slab at %p, freei %d = %d.\n",
slab, freei, slab->us_freelist[freei]);
panic("Duplicate free of item %p from zone %p(%s)\n",
item, zone, zone->uz_name);
}
#ifdef INVARIANTS
if (!skip)
uma_dbg_free(zone, slab, item);
#endif
slab->us_freelist[freei] = slab->us_firstfree;
slab->us_firstfree = freei;
slab->us_freecount++;

View File

@@ -110,3 +110,98 @@ trash_fini(void *mem, int size)
{
trash_ctor(mem, size, NULL);
}
/*
 * Locate the slab header for an arbitrary item, using the lookup
 * scheme appropriate for the zone's storage flags: the global malloc
 * hash, the zone's own off-page hash, or an in-page slab header.
 *
 * Returns NULL when the item is not found in a hashed zone.
 */
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	u_int8_t *mem;

	/* Round the item's address down to the base of its slab pages. */
	mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));

	if (zone->uz_flags & UMA_ZFLAG_MALLOC)
		return (hash_sfind(mallochash, mem));

	if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
		/* The hash is protected by the zone lock. */
		ZONE_LOCK(zone);
		slab = hash_sfind(&zone->uz_hash, mem);
		ZONE_UNLOCK(zone);
		return (slab);
	}

	/* Otherwise the slab header lives inside the slab's own pages. */
	return ((uma_slab_t)(mem + zone->uz_pgoff));
}
/*
* Set up the slab's freei data such that uma_dbg_free can function.
*
*/
/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 *
 * Marks the item's freelist slot as allocated (255) so a subsequent
 * free of the same item can be detected as a duplicate.  If 'slab'
 * is NULL it is looked up from the item's address.
 */
void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	int freei;

	if (slab == NULL &&
	    (slab = uma_dbg_getslab(zone, item)) == NULL)
		panic("uma: item %p did not belong to zone %s\n",
		    item, zone->uz_name);

	/* Index of this item within the slab's item array. */
	freei = ((unsigned long)item - (unsigned long)slab->us_data)
	    / zone->uz_rsize;

	/* 255 flags the slot as "in use" for the duplicate-free check. */
	slab->us_freelist[freei] = 255;
}
/*
* Verifies freed addresses. Checks for alignment, valid slab membership
* and duplicate frees.
*
*/
/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 *
 * BUG FIX: the original body had an unconditional "return;" immediately
 * after the declarations, which made every check below unreachable dead
 * code and defeated the stated purpose of these hooks.  It is removed
 * so the checks actually run under INVARIANTS.
 */
void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}

	/* Index of this item within the slab's item array. */
	freei = ((unsigned long)item - (unsigned long)slab->us_data)
	    / zone->uz_rsize;

	/* Slab membership: the index must fall within items-per-slab. */
	if (freei >= zone->uz_ipers)
		panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
		    zone->uz_name, zone, slab, freei, zone->uz_ipers-1);

	/* Alignment: the address must sit exactly on an item boundary. */
	if (((freei * zone->uz_rsize) + slab->us_data) != item) {
		printf("zone: %s(%p) slab %p freed address %p unaligned.\n",
		    zone->uz_name, zone, slab, item);
		panic("should be %p\n",
		    (freei * zone->uz_rsize) + slab->us_data);
	}

	/* Duplicate free: 255 means "allocated"; anything else was freed. */
	if (slab->us_freelist[freei] != 255) {
		printf("Slab at %p, freei %d = %d.\n",
		    slab, freei, slab->us_freelist[freei]);
		panic("Duplicate free of item %p from zone %p(%s)\n",
		    item, zone, zone->uz_name);
	}

	/*
	 * When this is actually linked into the slab this will change.
	 * Until then the count of valid slabs will make sure we don't
	 * accidentally follow this and assume it's a valid index.
	 */
	slab->us_freelist[freei] = 0;
}

View File

@ -43,5 +43,7 @@ void trash_ctor(void *mem, int size, void *arg);
void trash_dtor(void *mem, int size, void *arg);
void trash_init(void *mem, int size);
void trash_fini(void *mem, int size);
void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif /* VM_UMA_DBG_H */