malloc/free(9) no longer require Giant.  Use the malloc_mtx to protect the
mallochash.

Mallochash is going to go away as soon as I introduce the kfree/kmalloc api and
partially overhaul the malloc wrapper.  This can't happen until all users of the
malloc api that expect memory to be aligned on the size of the allocation are
fixed.
Jeff Roberson 2002-05-02 07:22:19 +00:00
parent 23dc40e1dd
commit 5a34a9f089
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=95923
4 changed files with 46 additions and 10 deletions
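
For orientation, here is a minimal sketch (not part of the commit) of the locking
discipline the diffs below establish: the global malloc_mtx now guards the
kmemstatistics list and the mallochash address-to-slab hash, while each malloc
type's counters are serialized by its new per-type ks_mtx.  The helper name
stats_free_sketch is made up; the other identifiers appear in the diff, and the
allocator internals (mallochash, hash_sfind, UMA_SLAB_MASK) are assumed to come
from the UMA headers.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <vm/uma.h>
#include <vm/uma_int.h>		/* assumed home of hash_sfind/UMA_SLAB_MASK */

/*
 * Sketch only: mirrors the pattern free() follows after this change.
 * In the real free() the slab lookup also yields the allocation size;
 * here it is passed in to keep the example short.
 */
static void
stats_free_sketch(void *addr, struct malloc_type *ksp, u_long size)
{
	uma_slab_t slab;
	void *mem;

	mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));

	/* The address-to-slab hash is guarded by malloc_mtx, not Giant. */
	mtx_lock(&malloc_mtx);
	slab = hash_sfind(mallochash, mem);
	mtx_unlock(&malloc_mtx);

	if (slab == NULL)
		panic("stats_free_sketch: address %p has not been allocated", addr);

	/* Per-type statistics take the type's own ks_mtx. */
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}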

sys/kern/kern_malloc.c

@@ -118,7 +118,13 @@ struct {
};
u_int vm_kmem_size;
static struct mtx malloc_mtx;
/*
* The malloc_mtx protects the kmemstatistics linked list as well as the
* mallochash.
*/
struct mtx malloc_mtx;
#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];
@@ -165,6 +171,7 @@ malloc(size, type, flags)
krequests[size >> KMEM_ZSHIFT]++;
#endif
va = uma_zalloc(zone, flags);
mtx_lock(&ksp->ks_mtx);
if (va == NULL)
goto out;
@@ -174,6 +181,7 @@ malloc(size, type, flags)
size = roundup(size, PAGE_SIZE);
zone = NULL;
va = uma_large_malloc(size, flags);
mtx_lock(&ksp->ks_mtx);
if (va == NULL)
goto out;
}
@@ -184,6 +192,7 @@ malloc(size, type, flags)
if (ksp->ks_memuse > ksp->ks_maxused)
ksp->ks_maxused = ksp->ks_memuse;
mtx_unlock(&ksp->ks_mtx);
return ((void *) va);
}
@@ -211,7 +220,9 @@ free(addr, type)
size = 0;
mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
mtx_lock(&malloc_mtx);
slab = hash_sfind(mallochash, mem);
mtx_unlock(&malloc_mtx);
if (slab == NULL)
panic("free: address %p(%p) has not been allocated.\n",
@@ -224,8 +235,10 @@ free(addr, type)
size = slab->us_size;
uma_large_free(slab);
}
mtx_lock(&ksp->ks_mtx);
ksp->ks_memuse -= size;
ksp->ks_inuse--;
mtx_unlock(&ksp->ks_mtx);
}
/*
@@ -246,8 +259,10 @@ realloc(addr, size, type, flags)
if (addr == NULL)
return (malloc(size, type, flags));
mtx_lock(&malloc_mtx);
slab = hash_sfind(mallochash,
(void *)((u_long)addr & ~(UMA_SLAB_MASK)));
mtx_unlock(&malloc_mtx);
/* Sanity check */
KASSERT(slab != NULL,
@@ -413,6 +428,7 @@ malloc_init(data)
type->ks_next = kmemstatistics;
kmemstatistics = type;
mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
mtx_unlock(&malloc_mtx);
}
@@ -424,6 +440,7 @@ malloc_uninit(data)
struct malloc_type *t;
mtx_lock(&malloc_mtx);
mtx_lock(&type->ks_mtx);
if (type->ks_magic != M_MAGIC)
panic("malloc type lacks magic");
@@ -441,6 +458,7 @@ malloc_uninit(data)
}
}
type->ks_next = NULL;
mtx_destroy(&type->ks_mtx);
mtx_unlock(&malloc_mtx);
}
@@ -465,8 +483,10 @@ sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
for (type = kmemstatistics; type != NULL; type = type->ks_next)
cnt++;
mtx_unlock(&malloc_mtx);
bufsize = linesize * (cnt + 1);
p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
mtx_lock(&malloc_mtx);
len = snprintf(p, linesize,
"\n Type InUse MemUse HighUse Requests Size(s)\n");

sys/sys/malloc.h

@@ -38,6 +38,8 @@
#define _SYS_MALLOC_H_
#include <vm/uma.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#define splmem splhigh
@@ -62,12 +64,15 @@ struct malloc_type {
u_long ks_maxused; /* maximum number ever used */
u_long ks_magic; /* if it's not magic, don't touch it */
const char *ks_shortdesc; /* short description */
struct mtx ks_mtx; /* Lock for stats */
};
extern struct mtx malloc_mtx;
#ifdef _KERNEL
#define MALLOC_DEFINE(type, shortdesc, longdesc) \
struct malloc_type type[1] = { \
{ NULL, 0, 0, 0, 0, 0, M_MAGIC, shortdesc } \
{ NULL, 0, 0, 0, 0, 0, M_MAGIC, shortdesc, {} } \
}; \
SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_SECOND, malloc_init, type); \
SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY, malloc_uninit, type)
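
As a usage illustration (not part of the diff), a subsystem that declares its own
malloc type through the macro above now gets a per-type mutex for its statistics,
set up by malloc_init() at SYSINIT time.  M_FOO and foo_alloc() are hypothetical
names invented for this sketch.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/*
 * Hypothetical example type; the extra {} added to the initializer above
 * leaves room for the new ks_mtx member, which malloc_init() initializes.
 */
MALLOC_DEFINE(M_FOO, "foo", "example allocations");

static void *
foo_alloc(unsigned long len)
{
	/* Statistics for M_FOO are updated under its ks_mtx, not Giant. */
	return (malloc(len, M_FOO, M_WAITOK | M_ZERO));
}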

sys/vm/uma_core.c

@@ -471,10 +471,13 @@ bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
* hold them. This will go away when free() gets a size passed
* to it.
*/
if (mzone)
if (mzone) {
mtx_lock(&malloc_mtx);
slab = hash_sfind(mallochash,
(u_int8_t *)((unsigned long)item &
(~UMA_SLAB_MASK)));
mtx_unlock(&malloc_mtx);
}
uma_zfree_internal(zone, item, slab, 1);
}
}
@@ -616,17 +619,18 @@ zone_drain(uma_zone_t zone)
zone->uz_size);
flags = slab->us_flags;
mem = slab->us_data;
if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
mtx_lock(&malloc_mtx);
UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
mtx_unlock(&malloc_mtx);
}
if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
UMA_HASH_REMOVE(mallochash,
slab, slab->us_data);
} else {
if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
UMA_HASH_REMOVE(&zone->uz_hash,
slab, slab->us_data);
}
uma_zfree_internal(slabzone, slab, NULL, 0);
} else if (zone->uz_flags & UMA_ZFLAG_MALLOC)
UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
}
#ifdef UMA_DEBUG
printf("%s: Returning %d bytes.\n",
zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
@@ -714,8 +718,9 @@ slab_zalloc(uma_zone_t zone, int wait)
printf("Inserting %p into malloc hash from slab %p\n",
mem, slab);
#endif
/* XXX Yikes! No lock on the malloc hash! */
mtx_lock(&malloc_mtx);
UMA_HASH_INSERT(mallochash, slab, mem);
mtx_unlock(&malloc_mtx);
}
slab->us_zone = zone;
@@ -1958,7 +1963,9 @@ uma_large_malloc(int size, int wait)
slab->us_data = mem;
slab->us_flags = flags | UMA_SLAB_MALLOC;
slab->us_size = size;
mtx_lock(&malloc_mtx);
UMA_HASH_INSERT(mallochash, slab, mem);
mtx_unlock(&malloc_mtx);
} else {
uma_zfree_internal(slabzone, slab, NULL, 0);
}
@@ -1970,7 +1977,9 @@ uma_large_free(uma_slab_t slab)
void
uma_large_free(uma_slab_t slab)
{
mtx_lock(&malloc_mtx);
UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
mtx_unlock(&malloc_mtx);
page_free(slab->us_data, slab->us_size, slab->us_flags);
uma_zfree_internal(slabzone, slab, NULL, 0);
}

sys/vm/uma_dbg.c

@@ -119,7 +119,9 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
mtx_lock(&malloc_mtx);
slab = hash_sfind(mallochash, mem);
mtx_unlock(&malloc_mtx);
} else if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
ZONE_LOCK(zone);
slab = hash_sfind(&zone->uz_hash, mem);