Remove malloc_type's ks_limit.

Update the kmemzones logic so that the bits set in a malloc_type's ks_size
bitmap are indexes into kmemzones[], which is used to report the sizes of
the zones the type has allocated from.
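
Below is a minimal userland sketch of that mapping (not the kernel code
itself), assuming KMEM_ZBASE is 16 and KMEM_ZSHIFT is 4 to match the 16-byte
smallest zone: a request is rounded up to a zone boundary, a kmemsize[]-style
table turns the rounded size into a zone index, the index is recorded as a
bit in ks_size, and each set bit is later translated back into a zone size
for reporting.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define KMEM_ZSHIFT	4		/* assumed value */
#define KMEM_ZBASE	16		/* assumed value */
#define KMEM_ZMASK	(KMEM_ZBASE - 1)
#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)

static const int kz_size[] = {
	16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536
};
#define NZONES	((int)(sizeof(kz_size) / sizeof(kz_size[0])))

static uint8_t kmemsize[KMEM_ZSIZE + 1];	/* rounded size -> zone index */

int
main(void)
{
	long ks_size = 0;			/* per-type bitmap of zone indexes */
	size_t requests[] = { 10, 100, 3000 };
	size_t size;
	int i, indx, nreq;

	/* Build the size-to-index table, as kmeminit() does in this change. */
	for (i = 0, indx = 0; indx < NZONES; indx++)
		for (; i <= kz_size[indx]; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	nreq = sizeof(requests) / sizeof(requests[0]);
	for (i = 0; i < nreq; i++) {
		size = requests[i];
		if (size & KMEM_ZMASK)		/* round up to a zone boundary */
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		ks_size |= 1 << indx;		/* remember which zone served it */
		printf("request %zu -> %d byte zone (index %d)\n",
		    requests[i], kz_size[indx], indx);
	}

	/* Reporting: each bit set in ks_size names a zone, as in the sysctl. */
	for (indx = 0; indx < NZONES; indx++)
		if (ks_size & (1 << indx))
			printf("zone used: %d\n", kz_size[indx]);
	return (0);
}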

Create the kern.malloc sysctl, which replaces the kvm mechanism for reporting
similar data.  This will provide an easy place for statistics aggregation if
malloc_type statistics become per-CPU data.
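
A minimal sketch of a userland consumer of the new sysctl, using the same
grow-the-buffer-on-ENOMEM loop that vmstat's dosysctl() adopts in this
change; the initial buffer size is arbitrary.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *buf = NULL;
	size_t bufsize = 1024;

	for (;;) {
		/* One extra byte so the string can always be terminated. */
		if ((buf = realloc(buf, bufsize + 1)) == NULL)
			err(1, "realloc()");
		if (sysctlbyname("kern.malloc", buf, &bufsize, NULL, 0) == 0)
			break;
		if (errno != ENOMEM)
			err(1, "sysctl()");
		bufsize *= 2;		/* buffer too small; retry larger */
	}
	buf[bufsize] = '\0';
	printf("%s\n", buf);
	free(buf);
	return (0);
}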

Add some code, ifdef'd under MALLOC_PROFILE, to facilitate a tool for sizing
the malloc buckets.
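
A rough userland sketch of the per-size request histogram that the
MALLOC_PROFILE counters (krequests[], indexed by the rounded
size >> KMEM_ZSHIFT) enable; the bucket-sizing tool itself is not part of
this commit, and the KMEM_ZBASE/KMEM_ZSHIFT values here are assumed.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define KMEM_ZSHIFT	4		/* assumed value */
#define KMEM_ZBASE	16		/* assumed value */
#define KMEM_ZMASK	(KMEM_ZBASE - 1)
#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)

static uint64_t krequests[KMEM_ZSIZE + 1];	/* requests per 16-byte bin */

/* Tally one allocation request, rounding the size as malloc() does. */
static void
profile_request(size_t size)
{
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	if (size <= KMEM_ZMAX)
		krequests[size >> KMEM_ZSHIFT]++;
}

int
main(void)
{
	size_t samples[] = { 24, 24, 40, 100, 100, 100, 4000 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		profile_request(samples[i]);

	/* Hot bins show which allocation sizes deserve their own bucket. */
	for (i = 0; i <= KMEM_ZSIZE; i++)
		if (krequests[i] != 0)
			printf("%6zu bytes: %llu requests\n",
			    i << KMEM_ZSHIFT, (unsigned long long)krequests[i]);
	return (0);
}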
Jeff Roberson 2002-04-15 04:05:53 +00:00
parent ab4f115e57
commit 6f2671750e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=94729
3 changed files with 154 additions and 160 deletions


@ -45,6 +45,7 @@
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -91,33 +92,40 @@ static char *kmemlimit;
#define KMEM_ZMAX 65536
#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)
static uma_zone_t kmemzones[KMEM_ZSIZE + 1];
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];
#endif
/* These won't be powers of two for long */
struct {
int size;
char *name;
} kmemsizes[] = {
{16, "16"},
{32, "32"},
{64, "64"},
{128, "128"},
{256, "256"},
{512, "512"},
{1024, "1024"},
{2048, "2048"},
{4096, "4096"},
{8192, "8192"},
{16384, "16384"},
{32768, "32768"},
{65536, "65536"},
int kz_size;
char *kz_name;
uma_zone_t kz_zone;
} kmemzones[] = {
{16, "16", NULL},
{32, "32", NULL},
{64, "64", NULL},
{128, "128", NULL},
{256, "256", NULL},
{512, "512", NULL},
{1024, "1024", NULL},
{2048, "2048", NULL},
{4096, "4096", NULL},
{8192, "8192", NULL},
{16384, "16384", NULL},
{32768, "32768", NULL},
{65536, "65536", NULL},
{0, NULL},
};
u_int vm_kmem_size;
static struct mtx malloc_mtx;
u_int vm_kmem_size;
static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);
/*
* malloc:
@ -133,8 +141,7 @@ malloc(size, type, flags)
struct malloc_type *type;
int flags;
{
int s;
long indx;
int indx;
caddr_t va;
uma_zone_t zone;
register struct malloc_type *ksp = type;
@ -144,52 +151,34 @@ malloc(size, type, flags)
KASSERT(curthread->td_intr_nesting_level == 0,
("malloc(M_WAITOK) in interrupt context"));
#endif
s = splmem();
/* mtx_lock(&malloc_mtx); XXX */
while (ksp->ks_memuse >= ksp->ks_limit) {
if (flags & M_NOWAIT) {
splx(s);
/* mtx_unlock(&malloc_mtx); XXX */
return ((void *) NULL);
}
if (ksp->ks_limblocks < 65535)
ksp->ks_limblocks++;
msleep((caddr_t)ksp, /* &malloc_mtx */ NULL, PSWP+2, type->ks_shortdesc,
0);
}
/* mtx_unlock(&malloc_mtx); XXX */
if (size <= KMEM_ZMAX) {
indx = size;
if (indx & KMEM_ZMASK)
indx = (indx & ~KMEM_ZMASK) + KMEM_ZBASE;
zone = kmemzones[indx >> KMEM_ZSHIFT];
indx = zone->uz_size;
if (size & KMEM_ZMASK)
size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
indx = kmemsize[size >> KMEM_ZSHIFT];
zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
krequests[size >> KMEM_ZSHIFT]++;
#endif
va = uma_zalloc(zone, flags);
if (va == NULL) {
/* mtx_lock(&malloc_mtx); XXX */
if (va == NULL)
goto out;
}
ksp->ks_size |= indx;
ksp->ks_size |= 1 << indx;
size = zone->uz_size;
} else {
/* XXX This is not the next power of two so this will break ks_size */
indx = roundup(size, PAGE_SIZE);
size = roundup(size, PAGE_SIZE);
zone = NULL;
va = uma_large_malloc(size, flags);
if (va == NULL) {
/* mtx_lock(&malloc_mtx); XXX */
if (va == NULL)
goto out;
}
}
/* mtx_lock(&malloc_mtx); XXX */
ksp->ks_memuse += indx;
ksp->ks_memuse += size;
ksp->ks_inuse++;
out:
ksp->ks_calls++;
if (ksp->ks_memuse > ksp->ks_maxused)
ksp->ks_maxused = ksp->ks_memuse;
splx(s);
/* mtx_unlock(&malloc_mtx); XXX */
/* XXX: Do idle pre-zeroing. */
if (va != NULL && (flags & M_ZERO))
bzero(va, size);
@ -211,7 +200,6 @@ free(addr, type)
uma_slab_t slab;
void *mem;
u_long size;
int s;
register struct malloc_type *ksp = type;
/* free(NULL, ...) does nothing */
@ -219,13 +207,13 @@ free(addr, type)
return;
size = 0;
s = splmem();
mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
slab = hash_sfind(mallochash, mem);
if (slab == NULL)
panic("free: address %p(%p) has not been allocated.\n", addr, mem);
panic("free: address %p(%p) has not been allocated.\n",
addr, mem);
if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
size = slab->us_zone->uz_size;
@ -234,15 +222,8 @@ free(addr, type)
size = slab->us_size;
uma_large_free(slab);
}
/* mtx_lock(&malloc_mtx); XXX */
ksp->ks_memuse -= size;
if (ksp->ks_memuse + size >= ksp->ks_limit &&
ksp->ks_memuse < ksp->ks_limit)
wakeup((caddr_t)ksp);
ksp->ks_inuse--;
splx(s);
/* mtx_unlock(&malloc_mtx); XXX */
}
/*
@ -316,7 +297,7 @@ static void
kmeminit(dummy)
void *dummy;
{
register long indx;
u_int8_t indx;
u_long npg;
u_long mem_size;
void *hashmem;
@ -324,7 +305,7 @@ kmeminit(dummy)
int highbit;
int bits;
int i;
mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
/*
@ -394,15 +375,15 @@ kmeminit(dummy)
hashmem = (void *)kmem_alloc(kernel_map, (vm_size_t)hashsize);
uma_startup2(hashmem, hashsize / sizeof(void *));
for (i = 0, indx = 0; kmemsizes[indx].size != 0; indx++) {
uma_zone_t zone;
int size = kmemsizes[indx].size;
char *name = kmemsizes[indx].name;
for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
int size = kmemzones[indx].kz_size;
char *name = kmemzones[indx].kz_name;
zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
kmemzones[indx].kz_zone = uma_zcreate(name, size, NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
for (;i <= size; i+= KMEM_ZBASE)
kmemzones[i >> KMEM_ZSHIFT] = zone;
kmemsize[i >> KMEM_ZSHIFT] = indx;
}
}
@ -413,22 +394,19 @@ malloc_init(data)
{
struct malloc_type *type = (struct malloc_type *)data;
mtx_lock(&malloc_mtx);
if (type->ks_magic != M_MAGIC)
panic("malloc type lacks magic");
if (type->ks_limit != 0)
return;
if (cnt.v_page_count == 0)
panic("malloc_init not allowed before vm init");
/*
* The default limits for each malloc region is 1/2 of the
* malloc portion of the kmem map size.
*/
type->ks_limit = vm_kmem_size / 2;
if (type->ks_next != NULL)
return;
type->ks_next = kmemstatistics;
kmemstatistics = type;
mtx_unlock(&malloc_mtx);
}
void
@ -438,15 +416,13 @@ malloc_uninit(data)
struct malloc_type *type = (struct malloc_type *)data;
struct malloc_type *t;
mtx_lock(&malloc_mtx);
if (type->ks_magic != M_MAGIC)
panic("malloc type lacks magic");
if (cnt.v_page_count == 0)
panic("malloc_uninit not allowed before vm init");
if (type->ks_limit == 0)
panic("malloc_uninit on uninitialized type");
if (type == kmemstatistics)
kmemstatistics = type->ks_next;
else {
@ -458,5 +434,80 @@ malloc_uninit(data)
}
}
type->ks_next = NULL;
type->ks_limit = 0;
mtx_unlock(&malloc_mtx);
}
static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
struct malloc_type *type;
int linesize = 128;
int curline;
int bufsize;
int first;
int error;
char *buf;
char *p;
int cnt;
int len;
int i;
cnt = 0;
mtx_lock(&malloc_mtx);
for (type = kmemstatistics; type != NULL; type = type->ks_next)
cnt++;
bufsize = linesize * (cnt + 1);
p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
len = snprintf(p, linesize,
"\n Type InUse MemUse HighUse Requests Size(s)\n");
p += len;
for (type = kmemstatistics; cnt != 0 && type != NULL;
type = type->ks_next, cnt--) {
if (type->ks_calls == 0)
continue;
curline = linesize - 2; /* Leave room for the \n */
len = snprintf(p, curline, "%13s%6ld%6ldK%7ldK%9llu",
type->ks_shortdesc,
type->ks_inuse,
(type->ks_memuse + 1023) / 1024,
(type->ks_maxused + 1023) / 1024,
(long long unsigned)type->ks_calls);
curline -= len;
p += len;
first = 1;
for (i = 0; i < 14/* 8 * sizeof(type->ks_size)*/; i++)
if (type->ks_size & (1 << i)) {
if (first)
len = snprintf(p, curline, " ");
else
len = snprintf(p, curline, ",");
curline -= len;
p += len;
len = snprintf(p, curline,
"%s", kmemzones[i].kz_name);
curline -= len;
p += len;
first = 0;
}
len = snprintf(p, 2, "\n");
p += len;
}
mtx_unlock(&malloc_mtx);
error = SYSCTL_OUT(req, buf, p - buf);
free(buf, M_TEMP);
return (error);
}
SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");


@ -56,21 +56,18 @@
struct malloc_type {
struct malloc_type *ks_next; /* next in list */
long ks_memuse; /* total memory held in bytes */
long ks_limit; /* most that are allowed to exist */
long ks_size; /* sizes of this thing that are allocated */
long ks_inuse; /* # of packets of this type currently in use */
uint64_t ks_calls; /* total packets of this type ever allocated */
long ks_maxused; /* maximum number ever used */
u_long ks_magic; /* if it's not magic, don't touch it */
const char *ks_shortdesc; /* short description */
u_short ks_limblocks; /* number of times blocked for hitting limit */
u_short ks_mapblocks; /* number of times blocked for kernel map */
};
#ifdef _KERNEL
#define MALLOC_DEFINE(type, shortdesc, longdesc) \
struct malloc_type type[1] = { \
{ NULL, 0, 0, 0, 0, 0, 0, M_MAGIC, shortdesc, 0, 0 } \
{ NULL, 0, 0, 0, 0, 0, M_MAGIC, shortdesc } \
}; \
SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_SECOND, malloc_init, type); \
SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY, malloc_uninit, type)


@ -98,22 +98,20 @@ static struct nlist namelist[] = {
{ "_intrcnt" },
#define X_EINTRCNT 9
{ "_eintrcnt" },
#define X_KMEMSTATISTICS 10
{ "_kmemstatistics" },
#ifdef notyet
#define X_DEFICIT 12
#define X_DEFICIT 10
{ "_deficit" },
#define X_FORKSTAT 13
#define X_FORKSTAT 11
{ "_forkstat" },
#define X_REC 14
#define X_REC 12
{ "_rectime" },
#define X_PGIN 15
#define X_PGIN 13
{ "_pgintime" },
#define X_XSTATS 16
#define X_XSTATS 14
{ "_xstats" },
#define X_END 17
#define X_END 15
#else
#define X_END 18
#define X_END 10
#endif
{ "" },
};
@ -147,6 +145,7 @@ kvm_t *kd;
static void cpustats(void);
static void devstats(void);
static void dosysctl(char *);
static void domem(void);
static void dointr(void);
static void dosum(void);
@ -753,73 +752,20 @@ dointr()
inttotal / (u_int64_t) uptime);
}
#define MAX_KMSTATS 200
void
domem()
domem(void)
{
struct malloc_type *ks;
int i, j;
int first, nkms;
long totuse = 0, totfree = 0;
uint64_t totreq = 0;
struct malloc_type kmemstats[MAX_KMSTATS], *kmsp;
char buf[1024];
kread(X_KMEMSTATISTICS, &kmsp, sizeof(kmsp));
for (nkms = 0; nkms < MAX_KMSTATS && kmsp != NULL; nkms++) {
if (sizeof(kmemstats[0]) != kvm_read(kd, (u_long)kmsp,
&kmemstats[nkms], sizeof(kmemstats[0])))
err(1, "kvm_read(%p)", (void *)kmsp);
if (sizeof(buf) != kvm_read(kd,
(u_long)kmemstats[nkms].ks_shortdesc, buf, sizeof(buf)))
err(1, "kvm_read(%p)",
(const void *)kmemstats[nkms].ks_shortdesc);
buf[sizeof(buf) - 1] = '\0';
kmemstats[nkms].ks_shortdesc = strdup(buf);
kmsp = kmemstats[nkms].ks_next;
}
(void)printf(
"\nMemory statistics by type Type Kern\n");
(void)printf(
" Type InUse MemUse HighUse Limit Requests Limit Limit Size(s)\n");
for (i = 0, ks = &kmemstats[0]; i < nkms; i++, ks++) {
if (ks->ks_calls == 0)
continue;
(void)printf("%13s%6ld%6ldK%7ldK%6ldK%9llu%5u%6u",
ks->ks_shortdesc,
ks->ks_inuse, (ks->ks_memuse + 1023) / 1024,
(ks->ks_maxused + 1023) / 1024,
(ks->ks_limit + 1023) / 1024,
(unsigned long long)ks->ks_calls,
ks->ks_limblocks, ks->ks_mapblocks);
first = 1;
for (j = 1 << MINBUCKET; j < 1 << (MINBUCKET + 16); j <<= 1) {
if ((ks->ks_size & j) == 0)
continue;
if (first)
(void)printf(" ");
else
(void)printf(",");
if(j<1024)
(void)printf("%d",j);
else
(void)printf("%dK",j>>10);
first = 0;
}
(void)printf("\n");
totuse += ks->ks_memuse;
totreq += ks->ks_calls;
}
(void)printf("\nMemory Totals: In Use Free Requests\n");
(void)printf(" %7ldK %6ldK %13llu\n",
(totuse + 1023) / 1024, (totfree + 1023) / 1024,
(unsigned long long)totreq);
dosysctl("kern.malloc");
}
void
dozmem()
dozmem(void)
{
dosysctl("vm.zone");
}
void
dosysctl(char *name)
{
char *buf;
size_t bufsize;
@ -829,7 +775,7 @@ dozmem()
for (;;) {
if ((buf = realloc(buf, bufsize)) == NULL)
err(1, "realloc()");
if (sysctlbyname("vm.zone", buf, &bufsize, 0, NULL) == 0)
if (sysctlbyname(name, buf, &bufsize, 0, NULL) == 0)
break;
if (errno != ENOMEM)
err(1, "sysctl()");