malloc: move malloc_type_internal into malloc_type
According to code comments, the original motivation was to allow for
malloc_type_internal changes without ABI breakage. This can be trivially
accomplished by providing spare fields and versioning the struct, as
implemented in the patch below. The upshots are one less memory indirection
on each alloc and the disappearance of mt_zone.

Reviewed by:	markj
Differential Revision:	https://reviews.freebsd.org/D27104
commit  bf8d89a375
parent  2f331e493f
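To make the message above concrete, here is a small illustrative sketch (not the kernel's verbatim definitions; see the sys/sys/malloc.h hunks below for those, and note that the DTrace probe-ID array is omitted) of what the change does to the layout and to the per-allocation access path: the internal descriptor stops being a separately allocated object reached through the opaque ks_handle pointer and becomes a member of struct malloc_type itself, with spare fields plus a version constant taking over the ABI-stability role previously played by the indirection.

    #include <sys/types.h>

    struct malloc_type_stats;                   /* per-CPU counters, opaque here */

    struct malloc_type_internal {               /* mirrors the new header layout */
        u_char  mti_zone;
        struct malloc_type_stats *mti_stats;
        u_long  mti_spare[8];                   /* room to grow without ABI breakage */
    };

    /* Before: the public object carried only an opaque pointer. */
    struct malloc_type_before {
        struct malloc_type_before *ks_next;
        u_long  ks_magic;                       /* sanity constant (M_MAGIC) */
        const char *ks_shortdesc;
        void    *ks_handle;                     /* -> malloc_type_internal from mt_zone */
    };

    /* After: the internal data is embedded; ks_version is checked at malloc_init(). */
    struct malloc_type_after {
        struct malloc_type_after *ks_next;
        u_long  ks_version;                     /* must equal M_VERSION */
        const char *ks_shortdesc;
        struct malloc_type_internal ks_mti;
    };

    /* The per-allocation statistics lookup loses one dependent pointer load. */
    static struct malloc_type_stats *
    stats_before(struct malloc_type_before *mtp)
    {
        struct malloc_type_internal *mtip = mtp->ks_handle;     /* extra load */

        return (mtip->mti_stats);
    }

    static struct malloc_type_stats *
    stats_after(struct malloc_type_after *mtp)
    {
        return (mtp->ks_mti.mti_stats);         /* just an offset from mtp */
    }

With the old layout, malloc() and free() had to load ks_handle before they could find the per-CPU statistics; with the new one the offset is known at compile time, which is the "one less memory indirection on each alloc", and the separate mt_zone allocation that used to back ks_handle is no longer needed.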
--- a/lib/libmemstat/memstat_malloc.c
+++ b/lib/libmemstat/memstat_malloc.c
@@ -317,7 +317,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
 	int hint_dontsearch, j, mp_maxcpus, mp_ncpus, ret;
 	char name[MEMTYPE_MAXNAME];
 	struct malloc_type_stats mts;
-	struct malloc_type_internal mti, *mtip;
+	struct malloc_type_internal *mtip;
 	struct malloc_type type, *typep;
 	kvm_t *kvm;
 
@@ -372,18 +372,17 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
 			list->mtl_error = ret;
 			return (-1);
 		}
+		if (type.ks_version != M_VERSION) {
+			warnx("type %s with unsupported version %lu; skipped",
+			    name, type.ks_version);
+			continue;
+		}
 
 		/*
 		 * Since our compile-time value for MAXCPU may differ from the
 		 * kernel's, we populate our own array.
 		 */
-		mtip = type.ks_handle;
-		ret = kread(kvm, mtip, &mti, sizeof(mti), 0);
-		if (ret != 0) {
-			_memstat_mtl_empty(list);
-			list->mtl_error = ret;
-			return (-1);
-		}
+		mtip = &type.ks_mti;
 
 		if (hint_dontsearch == 0) {
 			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
@@ -404,7 +403,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
 		 */
 		_memstat_mt_reset_stats(mtp, mp_maxcpus);
 		for (j = 0; j < mp_ncpus; j++) {
-			ret = kread_zpcpu(kvm, (u_long)mti.mti_stats, &mts,
+			ret = kread_zpcpu(kvm, (u_long)mtip->mti_stats, &mts,
 			    sizeof(mts), j);
 			if (ret != 0) {
 				_memstat_mtl_empty(list);
--- a/sys/cddl/dev/dtmalloc/dtmalloc.c
+++ b/sys/cddl/dev/dtmalloc/dtmalloc.c
@@ -114,7 +114,7 @@ static void
 dtmalloc_type_cb(struct malloc_type *mtp, void *arg __unused)
 {
 	char name[DTRACE_FUNCNAMELEN];
-	struct malloc_type_internal *mtip = mtp->ks_handle;
+	struct malloc_type_internal *mtip = &mtp->ks_mti;
 	int i;
 
 	/*
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -175,14 +175,8 @@ struct {
 };
 
 /*
- * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
- * types are described by a data structure passed by the declaring code, but
- * the malloc(9) implementation has its own data structure describing the
- * type and statistics.  This permits the malloc(9)-internal data structures
- * to be modified without breaking binary-compiled kernel modules that
- * declare malloc types.
+ * Zone to allocate per-CPU storage for statistics.
  */
-static uma_zone_t mt_zone;
 static uma_zone_t mt_stats_zone;
 
 u_long vm_kmem_size;
@@ -342,7 +336,7 @@ mtp_set_subzone(struct malloc_type *mtp)
 	size_t len;
 	u_int val;
 
-	mtip = mtp->ks_handle;
+	mtip = &mtp->ks_mti;
 	desc = mtp->ks_shortdesc;
 	if (desc == NULL || (len = strlen(desc)) == 0)
 		val = 0;
@@ -356,7 +350,7 @@ mtp_get_subzone(struct malloc_type *mtp)
 {
 	struct malloc_type_internal *mtip;
 
-	mtip = mtp->ks_handle;
+	mtip = &mtp->ks_mti;
 
 	KASSERT(mtip->mti_zone < numzones,
 	    ("mti_zone %u out of range %d",
@@ -371,7 +365,7 @@ mtp_set_subzone(struct malloc_type *mtp)
 {
 	struct malloc_type_internal *mtip;
 
-	mtip = mtp->ks_handle;
+	mtip = &mtp->ks_mti;
 	mtip->mti_zone = 0;
 }
 
@@ -404,7 +398,7 @@ malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
 	struct malloc_type_stats *mtsp;
 
 	critical_enter();
-	mtip = mtp->ks_handle;
+	mtip = &mtp->ks_mti;
 	mtsp = zpcpu_get(mtip->mti_stats);
 	if (size > 0) {
 		mtsp->mts_memalloced += size;
@@ -447,7 +441,7 @@ malloc_type_freed(struct malloc_type *mtp, unsigned long size)
 	struct malloc_type_stats *mtsp;
 
 	critical_enter();
-	mtip = mtp->ks_handle;
+	mtip = &mtp->ks_mti;
 	mtsp = zpcpu_get(mtip->mti_stats);
 	mtsp->mts_memfreed += size;
 	mtsp->mts_numfrees++;
@@ -524,7 +518,7 @@ malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
 #ifdef INVARIANTS
 	int indx;
 
-	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
+	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
 	/*
 	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
 	 */
@@ -848,7 +842,7 @@ free_dbg(void **addrp, struct malloc_type *mtp)
 	void *addr;
 
 	addr = *addrp;
-	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
+	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("free: called with spinlock or critical section held"));
 
@@ -965,8 +959,8 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
 	unsigned long alloc;
 	void *newaddr;
 
-	KASSERT(mtp->ks_magic == M_MAGIC,
-	    ("realloc: bad malloc type magic"));
+	KASSERT(mtp->ks_version == M_VERSION,
+	    ("realloc: bad malloc type version"));
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("realloc: called with spinlock or critical section held"));
 
@@ -1193,13 +1187,6 @@ mallocinit(void *dummy)
 	mt_stats_zone = uma_zcreate("mt_stats_zone",
 	    sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL,
 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
-	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
-#ifdef INVARIANTS
-	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
-#else
-	    NULL, NULL, NULL, NULL,
-#endif
-	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
 		int size = kmemzones[indx].kz_size;
 		const char *name = kmemzones[indx].kz_name;
@@ -1230,12 +1217,12 @@ malloc_init(void *data)
 	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
 
 	mtp = data;
-	if (mtp->ks_magic != M_MAGIC)
-		panic("malloc_init: bad malloc type magic");
+	if (mtp->ks_version != M_VERSION)
+		panic("malloc_init: unsupported malloc type version %lu",
+		    mtp->ks_version);
 
-	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
+	mtip = &mtp->ks_mti;
 	mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO);
-	mtp->ks_handle = mtip;
 	mtp_set_subzone(mtp);
 
 	mtx_lock(&malloc_mtx);
@@ -1251,18 +1238,15 @@ malloc_uninit(void *data)
 	struct malloc_type_internal *mtip;
 	struct malloc_type_stats *mtsp;
 	struct malloc_type *mtp, *temp;
-	uma_slab_t slab;
 	long temp_allocs, temp_bytes;
 	int i;
 
 	mtp = data;
-	KASSERT(mtp->ks_magic == M_MAGIC,
-	    ("malloc_uninit: bad malloc type magic"));
-	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
+	KASSERT(mtp->ks_version == M_VERSION,
+	    ("malloc_uninit: bad malloc type version"));
 
 	mtx_lock(&malloc_mtx);
-	mtip = mtp->ks_handle;
-	mtp->ks_handle = NULL;
+	mtip = &mtp->ks_mti;
 	if (mtp != kmemstatistics) {
 		for (temp = kmemstatistics; temp != NULL;
 		    temp = temp->ks_next) {
@@ -1295,9 +1279,7 @@ malloc_uninit(void *data)
 		    temp_allocs, temp_bytes);
 	}
 
-	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
 	uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats);
-	uma_zfree_arg(mt_zone, mtip, slab);
 }
 
 struct malloc_type *
@@ -1346,7 +1328,7 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
 	 * Insert alternating sequence of type headers and type statistics.
 	 */
 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
-		mtip = (struct malloc_type_internal *)mtp->ks_handle;
+		mtip = &mtp->ks_mti;
 
 		/*
 		 * Insert type header.
@@ -1483,7 +1465,7 @@ DB_SHOW_COMMAND(malloc, db_show_malloc)
 				ties = 1;
 				continue;
 			}
-			size = get_malloc_stats(mtp->ks_handle, &allocs,
+			size = get_malloc_stats(&mtp->ks_mti, &allocs,
 			    &inuse);
 			if (size > cur_size && size < last_size + ties) {
 				cur_size = size;
@@ -1493,7 +1475,7 @@ DB_SHOW_COMMAND(malloc, db_show_malloc)
 		if (cur_mtype == NULL)
 			break;
 
-		size = get_malloc_stats(cur_mtype->ks_handle, &allocs, &inuse);
+		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
 		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
 		    howmany(size, 1024), allocs);
 
@@ -1517,17 +1499,17 @@ DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
 		return;
 	}
 	mtp = (void *)addr;
-	if (mtp->ks_magic != M_MAGIC) {
-		db_printf("Magic %lx does not match expected %x\n",
-		    mtp->ks_magic, M_MAGIC);
+	if (mtp->ks_version != M_VERSION) {
+		db_printf("Version %lx does not match expected %x\n",
+		    mtp->ks_version, M_VERSION);
 		return;
 	}
 
-	mtip = mtp->ks_handle;
+	mtip = &mtp->ks_mti;
 	subzone = mtip->mti_zone;
 
 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
-		mtip = mtp->ks_handle;
+		mtip = &mtp->ks_mti;
 		if (mtip->mti_zone != subzone)
 			continue;
 		db_printf("%s\n", mtp->ks_shortdesc);
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -63,7 +63,7 @@
 #define	M_EXEC		0x4000		/* allocate executable space */
 #define	M_NEXTFIT	0x8000		/* only for vmem, follow cursor */
 
-#define	M_MAGIC		877983977	/* time when first defined :-) */
+#define	M_VERSION	2020110501
 
 /*
  * Two malloc type structures are present: malloc_type, which is used by a
@@ -104,18 +104,17 @@ struct malloc_type_internal {
 					/* DTrace probe ID array. */
 	u_char		mti_zone;
 	struct malloc_type_stats	*mti_stats;
+	u_long		mti_spare[8];
 };
 
 /*
- * Public data structure describing a malloc type.  Private data is hung off
- * of ks_handle to avoid encoding internal malloc(9) data structures in
- * modules, which will statically allocate struct malloc_type.
+ * Public data structure describing a malloc type.
  */
 struct malloc_type {
 	struct malloc_type *ks_next;	/* Next in global chain. */
-	u_long		ks_magic;	/* Detect programmer error. */
+	u_long		ks_version;	/* Detect programmer error. */
 	const char	*ks_shortdesc;	/* Printable type name. */
-	void		*ks_handle;	/* Priv. data, was lo_class. */
+	struct malloc_type_internal ks_mti;
 };
 
 /*
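The mti_spare[] field added above is the "spare fields" half of the approach described in the commit message: later additions to malloc_type_internal can be carved out of the spare area so that the structure, and therefore struct malloc_type which now embeds it, keeps its size, while M_VERSION only needs to be bumped when the layout genuinely changes shape. A hypothetical sketch follows (mti_newfield is invented, and the DTrace probe array is again omitted); the MALLOC_DEFINE() hunk of sys/sys/malloc.h continues after it.

    #include <sys/types.h>

    struct malloc_type_stats;

    struct mti_as_committed {                   /* the layout in this commit */
        u_char  mti_zone;
        struct malloc_type_stats *mti_stats;
        u_long  mti_spare[8];
    };

    struct mti_grown {                          /* a possible later revision */
        u_char  mti_zone;
        struct malloc_type_stats *mti_stats;
        u_long  mti_newfield;                   /* hypothetical addition */
        u_long  mti_spare[7];                   /* one spare consumed to pay for it */
    };

    /* Same footprint, so modules built against the old header still line up. */
    _Static_assert(sizeof(struct mti_as_committed) == sizeof(struct mti_grown),
        "growing into the spare area must not change the structure size");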
@@ -141,7 +140,12 @@ struct malloc_type_header {
 #ifdef _KERNEL
 #define	MALLOC_DEFINE(type, shortdesc, longdesc)			\
 	struct malloc_type type[1] = {					\
-		{ NULL, M_MAGIC, shortdesc, NULL }			\
+		{							\
+			.ks_next = NULL,				\
+			.ks_version = M_VERSION,			\
+			.ks_shortdesc = shortdesc,			\
+			.ks_mti = { 0 },				\
+		}							\
 	};								\
 	SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init,	\
 	    type);							\
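Module source code is unaffected: MALLOC_DEFINE() still statically allocates the struct malloc_type and registers it through the SYSINIT shown above, and the version it bakes in is what malloc_init() checks at load time. A minimal, hypothetical consumer (M_EXAMPLE and the helper names are invented) looks the same before and after this commit:

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>

    MALLOC_DEFINE(M_EXAMPLE, "example", "example module buffers");

    static void *
    example_buf_alloc(size_t len)
    {
        /* M_WAITOK | M_ZERO: sleep until memory is available, return it zeroed. */
        return (malloc(len, M_EXAMPLE, M_WAITOK | M_ZERO));
    }

    static void
    example_buf_free(void *buf)
    {
        free(buf, M_EXAMPLE);
    }

What does change is the failure mode for stale binaries: a module built against an older header now trips the version panic in malloc_init() instead of handing malloc(9) an incompatible structure through ks_handle.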
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -60,7 +60,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1300126	/* Master, propagated to newvers */
+#define __FreeBSD_version 1300127	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,