malloc: move malloc_type_internal into malloc_type

According to code comments, the original motivation was to allow
malloc_type_internal to change without ABI breakage. This can be trivially
accomplished by providing spare fields and versioning the struct, as
implemented in the patch below.

The upshots are one less memory indirection on each allocation and the
disappearance of mt_zone.

Reviewed by:	markj
Differential Revision:	https://reviews.freebsd.org/D27104
This commit is contained in:
mjg 2020-11-06 21:33:59 +00:00
parent 2f331e493f
commit bf8d89a375
5 changed files with 46 additions and 61 deletions

View File

@ -317,7 +317,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
int hint_dontsearch, j, mp_maxcpus, mp_ncpus, ret; int hint_dontsearch, j, mp_maxcpus, mp_ncpus, ret;
char name[MEMTYPE_MAXNAME]; char name[MEMTYPE_MAXNAME];
struct malloc_type_stats mts; struct malloc_type_stats mts;
struct malloc_type_internal mti, *mtip; struct malloc_type_internal *mtip;
struct malloc_type type, *typep; struct malloc_type type, *typep;
kvm_t *kvm; kvm_t *kvm;
@ -372,18 +372,17 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
list->mtl_error = ret; list->mtl_error = ret;
return (-1); return (-1);
} }
if (type.ks_version != M_VERSION) {
warnx("type %s with unsupported version %lu; skipped",
name, type.ks_version);
continue;
}
/* /*
* Since our compile-time value for MAXCPU may differ from the * Since our compile-time value for MAXCPU may differ from the
* kernel's, we populate our own array. * kernel's, we populate our own array.
*/ */
mtip = type.ks_handle; mtip = &type.ks_mti;
ret = kread(kvm, mtip, &mti, sizeof(mti), 0);
if (ret != 0) {
_memstat_mtl_empty(list);
list->mtl_error = ret;
return (-1);
}
if (hint_dontsearch == 0) { if (hint_dontsearch == 0) {
mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name); mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
@ -404,7 +403,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
*/ */
_memstat_mt_reset_stats(mtp, mp_maxcpus); _memstat_mt_reset_stats(mtp, mp_maxcpus);
for (j = 0; j < mp_ncpus; j++) { for (j = 0; j < mp_ncpus; j++) {
ret = kread_zpcpu(kvm, (u_long)mti.mti_stats, &mts, ret = kread_zpcpu(kvm, (u_long)mtip->mti_stats, &mts,
sizeof(mts), j); sizeof(mts), j);
if (ret != 0) { if (ret != 0) {
_memstat_mtl_empty(list); _memstat_mtl_empty(list);

View File

@ -114,7 +114,7 @@ static void
dtmalloc_type_cb(struct malloc_type *mtp, void *arg __unused) dtmalloc_type_cb(struct malloc_type *mtp, void *arg __unused)
{ {
char name[DTRACE_FUNCNAMELEN]; char name[DTRACE_FUNCNAMELEN];
struct malloc_type_internal *mtip = mtp->ks_handle; struct malloc_type_internal *mtip = &mtp->ks_mti;
int i; int i;
/* /*

View File

@ -175,14 +175,8 @@ struct {
}; };
/* /*
* Zone to allocate malloc type descriptions from. For ABI reasons, memory * Zone to allocate per-CPU storage for statistics.
* types are described by a data structure passed by the declaring code, but
* the malloc(9) implementation has its own data structure describing the
* type and statistics. This permits the malloc(9)-internal data structures
* to be modified without breaking binary-compiled kernel modules that
* declare malloc types.
*/ */
static uma_zone_t mt_zone;
static uma_zone_t mt_stats_zone; static uma_zone_t mt_stats_zone;
u_long vm_kmem_size; u_long vm_kmem_size;
@ -342,7 +336,7 @@ mtp_set_subzone(struct malloc_type *mtp)
size_t len; size_t len;
u_int val; u_int val;
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
desc = mtp->ks_shortdesc; desc = mtp->ks_shortdesc;
if (desc == NULL || (len = strlen(desc)) == 0) if (desc == NULL || (len = strlen(desc)) == 0)
val = 0; val = 0;
@ -356,7 +350,7 @@ mtp_get_subzone(struct malloc_type *mtp)
{ {
struct malloc_type_internal *mtip; struct malloc_type_internal *mtip;
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
KASSERT(mtip->mti_zone < numzones, KASSERT(mtip->mti_zone < numzones,
("mti_zone %u out of range %d", ("mti_zone %u out of range %d",
@ -371,7 +365,7 @@ mtp_set_subzone(struct malloc_type *mtp)
{ {
struct malloc_type_internal *mtip; struct malloc_type_internal *mtip;
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
mtip->mti_zone = 0; mtip->mti_zone = 0;
} }
@ -404,7 +398,7 @@ malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
struct malloc_type_stats *mtsp; struct malloc_type_stats *mtsp;
critical_enter(); critical_enter();
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
mtsp = zpcpu_get(mtip->mti_stats); mtsp = zpcpu_get(mtip->mti_stats);
if (size > 0) { if (size > 0) {
mtsp->mts_memalloced += size; mtsp->mts_memalloced += size;
@ -447,7 +441,7 @@ malloc_type_freed(struct malloc_type *mtp, unsigned long size)
struct malloc_type_stats *mtsp; struct malloc_type_stats *mtsp;
critical_enter(); critical_enter();
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
mtsp = zpcpu_get(mtip->mti_stats); mtsp = zpcpu_get(mtip->mti_stats);
mtsp->mts_memfreed += size; mtsp->mts_memfreed += size;
mtsp->mts_numfrees++; mtsp->mts_numfrees++;
@ -524,7 +518,7 @@ malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
#ifdef INVARIANTS #ifdef INVARIANTS
int indx; int indx;
KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic")); KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
/* /*
* Check that exactly one of M_WAITOK or M_NOWAIT is specified. * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
*/ */
@ -848,7 +842,7 @@ free_dbg(void **addrp, struct malloc_type *mtp)
void *addr; void *addr;
addr = *addrp; addr = *addrp;
KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic")); KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
("free: called with spinlock or critical section held")); ("free: called with spinlock or critical section held"));
@ -965,8 +959,8 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
unsigned long alloc; unsigned long alloc;
void *newaddr; void *newaddr;
KASSERT(mtp->ks_magic == M_MAGIC, KASSERT(mtp->ks_version == M_VERSION,
("realloc: bad malloc type magic")); ("realloc: bad malloc type version"));
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
("realloc: called with spinlock or critical section held")); ("realloc: called with spinlock or critical section held"));
@ -1193,13 +1187,6 @@ mallocinit(void *dummy)
mt_stats_zone = uma_zcreate("mt_stats_zone", mt_stats_zone = uma_zcreate("mt_stats_zone",
sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL, sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_PCPU); UMA_ALIGN_PTR, UMA_ZONE_PCPU);
mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
NULL, NULL, NULL, NULL,
#endif
UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
int size = kmemzones[indx].kz_size; int size = kmemzones[indx].kz_size;
const char *name = kmemzones[indx].kz_name; const char *name = kmemzones[indx].kz_name;
@ -1230,12 +1217,12 @@ malloc_init(void *data)
KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init")); KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
mtp = data; mtp = data;
if (mtp->ks_magic != M_MAGIC) if (mtp->ks_version != M_VERSION)
panic("malloc_init: bad malloc type magic"); panic("malloc_init: unsupported malloc type version %lu",
mtp->ks_version);
mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); mtip = &mtp->ks_mti;
mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO); mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO);
mtp->ks_handle = mtip;
mtp_set_subzone(mtp); mtp_set_subzone(mtp);
mtx_lock(&malloc_mtx); mtx_lock(&malloc_mtx);
@ -1251,18 +1238,15 @@ malloc_uninit(void *data)
struct malloc_type_internal *mtip; struct malloc_type_internal *mtip;
struct malloc_type_stats *mtsp; struct malloc_type_stats *mtsp;
struct malloc_type *mtp, *temp; struct malloc_type *mtp, *temp;
uma_slab_t slab;
long temp_allocs, temp_bytes; long temp_allocs, temp_bytes;
int i; int i;
mtp = data; mtp = data;
KASSERT(mtp->ks_magic == M_MAGIC, KASSERT(mtp->ks_version == M_VERSION,
("malloc_uninit: bad malloc type magic")); ("malloc_uninit: bad malloc type version"));
KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
mtx_lock(&malloc_mtx); mtx_lock(&malloc_mtx);
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
mtp->ks_handle = NULL;
if (mtp != kmemstatistics) { if (mtp != kmemstatistics) {
for (temp = kmemstatistics; temp != NULL; for (temp = kmemstatistics; temp != NULL;
temp = temp->ks_next) { temp = temp->ks_next) {
@ -1295,9 +1279,7 @@ malloc_uninit(void *data)
temp_allocs, temp_bytes); temp_allocs, temp_bytes);
} }
slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats); uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats);
uma_zfree_arg(mt_zone, mtip, slab);
} }
struct malloc_type * struct malloc_type *
@ -1346,7 +1328,7 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
* Insert alternating sequence of type headers and type statistics. * Insert alternating sequence of type headers and type statistics.
*/ */
for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
mtip = (struct malloc_type_internal *)mtp->ks_handle; mtip = &mtp->ks_mti;
/* /*
* Insert type header. * Insert type header.
@ -1483,7 +1465,7 @@ DB_SHOW_COMMAND(malloc, db_show_malloc)
ties = 1; ties = 1;
continue; continue;
} }
size = get_malloc_stats(mtp->ks_handle, &allocs, size = get_malloc_stats(&mtp->ks_mti, &allocs,
&inuse); &inuse);
if (size > cur_size && size < last_size + ties) { if (size > cur_size && size < last_size + ties) {
cur_size = size; cur_size = size;
@ -1493,7 +1475,7 @@ DB_SHOW_COMMAND(malloc, db_show_malloc)
if (cur_mtype == NULL) if (cur_mtype == NULL)
break; break;
size = get_malloc_stats(cur_mtype->ks_handle, &allocs, &inuse); size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse, db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
howmany(size, 1024), allocs); howmany(size, 1024), allocs);
@ -1517,17 +1499,17 @@ DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
return; return;
} }
mtp = (void *)addr; mtp = (void *)addr;
if (mtp->ks_magic != M_MAGIC) { if (mtp->ks_version != M_VERSION) {
db_printf("Magic %lx does not match expected %x\n", db_printf("Version %lx does not match expected %x\n",
mtp->ks_magic, M_MAGIC); mtp->ks_version, M_VERSION);
return; return;
} }
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
subzone = mtip->mti_zone; subzone = mtip->mti_zone;
for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
mtip = mtp->ks_handle; mtip = &mtp->ks_mti;
if (mtip->mti_zone != subzone) if (mtip->mti_zone != subzone)
continue; continue;
db_printf("%s\n", mtp->ks_shortdesc); db_printf("%s\n", mtp->ks_shortdesc);

View File

@ -63,7 +63,7 @@
#define M_EXEC 0x4000 /* allocate executable space */ #define M_EXEC 0x4000 /* allocate executable space */
#define M_NEXTFIT 0x8000 /* only for vmem, follow cursor */ #define M_NEXTFIT 0x8000 /* only for vmem, follow cursor */
#define M_MAGIC 877983977 /* time when first defined :-) */ #define M_VERSION 2020110501
/* /*
* Two malloc type structures are present: malloc_type, which is used by a * Two malloc type structures are present: malloc_type, which is used by a
@ -104,18 +104,17 @@ struct malloc_type_internal {
/* DTrace probe ID array. */ /* DTrace probe ID array. */
u_char mti_zone; u_char mti_zone;
struct malloc_type_stats *mti_stats; struct malloc_type_stats *mti_stats;
u_long mti_spare[8];
}; };
/* /*
* Public data structure describing a malloc type. Private data is hung off * Public data structure describing a malloc type.
* of ks_handle to avoid encoding internal malloc(9) data structures in
* modules, which will statically allocate struct malloc_type.
*/ */
struct malloc_type { struct malloc_type {
struct malloc_type *ks_next; /* Next in global chain. */ struct malloc_type *ks_next; /* Next in global chain. */
u_long ks_magic; /* Detect programmer error. */ u_long ks_version; /* Detect programmer error. */
const char *ks_shortdesc; /* Printable type name. */ const char *ks_shortdesc; /* Printable type name. */
void *ks_handle; /* Priv. data, was lo_class. */ struct malloc_type_internal ks_mti;
}; };
/* /*
@ -141,7 +140,12 @@ struct malloc_type_header {
#ifdef _KERNEL #ifdef _KERNEL
#define MALLOC_DEFINE(type, shortdesc, longdesc) \ #define MALLOC_DEFINE(type, shortdesc, longdesc) \
struct malloc_type type[1] = { \ struct malloc_type type[1] = { \
{ NULL, M_MAGIC, shortdesc, NULL } \ { \
.ks_next = NULL, \
.ks_version = M_VERSION, \
.ks_shortdesc = shortdesc, \
.ks_mti = { 0 }, \
} \
}; \ }; \
SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init, \ SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_THIRD, malloc_init, \
type); \ type); \

View File

@ -60,7 +60,7 @@
* in the range 5 to 9. * in the range 5 to 9.
*/ */
#undef __FreeBSD_version #undef __FreeBSD_version
#define __FreeBSD_version 1300126 /* Master, propagated to newvers */ #define __FreeBSD_version 1300127 /* Master, propagated to newvers */
/* /*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,