Add libmemstat(3), a library for use by debugging and monitoring
applications in tracking kernel memory statistics.  It provides an
abstracted interface to uma(9) and malloc(9) statistics, wrapped
around the recently added binary stream sysctls for the allocators.

Using this interface, it is easy to build monitoring tools, query
specific memory types for usage information, etc.  Facilities are
provided for binding caller-provided data to memory types,
incremental updates of memory types, and queries that span multiple
allocators.

Support for additional allocators is (relatively) easy to add.

The API for libmemstat(3) will probably change somewhat over time as
consumers are written and requirements evolve.  For this reason, it is
written to avoid encoding data structure layout ABIs into consuming
applications.

MFC after:	1 week
rwatson 2005-07-14 17:40:02 +00:00
parent 83343a94ec
commit 2dbb0a62ae
8 changed files with 1402 additions and 0 deletions

lib/libmemstat/Makefile

@ -0,0 +1,23 @@
# $FreeBSD$
WARNS?= 3
LIB= memstat
SHLIB_MAJOR= 1
SRCS+= memstat.c
SRCS+= memstat_all.c
SRCS+= memstat_malloc.c
SRCS+= memstat_uma.c
INCS= memstat.h
MAN= libmemstat.3
MLINKS+= libmemstat.3 memstat_mtl_alloc.3
MLINKS+= libmemstat.3 memstat_mtl_first.3
MLINKS+= libmemstat.3 memstat_mtl_next.3
MLINKS+= libmemstat.3 memstat_mtl_find.3
MLINKS+= libmemstat.3 memstat_mtl_free.3
MLINKS+= libmemstat.3 memstat_sysctl_all.3
MLINKS+= libmemstat.3 memstat_sysctl_malloc.3
MLINKS+= libmemstat.3 memstat_sysctl_uma.3
.include <bsd.lib.mk>

lib/libmemstat/libmemstat.3

@ -0,0 +1,238 @@
.\" Copyright (c) 2005 Robert N. M. Watson
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd June 27, 2005
.Dt LIBMEMSTAT 3
.Os
.Sh NAME
.Nm libmemstat
.Nd "library interface to retrieve kernel memory allocator statistics"
.Sh LIBRARY
.Lb libmemstat
.Sh SYNOPSIS
.In sys/types.h
.In memstat.h
.Ss Memory Type List Management Functions
.Ft struct memory_type_list *
.Fn memstat_mtl_alloc "void"
.Ft struct memory_type *
.Fn memstat_mtl_first "struct memory_type_list *list"
.Ft struct memory_type *
.Fn memstat_mtl_next "struct memory_type *mtp"
.Ft struct memory_type *
.Fn memstat_mtl_find "struct memory_type_list *list" "int allocator" "const char *name"
.Ft void
.Fn memstat_mtl_free "struct memory_type_list *list"
.Ss Allocator Query Functions
.Ft int
.Fn memstat_sysctl_all "struct memory_type_list *list" "int flags"
.Ft int
.Fn memstat_sysctl_malloc "struct memory_type_list *list" "int flags"
.Ft int
.Fn memstat_sysctl_uma "struct memory_type_list *list" "int flags"
.Ss Memory Type Accessor Methods
.Ft const char *
.Fn memstat_get_name "const struct memory_type *mtp"
.Ft int
.Fn memstat_get_allocator "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_countlimit "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_byteslimit "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_sizemask "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_size "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_memalloced "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_memfreed "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_numallocs "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_numfrees "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_bytes "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_count "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_free "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_failures "const struct memory_type *mtp"
.Ft void *
.Fn memstat_get_caller_pointer "const struct memory_type *mtp" "int index"
.Ft void
.Fn memstat_set_caller_pointer "struct memory_type *mtp" "int index" "void *value"
.Ft uint64_t
.Fn memstat_get_caller_uint64 "const struct memory_type *mtp" "int index"
.Ft void
.Fn memstat_set_caller_uint64 "struct memory_type *mtp" "int index" "uint64_t value"
.Ft uint64_t
.Fn memstat_get_zonefree "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_percpu_memalloced "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_memfreed "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_numallocs "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_numfrees "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_sizemask "const struct memory_type *mtp" "int cpu"
.Ft void *
.Fn memstat_get_percpu_caller_pointer "const struct memory_type *mtp" "int cpu" "int index"
.Ft void
.Fn memstat_set_percpu_caller_pointer "struct memory_type *mtp" "int cpu" "int index" "void *value"
.Ft uint64_t
.Fn memstat_get_percpu_caller_uint64 "const struct memory_type *mtp" "int cpu" "int index"
.Ft void
.Fn memstat_set_percpu_caller_uint64 "struct memory_type *mtp" "int cpu" "int index" "uint64_t value"
.Ft uint64_t
.Fn memstat_get_percpu_free "const struct memory_type *mtp" "int cpu"
.Sh DESCRIPTION
.Nm
provides an interface to retrieve kernel memory allocator statistics, for
the purposes of debugging and system monitoring, insulating applications
from implementation details of the allocators, and allowing a tool to
transparently support multiple allocators.
.Nm
supports both retrieving a single statistics snapshot and
incrementally updating statistics for long-term monitoring.
.Pp
.Nm
describes each memory type using a
.Vt struct memory_type ,
an opaque memory type accessed by the application using accessor functions
in the library.
.Nm
returns and updates chains of
.Vt struct memory_type
via a
.Vt struct memory_type_list ,
which will be allocated by calling
.Fn memstat_mtl_alloc ,
and freed on completion using
.Fn memstat_mtl_free .
Lists of memory types are populated via calls that query the kernel for
statistics information; currently:
.Fn memstat_sysctl_all ,
.Fn memstat_sysctl_uma ,
and
.Fn memstat_sysctl_malloc .
Repeated calls will incrementally update the list of memory types, permitting
tracking over time without recreating all list state.
Freeing the list will free all memory type data in the list, and so
invalidates any outstanding pointers to entries in the list.
.Vt struct memory_type
entries in the list may be iterated over using
.Fn memstat_mtl_first
and
.Fn memstat_mtl_next ,
which respectively return the first entry in a list and the next entry in a
list.
A specific entry may be looked up by allocator and name using
.Fn memstat_mtl_find ,
which will return a pointer to the first matching entry.
.Pp
A series of accessor methods is provided to access fields of the structure,
including retrieving statistics and properties, as well as setting
caller-owned fields.
Direct application access to the data structure fields is not supported.
.Ss Library memory_type Fields
Each
.Vt struct memory_type
holds a description of the memory type, including its name and the allocator
it is managed by, as well as current statistics on use.
Some statistics are directly measured, others are derived from directly
measured statistics.
Certain high-level statistics are present across all available allocators,
such as the number of allocation and free operations; other measurements,
such as the quantity of free items in per-CPU caches or the administrative
limit on the number of allocations, are available only for specific
allocators.
.Ss User memory_type Fields
.Vt struct memory_type
includes fields appropriate for use by the application in order to more
easily maintain state associated with memory types across updates.
For example, the application author might make use of one of the caller
pointers to reference a more complex data structure tracking long-term
behavior of the memory type, or a window system object that is used to
render the state of the memory type.
Query updates may reset or otherwise modify all other fields in the
.Vt struct memory_type
data structure, but will preserve the caller-provided values, which will
be initialized to
.Dv 0
or
.Dv NULL
before first use.
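.Pp
For example (purely as a sketch; the
.Vt struct app_record
type and its
.Va samples
field are hypothetical application state), a monitor might attach its own
record to each memory type the first time the type is seen, and retrieve
that record on later updates:
.Bd -literal -offset indent
struct app_record *rec;

rec = memstat_get_caller_pointer(mtp, 0);
if (rec == NULL) {
	rec = calloc(1, sizeof(*rec));
	if (rec == NULL)
		err(-1, "calloc");
	memstat_set_caller_pointer(mtp, 0, rec);
}
rec->samples++;
.Ed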
.Sh EXAMPLES
Create a memory type list, query the
.Xr uma 9
memory allocator for available statistics, and print out the number of
current allocations in the
.Dv Mbuf
zone.
.Bd -literal -offset indent
struct memory_type_list *mtlp;
struct memory_type *mtp;
uint64_t mbuf_count;

mtlp = memstat_mtl_alloc();
if (mtlp == NULL)
	err(-1, "memstat_mtl_alloc");

if (memstat_sysctl_uma(mtlp, 0) < 0)
	err(-1, "memstat_sysctl_uma");

mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, "Mbuf");
if (mtp == NULL)
	errx(-1, "memstat_mtl_find: Mbuf not found");

mbuf_count = memstat_get_count(mtp);
memstat_mtl_free(mtlp);

printf("Mbufs: %llu\\n", (unsigned long long)mbuf_count);
.Ed
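.Pp
Because repeated query calls incrementally update an existing list, a
long-running monitor might instead (again, only as a sketch) poll in a
loop rather than rebuilding the list on each pass, printing the number of
cached free items for every known memory type:
.Bd -literal -offset indent
struct memory_type_list *mtlp;
struct memory_type *mtp;

mtlp = memstat_mtl_alloc();
if (mtlp == NULL)
	err(-1, "memstat_mtl_alloc");
for (;;) {
	if (memstat_sysctl_all(mtlp, 0) < 0)
		err(-1, "memstat_sysctl_all");
	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
	    mtp = memstat_mtl_next(mtp))
		printf("%s: %llu free items\\n", memstat_get_name(mtp),
		    (unsigned long long)memstat_get_free(mtp));
	sleep(1);
}
.Ed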
.Sh SEE ALSO
.Xr malloc 9 ,
.Xr uma 9
.Sh HISTORY
The
.Nm memstat
library appeared in
.Fx 6.0 .
.Sh AUTHORS
The kernel memory allocator changes necessary to support a general purpose
monitoring library, along with the library, were written by
.An Robert Watson Aq rwatson@FreeBSD.org .
.Sh BUGS
.Nm
cannot yet extract statistics from kernel core dumps, although this should be
straightforward to implement.
.Pp
Once a memory type is present on a memory type list, it will not be removed
even if the kernel no longer presents information on the type via its
monitoring interfaces.
In order to flush removed memory types, it is necessary to free the entire
list and allocate a new one.

lib/libmemstat/memstat.c

@ -0,0 +1,366 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memstat.h"
#include "memstat_internal.h"
struct memory_type_list *
memstat_mtl_alloc(void)
{
struct memory_type_list *mtlp;
mtlp = malloc(sizeof(*mtlp));
if (mtlp == NULL)
return (NULL);
LIST_INIT(mtlp);
return (mtlp);
}
struct memory_type *
memstat_mtl_first(struct memory_type_list *list)
{
return (LIST_FIRST(list));
}
struct memory_type *
memstat_mtl_next(struct memory_type *mtp)
{
return (LIST_NEXT(mtp, mt_list));
}
void
memstat_mtl_free(struct memory_type_list *list)
{
struct memory_type *mtp;
while ((mtp = LIST_FIRST(list))) {
LIST_REMOVE(mtp, mt_list);
free(mtp);
}
free(list);
}
/*
* Look for an existing memory_type entry in a memory_type list, based on the
* allocator and name of the type. If not found, return NULL. O(n).
*/
struct memory_type *
memstat_mtl_find(struct memory_type_list *list, int allocator,
const char *name)
{
struct memory_type *mtp;
LIST_FOREACH(mtp, list, mt_list) {
if ((mtp->mt_allocator == allocator ||
allocator == ALLOCATOR_ANY) &&
strcmp(mtp->mt_name, name) == 0)
return (mtp);
}
return (NULL);
}
/*
* Allocate a new memory_type with the specified allocator type and name,
* then insert into the list. The structure will be zeroed.
*/
struct memory_type *
memstat_mt_allocate(struct memory_type_list *list, int allocator,
const char *name)
{
struct memory_type *mtp;
mtp = malloc(sizeof(*mtp));
if (mtp == NULL)
return (NULL);
bzero(mtp, sizeof(*mtp));
mtp->mt_allocator = allocator;
strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
LIST_INSERT_HEAD(list, mtp, mt_list);
return (mtp);
}
/*
* Reset any libmemstat(3)-owned statistics in a memory_type record so that
* it can be reused without incremental addition problems. Caller-owned
* memory is left "as-is", and must be updated by the caller if desired.
*/
void
memstat_mt_reset_stats(struct memory_type *mtp)
{
int i;
mtp->mt_countlimit = 0;
mtp->mt_byteslimit = 0;
mtp->mt_sizemask = 0;
mtp->mt_size = 0;
mtp->mt_memalloced = 0;
mtp->mt_memfreed = 0;
mtp->mt_numallocs = 0;
mtp->mt_numfrees = 0;
mtp->mt_bytes = 0;
mtp->mt_count = 0;
mtp->mt_free = 0;
mtp->mt_failures = 0;
mtp->mt_zonefree = 0;
for (i = 0; i < MEMSTAT_MAXCPU; i++) {
mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
mtp->mt_percpu_cache[i].mtp_free = 0;
}
}
/*
* Accessor methods for struct memory_type. Avoids encoding the structure
* ABI into the application.
*/
const char *
memstat_get_name(const struct memory_type *mtp)
{
return (mtp->mt_name);
}
int
memstat_get_allocator(const struct memory_type *mtp)
{
return (mtp->mt_allocator);
}
uint64_t
memstat_get_countlimit(const struct memory_type *mtp)
{
return (mtp->mt_countlimit);
}
uint64_t
memstat_get_byteslimit(const struct memory_type *mtp)
{
return (mtp->mt_byteslimit);
}
uint64_t
memstat_get_sizemask(const struct memory_type *mtp)
{
return (mtp->mt_sizemask);
}
uint64_t
memstat_get_size(const struct memory_type *mtp)
{
return (mtp->mt_size);
}
uint64_t
memstat_get_memalloced(const struct memory_type *mtp)
{
return (mtp->mt_memalloced);
}
uint64_t
memstat_get_memfreed(const struct memory_type *mtp)
{
return (mtp->mt_memfreed);
}
uint64_t
memstat_get_numallocs(const struct memory_type *mtp)
{
return (mtp->mt_numallocs);
}
uint64_t
memstat_get_numfrees(const struct memory_type *mtp)
{
return (mtp->mt_numfrees);
}
uint64_t
memstat_get_bytes(const struct memory_type *mtp)
{
return (mtp->mt_bytes);
}
uint64_t
memstat_get_count(const struct memory_type *mtp)
{
return (mtp->mt_count);
}
uint64_t
memstat_get_free(const struct memory_type *mtp)
{
return (mtp->mt_free);
}
uint64_t
memstat_get_failures(const struct memory_type *mtp)
{
return (mtp->mt_failures);
}
void *
memstat_get_caller_pointer(const struct memory_type *mtp, int index)
{
return (mtp->mt_caller_pointer[index]);
}
void
memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
{
mtp->mt_caller_pointer[index] = value;
}
uint64_t
memstat_get_caller_uint64(const struct memory_type *mtp, int index)
{
return (mtp->mt_caller_uint64[index]);
}
void
memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
{
mtp->mt_caller_uint64[index] = value;
}
uint64_t
memstat_get_zonefree(const struct memory_type *mtp)
{
return (mtp->mt_zonefree);
}
uint64_t
memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
}
uint64_t
memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
}
uint64_t
memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
}
uint64_t
memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
}
uint64_t
memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
}
void *
memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
int index)
{
return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
}
void
memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
int index, void *value)
{
mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
}
uint64_t
memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
int index)
{
return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
}
void
memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
uint64_t value)
{
mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
}
uint64_t
memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_cache[cpu].mtp_free);
}

lib/libmemstat/memstat.h

@ -0,0 +1,134 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEMSTAT_H_
#define _MEMSTAT_H_
/*
* Number of CPU slots in library-internal data structures. This should be
* at least the value of MAXCPU from param.h.
*/
#define MEMSTAT_MAXCPU 16
/*
* libmemstat(3) is able to extract memory data from different allocators;
* when it does so, it tags which allocator it got the data from so that
* consumers can determine which fields are usable, as the data returned
* varies somewhat.
*/
#define ALLOCATOR_UNKNOWN 0
#define ALLOCATOR_MALLOC 1
#define ALLOCATOR_UMA 2
#define ALLOCATOR_ANY 255
/*
* Library maximum type name. Should be max(set of name maximums over
* various allocators).
*/
#define MEMTYPE_MAXNAME 32
/*
* Forward declare struct memory_type, which holds per-type properties and
* statistics. This is an opaque type, to be frobbed only from within the
* library, in order to avoid building ABI assumptions into the application.
* Accessor methods should be used to get and sometimes set the fields from
* consumers of the library.
*/
struct memory_type;
/*
* struct memory_type_list is the head of a list of memory types and
* statistics.
*/
struct memory_type_list;
/*
* Functions for managing memory type and statistics data.
*/
struct memory_type_list *memstat_mtl_alloc(void);
struct memory_type *memstat_mtl_first(struct memory_type_list *list);
struct memory_type *memstat_mtl_next(struct memory_type *mtp);
struct memory_type *memstat_mtl_find(struct memory_type_list *list,
int allocator, const char *name);
void memstat_mtl_free(struct memory_type_list *list);
/*
* Functions to retrieve data from a live kernel using sysctl.
*/
int memstat_sysctl_all(struct memory_type_list *list, int flags);
int memstat_sysctl_malloc(struct memory_type_list *list, int flags);
int memstat_sysctl_uma(struct memory_type_list *list, int flags);
/*
* Accessor methods for struct memory_type.
*/
const char *memstat_get_name(const struct memory_type *mtp);
int memstat_get_allocator(const struct memory_type *mtp);
uint64_t memstat_get_countlimit(const struct memory_type *mtp);
uint64_t memstat_get_byteslimit(const struct memory_type *mtp);
uint64_t memstat_get_sizemask(const struct memory_type *mtp);
uint64_t memstat_get_size(const struct memory_type *mtp);
uint64_t memstat_get_memalloced(const struct memory_type *mtp);
uint64_t memstat_get_memfreed(const struct memory_type *mtp);
uint64_t memstat_get_numallocs(const struct memory_type *mtp);
uint64_t memstat_get_numfrees(const struct memory_type *mtp);
uint64_t memstat_get_bytes(const struct memory_type *mtp);
uint64_t memstat_get_count(const struct memory_type *mtp);
uint64_t memstat_get_free(const struct memory_type *mtp);
uint64_t memstat_get_failures(const struct memory_type *mtp);
void *memstat_get_caller_pointer(const struct memory_type *mtp,
int index);
void memstat_set_caller_pointer(struct memory_type *mtp,
int index, void *value);
uint64_t memstat_get_caller_uint64(const struct memory_type *mtp,
int index);
void memstat_set_caller_uint64(struct memory_type *mtp, int index,
uint64_t value);
uint64_t memstat_get_zonefree(const struct memory_type *mtp);
uint64_t memstat_get_percpu_memalloced(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_memfreed(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_numallocs(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_numfrees(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_sizemask(const struct memory_type *mtp,
int cpu);
void *memstat_get_percpu_caller_pointer(
const struct memory_type *mtp, int cpu, int index);
void memstat_set_percpu_caller_pointer(struct memory_type *mtp,
int cpu, int index, void *value);
uint64_t memstat_get_percpu_caller_uint64(
const struct memory_type *mtp, int cpu, int index);
void memstat_set_percpu_caller_uint64(struct memory_type *mtp,
int cpu, int index, uint64_t value);
uint64_t memstat_get_percpu_free(const struct memory_type *mtp,
int cpu);
#endif /* !_MEMSTAT_H_ */

lib/libmemstat/memstat_all.c

@ -0,0 +1,47 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/queue.h>
#include "memstat.h"
/*
* Query all available memory allocator sources. Currently this consists of
* malloc(9) and UMA(9).
*/
int
memstat_sysctl_all(struct memory_type_list *mtlp, int flags)
{
if (memstat_sysctl_malloc(mtlp, flags) < 0)
return (-1);
if (memstat_sysctl_uma(mtlp, flags) < 0)
return (-1);
return (0);
}

lib/libmemstat/memstat_internal.h

@ -0,0 +1,124 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEMSTAT_INTERNAL_H_
#define _MEMSTAT_INTERNAL_H_
/*
* memstat maintains its own internal notion of statistics on each memory
* type, common across UMA and kernel malloc. Some fields are straight from
* the allocator statistics, others are derived when extracted from the
* kernel. A struct memory_type will describe each type supported by an
* allocator. memory_type structures can be chained into lists.
*/
struct memory_type {
/*
* Static properties of type.
*/
int mt_allocator; /* malloc(9), uma(9), etc. */
char mt_name[MEMTYPE_MAXNAME]; /* name of memory type. */
/*
* (Relatively) static zone settings, that don't uniquely identify
* the zone, but also don't change much.
*/
uint64_t mt_countlimit; /* 0, or maximum allocations. */
uint64_t mt_byteslimit; /* 0, or maximum bytes. */
uint64_t mt_sizemask; /* malloc: allocated size bitmask. */
uint64_t mt_size; /* uma: size of objects. */
/*
* Zone or type information that includes all caches and any central
* zone state. Depending on the allocator, this may be synthesized
* from several sources, or directly measured.
*/
uint64_t mt_memalloced; /* Bytes allocated over life time. */
uint64_t mt_memfreed; /* Bytes freed over life time. */
uint64_t mt_numallocs; /* Allocations over life time. */
uint64_t mt_numfrees; /* Frees over life time. */
uint64_t mt_bytes; /* Bytes currently allocated. */
uint64_t mt_count; /* Number of current allocations. */
uint64_t mt_free; /* Number of cached free items. */
uint64_t mt_failures; /* Number of allocation failures. */
uint64_t _mt_spare_uint64[4]; /* Spare. */
/*
* Caller-owned memory.
*/
void *mt_caller_pointer[4]; /* Caller-owned pointers. */
uint64_t mt_caller_uint64[4]; /* Caller-owned longs. */
/*
* For allocators making use of per-CPU caches, we also provide raw
* statistics from the central allocator and each per-CPU cache,
* which (combined) sometimes make up the above general statistics.
*
* First, central zone/type state, all numbers excluding any items
* cached in per-CPU caches.
*
* XXXRW: Might be desirable to separately expose allocation stats
* from zone, which should (combined with per-cpu) add up to the
* global stats above.
*/
uint64_t mt_zonefree; /* Free items in zone. */
uint64_t _mt_spare_uint642[4]; /* Spare. */
/*
* Per-CPU measurements fall into two categories: per-CPU allocation,
* and per-CPU cache state.
*/
struct {
uint64_t mtp_memalloced;/* Per-CPU mt_memalloced. */
uint64_t mtp_memfreed; /* Per-CPU mt_memfreed. */
uint64_t mtp_numallocs; /* Per-CPU mt_numallocs. */
uint64_t mtp_numfrees; /* Per-CPU mt_numfrees. */
uint64_t mtp_sizemask; /* Per-CPU mt_sizemask. */
void *mtp_caller_pointer[2]; /* Caller data. */
uint64_t mtp_caller_uint64[2]; /* Caller data. */
uint64_t _mtp_spare_uint64[3]; /* Per-CPU spare. */
} mt_percpu_alloc[MEMSTAT_MAXCPU];
struct {
uint64_t mtp_free; /* Per-CPU cache free items. */
uint64_t _mtp_spare_uint64[3]; /* Per-CPU spare. */
} mt_percpu_cache[MEMSTAT_MAXCPU];
LIST_ENTRY(memory_type) mt_list; /* List of types. */
};
/*
* Description of struct memory_type_list is in memstat.h.
*/
LIST_HEAD(memory_type_list, memory_type);
void memstat_mtl_free(struct memory_type_list *list);
struct memory_type *memstat_mt_allocate(struct memory_type_list *list,
int allocator, const char *name);
void memstat_mt_reset_stats(struct memory_type *mtp);
#endif /* !_MEMSTAT_INTERNAL_H_ */

lib/libmemstat/memstat_malloc.c

@ -0,0 +1,240 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memstat.h"
#include "memstat_internal.h"
/*
* Extract malloc(9) statistics from the running kernel, and store all memory
* type information in the passed list. For each type, check the list for an
* existing entry with the right name/allocator -- if present, update that
* entry. Otherwise, add a new entry. On error, the entire list will be
* cleared, as entries will be in an inconsistent state.
*
* To reduce the level of work for a list that starts empty, we keep around a
* hint as to whether it was empty when we began, so we can avoid searching
* the list for entries to update. Updates are O(n^2) due to searching for
* each entry before adding it.
*/
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
struct malloc_type_stream_header *mtshp;
struct malloc_type_header *mthp;
struct malloc_type_stats *mtsp;
struct memory_type *mtp;
int count, error, hint_dontsearch, i, j, maxcpus;
char *buffer, *p;
size_t size;
hint_dontsearch = LIST_EMPTY(list);
/*
* Query the number of CPUs and the number of malloc types so that we can
* guess an initial buffer size. We loop until we succeed or really
* fail. Note that the value of maxcpus we query using sysctl is not
* the version we use when processing the real data -- that is read
* from the header.
*/
retry:
size = sizeof(maxcpus);
if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
error = errno;
perror("kern.smp.maxcpus");
errno = error;
return (-1);
}
if (size != sizeof(maxcpus)) {
fprintf(stderr, "kern.smp.maxcpus: wronge size");
errno = EINVAL;
return (-1);
}
if (maxcpus > MEMSTAT_MAXCPU) {
fprintf(stderr, "kern.smp.maxcpus: too many CPUs\n");
errno = EINVAL;
return (-1);
}
size = sizeof(count);
if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
error = errno;
perror("kern.malloc_count");
errno = error;
return (-1);
}
if (size != sizeof(count)) {
fprintf(stderr, "kern.malloc_count: wronge size");
errno = EINVAL;
return (-1);
}
size = sizeof(*mtshp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
maxcpus);
buffer = malloc(size);
if (buffer == NULL) {
error = errno;
perror("malloc");
errno = error;
return (-1);
}
if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
/*
* XXXRW: ENOMEM is an ambiguous return, we should bound the
* number of loops, perhaps.
*/
if (errno == ENOMEM) {
free(buffer);
goto retry;
}
error = errno;
free(buffer);
perror("kern.malloc_stats");
errno = error;
return (-1);
}
if (size == 0) {
free(buffer);
return (0);
}
if (size < sizeof(*mtshp)) {
fprintf(stderr, "sysctl_malloc: invalid malloc header");
free(buffer);
errno = EINVAL;
return (-1);
}
p = buffer;
mtshp = (struct malloc_type_stream_header *)p;
p += sizeof(*mtshp);
if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
fprintf(stderr, "sysctl_malloc: unknown malloc version");
free(buffer);
errno = EINVAL;
return (-1);
}
if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
fprintf(stderr, "sysctl_malloc: too many CPUs");
free(buffer);
errno = EINVAL;
return (-1);
}
/*
* For the remainder of this function, we are quite trusting about
* the layout of structures and sizes, since we've determined we have
* a matching version and acceptable CPU count.
*/
maxcpus = mtshp->mtsh_maxcpus;
count = mtshp->mtsh_count;
for (i = 0; i < count; i++) {
mthp = (struct malloc_type_header *)p;
p += sizeof(*mthp);
if (hint_dontsearch == 0) {
mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
mthp->mth_name);
/*
* Reset the statistics on a reused node.
*/
if (mtp != NULL)
memstat_mt_reset_stats(mtp);
} else
mtp = NULL;
if (mtp == NULL)
mtp = memstat_mt_allocate(list, ALLOCATOR_MALLOC,
mthp->mth_name);
if (mtp == NULL) {
memstat_mtl_free(list);
free(buffer);
errno = ENOMEM;
perror("malloc");
errno = ENOMEM;
return (-1);
}
/*
* Reset the statistics on a current node.
*/
memstat_mt_reset_stats(mtp);
for (j = 0; j < maxcpus; j++) {
mtsp = (struct malloc_type_stats *)p;
p += sizeof(*mtsp);
/*
* Summarize raw statistics across CPUs into coalesced
* statistics.
*/
mtp->mt_memalloced += mtsp->mts_memalloced;
mtp->mt_memfreed += mtsp->mts_memfreed;
mtp->mt_numallocs += mtsp->mts_numallocs;
mtp->mt_numfrees += mtsp->mts_numfrees;
mtp->mt_sizemask |= mtsp->mts_size;
/*
* Copies of per-CPU statistics.
*/
mtp->mt_percpu_alloc[j].mtp_memalloced =
mtsp->mts_memalloced;
mtp->mt_percpu_alloc[j].mtp_memfreed =
mtsp->mts_memfreed;
mtp->mt_percpu_alloc[j].mtp_numallocs =
mtsp->mts_numallocs;
mtp->mt_percpu_alloc[j].mtp_numfrees =
mtsp->mts_numfrees;
mtp->mt_percpu_alloc[j].mtp_sizemask =
mtsp->mts_size;
}
/*
* Derived cross-CPU statistics.
*/
mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
}
free(buffer);
return (0);
}

lib/libmemstat/memstat_uma.c

@ -0,0 +1,230 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memstat.h"
#include "memstat_internal.h"
/*
* Extract uma(9) statistics from the running kernel, and store all memory
* type information in the passed list. For each type, check the list for an
* existing entry with the right name/allocator -- if present, update that
* entry. Otherwise, add a new entry. On error, the entire list will be
* cleared, as entries will be in an inconsistent state.
*
* To reduce the level of work for a list that starts empty, we keep around a
* hint as to whether it was empty when we began, so we can avoid searching
* the list for entries to update. Updates are O(n^2) due to searching for
* each entry before adding it.
*/
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
struct uma_stream_header *ushp;
struct uma_type_header *uthp;
struct uma_percpu_stat *upsp;
struct memory_type *mtp;
int count, error, hint_dontsearch, i, j, maxcpus;
char *buffer, *p;
size_t size;
hint_dontsearch = LIST_EMPTY(list);
/*
* Query the number of CPUs and the number of zones so that we can
* guess an initial buffer size. We loop until we succeed or really
* fail. Note that the value of maxcpus we query using sysctl is not
* the version we use when processing the real data -- that is read
* from the header.
*/
retry:
size = sizeof(maxcpus);
if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
error = errno;
perror("kern.smp.maxcpus");
errno = error;
return (-1);
}
if (size != sizeof(maxcpus)) {
fprintf(stderr, "kern.smp.maxcpus: wronge size");
errno = EINVAL;
return (-1);
}
if (maxcpus > MEMSTAT_MAXCPU) {
fprintf(stderr, "kern.smp.maxcpus: too many CPUs\n");
errno = EINVAL;
return (-1);
}
size = sizeof(count);
if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
error = errno;
perror("vm.zone_count");
errno = error;
return (-1);
}
if (size != sizeof(count)) {
fprintf(stderr, "vm.zone_count: wronge size");
errno = EINVAL;
return (-1);
}
size = sizeof(*ushp) + count * (sizeof(*uthp) + sizeof(*upsp) *
maxcpus);
buffer = malloc(size);
if (buffer == NULL) {
error = errno;
perror("malloc");
errno = error;
return (-1);
}
if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
/*
* XXXRW: ENOMEM is an ambiguous return, we should bound the
* number of loops, perhaps.
*/
if (errno == ENOMEM) {
free(buffer);
goto retry;
}
error = errno;
free(buffer);
perror("vm.zone_stats");
errno = error;
return (-1);
}
if (size == 0) {
free(buffer);
return (0);
}
if (size < sizeof(*ushp)) {
fprintf(stderr, "sysctl_uma: invalid malloc header");
free(buffer);
errno = EINVAL;
return (-1);
}
p = buffer;
ushp = (struct uma_stream_header *)p;
p += sizeof(*ushp);
if (ushp->ush_version != UMA_STREAM_VERSION) {
fprintf(stderr, "sysctl_uma: unknown malloc version");
free(buffer);
errno = EINVAL;
return (-1);
}
if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) {
fprintf(stderr, "sysctl_uma: too many CPUs");
free(buffer);
errno = EINVAL;
return (-1);
}
/*
* For the remainder of this function, we are quite trusting about
* the layout of structures and sizes, since we've determined we have
* a matching version and acceptable CPU count.
*/
maxcpus = ushp->ush_maxcpus;
count = ushp->ush_count;
for (i = 0; i < count; i++) {
uthp = (struct uma_type_header *)p;
p += sizeof(*uthp);
if (hint_dontsearch == 0) {
mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
uthp->uth_name);
/*
* Reset the statistics on a reused node.
*/
if (mtp != NULL)
memstat_mt_reset_stats(mtp);
} else
mtp = NULL;
if (mtp == NULL)
mtp = memstat_mt_allocate(list, ALLOCATOR_UMA,
uthp->uth_name);
if (mtp == NULL) {
memstat_mtl_free(list);
free(buffer);
errno = ENOMEM;
perror("malloc");
errno = ENOMEM;
return (-1);
}
/*
* Reset the statistics on a current node.
*/
memstat_mt_reset_stats(mtp);
for (j = 0; j < maxcpus; j++) {
upsp = (struct uma_percpu_stat *)p;
p += sizeof(*upsp);
mtp->mt_percpu_cache[j].mtp_free =
upsp->ups_cache_free;
mtp->mt_free += upsp->ups_cache_free;
mtp->mt_numallocs += upsp->ups_allocs;
mtp->mt_numfrees += upsp->ups_frees;
}
mtp->mt_size = uthp->uth_size;
mtp->mt_memalloced = uthp->uth_allocs * uthp->uth_size;
mtp->mt_memfreed = uthp->uth_frees * uthp->uth_size;
mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
mtp->mt_countlimit = uthp->uth_limit;
mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;
mtp->mt_numallocs = uthp->uth_allocs;
mtp->mt_numfrees = uthp->uth_frees;
mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
mtp->mt_zonefree = uthp->uth_zone_free + uthp->uth_keg_free;
mtp->mt_free += mtp->mt_zonefree;
}
free(buffer);
return (0);
}