This commit was manufactured by cvs2svn to create branch 'RELENG_6'.

Author: cvs2svn
Date:   2005-07-24 01:41:48 +00:00
Parent: db6e7961b9
Commit: 35319bf6b8

7 changed files with 1622 additions and 0 deletions

lib/libmemstat/Makefile (new file)

@@ -0,0 +1,25 @@
# $FreeBSD$
WARNS?= 3
LIB= memstat
SHLIB_MAJOR= 1
SRCS+= memstat.c
SRCS+= memstat_all.c
SRCS+= memstat_malloc.c
SRCS+= memstat_uma.c
INCS= memstat.h
MAN= libmemstat.3
MLINKS+= libmemstat.3 memstat_mtl_alloc.3
MLINKS+= libmemstat.3 memstat_mtl_first.3
MLINKS+= libmemstat.3 memstat_mtl_next.3
MLINKS+= libmemstat.3 memstat_mtl_find.3
MLINKS+= libmemstat.3 memstat_mtl_free.3
MLINKS+= libmemstat.3 memstat_mtl_geterror.3
MLINKS+= libmemstat.3 memstat_strerror.3
MLINKS+= libmemstat.3 memstat_sysctl_all.3
MLINKS+= libmemstat.3 memstat_sysctl_malloc.3
MLINKS+= libmemstat.3 memstat_sysctl_uma.3
.include <bsd.lib.mk>

lib/libmemstat/libmemstat.3 (new file)

@@ -0,0 +1,453 @@
.\" Copyright (c) 2005 Robert N. M. Watson
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd June 27, 2005
.Dt LIBMEMSTAT 3
.Os
.Sh NAME
.Nm libmemstat
.Nd "library interface to retrieve kernel memory allocator statistics"
.Sh LIBRARY
.Lb libmemstat
.Sh SYNOPSIS
.In sys/types.h
.In memstat.h
.Ss General Functions
.Ft const char *
.Fn memstat_strerror "int error"
.Ss Memory Type List Management Functions
.Ft struct memory_type_list *
.Fn memstat_mtl_alloc "void"
.Ft struct memory_type *
.Fn memstat_mtl_first "struct memory_type_list *list"
.Ft struct memory_type *
.Fn memstat_mtl_next "struct memory_type *mtp"
.Ft struct memory_type *
.Fn memstat_mtl_find "struct memory_type_list *list" "int allocator" "const char *name"
.Ft void
.Fn memstat_mtl_free "struct memory_type_list *list"
.Ft int
.Fn memstat_mtl_geterror "struct memory_type_list *list"
.Ss Allocator Query Functions
.Ft int
.Fn memstat_sysctl_all "struct memory_type_list *list" "int flags"
.Ft int
.Fn memstat_sysctl_malloc "struct memory_type_list *list" "int flags"
.Ft int
.Fn memstat_sysctl_uma "struct memory_type_list *list" "int flags"
.Ss Memory Type Accessor Methods
.Ft const char *
.Fn memstat_get_name "const struct memory_type *mtp"
.Ft int
.Fn memstat_get_allocator "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_countlimit "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_byteslimit "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_sizemask "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_size "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_memalloced "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_memfreed "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_numallocs "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_numfrees "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_bytes "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_count "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_free "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_failures "const struct memory_type *mtp"
.Ft void *
.Fn memstat_get_caller_pointer "const struct memory_type *mtp" "int index"
.Ft void
.Fn memstat_set_caller_pointer "struct memory_type *mtp" "int index" "void *value"
.Ft uint64_t
.Fn memstat_get_caller_uint64 "const struct memory_type *mtp" "int index"
.Ft void
.Fn memstat_set_caller_uint64 "struct memory_type *mtp" "int index" "uint64_t value"
.Ft uint64_t
.Fn memstat_get_zonefree "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_kegfree "const struct memory_type *mtp"
.Ft uint64_t
.Fn memstat_get_percpu_memalloced "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_memfreed "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_numallocs "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_numfrees "const struct memory_type *mtp" "int cpu"
.Ft uint64_t
.Fn memstat_get_percpu_sizemask "const struct memory_type *mtp" "int cpu"
.Ft void *
.Fn memstat_get_percpu_caller_pointer "const struct memory_type *mtp" "int cpu" "int index"
.Ft void
.Fn memstat_set_percpu_caller_pointer "struct memory_type *mtp" "int cpu" "int index" "void *value"
.Ft uint64_t
.Fn memstat_get_percpu_caller_uint64 "const struct memory_type *mtp" "int cpu" "int index"
.Ft void
.Fn memstat_set_percpu_caller_uint64 "struct memory_type *mtp" "int cpu" "int index" "uint64_t value"
.Ft uint64_t
.Fn memstat_get_percpu_free "const struct memory_type *mtp" "int cpu"
.Sh DESCRIPTION
.Nm
provides an interface to retrieve kernel memory allocator statistics, for
the purposes of debugging and system monitoring, insulating applications
from implementation details of the allocators, and allowing a tool to
transparently support multiple allocators.
.Nm
supports both retrieving a single statistics snapshot and incrementally
updating statistics for long-term monitoring.
.Pp
.Nm
describes each memory type using a
.Vt struct memory_type ,
an opaque structure accessed by the application using accessor functions
provided by the library.
.Nm
returns and updates chains of
.Vt struct memory_type
via a
.Vt struct memory_type_list ,
which will be allocated by calling
.Fn memstat_mtl_alloc ,
and freed on completion using
.Fn memstat_mtl_free .
Lists of memory types are populated via calls that query the kernel for
statistics information; currently:
.Fn memstat_sysctl_all ,
.Fn memstat_sysctl_uma ,
and
.Fn memstat_sysctl_malloc .
Repeated calls will incrementally update the list of memory types, permitting
tracking over time without recreating all list state.
If an error is detected during a query call, error condition information may
be retrieved using
.Fn memstat_mtl_geterror ,
and converted to a user-readable string using
.Fn memstat_strerror .
.Pp
Freeing the list will free all memory type data in the list, and so
invalidates any outstanding pointers to entries in the list.
.Vt struct memory_type
entries in the list may be iterated over using
.Fn memstat_mtl_first
and
.Fn memstat_mtl_next ,
which respectively return the first entry in a list, and the next entry in a
list.
Specific entries may be looked up using
.Fn memstat_mtl_find ,
which will return a pointer to the first entry matching the passed
parameters.
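.Pp
For example (a sketch, assuming
.Va mtlp
is a populated list and with error handling omitted), all entries might be
walked and printed by name as follows:
.Bd -literal -offset indent
for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
    mtp = memstat_mtl_next(mtp))
        printf("%s\\n", memstat_get_name(mtp));
.Ed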
.Pp
A series of accessor methods is provided to access fields of the structure,
including retrieving statistics and properties, as well as setting
caller-owned fields.
Direct application access to the data structure fields is not supported.
.Ss Library memory_type Fields
Each
.Vt struct memory_type
holds a description of the memory type, including its name and the allocator
it is managed by, as well as current statistics on use.
Some statistics are directly measured, others are derived from directly
measured statistics.
Certain high level statistics are present across all available allocators,
such as the number of allocation and free operations; other measurements,
such as the quantity of free items in per-CPU caches, or the administrative
limit on the number of allocations, are available only for specific
allocators.
.Ss Caller memory_type Fields
.Vt struct memory_type
includes fields to allow the application to store data, in the form of
pointers and 64-bit integers, with memory types.
For example, the application author might make use of one of the caller
pointers to reference a more complex data structure tracking long-term
behavior of the memory type, or a window system object that is used to
render the state of the memory type.
General and per-CPU storage is provided with each
.Vt struct memory_type
in the form of an array of pointers and integers.
The array entries are accessed via the
.Fa index
argument to the get and set accessor methods.
Possible values of
.Fa index
range from
.Dv 0
to one less than
.Dv MEMSTAT_MAXCALLER .
.Pp
Caller-owned fields are initialized to
.Dv 0
or
.Dv NULL
when a new
.Vt struct memory_type
is allocated and attached to a memory type list; these fields retain their
values across queries that update library-owned fields.
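.Pp
For example (a sketch; the choice of slot 0 is arbitrary), a monitoring
application might record the allocation count observed during the previous
query in a caller-owned integer, and report the change on each update:
.Bd -literal -offset indent
uint64_t now, last;

now = memstat_get_numallocs(mtp);
last = memstat_get_caller_uint64(mtp, 0);
printf("%s: %llu allocations since last poll\\n",
    memstat_get_name(mtp), (unsigned long long)(now - last));
memstat_set_caller_uint64(mtp, 0, now);
.Ed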
.Ss Allocator Types
Currently,
.Nm
supports two kernel allocators:
.Dv ALLOCATOR_UMA
for
.Xr uma 9 ,
and
.Dv ALLOCATOR_MALLOC
for
.Xr malloc 9 .
These values may be passed to
.Fn memstat_mtl_find ,
and will be returned by
.Fn memstat_get_allocator .
Two additional constants in the allocator name space are defined:
.Dv ALLOCATOR_UNKNOWN ,
which will only be returned as a result of a library error, and
.Dv ALLOCATOR_ANY ,
which can be used to specify that returning types matching any allocator is
permissible from
.Fn memstat_mtl_find .
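.Pp
For example (a sketch), a lookup that does not depend on which allocator
backs a type, followed by a check of the allocator actually found, might be
written:
.Bd -literal -offset indent
mtp = memstat_mtl_find(mtlp, ALLOCATOR_ANY, "Mbuf");
if (mtp != NULL && memstat_get_allocator(mtp) == ALLOCATOR_UMA)
        printf("Mbuf is managed by uma(9)\\n");
.Ed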
.Ss Access Method List
The following accessor methods are defined, only some of which will be valid
for any given memory type:
.Pp
.Bl -tag -width "memstat_get_name" -compact -offset wee
.It memstat_get_name
Return a pointer to the name of the memory type.
Memory for the name is owned by
.Nm
and will remain valid until the list is freed using
.Fn memstat_mtl_free .
Note that names will be unique with respect to a single allocator, but that
the same name might be used by different memory types owned by different
memory allocators.
.It memstat_get_allocator
Return an integer identifier for the memory allocator that owns the memory
type.
.It memstat_get_countlimit
If the memory type has an administrative limit on the number of simultaneous
allocations, return it.
.It memstat_get_byteslimit
If the memory type has an administrative limit on the number of bytes of
memory that may be simultaneously allocated for the memory type, return it.
.It memstat_get_sizemask
If the memory type supports variable allocation sizes, return a bitmask of
sizes allocated for the memory type.
.It memstat_get_size
If the memory type supports a fixed allocation size, return that size.
.It memstat_get_memalloced
Return the total number of bytes allocated for the memory type over its
lifetime.
.It memstat_get_memfreed
Return the total number of bytes freed for the memory type over its lifetime.
.It memstat_get_numallocs
Return the total number of allocations for the memory type over its lifetime.
.It memstat_get_numfrees
Return the total number of frees for the memory type over its lifetime.
.It memstat_get_bytes
Return the current number of bytes allocated to the memory type.
.It memstat_get_count
Return the current number of allocations for the memory type.
.It memstat_get_free
If the memory allocator supports a cache, return the number of items in the
cache.
.It memstat_get_failures
If the memory allocator and type permit allocation failures, return the
number of allocation failures measured.
.It memstat_get_caller_pointer
Return a caller-owned pointer for the memory type.
.It memstat_set_caller_pointer
Set a caller-owned pointer for the memory type.
.It memstat_get_caller_uint64
Return a caller-owned integer for the memory type.
.It memstat_set_caller_uint64
Set a caller-owned integer for the memory type.
.It memstat_get_zonefree
If the memory allocator supports a multi-level allocation structure, return
the number of cached items in the zone.
These items will be in a fully constructed state available for immediate
use.
.It memstat_get_kegfree
If the memory allocator supports a multi-level allocation structure, return
the number of cached items in the keg.
These items may be in a partially constructed state, and may require further
processing before they can be made available for use.
.It memstat_get_percpu_memalloced
If the memory allocator supports per-CPU statistics, return the number of
bytes of memory allocated for the memory type on the CPU over its lifetime.
.It memstat_get_percpu_memfreed
If the memory allocator supports per-CPU statistics, return the number of
bytes of memory freed from the memory type on the CPU over its lifetime.
.It memstat_get_percpu_numallocs
If the memory allocator supports per-CPU statistics, return the number of
allocations for the memory type on the CPU over its lifetime.
.It memstat_get_percpu_numfrees
If the memory allocator supports per-CPU statistics, return the number of
frees for the memory type on the CPU over its lifetime.
.It memstat_get_percpu_sizemask
If the memory allocator supports variable size memory allocation and per-CPU
statistics, return the size bitmask for the memory type on the CPU.
.It memstat_get_percpu_caller_pointer
Return a caller-owned per-CPU pointer for the memory type.
.It memstat_set_percpu_caller_pointer
Set a caller-owned per-CPU pointer for the memory type.
.It memstat_get_percpu_caller_uint64
Return a caller-owned per-CPU integer for the memory type.
.It memstat_set_percpu_caller_uint64
Set a caller-owned per-CPU integer for the memory type.
.It memstat_get_percpu_free
If the memory allocator supports a per-CPU cache, return the number of free
items in the per-CPU cache of the designated CPU.
.El
.Sh RETURN VALUES
.Nm
functions fall into three categories: functions returning a pointer to an
object, functions returning an integer return value, and functions
implementing accessor methods returning data from a
.Vt struct memory_type .
.Pp
Functions returning a pointer to an object will generally return
.Dv NULL
on failure.
.Fn memstat_mtl_alloc
will, on failure, set
.Va errno
to the value
.Er ENOMEM .
Functions
.Fn memstat_mtl_first ,
.Fn memstat_mtl_next ,
and
.Fn memstat_mtl_find
will return
.Dv NULL
when there is no entry or match in the list; however, this is not considered
a failure mode and no error value is available.
.Pp
Functions returning an integer success value will return
.Dv 0
on success, or
.Dv -1
on failure.
If a failure is returned, the list error access method,
.Fn memstat_mtl_geterror ,
may be used to retrieve the error state.
The string representation of the error may be retrieved using
.Fn memstat_strerror .
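.Pp
For example (a sketch, assuming
.Va mtlp
is a previously allocated list), a failed query might be reported as
follows:
.Bd -literal -offset indent
if (memstat_sysctl_all(mtlp, 0) < 0)
        errx(-1, "memstat_sysctl_all: %s",
            memstat_strerror(memstat_mtl_geterror(mtlp)));
.Ed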
Possible error values are:
.Pp
.Bl -tag -width "MEMSTAT_ERROR_TOOMANYCPUS" -compact -offset wee
.It Dv MEMSTAT_ERROR_UNDEFINED
Undefined error. Occurs if
.Fn memstat_mtl_geterror
is called on a list before an error associated with the list has occurred.
.It Dv MEMSTAT_ERROR_NOMEMORY
Insufficient memory. Occurs if library calls to
.Xr malloc 3
fail, or if a system call to retrieve kernel statistics fails with
.Er ENOMEM .
.It Dv MEMSTAT_ERROR_VERSION
Returned if the current version of
.Nm
is unable to interpret the statistics data returned by the kernel due to an
explicit version mismatch, or to differences in data structures that cannot
be reconciled.
.It Dv MEMSTAT_ERROR_PERMISSION
Returned if a statistics source returns
.Va errno
values of
.Er EACCES
or
.Er EPERM .
.It Dv MEMSTAT_ERROR_TOOMANYCPUS
Returned if the compile-time limit on the number of CPUs in
.Nm
is lower than the number of CPUs returned by a statistics data source.
.It Dv MEMSTAT_ERROR_DATAERROR
Returned if
.Nm
is unable to interpret statistics data returned by the data source, even
though there does not appear to be a version problem.
.El
.Pp
Finally, functions returning data from a
.Vt struct memory_type
pointer are not permitted to fail, and directly return either a statistic
or pointer to a string.
.Sh EXAMPLES
Create a memory type list, query the
.Xr uma 9
memory allocator for available statistics, and print out the current number
of allocations in the
.Dv Mbuf
zone.
.Bd -literal -offset indent
struct memory_type_list *mtlp;
struct memory_type *mtp;
uint64_t mbuf_count;

mtlp = memstat_mtl_alloc();
if (mtlp == NULL)
        err(-1, "memstat_mtl_alloc");
if (memstat_sysctl_uma(mtlp, 0) < 0)
        err(-1, "memstat_sysctl_uma");
mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, "Mbuf");
if (mtp == NULL)
        errx(-1, "memstat_mtl_find: Mbuf not found");
mbuf_count = memstat_get_count(mtp);
memstat_mtl_free(mtlp);
printf("Mbufs: %llu\\n", (unsigned long long)mbuf_count);
.Ed
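.Pp
A long-running monitor might instead reuse a single list and query the
kernel periodically; repeated queries incrementally update the existing
entries, and caller-owned fields persist across updates.
A minimal sketch, with error handling omitted:
.Bd -literal -offset indent
struct memory_type_list *mtlp;
struct memory_type *mtp;

mtlp = memstat_mtl_alloc();
for (;;) {
        memstat_sysctl_uma(mtlp, 0);
        for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
            mtp = memstat_mtl_next(mtp))
                printf("%s: %llu\\n", memstat_get_name(mtp),
                    (unsigned long long)memstat_get_count(mtp));
        sleep(1);
}
.Ed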
.Sh SEE ALSO
.Xr malloc 9 ,
.Xr uma 9
.Sh HISTORY
The
.Nm libmemstat
library appeared in
.Fx 6.0 .
.Sh AUTHORS
The kernel memory allocator changes necessary to support a general purpose
monitoring library, along with the library, were written by
.An Robert Watson Aq rwatson@FreeBSD.org
.Sh BUGS
.Nm
cannot yet extract statistics from kernel core dumps, although this should be
straightforward to implement.
.Pp
Once a memory type is present on a memory type list, it will not be removed
even if the kernel no longer presents information on the type via its
monitoring interfaces.
In order to flush removed memory types, it is necessary to free the entire
list and allocate a new one.

lib/libmemstat/memstat.c (new file)

@@ -0,0 +1,408 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memstat.h"
#include "memstat_internal.h"
const char *
memstat_strerror(int error)
{
switch (error) {
case MEMSTAT_ERROR_NOMEMORY:
return ("Cannot allocate memory");
case MEMSTAT_ERROR_VERSION:
return ("Version mismatch");
case MEMSTAT_ERROR_PERMISSION:
return ("Permission denied");
case MEMSTAT_ERROR_TOOMANYCPUS:
return ("Too many CPUs");
case MEMSTAT_ERROR_DATAERROR:
return ("Data format error");
case MEMSTAT_ERROR_UNDEFINED:
default:
return ("Unknown error");
}
}
struct memory_type_list *
memstat_mtl_alloc(void)
{
struct memory_type_list *mtlp;
mtlp = malloc(sizeof(*mtlp));
if (mtlp == NULL)
return (NULL);
LIST_INIT(&mtlp->mtl_list);
mtlp->mtl_error = MEMSTAT_ERROR_UNDEFINED;
return (mtlp);
}
struct memory_type *
memstat_mtl_first(struct memory_type_list *list)
{
return (LIST_FIRST(&list->mtl_list));
}
struct memory_type *
memstat_mtl_next(struct memory_type *mtp)
{
return (LIST_NEXT(mtp, mt_list));
}
void
memstat_mtl_free(struct memory_type_list *list)
{
struct memory_type *mtp;
while ((mtp = LIST_FIRST(&list->mtl_list))) {
LIST_REMOVE(mtp, mt_list);
free(mtp);
}
free(list);
}
int
memstat_mtl_geterror(struct memory_type_list *list)
{
return (list->mtl_error);
}
/*
* Look for an existing memory_type entry in a memory_type list, based on the
* allocator and name of the type. If not found, return NULL. No errno or
* memstat error.
*/
struct memory_type *
memstat_mtl_find(struct memory_type_list *list, int allocator,
const char *name)
{
struct memory_type *mtp;
LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
if ((mtp->mt_allocator == allocator ||
allocator == ALLOCATOR_ANY) &&
strcmp(mtp->mt_name, name) == 0)
return (mtp);
}
return (NULL);
}
/*
* Allocate a new memory_type with the specified allocator type and name,
* then insert into the list. The structure will be zero'd.
*
* libmemstat(3) internal function.
*/
struct memory_type *
_memstat_mt_allocate(struct memory_type_list *list, int allocator,
const char *name)
{
struct memory_type *mtp;
mtp = malloc(sizeof(*mtp));
if (mtp == NULL)
return (NULL);
bzero(mtp, sizeof(*mtp));
mtp->mt_allocator = allocator;
strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
return (mtp);
}
/*
* Reset any libmemstat(3)-owned statistics in a memory_type record so that
* it can be reused without incremental addition problems. Caller-owned
* memory is left "as-is", and must be updated by the caller if desired.
*
* libmemstat(3) internal function.
*/
void
_memstat_mt_reset_stats(struct memory_type *mtp)
{
int i;
mtp->mt_countlimit = 0;
mtp->mt_byteslimit = 0;
mtp->mt_sizemask = 0;
mtp->mt_size = 0;
mtp->mt_memalloced = 0;
mtp->mt_memfreed = 0;
mtp->mt_numallocs = 0;
mtp->mt_numfrees = 0;
mtp->mt_bytes = 0;
mtp->mt_count = 0;
mtp->mt_free = 0;
mtp->mt_failures = 0;
mtp->mt_zonefree = 0;
mtp->mt_kegfree = 0;
for (i = 0; i < MEMSTAT_MAXCPU; i++) {
mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
mtp->mt_percpu_cache[i].mtp_free = 0;
}
}
/*
* Accessor methods for struct memory_type. Avoids encoding the structure
* ABI into the application.
*/
const char *
memstat_get_name(const struct memory_type *mtp)
{
return (mtp->mt_name);
}
int
memstat_get_allocator(const struct memory_type *mtp)
{
return (mtp->mt_allocator);
}
uint64_t
memstat_get_countlimit(const struct memory_type *mtp)
{
return (mtp->mt_countlimit);
}
uint64_t
memstat_get_byteslimit(const struct memory_type *mtp)
{
return (mtp->mt_byteslimit);
}
uint64_t
memstat_get_sizemask(const struct memory_type *mtp)
{
return (mtp->mt_sizemask);
}
uint64_t
memstat_get_size(const struct memory_type *mtp)
{
return (mtp->mt_size);
}
uint64_t
memstat_get_memalloced(const struct memory_type *mtp)
{
return (mtp->mt_memalloced);
}
uint64_t
memstat_get_memfreed(const struct memory_type *mtp)
{
return (mtp->mt_memfreed);
}
uint64_t
memstat_get_numallocs(const struct memory_type *mtp)
{
return (mtp->mt_numallocs);
}
uint64_t
memstat_get_numfrees(const struct memory_type *mtp)
{
return (mtp->mt_numfrees);
}
uint64_t
memstat_get_bytes(const struct memory_type *mtp)
{
return (mtp->mt_bytes);
}
uint64_t
memstat_get_count(const struct memory_type *mtp)
{
return (mtp->mt_count);
}
uint64_t
memstat_get_free(const struct memory_type *mtp)
{
return (mtp->mt_free);
}
uint64_t
memstat_get_failures(const struct memory_type *mtp)
{
return (mtp->mt_failures);
}
void *
memstat_get_caller_pointer(const struct memory_type *mtp, int index)
{
return (mtp->mt_caller_pointer[index]);
}
void
memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
{
mtp->mt_caller_pointer[index] = value;
}
uint64_t
memstat_get_caller_uint64(const struct memory_type *mtp, int index)
{
return (mtp->mt_caller_uint64[index]);
}
void
memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
{
mtp->mt_caller_uint64[index] = value;
}
uint64_t
memstat_get_zonefree(const struct memory_type *mtp)
{
return (mtp->mt_zonefree);
}
uint64_t
memstat_get_kegfree(const struct memory_type *mtp)
{
return (mtp->mt_kegfree);
}
uint64_t
memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
}
uint64_t
memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
}
uint64_t
memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
}
uint64_t
memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
}
uint64_t
memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
}
void *
memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
int index)
{
return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
}
void
memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
int index, void *value)
{
mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
}
uint64_t
memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
int index)
{
return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
}
void
memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
uint64_t value)
{
mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
}
uint64_t
memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
{
return (mtp->mt_percpu_cache[cpu].mtp_free);
}

lib/libmemstat/memstat.h (new file)

@@ -0,0 +1,164 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEMSTAT_H_
#define _MEMSTAT_H_
/*
* Number of CPU slots in library-internal data structures. This should be
* at least the value of MAXCPU from param.h.
*/
#define MEMSTAT_MAXCPU 16
/*
* Number of caller data slots to maintain for each memory type. Applications
* must not use more than this number of caller-save slots, or risk
* corrupting internal libmemstat(3) data structures. A compile time check
* in the application is probably appropriate.
*/
#define MEMSTAT_MAXCALLER 16
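/*
* Illustrative sketch only: an application reserving a hypothetical
* APP_CALLER_SLOTS caller slots could enforce the limit at compile time
* with the classic negative-array-size trick, e.g.:
*
*	typedef char assert_caller_slots[
*	    (APP_CALLER_SLOTS <= MEMSTAT_MAXCALLER) ? 1 : -1];
*/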
/*
* libmemstat(3) is able to extract memory data from different allocators;
* when it does so, it tags which allocator it got the data from so that
* consumers can determine which fields are usable, as data returned varies
* some.
*/
#define ALLOCATOR_UNKNOWN 0
#define ALLOCATOR_MALLOC 1
#define ALLOCATOR_UMA 2
#define ALLOCATOR_ANY 255
/*
* Library maximum type name. Should be max(set of name maximums over
* various allocators).
*/
#define MEMTYPE_MAXNAME 32
/*
* Library error conditions, mostly from the underlying data sources. On
* failure, functions typically return (-1) or (NULL); on success, (0) or a
* valid data pointer. The error from the last operation is stored in
* struct memory_type_list, and accessed via memstat_mtl_geterror(list).
*/
#define MEMSTAT_ERROR_UNDEFINED 0 /* Initialization value. */
#define MEMSTAT_ERROR_NOMEMORY 1 /* Out of memory. */
#define MEMSTAT_ERROR_VERSION 2 /* Unsupported version. */
#define MEMSTAT_ERROR_PERMISSION 3 /* Permission denied. */
#define MEMSTAT_ERROR_TOOMANYCPUS 4 /* Too many CPUs. */
#define MEMSTAT_ERROR_DATAERROR 5 /* Error in stat data. */
/*
* Forward declare struct memory_type, which holds per-type properties and
* statistics. This is an opaque type, to be frobbed only from within the
* library, in order to avoid building ABI assumptions into the application.
* Accessor methods should be used to get and sometimes set the fields from
* consumers of the library.
*/
struct memory_type;
/*
* struct memory_type_list is the head of a list of memory types and
* statistics.
*/
struct memory_type_list;
__BEGIN_DECLS
/*
* Functions that operate without memory type or memory type list context.
*/
const char *memstat_strerror(int error);
/*
* Functions for managing memory type and statistics data.
*/
struct memory_type_list *memstat_mtl_alloc(void);
struct memory_type *memstat_mtl_first(struct memory_type_list *list);
struct memory_type *memstat_mtl_next(struct memory_type *mtp);
struct memory_type *memstat_mtl_find(struct memory_type_list *list,
int allocator, const char *name);
void memstat_mtl_free(struct memory_type_list *list);
int memstat_mtl_geterror(struct memory_type_list *list);
/*
* Functions to retrieve data from a live kernel using sysctl.
*/
int memstat_sysctl_all(struct memory_type_list *list, int flags);
int memstat_sysctl_malloc(struct memory_type_list *list, int flags);
int memstat_sysctl_uma(struct memory_type_list *list, int flags);
/*
* Accessor methods for struct memory_type.
*/
const char *memstat_get_name(const struct memory_type *mtp);
int memstat_get_allocator(const struct memory_type *mtp);
uint64_t memstat_get_countlimit(const struct memory_type *mtp);
uint64_t memstat_get_byteslimit(const struct memory_type *mtp);
uint64_t memstat_get_sizemask(const struct memory_type *mtp);
uint64_t memstat_get_size(const struct memory_type *mtp);
uint64_t memstat_get_memalloced(const struct memory_type *mtp);
uint64_t memstat_get_memfreed(const struct memory_type *mtp);
uint64_t memstat_get_numallocs(const struct memory_type *mtp);
uint64_t memstat_get_numfrees(const struct memory_type *mtp);
uint64_t memstat_get_bytes(const struct memory_type *mtp);
uint64_t memstat_get_count(const struct memory_type *mtp);
uint64_t memstat_get_free(const struct memory_type *mtp);
uint64_t memstat_get_failures(const struct memory_type *mtp);
void *memstat_get_caller_pointer(const struct memory_type *mtp,
int index);
void memstat_set_caller_pointer(struct memory_type *mtp,
int index, void *value);
uint64_t memstat_get_caller_uint64(const struct memory_type *mtp,
int index);
void memstat_set_caller_uint64(struct memory_type *mtp, int index,
uint64_t value);
uint64_t memstat_get_zonefree(const struct memory_type *mtp);
uint64_t memstat_get_kegfree(const struct memory_type *mtp);
uint64_t memstat_get_percpu_memalloced(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_memfreed(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_numallocs(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_numfrees(const struct memory_type *mtp,
int cpu);
uint64_t memstat_get_percpu_sizemask(const struct memory_type *mtp,
int cpu);
void *memstat_get_percpu_caller_pointer(
const struct memory_type *mtp, int cpu, int index);
void memstat_set_percpu_caller_pointer(struct memory_type *mtp,
int cpu, int index, void *value);
uint64_t memstat_get_percpu_caller_uint64(
const struct memory_type *mtp, int cpu, int index);
void memstat_set_percpu_caller_uint64(struct memory_type *mtp,
int cpu, int index, uint64_t value);
uint64_t memstat_get_percpu_free(const struct memory_type *mtp,
int cpu);
__END_DECLS
#endif /* !_MEMSTAT_H_ */

lib/libmemstat/memstat_internal.h (new file)

@@ -0,0 +1,123 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MEMSTAT_INTERNAL_H_
#define _MEMSTAT_INTERNAL_H_
/*
* memstat maintains its own internal notion of statistics on each memory
* type, common across UMA and kernel malloc. Some fields are straight from
* the allocator statistics, others are derived when extracted from the
* kernel. A struct memory_type will describe each type supported by an
* allocator. memory_type structures can be chained into lists.
*/
struct memory_type {
/*
* Static properties of type.
*/
int mt_allocator; /* malloc(9), uma(9), etc. */
char mt_name[MEMTYPE_MAXNAME]; /* name of memory type. */
/*
* (Relatively) static zone settings that don't uniquely identify
* the zone, but also don't change much.
*/
uint64_t mt_countlimit; /* 0, or maximum allocations. */
uint64_t mt_byteslimit; /* 0, or maximum bytes. */
uint64_t mt_sizemask; /* malloc: allocated size bitmask. */
uint64_t mt_size; /* uma: size of objects. */
/*
* Zone or type information that includes all caches and any central
* zone state. Depending on the allocator, this may be synthesized
* from several sources, or directly measured.
*/
uint64_t mt_memalloced; /* Bytes allocated over life time. */
uint64_t mt_memfreed; /* Bytes freed over life time. */
uint64_t mt_numallocs; /* Allocations over life time. */
uint64_t mt_numfrees; /* Frees over life time. */
uint64_t mt_bytes; /* Bytes currently allocated. */
uint64_t mt_count; /* Number of current allocations. */
uint64_t mt_free; /* Number of cached free items. */
uint64_t mt_failures; /* Number of allocation failures. */
/*
* Caller-owned memory.
*/
void *mt_caller_pointer[MEMSTAT_MAXCALLER]; /* Pointers. */
uint64_t mt_caller_uint64[MEMSTAT_MAXCALLER]; /* Integers. */
/*
* For allocators making use of per-CPU caches, we also provide raw
* statistics from the central allocator and each per-CPU cache,
* which (combined) sometimes make up the above general statistics.
*
* First, central zone/type state, all numbers excluding any items
* cached in per-CPU caches.
*
* XXXRW: Might be desirable to separately expose allocation stats
* from zone, which should (combined with per-cpu) add up to the
* global stats above.
*/
uint64_t mt_zonefree; /* Free items in zone. */
uint64_t mt_kegfree; /* Free items in keg. */
/*
* Per-CPU measurements fall into two categories: per-CPU allocation,
* and per-CPU cache state.
*/
struct {
uint64_t mtp_memalloced;/* Per-CPU mt_memalloced. */
uint64_t mtp_memfreed; /* Per-CPU mt_memfreed. */
uint64_t mtp_numallocs; /* Per-CPU mt_numallocs. */
uint64_t mtp_numfrees; /* Per-CPU mt_numfrees. */
uint64_t mtp_sizemask; /* Per-CPU mt_sizemask. */
void *mtp_caller_pointer[MEMSTAT_MAXCALLER];
uint64_t mtp_caller_uint64[MEMSTAT_MAXCALLER];
} mt_percpu_alloc[MEMSTAT_MAXCPU];
struct {
uint64_t mtp_free; /* Per-CPU cache free items. */
} mt_percpu_cache[MEMSTAT_MAXCPU];
LIST_ENTRY(memory_type) mt_list; /* List of types. */
};
/*
* Description of struct memory_type_list is in memstat.h.
*/
struct memory_type_list {
LIST_HEAD(, memory_type) mtl_list;
int mtl_error;
};
struct memory_type *_memstat_mt_allocate(struct memory_type_list *list,
int allocator, const char *name);
void _memstat_mt_reset_stats(struct memory_type *mtp);
#endif /* !_MEMSTAT_INTERNAL_H_ */

lib/libmemstat/memstat_malloc.c (new file)

@@ -0,0 +1,228 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memstat.h"
#include "memstat_internal.h"
/*
* Extract malloc(9) statistics from the running kernel, and store all memory
* type information in the passed list. For each type, check the list for an
* existing entry with the right name/allocator -- if present, update that
* entry. Otherwise, add a new entry. On error, the entire list will be
* cleared, as entries will be in an inconsistent state.
*
* To reduce the level of work for a list that starts empty, we keep around a
* hint as to whether it was empty when we began, so we can avoid searching
* the list for entries to update. Updates are O(n^2) due to searching for
* each entry before adding it.
*/
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
struct malloc_type_stream_header *mtshp;
struct malloc_type_header *mthp;
struct malloc_type_stats *mtsp;
struct memory_type *mtp;
int count, hint_dontsearch, i, j, maxcpus;
char *buffer, *p;
size_t size;
hint_dontsearch = LIST_EMPTY(&list->mtl_list);
/*
* Query the number of CPUs and the number of malloc types so that we can
* guess an initial buffer size. We loop until we succeed or really
* fail. Note that the value of maxcpus we query using sysctl is not
* the version we use when processing the real data -- that is read
* from the header.
*/
retry:
size = sizeof(maxcpus);
if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
if (errno == EACCES || errno == EPERM)
list->mtl_error = MEMSTAT_ERROR_PERMISSION;
else
list->mtl_error = MEMSTAT_ERROR_DATAERROR;
return (-1);
}
if (size != sizeof(maxcpus)) {
list->mtl_error = MEMSTAT_ERROR_DATAERROR;
return (-1);
}
if (maxcpus > MEMSTAT_MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
return (-1);
}
size = sizeof(count);
if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
if (errno == EACCES || errno == EPERM)
list->mtl_error = MEMSTAT_ERROR_PERMISSION;
else
list->mtl_error = MEMSTAT_ERROR_VERSION;
return (-1);
}
if (size != sizeof(count)) {
list->mtl_error = MEMSTAT_ERROR_DATAERROR;
return (-1);
}
size = sizeof(*mthp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
maxcpus);
buffer = malloc(size);
if (buffer == NULL) {
list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
return (-1);
}
if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
/*
* XXXRW: ENOMEM is an ambiguous return, we should bound the
* number of loops, perhaps.
*/
if (errno == ENOMEM) {
free(buffer);
goto retry;
}
if (errno == EACCES || errno == EPERM)
list->mtl_error = MEMSTAT_ERROR_PERMISSION;
else
list->mtl_error = MEMSTAT_ERROR_VERSION;
free(buffer);
return (-1);
}
if (size == 0) {
free(buffer);
return (0);
}
if (size < sizeof(*mtshp)) {
list->mtl_error = MEMSTAT_ERROR_VERSION;
free(buffer);
return (-1);
}
p = buffer;
mtshp = (struct malloc_type_stream_header *)p;
p += sizeof(*mtshp);
if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
list->mtl_error = MEMSTAT_ERROR_VERSION;
free(buffer);
return (-1);
}
if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
free(buffer);
return (-1);
}
/*
* For the remainder of this function, we are quite trusting about
* the layout of structures and sizes, since we've determined we have
* a matching version and acceptable CPU count.
*/
maxcpus = mtshp->mtsh_maxcpus;
count = mtshp->mtsh_count;
for (i = 0; i < count; i++) {
mthp = (struct malloc_type_header *)p;
p += sizeof(*mthp);
if (hint_dontsearch == 0) {
mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
mthp->mth_name);
} else
mtp = NULL;
if (mtp == NULL)
mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
mthp->mth_name);
if (mtp == NULL) {
memstat_mtl_free(list);
free(buffer);
list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
return (-1);
}
/*
* Reset the statistics on a current node.
*/
_memstat_mt_reset_stats(mtp);
for (j = 0; j < maxcpus; j++) {
mtsp = (struct malloc_type_stats *)p;
p += sizeof(*mtsp);
/*
* Summarize raw statistics across CPUs into coalesced
* statistics.
*/
mtp->mt_memalloced += mtsp->mts_memalloced;
mtp->mt_memfreed += mtsp->mts_memfreed;
mtp->mt_numallocs += mtsp->mts_numallocs;
mtp->mt_numfrees += mtsp->mts_numfrees;
mtp->mt_sizemask |= mtsp->mts_size;
/*
* Copies of per-CPU statistics.
*/
mtp->mt_percpu_alloc[j].mtp_memalloced =
mtsp->mts_memalloced;
mtp->mt_percpu_alloc[j].mtp_memfreed =
mtsp->mts_memfreed;
mtp->mt_percpu_alloc[j].mtp_numallocs =
mtsp->mts_numallocs;
mtp->mt_percpu_alloc[j].mtp_numfrees =
mtsp->mts_numfrees;
mtp->mt_percpu_alloc[j].mtp_sizemask =
mtsp->mts_size;
}
/*
* Derived cross-CPU statistics.
*/
mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
}
free(buffer);
return (0);
}

lib/libmemstat/memstat_uma.c (new file)

@@ -0,0 +1,221 @@
/*-
* Copyright (c) 2005 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memstat.h"
#include "memstat_internal.h"
/*
* Extract uma(9) statistics from the running kernel, and store all memory
* type information in the passed list. For each type, check the list for an
* existing entry with the right name/allocator -- if present, update that
* entry. Otherwise, add a new entry. On error, the entire list will be
* cleared, as entries will be in an inconsistent state.
*
* To reduce the level of work for a list that starts empty, we keep around a
* hint as to whether it was empty when we began, so we can avoid searching
* the list for entries to update. Updates are O(n^2) due to searching for
* each entry before adding it.
*/
int
memstat_sysctl_uma(struct memory_type_list *list, int flags)
{
struct uma_stream_header *ushp;
struct uma_type_header *uthp;
struct uma_percpu_stat *upsp;
struct memory_type *mtp;
int count, hint_dontsearch, i, j, maxcpus;
char *buffer, *p;
size_t size;
hint_dontsearch = LIST_EMPTY(&list->mtl_list);
/*
* Query the number of CPUs and the number of uma(9) zones so that we can
* guess an initial buffer size. We loop until we succeed or really
* fail. Note that the value of maxcpus we query using sysctl is not
* the version we use when processing the real data -- that is read
* from the header.
*/
retry:
size = sizeof(maxcpus);
if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
if (errno == EACCES || errno == EPERM)
list->mtl_error = MEMSTAT_ERROR_PERMISSION;
else
list->mtl_error = MEMSTAT_ERROR_DATAERROR;
return (-1);
}
if (size != sizeof(maxcpus)) {
list->mtl_error = MEMSTAT_ERROR_DATAERROR;
return (-1);
}
if (maxcpus > MEMSTAT_MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
return (-1);
}
size = sizeof(count);
if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
if (errno == EACCES || errno == EPERM)
list->mtl_error = MEMSTAT_ERROR_PERMISSION;
else
list->mtl_error = MEMSTAT_ERROR_VERSION;
return (-1);
}
if (size != sizeof(count)) {
list->mtl_error = MEMSTAT_ERROR_DATAERROR;
return (-1);
}
size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) *
maxcpus);
buffer = malloc(size);
if (buffer == NULL) {
list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
return (-1);
}
if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
/*
* XXXRW: ENOMEM is an ambiguous return, we should bound the
* number of loops, perhaps.
*/
if (errno == ENOMEM) {
free(buffer);
goto retry;
}
if (errno == EACCES || errno == EPERM)
list->mtl_error = MEMSTAT_ERROR_PERMISSION;
else
list->mtl_error = MEMSTAT_ERROR_VERSION;
free(buffer);
return (-1);
}
if (size == 0) {
free(buffer);
return (0);
}
if (size < sizeof(*ushp)) {
list->mtl_error = MEMSTAT_ERROR_VERSION;
free(buffer);
return (-1);
}
p = buffer;
ushp = (struct uma_stream_header *)p;
p += sizeof(*ushp);
if (ushp->ush_version != UMA_STREAM_VERSION) {
list->mtl_error = MEMSTAT_ERROR_VERSION;
free(buffer);
return (-1);
}
if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) {
list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
free(buffer);
return (-1);
}
/*
* For the remainder of this function, we are quite trusting about
* the layout of structures and sizes, since we've determined we have
* a matching version and acceptable CPU count.
*/
maxcpus = ushp->ush_maxcpus;
count = ushp->ush_count;
for (i = 0; i < count; i++) {
uthp = (struct uma_type_header *)p;
p += sizeof(*uthp);
if (hint_dontsearch == 0) {
mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
uthp->uth_name);
} else
mtp = NULL;
if (mtp == NULL)
mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
uthp->uth_name);
if (mtp == NULL) {
memstat_mtl_free(list);
free(buffer);
list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
return (-1);
}
/*
* Reset the statistics on a current node.
*/
_memstat_mt_reset_stats(mtp);
mtp->mt_numallocs = uthp->uth_allocs;
mtp->mt_numfrees = uthp->uth_frees;
mtp->mt_failures = uthp->uth_fails;
for (j = 0; j < maxcpus; j++) {
upsp = (struct uma_percpu_stat *)p;
p += sizeof(*upsp);
mtp->mt_percpu_cache[j].mtp_free =
upsp->ups_cache_free;
mtp->mt_free += upsp->ups_cache_free;
mtp->mt_numallocs += upsp->ups_allocs;
mtp->mt_numfrees += upsp->ups_frees;
}
mtp->mt_size = uthp->uth_size;
mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
mtp->mt_countlimit = uthp->uth_limit;
mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;
mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
mtp->mt_zonefree = uthp->uth_zone_free;
mtp->mt_kegfree = uthp->uth_keg_free;
mtp->mt_free += mtp->mt_zonefree;
}
free(buffer);
return (0);
}