Get rid of the MAXCPU knowledge that libmemstat kept for internal needs only. Switch to
dynamic memory allocation for the per-CPU memory type data, sized according to mp_maxid for UMA and to mp_maxcpus for malloc, to match the kernel. This makes libmemstat work with arbitrarily large MAXCPU values and therefore eliminates the MEMSTAT_ERROR_TOOMANYCPUS error type.

Reviewed by:	jhb
Approved by:	re (kib)
commit 3ec0e5bcb6
parent 664d9b6f47
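The core of the change: instead of compiling a fixed MEMSTAT_MAXCPU into the library, ask the kernel at run time how many per-CPU slots it actually needs and allocate them dynamically. Below is a minimal stand-alone sketch of that pattern; it is illustrative only, and "struct percpu_stat" and its fields are stand-ins rather than the real libmemstat layout. As the hunks that follow show, the UMA path sizes by kern.smp.maxid + 1 while the malloc path keeps using the kernel's mp_maxcpus.

/*
 * Illustrative sketch only: size per-CPU slots from a run-time sysctl
 * instead of a compile-time constant.  "struct percpu_stat" is a
 * stand-in, not the real libmemstat per-CPU structure.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct percpu_stat {
	uint64_t	memalloced;	/* Bytes allocated on this CPU. */
	uint64_t	memfreed;	/* Bytes freed on this CPU. */
};

int
main(void)
{
	struct percpu_stat *pcpu;
	size_t size;
	int maxid;

	/* kern.smp.maxid is the highest valid CPU ID, so slots = maxid + 1. */
	size = sizeof(maxid);
	if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
		perror("sysctlbyname");
		return (1);
	}

	/* Allocate exactly as many per-CPU slots as the kernel reports. */
	pcpu = calloc(maxid + 1, sizeof(*pcpu));
	if (pcpu == NULL) {
		perror("calloc");
		return (1);
	}

	printf("allocated %d per-CPU slots\n", maxid + 1);
	free(pcpu);
	return (0);
}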
lib/libmemstat/libmemstat.3:

@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd June 27, 2005
+.Dd July 21, 2011
 .Dt LIBMEMSTAT 3
 .Os
 .Sh NAME
@@ -412,10 +412,6 @@ values of
 .Er EACCES
 or
 .Er EPERM .
-.It Dv MEMSTAT_ERROR_TOOMANYCPUS
-Returned if the compile-time limit on the number of CPUs in
-.Nm
-is lower than the number of CPUs returned by a statistics data source.
 .It Dv MEMSTAT_ERROR_DATAERROR
 Returned if
 .Nm
lib/libmemstat/memstat.c:

@@ -49,8 +49,6 @@ memstat_strerror(int error)
 		return ("Version mismatch");
 	case MEMSTAT_ERROR_PERMISSION:
 		return ("Permission denied");
-	case MEMSTAT_ERROR_TOOMANYCPUS:
-		return ("Too many CPUs");
 	case MEMSTAT_ERROR_DATAERROR:
 		return ("Data format error");
 	case MEMSTAT_ERROR_KVM:
@@ -99,6 +97,8 @@ _memstat_mtl_empty(struct memory_type_list *list)
 	struct memory_type *mtp;
 
 	while ((mtp = LIST_FIRST(&list->mtl_list))) {
+		free(mtp->mt_percpu_alloc);
+		free(mtp->mt_percpu_cache);
 		LIST_REMOVE(mtp, mt_list);
 		free(mtp);
 	}
@@ -147,7 +147,7 @@ memstat_mtl_find(struct memory_type_list *list, int allocator,
  */
 struct memory_type *
 _memstat_mt_allocate(struct memory_type_list *list, int allocator,
-    const char *name)
+    const char *name, int maxcpus)
 {
 	struct memory_type *mtp;
 
@@ -158,6 +158,10 @@ _memstat_mt_allocate(struct memory_type_list *list, int allocator,
 	bzero(mtp, sizeof(*mtp));
 
 	mtp->mt_allocator = allocator;
+	mtp->mt_percpu_alloc = malloc(sizeof(struct mt_percpu_alloc_s) *
+	    maxcpus);
+	mtp->mt_percpu_cache = malloc(sizeof(struct mt_percpu_cache_s) *
+	    maxcpus);
 	strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
 	LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
 	return (mtp);
@@ -171,7 +175,7 @@ _memstat_mt_allocate(struct memory_type_list *list, int allocator,
  * libmemstat(3) internal function.
  */
 void
-_memstat_mt_reset_stats(struct memory_type *mtp)
+_memstat_mt_reset_stats(struct memory_type *mtp, int maxcpus)
 {
 	int i;
 
@@ -193,7 +197,7 @@ _memstat_mt_reset_stats(struct memory_type *mtp)
 	mtp->mt_zonefree = 0;
 	mtp->mt_kegfree = 0;
 
-	for (i = 0; i < MEMSTAT_MAXCPU; i++) {
+	for (i = 0; i < maxcpus; i++) {
 		mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
 		mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
 		mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
lib/libmemstat/memstat.h:

@@ -29,12 +29,6 @@
 #ifndef _MEMSTAT_H_
 #define _MEMSTAT_H_
 
-/*
- * Number of CPU slots in library-internal data structures. This should be
- * at least the value of MAXCPU from param.h.
- */
-#define	MEMSTAT_MAXCPU	32
-
 /*
  * Amount of caller data to maintain for each caller data slot. Applications
  * must not request more than this number of caller save data, or risk
@@ -70,7 +64,6 @@
 #define	MEMSTAT_ERROR_NOMEMORY		1	/* Out of memory. */
 #define	MEMSTAT_ERROR_VERSION		2	/* Unsupported version. */
 #define	MEMSTAT_ERROR_PERMISSION	3	/* Permission denied. */
-#define	MEMSTAT_ERROR_TOOMANYCPUS	4	/* Too many CPUs. */
 #define	MEMSTAT_ERROR_DATAERROR		5	/* Error in stat data. */
 #define	MEMSTAT_ERROR_KVM		6	/* See kvm_geterr() for err. */
 #define	MEMSTAT_ERROR_KVM_NOSYMBOL	7	/* Symbol not available. */
lib/libmemstat/memstat_internal.h:

@@ -92,7 +92,7 @@ struct memory_type {
	 * Per-CPU measurements fall into two categories: per-CPU allocation,
	 * and per-CPU cache state.
	 */
-	struct {
+	struct mt_percpu_alloc_s {
		uint64_t	mtp_memalloced;	/* Per-CPU mt_memalloced. */
		uint64_t	mtp_memfreed;	/* Per-CPU mt_memfreed. */
		uint64_t	mtp_numallocs;	/* Per-CPU mt_numallocs. */
@@ -100,11 +100,11 @@ struct memory_type {
		uint64_t	mtp_sizemask;	/* Per-CPU mt_sizemask. */
		void		*mtp_caller_pointer[MEMSTAT_MAXCALLER];
		uint64_t	mtp_caller_uint64[MEMSTAT_MAXCALLER];
-	} mt_percpu_alloc[MEMSTAT_MAXCPU];
+	} *mt_percpu_alloc;
 
-	struct {
+	struct mt_percpu_cache_s {
		uint64_t	mtp_free;	/* Per-CPU cache free items. */
-	} mt_percpu_cache[MEMSTAT_MAXCPU];
+	} *mt_percpu_cache;
 
	LIST_ENTRY(memory_type)	mt_list;	/* List of types. */
 };
@@ -119,7 +119,8 @@ struct memory_type_list {
 
 void	_memstat_mtl_empty(struct memory_type_list *list);
 struct memory_type	*_memstat_mt_allocate(struct memory_type_list *list,
-    int allocator, const char *name);
-void	_memstat_mt_reset_stats(struct memory_type *mtp);
+    int allocator, const char *name, int maxcpus);
+void	_memstat_mt_reset_stats(struct memory_type *mtp,
+    int maxcpus);
 
 #endif /* !_MEMSTAT_INTERNAL_H_ */
lib/libmemstat/memstat_malloc.c:

@@ -96,11 +96,6 @@ retry:
		return (-1);
	}
 
-	if (maxcpus > MEMSTAT_MAXCPU) {
-		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
-		return (-1);
-	}
-
	size = sizeof(count);
	if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
@@ -160,12 +155,6 @@ retry:
		return (-1);
	}
 
-	if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
-		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
-		free(buffer);
-		return (-1);
-	}
-
	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
@@ -184,7 +173,7 @@ retry:
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
-			    mthp->mth_name);
+			    mthp->mth_name, maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
@@ -195,7 +184,7 @@ retry:
		/*
		 * Reset the statistics on a current node.
		 */
-		_memstat_mt_reset_stats(mtp);
+		_memstat_mt_reset_stats(mtp, maxcpus);
 
		for (j = 0; j < maxcpus; j++) {
			mtsp = (struct malloc_type_stats *)p;
@@ -295,7 +284,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
	void *kmemstatistics;
	int hint_dontsearch, j, mp_maxcpus, ret;
	char name[MEMTYPE_MAXNAME];
-	struct malloc_type_stats mts[MEMSTAT_MAXCPU], *mtsp;
+	struct malloc_type_stats *mts, *mtsp;
	struct malloc_type_internal *mtip;
	struct malloc_type type, *typep;
	kvm_t *kvm;
@@ -322,11 +311,6 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
		return (-1);
	}
 
-	if (mp_maxcpus > MEMSTAT_MAXCPU) {
-		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
-		return (-1);
-	}
-
	ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
	    sizeof(kmemstatistics), 0);
	if (ret != 0) {
@@ -334,10 +318,17 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
		return (-1);
	}
 
+	mts = malloc(sizeof(struct malloc_type_stats) * mp_maxcpus);
+	if (mts == NULL) {
+		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+		return (-1);
+	}
+
	for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
		ret = kread(kvm, typep, &type, sizeof(type), 0);
		if (ret != 0) {
			_memstat_mtl_empty(list);
+			free(mts);
			list->mtl_error = ret;
			return (-1);
		}
@@ -345,6 +336,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
		    MEMTYPE_MAXNAME);
		if (ret != 0) {
			_memstat_mtl_empty(list);
+			free(mts);
			list->mtl_error = ret;
			return (-1);
		}
@@ -358,6 +350,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
		    sizeof(struct malloc_type_stats), 0);
		if (ret != 0) {
			_memstat_mtl_empty(list);
+			free(mts);
			list->mtl_error = ret;
			return (-1);
		}
@@ -368,9 +361,10 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
-			    name);
+			    name, mp_maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
+			free(mts);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}
@@ -379,7 +373,7 @@ memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
		 * This logic is replicated from kern_malloc.c, and should
		 * be kept in sync.
		 */
-		_memstat_mt_reset_stats(mtp);
+		_memstat_mt_reset_stats(mtp, mp_maxcpus);
		for (j = 0; j < mp_maxcpus; j++) {
			mtsp = &mts[j];
			mtp->mt_memalloced += mtsp->mts_memalloced;
lib/libmemstat/memstat_uma.c:

@@ -79,7 +79,7 @@ memstat_sysctl_uma(struct memory_type_list *list, int flags)
	struct uma_type_header *uthp;
	struct uma_percpu_stat *upsp;
	struct memory_type *mtp;
-	int count, hint_dontsearch, i, j, maxcpus;
+	int count, hint_dontsearch, i, j, maxcpus, maxid;
	char *buffer, *p;
	size_t size;
 
@@ -93,24 +93,19 @@ memstat_sysctl_uma(struct memory_type_list *list, int flags)
	 * from the header.
	 */
 retry:
-	size = sizeof(maxcpus);
-	if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
+	size = sizeof(maxid);
+	if (sysctlbyname("kern.smp.maxid", &maxid, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
-	if (size != sizeof(maxcpus)) {
+	if (size != sizeof(maxid)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
 
-	if (maxcpus > MEMSTAT_MAXCPU) {
-		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
-		return (-1);
-	}
-
	size = sizeof(count);
	if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
@@ -125,7 +120,7 @@ retry:
	}
 
	size = sizeof(*uthp) + count * (sizeof(*uthp) + sizeof(*upsp) *
-	    maxcpus);
+	    (maxid + 1));
 
	buffer = malloc(size);
	if (buffer == NULL) {
@@ -170,12 +165,6 @@ retry:
		return (-1);
	}
 
-	if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) {
-		list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
-		free(buffer);
-		return (-1);
-	}
-
	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
@@ -194,7 +183,7 @@ retry:
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
-			    uthp->uth_name);
+			    uthp->uth_name, maxid + 1);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
@@ -205,7 +194,7 @@ retry:
		/*
		 * Reset the statistics on a current node.
		 */
-		_memstat_mt_reset_stats(mtp);
+		_memstat_mt_reset_stats(mtp, maxid + 1);
 
		mtp->mt_numallocs = uthp->uth_allocs;
		mtp->mt_numfrees = uthp->uth_frees;
@@ -398,7 +387,7 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
-			    name);
+			    name, mp_maxid + 1);
		if (mtp == NULL) {
			free(ucp_array);
			_memstat_mtl_empty(list);
@@ -408,7 +397,7 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
		/*
		 * Reset the statistics on a current node.
		 */
-		_memstat_mt_reset_stats(mtp);
+		_memstat_mt_reset_stats(mtp, mp_maxid + 1);
		mtp->mt_numallocs = uz.uz_allocs;
		mtp->mt_numfrees = uz.uz_frees;
		mtp->mt_failures = uz.uz_fails;