Add a drain function for struct sysctl_req, and use it for a variety of
handlers, some of which had to do awkward things to get a large enough
FIXEDLEN buffer.

Note that some sysctl handlers were explicitly outputting a trailing NUL
byte.  This behaviour was preserved, though it should not be necessary.

Reviewed by:	phk
This commit is contained in:
Matthew D Fleming 2010-09-09 18:33:46 +00:00
parent 64393db3d1
commit dd67e2103c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=212370
11 changed files with 88 additions and 194 deletions

View File

@ -3227,7 +3227,6 @@ t3_dump_rspq(SYSCTL_HANDLER_ARGS)
struct sge_rspq *rspq;
struct sge_qset *qs;
int i, err, dump_end, idx;
static int multiplier = 1;
struct sbuf *sb;
struct rsp_desc *rspd;
uint32_t data[4];
@ -3252,8 +3251,8 @@ t3_dump_rspq(SYSCTL_HANDLER_ARGS)
err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
if (err)
return (err);
retry_sbufops:
sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
(data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
@ -3276,13 +3275,11 @@ t3_dump_rspq(SYSCTL_HANDLER_ARGS)
rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
be32toh(rspd->len_cq), rspd->intr_gen);
}
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
sbuf_finish(sb);
err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
err = sbuf_finish(sb);
/* Output a trailing NUL. */
if (err == 0)
err = SYSCTL_OUT(req, "", 1);
sbuf_delete(sb);
return (err);
}
@ -3293,7 +3290,6 @@ t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
struct sge_txq *txq;
struct sge_qset *qs;
int i, j, err, dump_end;
static int multiplier = 1;
struct sbuf *sb;
struct tx_desc *txd;
uint32_t *WR, wr_hi, wr_lo, gen;
@ -3321,9 +3317,7 @@ t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
if (err)
return (err);
retry_sbufops:
sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
(data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
@ -3350,13 +3344,10 @@ t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
}
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
sbuf_finish(sb);
err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
err = sbuf_finish(sb);
/* Output a trailing NUL. */
if (err == 0)
err = SYSCTL_OUT(req, "", 1);
sbuf_delete(sb);
return (err);
}
@ -3367,7 +3358,6 @@ t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
struct sge_txq *txq;
struct sge_qset *qs;
int i, j, err, dump_end;
static int multiplier = 1;
struct sbuf *sb;
struct tx_desc *txd;
uint32_t *WR, wr_hi, wr_lo, gen;
@ -3391,8 +3381,7 @@ t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
retry_sbufops:
sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
txq->txq_dump_start,
(txq->txq_dump_start + txq->txq_dump_count) & 255);
@ -3412,13 +3401,10 @@ t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
}
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
sbuf_finish(sb);
err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
err = sbuf_finish(sb);
/* Output a trailing NUL. */
if (err == 0)
err = SYSCTL_OUT(req, "", 1);
sbuf_delete(sb);
return (err);
}

View File

@ -828,25 +828,11 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
struct malloc_type_internal *mtip;
struct malloc_type_header mth;
struct malloc_type *mtp;
int buflen, count, error, i;
int error, i;
struct sbuf sbuf;
char *buffer;
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
mtx_lock(&malloc_mtx);
restart:
mtx_assert(&malloc_mtx, MA_OWNED);
count = kmemcount;
mtx_unlock(&malloc_mtx);
buflen = sizeof(mtsh) + count * (sizeof(mth) +
sizeof(struct malloc_type_stats) * MAXCPU) + 1;
buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
mtx_lock(&malloc_mtx);
if (count < kmemcount) {
free(buffer, M_TEMP);
goto restart;
}
sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
/*
* Insert stream header.
@ -855,11 +841,7 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
mtsh.mtsh_maxcpus = MAXCPU;
mtsh.mtsh_count = kmemcount;
if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
mtx_unlock(&malloc_mtx);
error = ENOMEM;
goto out;
}
(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
/*
* Insert alternating sequence of type headers and type statistics.
@ -872,30 +854,19 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
*/
bzero(&mth, sizeof(mth));
strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
mtx_unlock(&malloc_mtx);
error = ENOMEM;
goto out;
}
(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
/*
* Insert type statistics for each CPU.
*/
for (i = 0; i < MAXCPU; i++) {
if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
sizeof(mtip->mti_stats[i])) < 0) {
mtx_unlock(&malloc_mtx);
error = ENOMEM;
goto out;
}
(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
sizeof(mtip->mti_stats[i]));
}
}
mtx_unlock(&malloc_mtx);
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
free(buffer, M_TEMP);
return (error);
}
@ -1005,26 +976,19 @@ DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
int linesize = 64;
struct sbuf sbuf;
uint64_t count;
uint64_t waste;
uint64_t mem;
int bufsize;
int error;
char *buf;
int rsize;
int size;
int i;
bufsize = linesize * (KMEM_ZSIZE + 1);
bufsize += 128; /* For the stats line */
bufsize += 128; /* For the banner line */
waste = 0;
mem = 0;
buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
sbuf_printf(&sbuf,
"\n Size Requests Real Size\n");
for (i = 0; i < KMEM_ZSIZE; i++) {
@ -1042,12 +1006,8 @@ sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf,
"\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
(unsigned long long)mem, (unsigned long long)waste);
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
free(buf, M_TEMP);
return (error);
}

View File

@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/jail.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
@ -1544,3 +1545,30 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
}
return (error);
}
/*
* Drain into a sysctl struct. The user buffer must be wired.
*/
/*
 * sbuf drain callback: copy "len" buffered bytes out to the sysctl
 * request passed in "arg".  Returns the number of bytes consumed on
 * success, or the SYSCTL_OUT error negated so the sbuf layer records
 * the failure (sbuf drain functions signal errors with negative values).
 */
static int
sbuf_sysctl_drain(void *arg, const char *data, int len)
{
	struct sysctl_req *req;
	int error;

	req = arg;
	error = SYSCTL_OUT(req, data, len);
	/* SYSCTL_OUT is expected to return 0 or a positive errno. */
	KASSERT(error >= 0, ("Got unexpected negative value %d", error));
	if (error == 0)
		return (len);
	return (-error);
}
struct sbuf *
sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
struct sysctl_req *req)
{
/* Wire the user buffer, so we can write without blocking. */
sysctl_wire_old_buffer(req, 0);
s = sbuf_new(s, buf, length, SBUF_FIXEDLEN);
sbuf_set_drain(s, sbuf_sysctl_drain, req);
return (s);
}

View File

@ -191,8 +191,7 @@ struct lock_prof_cpu *lp_cpu[MAXCPU];
volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define LPROF_SBUF_SIZE 256 * 400
#define LPROF_SBUF_SIZE 256
static int lock_prof_rejected;
static int lock_prof_skipspin;
@ -384,8 +383,6 @@ lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
continue;
lock_prof_sum(l, &lp, i, spin, t);
lock_prof_output(&lp, sb);
if (sbuf_overflowed(sb))
return;
}
}
}
@ -393,13 +390,11 @@ lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
static int multiplier = 1;
struct sbuf *sb;
int error, cpu, t;
int enabled;
retry_sbufops:
sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
"max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
enabled = lock_prof_enable;
@ -411,16 +406,13 @@ dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
continue;
lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
}
lock_prof_enable = enabled;
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
error = sbuf_finish(sb);
/* Output a trailing NUL. */
if (error == 0)
error = SYSCTL_OUT(req, "", 1);
sbuf_delete(sb);
return (error);
}

View File

@ -305,8 +305,8 @@ sbuf_drain(struct sbuf *s)
SBUF_SETFLAG(s, SBUF_OVERFLOWED);
return (s->s_error);
}
KASSERT(len > 0, ("Drain must either error or work!"));
KASSERT(len > 0 && len <= s->s_len,
("Bad drain amount %d for sbuf %p", len, s));
s->s_len -= len;
/*
* Fast path for the expected case where all the data was

View File

@ -1018,7 +1018,7 @@ sleepq_abort(struct thread *td, int intrval)
#ifdef SLEEPQUEUE_PROFILING
#define SLEEPQ_PROF_LOCATIONS 1024
#define SLEEPQ_SBUFSIZE (40 * 512)
#define SLEEPQ_SBUFSIZE 512
struct sleepq_prof {
LIST_ENTRY(sleepq_prof) sp_link;
const char *sp_wmesg;
@ -1123,15 +1123,13 @@ reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
static int multiplier = 1;
struct sleepq_prof *sp;
struct sbuf *sb;
int enabled;
int error;
int i;
retry_sbufops:
sb = sbuf_new(NULL, NULL, SLEEPQ_SBUFSIZE * multiplier, SBUF_FIXEDLEN);
sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
sbuf_printf(sb, "\nwmesg\tcount\n");
enabled = prof_enabled;
mtx_lock_spin(&sleepq_prof_lock);
@ -1141,19 +1139,13 @@ dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
sbuf_printf(sb, "%s\t%ld\n",
sp->sp_wmesg, sp->sp_count);
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
multiplier++;
goto retry_sbufops;
}
}
}
mtx_lock_spin(&sleepq_prof_lock);
prof_enabled = enabled;
mtx_unlock_spin(&sleepq_prof_lock);
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
error = sbuf_finish(sb);
sbuf_delete(sb);
return (error);
}

View File

@ -154,8 +154,7 @@ __FBSDID("$FreeBSD$");
#define MAX_W_NAME 64
#define BADSTACK_SBUF_SIZE (256 * WITNESS_COUNT)
#define CYCLEGRAPH_SBUF_SIZE 8192
#define FULLGRAPH_SBUF_SIZE 32768
#define FULLGRAPH_SBUF_SIZE 512
/*
* These flags go in the witness relationship matrix and describe the
@ -2545,7 +2544,7 @@ sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
return (error);
}
error = 0;
sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
if (sb == NULL)
return (ENOMEM);
sbuf_printf(sb, "\n");
@ -2557,20 +2556,10 @@ sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
witness_add_fullgraph(sb, w);
mtx_unlock_spin(&w_mtx);
/*
* While using SBUF_FIXEDLEN, check if the sbuf overflowed.
*/
if (sbuf_overflowed(sb)) {
sbuf_delete(sb);
panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
__func__);
}
/*
* Close the sbuf and return to userland.
*/
sbuf_finish(sb);
error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
error = sbuf_finish(sb);
sbuf_delete(sb);
return (error);

View File

@ -710,6 +710,9 @@ void sysctl_lock(void);
void sysctl_unlock(void);
int sysctl_wire_old_buffer(struct sysctl_req *req, size_t len);
struct sbuf;
struct sbuf *sbuf_new_for_sysctl(struct sbuf *, char *, int,
struct sysctl_req *);
#else /* !_KERNEL */
#include <sys/cdefs.h>

View File

@ -3175,36 +3175,16 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uma_keg_t kz;
uma_zone_t z;
uma_keg_t k;
char *buffer;
int buflen, count, error, i;
int count, error, i;
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
mtx_lock(&uma_mtx);
restart:
mtx_assert(&uma_mtx, MA_OWNED);
count = 0;
mtx_lock(&uma_mtx);
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link)
count++;
}
mtx_unlock(&uma_mtx);
buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
(mp_maxid + 1)) + 1;
buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
mtx_lock(&uma_mtx);
i = 0;
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link)
i++;
}
if (i > count) {
free(buffer, M_TEMP);
goto restart;
}
count = i;
sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
/*
* Insert stream header.
@ -3213,11 +3193,7 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
ush.ush_version = UMA_STREAM_VERSION;
ush.ush_maxcpus = (mp_maxid + 1);
ush.ush_count = count;
if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
mtx_unlock(&uma_mtx);
error = ENOMEM;
goto out;
}
(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link) {
@ -3250,12 +3226,7 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uth.uth_frees = z->uz_frees;
uth.uth_fails = z->uz_fails;
uth.uth_sleeps = z->uz_sleeps;
if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
ZONE_UNLOCK(z);
mtx_unlock(&uma_mtx);
error = ENOMEM;
goto out;
}
(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
/*
* While it is not normally safe to access the cache
* bucket pointers while not on the CPU that owns the
@ -3280,21 +3251,14 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
ups.ups_allocs = cache->uc_allocs;
ups.ups_frees = cache->uc_frees;
skip:
if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
ZONE_UNLOCK(z);
mtx_unlock(&uma_mtx);
error = ENOMEM;
goto out;
}
(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
}
ZONE_UNLOCK(z);
}
}
mtx_unlock(&uma_mtx);
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
free(buffer, M_TEMP);
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
return (error);
}

View File

@ -123,12 +123,9 @@ sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
struct vm_freelist *fl;
char *cbuf;
const int cbufsize = vm_nfreelists*(VM_NFREEORDER + 1)*81;
int error, flind, oind, pind;
cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
for (flind = 0; flind < vm_nfreelists; flind++) {
sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
"\n ORDER (SIZE) | NUMBER"
@ -149,10 +146,8 @@ sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf, "\n");
}
}
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
free(cbuf, M_TEMP);
return (error);
}
@ -164,12 +159,9 @@ sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
struct vm_phys_seg *seg;
char *cbuf;
const int cbufsize = VM_PHYSSEG_MAX*(VM_NFREEORDER + 1)*81;
int error, segind;
cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
for (segind = 0; segind < vm_phys_nsegs; segind++) {
sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
seg = &vm_phys_segs[segind];
@ -180,10 +172,8 @@ sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf, "domain: %d\n", seg->domain);
sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
}
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
free(cbuf, M_TEMP);
return (error);
}
@ -195,23 +185,18 @@ static int
sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
char *cbuf;
const int cbufsize = (vm_nfreelists + 1) * VM_NDOMAIN * 81;
int domain, error, flind, ndomains;
ndomains = vm_nfreelists - VM_NFREELIST + 1;
cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
for (domain = 0; domain < ndomains; domain++) {
sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
for (flind = 0; flind < vm_nfreelists; flind++)
sbuf_printf(&sbuf, " [%d]:\t%p\n", flind,
vm_phys_lookup_lists[domain][flind]);
}
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
free(cbuf, M_TEMP);
return (error);
}
#endif

View File

@ -180,12 +180,9 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
vm_reserv_t rv;
char *cbuf;
const int cbufsize = (VM_NRESERVLEVEL + 1) * 81;
int counter, error, level, unused_pages;
cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
sbuf_printf(&sbuf, "\nLEVEL SIZE NUMBER\n\n");
for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
counter = 0;
@ -199,10 +196,8 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf, "%5.5d: %6.6dK, %6.6d\n", level,
unused_pages * (PAGE_SIZE / 1024), counter);
}
sbuf_finish(&sbuf);
error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
free(cbuf, M_TEMP);
return (error);
}