Explicitly wire the user buffer rather than doing it implicitly in
sbuf_new_for_sysctl(9).  This allows using an sbuf with a SYSCTL_OUT
drain for extremely large amounts of data where the caller knows that
appropriate references are held, and sleeping is not an issue.

Inspired by:	rwatson
Matthew D Fleming 2011-01-27 00:34:12 +00:00
parent 73d6f8516d
commit 00f0e671ff
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=217916
10 changed files with 46 additions and 10 deletions
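
For reference, a handler written against the new convention looks roughly like
this (a minimal sketch assembled from the hunks below; the handler name,
example_mtx, and the 128-byte buffer size are hypothetical):

	static int
	example_sysctl_handler(SYSCTL_HANDLER_ARGS)
	{
		struct sbuf *sb;
		int error;

		/* Wire the user buffer explicitly, while sleeping is still safe. */
		error = sysctl_wire_old_buffer(req, 0);
		if (error != 0)
			return (error);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		/* With the buffer wired, appending under a mutex cannot fault. */
		mtx_lock(&example_mtx);
		sbuf_printf(sb, "example output\n");
		mtx_unlock(&example_mtx);

		error = sbuf_finish(sb);
		sbuf_delete(sb);
		return (error);
	}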

View File

@ -177,9 +177,9 @@ The
function will set up an sbuf with a drain function to use
.Fn SYSCTL_OUT
when the internal buffer fills.
The sysctl old buffer will be wired, which allows for doing an
.Fn sbuf_printf
while holding a mutex.
Note that if the various functions which append to an sbuf are used while
a non-sleepable lock is held, the user buffer should be wired using
.Fn sysctl_wire_old_buffer .
.Pp
The
.Fn sbuf_delete

View File

@ -3251,7 +3251,9 @@ t3_dump_rspq(SYSCTL_HANDLER_ARGS)
err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
if (err)
return (err);
err = sysctl_wire_old_buffer(req, 0);
if (err)
return (err);
sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
@ -3316,7 +3318,9 @@ t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
if (err)
return (err);
err = sysctl_wire_old_buffer(req, 0);
if (err)
return (err);
sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
@ -3381,6 +3385,9 @@ t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
err = sysctl_wire_old_buffer(req, 0);
if (err != 0)
return (err);
sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
txq->txq_dump_start,

View File

@ -862,6 +862,9 @@ sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
int error, i;
struct sbuf sbuf;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
mtx_lock(&malloc_mtx);
@ -1019,6 +1022,9 @@ sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
waste = 0;
mem = 0;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
sbuf_printf(&sbuf,
"\n Size Requests Real Size\n");

View File

@ -1591,7 +1591,8 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
}
/*
* Drain into a sysctl struct. The user buffer must be wired.
* Drain into a sysctl struct. The user buffer should be wired if a page
* fault would cause issue.
*/
static int
sbuf_sysctl_drain(void *arg, const char *data, int len)
@ -1609,9 +1610,6 @@ sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
struct sysctl_req *req)
{
/* Wire the user buffer, so we can write without blocking. */
sysctl_wire_old_buffer(req, 0);
s = sbuf_new(s, buf, length, SBUF_FIXEDLEN);
sbuf_set_drain(s, sbuf_sysctl_drain, req);
return (s);
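
The large-data case called out in the commit message would then skip the
wiring step entirely, along the lines of the following (a hypothetical sketch,
not part of this commit; the handler name and output are made up):

	static int
	example_dump_large(SYSCTL_HANDLER_ARGS)
	{
		struct sbuf sb;
		int error, i;

		/*
		 * No sysctl_wire_old_buffer() call: the caller holds the
		 * needed references and a sleep in the SYSCTL_OUT drain is
		 * acceptable, so the drain may fault in the user buffer as
		 * it copies data out.
		 */
		sbuf_new_for_sysctl(&sb, NULL, 128, req);
		for (i = 0; i < 1024; i++)
			sbuf_printf(&sb, "record %d\n", i);
		error = sbuf_finish(&sb);
		sbuf_delete(&sb);
		return (error);
	}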

View File

@ -393,6 +393,9 @@ dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
int error, cpu, t;
int enabled;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
"max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");

View File

@ -1130,6 +1130,9 @@ dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
int error;
int i;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
sbuf_printf(sb, "\nwmesg\tcount\n");
enabled = prof_enabled;

View File

@ -2544,6 +2544,10 @@ sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
return (error);
}
error = 0;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
if (sb == NULL)
return (ENOMEM);

View File

@ -3224,6 +3224,9 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uma_keg_t k;
int count, error, i;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
count = 0;

View File

@ -125,6 +125,9 @@ sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
struct vm_freelist *fl;
int error, flind, oind, pind;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
for (flind = 0; flind < vm_nfreelists; flind++) {
sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
@ -161,6 +164,9 @@ sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
struct vm_phys_seg *seg;
int error, segind;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
for (segind = 0; segind < vm_phys_nsegs; segind++) {
sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
@ -187,8 +193,11 @@ sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
struct sbuf sbuf;
int domain, error, flind, ndomains;
ndomains = vm_nfreelists - VM_NFREELIST + 1;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
ndomains = vm_nfreelists - VM_NFREELIST + 1;
for (domain = 0; domain < ndomains; domain++) {
sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
for (flind = 0; flind < vm_nfreelists; flind++)

View File

@ -182,6 +182,9 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
vm_reserv_t rv;
int counter, error, level, unused_pages;
error = sysctl_wire_old_buffer(req, 0);
if (error != 0)
return (error);
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
sbuf_printf(&sbuf, "\nLEVEL SIZE NUMBER\n\n");
for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {