1452 DTrace buffer autoscaling should be less violent

illumos/illumos-gate@6fb4854bed

This fixes the tst.resize1.d and tst.resize2.d DTrace tests, which have
been failing since r261122 because they caused dtrace(1) to attempt to
allocate and use large amounts of memory, and to get killed by the OOM killer
as a result.

MFC after:	1 month
This commit is contained in:
Mark Johnston 2014-02-22 05:18:55 +00:00
parent dc0f030e51
commit 33db01542c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=262330
4 changed files with 33 additions and 58 deletions

View File

@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* ASSERTION:
* Checks that setting "bufresize" to "auto" will cause buffer
@ -34,14 +32,8 @@
* SECTION: Buffers and Buffering/Buffer Resizing Policy;
* Options and Tunables/bufsize;
* Options and Tunables/bufresize
*
* NOTES:
* We use the undocumented "preallocate" option to make sure dtrace(1M)
* has enough space in its heap to allocate a buffer as large as the
* kernel's trace buffer.
*/
#pragma D option preallocate=100t
#pragma D option bufresize=auto
#pragma D option bufsize=100t

View File

@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* ASSERTION:
* Checks that setting "bufresize" to "auto" will cause buffer
@ -34,14 +32,8 @@
* SECTION: Buffers and Buffering/Buffer Resizing Policy;
* Options and Tunables/aggsize;
* Options and Tunables/bufresize
*
* NOTES:
* We use the undocumented "preallocate" option to make sure dtrace(1M)
* has enough space in its heap to allocate a buffer as large as the
* kernel's trace buffer.
*/
#pragma D option preallocate=100t
#pragma D option bufresize=auto
#pragma D option aggsize=100t

View File

@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
@ -906,30 +904,6 @@ dt_options_load(dtrace_hdl_t *dtp)
return (0);
}
/*ARGSUSED*/
/*
 * Handler for the (undocumented) "preallocate" option: grow the process heap
 * up front by allocating and immediately freeing a block of roughly the
 * requested size.  (This is the function the surrounding diff removes, since
 * the kernel-side buffer autoscaling change makes it unnecessary.)
 *
 * arg is the option's value string; it is parsed into a byte count with
 * dt_optval_parse().  A missing or unparseable value yields EDT_BADOPTVAL.
 */
static int
dt_opt_preallocate(dtrace_hdl_t *dtp, const char *arg, uintptr_t option)
{
dtrace_optval_t size;
void *p;
if (arg == NULL || dt_optval_parse(arg, &size) != 0)
return (dt_set_errno(dtp, EDT_BADOPTVAL));
/* Clamp to SIZE_MAX so the 64-bit optval fits in a size_t allocation. */
if (size > SIZE_MAX)
size = SIZE_MAX;
/*
 * Best-effort: if the full size cannot be allocated, halve it until an
 * allocation succeeds.  NOTE(review): if dt_zalloc() also fails for a
 * size of 0 this loop would never terminate — presumably dt_zalloc(0)
 * succeeds or allocation succeeds before that point; verify in dt_subr.c.
 */
if ((p = dt_zalloc(dtp, size)) == NULL) {
do {
size /= 2;
} while ((p = dt_zalloc(dtp, size)) == NULL);
}
/* The block is freed immediately; only the heap growth is the point. */
dt_free(dtp, p);
return (0);
}
typedef struct dt_option {
const char *o_name;
int (*o_func)(dtrace_hdl_t *, const char *, uintptr_t);
@ -968,7 +942,6 @@ static const dt_option_t _dtrace_ctoptions[] = {
{ "linktype", dt_opt_linktype },
{ "nolibs", dt_opt_cflags, DTRACE_C_NOLIBS },
{ "pgmax", dt_opt_pgmax },
{ "preallocate", dt_opt_preallocate },
{ "pspec", dt_opt_cflags, DTRACE_C_PSPEC },
{ "setenv", dt_opt_setenv, 1 },
{ "stdc", dt_opt_stdc },

View File

@ -10853,17 +10853,20 @@ dtrace_buffer_activate(dtrace_state_t *state)
static int
dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
processorid_t cpu)
processorid_t cpu, int *factor)
{
#if defined(sun)
cpu_t *cp;
#endif
dtrace_buffer_t *buf;
int allocated = 0, desired = 0;
#if defined(sun)
ASSERT(MUTEX_HELD(&cpu_lock));
ASSERT(MUTEX_HELD(&dtrace_lock));
*factor = 1;
if (size > dtrace_nonroot_maxsize &&
!PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
return (EFBIG);
@ -10887,7 +10890,8 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
ASSERT(buf->dtb_xamot == NULL);
if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
if ((buf->dtb_tomax = kmem_zalloc(size,
KM_NOSLEEP | KM_NORMALPRI)) == NULL)
goto err;
buf->dtb_size = size;
@ -10898,7 +10902,8 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
if (flags & DTRACEBUF_NOSWITCH)
continue;
if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
if ((buf->dtb_xamot = kmem_zalloc(size,
KM_NOSLEEP | KM_NORMALPRI)) == NULL)
goto err;
} while ((cp = cp->cpu_next) != cpu_list);
@ -10912,27 +10917,29 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
continue;
buf = &bufs[cp->cpu_id];
desired += 2;
if (buf->dtb_xamot != NULL) {
ASSERT(buf->dtb_tomax != NULL);
ASSERT(buf->dtb_size == size);
kmem_free(buf->dtb_xamot, size);
allocated++;
}
if (buf->dtb_tomax != NULL) {
ASSERT(buf->dtb_size == size);
kmem_free(buf->dtb_tomax, size);
allocated++;
}
buf->dtb_tomax = NULL;
buf->dtb_xamot = NULL;
buf->dtb_size = 0;
} while ((cp = cp->cpu_next) != cpu_list);
return (ENOMEM);
#else
int i;
*factor = 1;
#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
/*
* FreeBSD isn't good at limiting the amount of memory we
@ -10940,7 +10947,7 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
* to do something that might well end in tears at bedtime.
*/
if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
return(ENOMEM);
return (ENOMEM);
#endif
ASSERT(MUTEX_HELD(&dtrace_lock));
@ -10962,7 +10969,8 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
ASSERT(buf->dtb_xamot == NULL);
if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
if ((buf->dtb_tomax = kmem_zalloc(size,
KM_NOSLEEP | KM_NORMALPRI)) == NULL)
goto err;
buf->dtb_size = size;
@ -10973,7 +10981,8 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
if (flags & DTRACEBUF_NOSWITCH)
continue;
if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
if ((buf->dtb_xamot = kmem_zalloc(size,
KM_NOSLEEP | KM_NORMALPRI)) == NULL)
goto err;
}
@ -10989,16 +10998,19 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
continue;
buf = &bufs[i];
desired += 2;
if (buf->dtb_xamot != NULL) {
ASSERT(buf->dtb_tomax != NULL);
ASSERT(buf->dtb_size == size);
kmem_free(buf->dtb_xamot, size);
allocated++;
}
if (buf->dtb_tomax != NULL) {
ASSERT(buf->dtb_size == size);
kmem_free(buf->dtb_tomax, size);
allocated++;
}
buf->dtb_tomax = NULL;
@ -11006,9 +11018,10 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
buf->dtb_size = 0;
}
#endif
*factor = desired / (allocated > 0 ? allocated : 1);
return (ENOMEM);
#endif
}
/*
@ -12961,7 +12974,7 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
size = min;
if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
return (ENOMEM);
dstate->dtds_size = size;
@ -13413,7 +13426,7 @@ dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
{
dtrace_optval_t *opt = state->dts_options, size;
processorid_t cpu = 0;;
int flags = 0, rval;
int flags = 0, rval, factor, divisor = 1;
ASSERT(MUTEX_HELD(&dtrace_lock));
ASSERT(MUTEX_HELD(&cpu_lock));
@ -13443,7 +13456,7 @@ dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
flags |= DTRACEBUF_INACTIVE;
}
for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
/*
* The size must be 8-byte aligned. If the size is not 8-byte
* aligned, drop it down by the difference.
@ -13461,7 +13474,7 @@ dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
return (E2BIG);
}
rval = dtrace_buffer_alloc(buf, size, flags, cpu);
rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
if (rval != ENOMEM) {
opt[which] = size;
@ -13470,6 +13483,9 @@ dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
return (rval);
for (divisor = 2; divisor < factor; divisor <<= 1)
continue;
}
return (ENOMEM);
@ -13571,7 +13587,8 @@ dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
goto out;
}
spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
KM_NOSLEEP | KM_NORMALPRI);
if (spec == NULL) {
rval = ENOMEM;
@ -13582,7 +13599,8 @@ dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
state->dts_nspeculations = (int)nspec;
for (i = 0; i < nspec; i++) {
if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
if ((buf = kmem_zalloc(bufsize,
KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
rval = ENOMEM;
goto err;
}