Import jemalloc 606f1fdc3cdbc700717133ca56685313caea24bb (dev branch, prior to 3.0.0 release), and mangle internal symbols.
Jason Evans 2012-04-21 15:09:22 +00:00
parent fbd21ea620
commit 8ed34ab00d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=234543
25 changed files with 262 additions and 158 deletions

View File

@@ -34,6 +34,8 @@ found in the git revision history:
- Add the --with-mangling option.
- Add the --disable-experimental option.
- Add the "thread.tcache.enabled" mallctl.
- Add the "opt.prof_final" mallctl.
- Update pprof (from gperftools 2.0).
Incompatible changes:
- Enable stats by default.
@@ -41,6 +43,8 @@ found in the git revision history:
- Disable lazy locking by default.
- Rename the "tcache.flush" mallctl to "thread.tcache.flush".
- Rename the "arenas.pagesize" mallctl to "arenas.page".
- Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
- Change the "opt.prof_accum" default from true to false.
Removed features:
- Remove the swap feature, including the "config.swap", "swap.avail",
@@ -77,6 +81,7 @@ found in the git revision history:
- Add missing "opt.lg_tcache_max" mallctl implementation.
- Use glibc allocator hooks to make mixed allocator usage less likely.
- Fix build issues for --disable-tcache.
- Don't mangle pthread_create() when --with-private-namespace is specified.
* 2.2.5 (November 14, 2011)
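
The new mallctls added above can be exercised as follows; this is a minimal sketch, not part of this commit, and assumes the standard mallctl() interface exported via <malloc_np.h>:

#include <malloc_np.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	bool prof_final;
	size_t sz = sizeof(prof_final);

	/* Read-only option; fails unless built with --enable-prof. */
	if (mallctl("opt.prof_final", &prof_final, &sz, NULL, 0) == 0)
		printf("opt.prof_final: %s\n", prof_final ? "true" : "false");

	/* Read-write: disable the calling thread's tcache. */
	bool enabled = false;
	if (mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
	    sizeof(enabled)) != 0)
		fprintf(stderr, "thread.tcache.enabled unavailable\n");
	return (0);
}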

View File

@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 98d0ba4..23d2152 100644
index f78f423..ce6df80 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index 98d0ba4..23d2152 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
@@ -2080,4 +2091,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
@@ -2091,4 +2102,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@@ -45,7 +45,7 @@ index 98d0ba4..23d2152 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 905653a..b235a0d 100644
index b61abe8..edbb437 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
@@ -68,18 +68,38 @@ index 905653a..b235a0d 100644
#include "../jemalloc@install_suffix@.h"
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index c46feee..d7133f4 100644
index 8837ef5..d7133f4 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -39,8 +39,6 @@ struct malloc_mutex_s {
@@ -39,9 +39,6 @@ struct malloc_mutex_s {
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
-#else
-# undef isthreaded /* Undo private_namespace.h definition. */
-# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
index 15fe3c5..be94eb8 100644
--- a/include/jemalloc/internal/private_namespace.h
+++ b/include/jemalloc/internal/private_namespace.h
@@ -1,6 +1,3 @@
-#define a0calloc JEMALLOC_N(a0calloc)
-#define a0free JEMALLOC_N(a0free)
-#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
@@ -167,7 +164,6 @@
#define iqalloc JEMALLOC_N(iqalloc)
#define iralloc JEMALLOC_N(iralloc)
#define isalloc JEMALLOC_N(isalloc)
-#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in
index f0581db..f26d8bc 100644
--- a/include/jemalloc/jemalloc.h.in
@@ -175,7 +195,7 @@ index 0000000..2c5797f
+#define pthread_mutex_lock _pthread_mutex_lock
+#define pthread_mutex_unlock _pthread_mutex_unlock
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 0decd8a..73fad29 100644
index 00c2b23..729f4e1 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -8,6 +8,9 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -221,7 +241,7 @@ index 4b8ce57..7be5fc9 100644
bool
diff --git a/src/util.c b/src/util.c
index 2aab61f..8b05042 100644
index 99ae26d..b80676c 100644
--- a/src/util.c
+++ b/src/util.c
@@ -60,6 +60,22 @@ wrtmessage(void *cbopaque, const char *s)

View File

@@ -72,7 +72,8 @@ do_extract() {
find . -name '*.orig' -delete
# Generate various files.
./autogen.sh --enable-cc-silence --enable-dss --enable-xmalloc \
--enable-utrace --with-xslroot=/usr/local/share/xsl/docbook
--enable-utrace --with-xslroot=/usr/local/share/xsl/docbook \
--with-private-namespace=__jemalloc_
gmake dist
)
}

View File

@@ -1 +1 @@
1.0.0-266-gb57d3ec571c6551231be62b7bf92c084a8c8291c
1.0.0-283-g606f1fdc3cdbc700717133ca56685313caea24bb

View File

@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
.\" Date: 04/17/2012
.\" Date: 04/20/2012
.\" Manual: User Manual
.\" Source: jemalloc 1.0.0-266-gb57d3ec571c6551231be62b7bf92c084a8c8291c
.\" Source: jemalloc 1.0.0-283-g606f1fdc3cdbc700717133ca56685313caea24bb
.\" Language: English
.\"
.TH "JEMALLOC" "3" "04/17/2012" "jemalloc 1.0.0-266-gb57d3ec571" "User Manual"
.TH "JEMALLOC" "3" "04/20/2012" "jemalloc 1.0.0-283-g606f1fdc3c" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
This manual describes jemalloc 1\&.0\&.0\-266\-gb57d3ec571c6551231be62b7bf92c084a8c8291c\&. More information can be found at the
This manual describes jemalloc 1\&.0\&.0\-283\-g606f1fdc3cdbc700717133ca56685313caea24bb\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -753,14 +753,7 @@ Maximum size class (log base 2) to cache in the thread\-specific cache\&. At a m
.PP
"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity, and use an
\fBatexit\fR(3)
function to dump final memory usage to a file named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.f\&.heap, where
<prefix>
is controlled by the
"opt\&.prof_prefix"
option\&. See the
Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the
"opt\&.prof_active"
option for on\-the\-fly activation/deactivation\&. See the
"opt\&.lg_prof_sample"
@@ -768,12 +761,14 @@ option for probabilistic sampling control\&. See the
"opt\&.prof_accum"
option for control of cumulative sample reporting\&. See the
"opt\&.lg_prof_interval"
option for information on interval\-triggered profile dumping, and the
option for information on interval\-triggered profile dumping, the
"opt\&.prof_gdump"
option for information on high\-water\-triggered profile dumping\&. Profile output is compatible with the included
option for information on high\-water\-triggered profile dumping, and the
"opt\&.prof_final"
option for final profile dumping\&. Profile output is compatible with the included
\fBpprof\fR
Perl script, which originates from the
\m[blue]\fBgoogle\-perftools package\fR\m[]\&\s-2\u[3]\d\s+2\&.
\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&.
.RE
.PP
"opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR]
@@ -793,12 +788,12 @@ mallctl\&. This option is enabled by default\&.
.PP
"opt\&.lg_prof_sample" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 1 (2^0) (i\&.e\&. all allocations are sampled)\&.
Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&.
.RE
.PP
"opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is enabled by default\&.
Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is disabled by default\&.
.RE
.PP
"opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
@@ -821,6 +816,18 @@ is controlled by the
option\&. This option is disabled by default\&.
.RE
.PP
"opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Use an
\fBatexit\fR(3)
function to dump final memory usage to a file named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.f\&.heap, where
<prefix>
is controlled by the
"opt\&.prof_prefix"
option\&. This option is enabled by default\&.
.RE
.PP
"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Leak reporting enabled/disabled\&. If enabled, use an
@@ -1458,7 +1465,7 @@ Valgrind
\%http://http://valgrind.org/
.RE
.IP " 3." 4
google-perftools package
gperftools package
.RS 4
\%http://code.google.com/p/google-perftools/
\%http://code.google.com/p/gperftools/
.RE
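
The changed defaults described above (lg_prof_sample 0 to 19, prof_accum true to false, plus the new prof_final) can still be overridden at startup. A sketch using the malloc_conf mechanism this manual documents; the option values are illustrative only:

/* Compile-time defaults override, read during allocator bootstrap. */
const char *malloc_conf = "prof:true,lg_prof_sample:0,prof_accum:true,"
    "prof_final:false";

The same option string may also be supplied at run time through the MALLOC_CONF environment variable.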

View File

@@ -408,7 +408,6 @@ void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
size_t arena_salloc(const void *ptr, bool demote);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
@@ -437,6 +436,7 @@ unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
bool try_tcache);
#endif
@@ -625,6 +625,46 @@ arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
}
}
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits;
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
ret = bin_info->reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
if (config_prof && demote && prof_promote && ret == PAGE &&
(mapbits & CHUNK_MAP_CLASS_MASK) != 0) {
size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
CHUNK_MAP_CLASS_SHIFT) - 1;
assert(binind < NBINS);
ret = arena_bin_info[binind].reg_size;
}
assert(ret != 0);
}
return (ret);
}
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{

View File

@@ -9,6 +9,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void pages_purge(void *addr, size_t length);
void *chunk_alloc_mmap(size_t size, size_t alignment);
bool chunk_dealloc_mmap(void *chunk, size_t size);

View File

@@ -738,10 +738,9 @@ isalloc(const void *ptr, bool demote)
assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
if (chunk != ptr)
ret = arena_salloc(ptr, demote);
} else
else
ret = huge_salloc(ptr);
return (ret);

View File

@@ -1,5 +1,6 @@
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
@@ -8,6 +9,7 @@
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
@@ -24,9 +26,13 @@
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
@@ -75,6 +81,11 @@
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
@@ -129,9 +140,13 @@
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
@@ -171,12 +186,16 @@
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mmap_unaligned_tsd_boot JEMALLOC_N(mmap_unaligned_tsd_boot)
#define mmap_unaligned_tsd_cleanup_wrapper JEMALLOC_N(mmap_unaligned_tsd_cleanup_wrapper)
#define mmap_unaligned_tsd_get JEMALLOC_N(mmap_unaligned_tsd_get)
#define mmap_unaligned_tsd_set JEMALLOC_N(mmap_unaligned_tsd_set)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas JEMALLOC_N(narenas)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
@@ -188,14 +207,20 @@
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_valgrind JEMALLOC_N(opt_valgrind)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
@@ -206,18 +231,24 @@
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define pthread_create JEMALLOC_N(pthread_create)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
@@ -238,6 +269,7 @@
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
@@ -247,26 +279,39 @@
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)

View File

@@ -9,7 +9,7 @@ typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
#define PROF_PREFIX_DEFAULT "jeprof"
#define LG_PROF_SAMPLE_DEFAULT 0
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
/*
@@ -169,6 +169,7 @@ extern bool opt_prof_active;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
extern char opt_prof_prefix[PATH_MAX + 1];
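
For scale: the new LG_PROF_SAMPLE_DEFAULT of 19 means an average of one sampled allocation per 2^19 B = 524288 B = 512 KiB of allocation activity, versus the previous default of 2^0 B = 1 B, which sampled every allocation.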

View File

@@ -100,6 +100,9 @@ extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
size_t tcache_salloc(const void *ptr);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
@@ -107,8 +110,6 @@ void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_destroy(tcache_t *tcache);
void tcache_thread_cleanup(void *arg);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
@@ -340,7 +341,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL)
return (NULL);
}
assert(arena_salloc(ret, false) == arena_bin_info[binind].reg_size);
assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
if (zero == false) {
if (config_fill) {
@@ -431,7 +432,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
size_t pageind, binind;
arena_chunk_map_t *mapelm;
assert(arena_salloc(ptr, false) <= SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
@@ -468,8 +469,8 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(arena_salloc(ptr, false) > SMALL_MAXCLASS);
assert(arena_salloc(ptr, false) <= tcache_maxclass);
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= tcache_maxclass);
binind = NBINS + (size >> LG_PAGE) - 1;

View File

@@ -4,11 +4,7 @@
/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8
typedef struct malloc_tsd_cleanup_s malloc_tsd_cleanup_t;
struct malloc_tsd_cleanup_s {
bool (*f)(void *);
void *arg;
};
typedef bool (*malloc_tsd_cleanup_t)(void);
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
@@ -110,13 +106,12 @@ a_attr bool a_name##_booted = false;
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void *arg) \
a_name##_tsd_cleanup_wrapper(void) \
{ \
bool (*cleanup)(void *) = arg; \
\
if (a_name##_initialized) { \
a_name##_initialized = false; \
cleanup(&a_name##_tls); \
a_cleanup(&a_name##_tls); \
} \
return (a_name##_initialized); \
} \
@@ -126,7 +121,7 @@ a_name##_tsd_boot(void) \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##_tsd_cleanup_wrapper, a_cleanup); \
&a_name##_tsd_cleanup_wrapper); \
} \
a_name##_booted = true; \
return (false); \
@@ -192,7 +187,6 @@ a_name##_tsd_set(a_type *val) \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
bool isstatic; \
bool initialized; \
a_type val; \
} a_name##_tsd_wrapper_t; \
@@ -218,8 +212,7 @@ a_name##_tsd_cleanup_wrapper(void *arg) \
return; \
} \
} \
if (wrapper->isstatic == false) \
malloc_tsd_dalloc(wrapper); \
malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
@@ -242,25 +235,19 @@ a_name##_tsd_get_wrapper(void) \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
if (wrapper == NULL) { \
static a_name##_tsd_wrapper_t \
a_name##_tsd_static_data = \
{true, false, a_initializer}; \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
wrapper = &a_name##_tsd_static_data; \
abort(); \
} else { \
static a_type tsd_static_data = a_initializer; \
wrapper->isstatic = false; \
wrapper->initialized = false; \
wrapper->val = tsd_static_data; \
} \
if (pthread_setspecific(a_name##_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
if (opt_abort) \
abort(); \
abort(); \
} \
} \
return (wrapper); \
@@ -298,7 +285,7 @@ a_name##_tsd_set(a_type *val) \
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void *), void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
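
With the simplification above, a cleanup callback no longer receives an argument and instead returns true if it must be re-run, matching the pending/again loop in _malloc_thread_cleanup(). A sketch of a registrant after this change; the example_* names are hypothetical:

static bool
example_tsd_cleanup(void)
{

	/* Release this thread's cached state here. */
	return (false);	/* Done; no further passes needed. */
}

static bool
example_boot(void)
{

	malloc_tsd_cleanup_register(&example_tsd_cleanup);
	return (false);
}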

View File

@@ -7,12 +7,12 @@ extern "C" {
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "1.0.0-266-gb57d3ec571c6551231be62b7bf92c084a8c8291c"
#define JEMALLOC_VERSION "1.0.0-283-g606f1fdc3cdbc700717133ca56685313caea24bb"
#define JEMALLOC_VERSION_MAJOR 1
#define JEMALLOC_VERSION_MINOR 0
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 266
#define JEMALLOC_VERSION_GID "b57d3ec571c6551231be62b7bf92c084a8c8291c"
#define JEMALLOC_VERSION_NREV 283
#define JEMALLOC_VERSION_GID "606f1fdc3cdbc700717133ca56685313caea24bb"
#include "jemalloc_defs.h"
#include "jemalloc_FreeBSD.h"

View File

@@ -39,8 +39,8 @@
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE ""
#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix
#define JEMALLOC_PRIVATE_NAMESPACE "__jemalloc_"
#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) __jemalloc_##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
@@ -221,13 +221,6 @@
*/
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
#define JEMALLOC_PURGE_MADVISE_FREE
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
#else
# error "No method defined for purging unused dirty pages."
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR 3
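
The two JEMALLOC_* definitions above are what --with-private-namespace=__jemalloc_ generates. Shortening the deliberately unwieldy parameter name for readability, the mangling is plain token pasting:

#define JEMALLOC_N(n) __jemalloc_##n

/* Each entry in private_namespace.h then rewrites one internal symbol: */
#define arena_salloc JEMALLOC_N(arena_salloc)
/*
 * Every reference now compiles as __jemalloc_arena_salloc, so libc's
 * internal jemalloc symbols cannot collide with identically named
 * symbols in a statically linked application.
 */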

View File

@@ -676,8 +676,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
if (config_debug)
ndirty -= npages;
madvise((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
(npages << LG_PAGE), JEMALLOC_MADV_PURGE);
pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
(npages << LG_PAGE));
if (config_stats)
nmadvise++;
}
@@ -1213,7 +1213,9 @@ void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
if (prof_interval != 0) {
cassert(config_prof);
if (config_prof && prof_interval != 0) {
arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
prof_idump();
@@ -1463,53 +1465,13 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
return (ret);
}
/* Return the size of the allocation pointed to by ptr. */
size_t
arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = chunk->map[pageind-map_bias].bits;
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapbits & CHUNK_MAP_LARGE) == 0) {
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
ret = bin_info->reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
if (demote && prof_promote && ret == PAGE && (mapbits &
CHUNK_MAP_CLASS_MASK) != 0) {
size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
CHUNK_MAP_CLASS_SHIFT) - 1;
assert(binind < NBINS);
ret = arena_bin_info[binind].reg_size;
}
assert(ret != 0);
}
return (ret);
}
void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind, binind;
assert(config_prof);
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
assert(isalloc(ptr, false) == PAGE);

View File

@@ -171,7 +171,7 @@ chunk_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
madvise(chunk, size, JEMALLOC_MADV_PURGE);
pages_purge(chunk, size);
xnode = NULL;
malloc_mutex_lock(&chunks_mtx);

View File

@@ -72,6 +72,20 @@ pages_unmap(void *addr, size_t size)
}
}
void
pages_purge(void *addr, size_t length)
{
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
#else
# error "No method defined for purging unused dirty pages."
#endif
madvise(addr, length, JEMALLOC_MADV_PURGE);
}
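
pages_purge() now centralizes the MADV_DONTNEED/MADV_FREE selection that the jemalloc_defs.h hunk above removes. A standalone sketch of the same technique, independent of jemalloc's configure-time macros; purge_pages is a hypothetical name:

#include <stddef.h>
#include <sys/mman.h>

/*
 * Advise the kernel that [addr, addr+length) is disposable without
 * unmapping it. MADV_FREE (BSD) lets pages be reclaimed lazily and is
 * preferred; MADV_DONTNEED is the fallback elsewhere.
 */
static void
purge_pages(void *addr, size_t length)
{

#ifdef MADV_FREE
	madvise(addr, length, MADV_FREE);
#else
	madvise(addr, length, MADV_DONTNEED);
#endif
}
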
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
{

View File

@@ -80,6 +80,7 @@ CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(arenas_bin_i_size)
@@ -210,6 +211,7 @@ static const ctl_node_t opt_node[] = {
{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)}
};
@@ -1122,6 +1124,7 @@ CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)

View File

@@ -427,7 +427,7 @@ malloc_conf_init(void)
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
vlen == sizeof("true")-1) \
@@ -450,7 +450,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
uintmax_t um; \
char *end; \
@@ -471,7 +471,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
long l; \
char *end; \
@@ -493,7 +493,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
@@ -503,7 +503,7 @@ malloc_conf_init(void)
continue; \
}
CONF_HANDLE_BOOL(opt_abort, abort)
CONF_HANDLE_BOOL(opt_abort, "abort")
/*
* Chunks always require at least one header page, plus
* one data page in the absence of redzones, or three
@@ -511,26 +511,27 @@
* simplify options processing, fix the limit based on
* config_fill.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE +
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_stats_print, stats_print)
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, junk)
CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
CONF_HANDLE_BOOL(opt_junk, "junk")
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX)
CONF_HANDLE_BOOL(opt_redzone, redzone)
CONF_HANDLE_BOOL(opt_zero, zero)
CONF_HANDLE_BOOL(opt_redzone, "redzone")
CONF_HANDLE_BOOL(opt_zero, "zero")
}
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, utrace)
CONF_HANDLE_BOOL(opt_utrace, "utrace")
}
if (config_valgrind) {
bool hit;
CONF_HANDLE_BOOL_HIT(opt_valgrind,
valgrind, hit)
"valgrind", hit)
if (config_fill && opt_valgrind && hit) {
opt_junk = false;
opt_zero = false;
@@ -544,28 +545,29 @@ malloc_conf_init(void)
continue;
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
if (config_tcache) {
CONF_HANDLE_BOOL(opt_tcache, tcache)
CONF_HANDLE_BOOL(opt_tcache, "tcache")
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
lg_tcache_max, -1,
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
}
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, prof)
CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
"jeprof")
CONF_HANDLE_BOOL(opt_prof_active, prof_active)
CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
lg_prof_sample, 0,
"lg_prof_sample", 0,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
lg_prof_interval, -1,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
@@ -1624,6 +1626,7 @@ je_nallocm(size_t *rsize, size_t size, int flags)
void
jemalloc_prefork(void)
#else
JEMALLOC_ATTR(visibility("default"))
void
_malloc_prefork(void)
#endif
@@ -1645,6 +1648,7 @@ _malloc_prefork(void)
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_ATTR(visibility("default"))
void
_malloc_postfork(void)
#endif
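
The CONF_HANDLE_* hunks above stop stringifying the option-name token with #n and take the key as a string literal instead, presumably keeping the keys out of reach of the private-namespace #defines now applied to names such as narenas and quarantine. Condensed before/after of the pattern:

/* Before: key derived from a bare token via #n. */
CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)

/* After: key passed as a string literal, inert under macro expansion. */
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, SIZE_T_MAX)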

View File

@@ -21,8 +21,9 @@ bool opt_prof_active = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_final = true;
bool opt_prof_leak = false;
bool opt_prof_accum = true;
bool opt_prof_accum = false;
char opt_prof_prefix[PATH_MAX + 1];
uint64_t prof_interval;
@@ -944,7 +945,7 @@ prof_fdump(void)
if (prof_booted == false)
return;
if (opt_prof_prefix[0] != '\0') {
if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
malloc_mutex_unlock(&prof_dump_seq_mtx);

View File

@@ -101,7 +101,7 @@ quarantine(void *ptr)
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
assert(config_fill);
cassert(config_fill);
assert(opt_quarantine);
quarantine = *quarantine_tsd_get();
@@ -154,7 +154,7 @@ bool
quarantine_boot(void)
{
assert(config_fill);
cassert(config_fill);
if (quarantine_tsd_boot())
return (true);

View File

@@ -397,6 +397,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL

View File

@@ -18,6 +18,12 @@ size_t tcache_maxclass;
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
{
return (arena_salloc(ptr, false));
}
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{

View File

@@ -32,6 +32,7 @@ malloc_tsd_no_cleanup(void *arg)
}
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
JEMALLOC_ATTR(visibility("default"))
void
_malloc_thread_cleanup(void)
{
@@ -45,7 +46,7 @@ _malloc_thread_cleanup(void)
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i].f(cleanups[i].arg);
pending[i] = cleanups[i]();
if (pending[i])
again = true;
}
@@ -55,12 +56,11 @@ _malloc_thread_cleanup(void)
#endif
void
malloc_tsd_cleanup_register(bool (*f)(void *), void *arg)
malloc_tsd_cleanup_register(bool (*f)(void))
{
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups].f = f;
cleanups[ncleanups].arg = arg;
cleanups[ncleanups] = f;
ncleanups++;
}

View File

@@ -336,12 +336,21 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '?': \
val = va_arg(ap, int); \
break; \
case '?' | 0x80: \
val = va_arg(ap, unsigned int); \
break; \
case 'l': \
val = va_arg(ap, long); \
break; \
case 'l' | 0x80: \
val = va_arg(ap, unsigned long); \
break; \
case 'q': \
val = va_arg(ap, long long); \
break; \
case 'q' | 0x80: \
val = va_arg(ap, unsigned long long); \
break; \
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
@@ -351,6 +360,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case 'z': \
val = va_arg(ap, ssize_t); \
break; \
case 'z' | 0x80: \
val = va_arg(ap, size_t); \
break; \
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
@@ -374,7 +386,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
bool plus_plus = false;
int prec = -1;
int width = -1;
char len = '?';
unsigned char len = '?';
f++;
if (*f == '%') {
@@ -496,7 +508,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[O2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
GET_ARG_NUMERIC(val, len | 0x80);
s = o2s(val, alt_form, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
@@ -505,7 +517,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[U2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
GET_ARG_NUMERIC(val, len | 0x80);
s = u2s(val, 10, false, buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
@@ -514,7 +526,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[X2S_BUFSIZE];
GET_ARG_NUMERIC(val, len);
GET_ARG_NUMERIC(val, len | 0x80);
s = x2s(val, alt_form, *f == 'X', buf, &slen);
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
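
The malloc_vsnprintf() hunks above widen len to unsigned char and OR 0x80 into it for the unsigned conversions (%o, %u, %x), so a single switch covers both signednesses. A self-contained sketch of the same trick; arg_numeric is a hypothetical name:

#include <stdarg.h>
#include <stdint.h>
#include <sys/types.h>

/*
 * Low 7 bits: length modifier ('?' for none, 'l', 'q', 'z', ...).
 * High bit: fetch the unsigned variant of the argument.
 */
static uintmax_t
arg_numeric(va_list ap, unsigned char len)
{

	switch (len) {
	case '?':		return ((uintmax_t)va_arg(ap, int));
	case '?' | 0x80:	return (va_arg(ap, unsigned int));
	case 'l':		return ((uintmax_t)va_arg(ap, long));
	case 'l' | 0x80:	return (va_arg(ap, unsigned long));
	case 'z':		return ((uintmax_t)va_arg(ap, ssize_t));
	case 'z' | 0x80:	return (va_arg(ap, size_t));
	default:		return (0);
	}
}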