Update jemalloc to 4.2.0.

Jason Evans 2016-05-13 04:03:20 +00:00
parent b1f46f712f
commit 1f0a49e863
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=299587
53 changed files with 4066 additions and 2352 deletions


@ -4,6 +4,50 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.2.0 (May 12, 2016)
New features:
- Add the arena.<i>.reset mallctl, which makes it possible to discard all of
an arena's allocations in a single operation. (@jasone)
- Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
- Add the --with-version configure option. (@jasone)
- Support --with-lg-page values larger than actual page size. (@jasone)
Optimizations:
- Use pairing heaps rather than red-black trees for various hot data
structures. (@djwatson, @jasone)
- Streamline fast paths of rtree operations. (@jasone)
- Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
- Decommit unused virtual memory if the OS does not overcommit. (@jasone)
- Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
to avoid unfortunate interactions during fork(2). (@jasone)
Bug fixes:
- Fix chunk accounting related to triggering gdump profiles. (@jasone)
- Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
- Scale leak report summary according to sampling probability. (@jasone)
* 4.1.1 (May 3, 2016)
This bugfix release resolves a variety of mostly minor issues, though the
bitmap fix is critical for 64-bit Windows.
Bug fixes:
- Fix the linear scan version of bitmap_sfu() to shift by the proper amount
even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
Windows. (@jasone)
- Fix hashing functions to avoid unaligned memory accesses (and resulting
crashes). This is relevant at least to some ARM-based platforms.
(@rkmisra)
- Fix fork()-related lock rank ordering reversals. These reversals were
unlikely to cause deadlocks in practice except when heap profiling was
enabled and active. (@jasone)
- Fix various chunk leaks in OOM code paths. (@jasone)
- Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
- Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
- Fix a variety of test failures that were due to test fragility rather than
core bugs. (@jasone)
* 4.1.0 (February 28, 2016)
This release is primarily about optimizations, but it also incorporates a lot
@ -59,14 +103,14 @@ brevity. Much more detail can be found in the git revision history:
Bug fixes:
- Fix stats.cactive accounting regression. (@rustyx, @jasone)
- Handle unaligned keys in hash(). This caused problems for some ARM systems.
(@jasone, Christopher Ferris)
(@jasone, @cferris1000)
- Refactor arenas array. In addition to fixing a fork-related deadlock, this
makes arena lookups faster and simpler. (@jasone)
- Move retained memory allocation out of the default chunk allocation
function, to a location that gets executed even if the application installs
a custom chunk allocation function. This resolves a virtual memory leak.
(@buchgr)
- Fix a potential tsd cleanup leak. (Christopher Ferris, @jasone)
- Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
- Fix run quantization. In practice this bug had no impact unless
applications requested memory with alignment exceeding one page.
(@jasone, @djwatson)


@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index bc5dbd1..ba182da 100644
index c4a44e3..4626e9b 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -53,11 +53,23 @@
@ -27,7 +27,7 @@ index bc5dbd1..ba182da 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
@@ -2905,4 +2917,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
@@ -2961,4 +2973,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@ -47,7 +47,7 @@ index bc5dbd1..ba182da 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 3f54391..d240256 100644
index 51bf897..7de22ea 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -8,6 +8,9 @@
@ -90,10 +90,10 @@ index 2b8ca5d..42d97f2 100644
#ifdef _WIN32
# include <windows.h>
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index f051f29..561378f 100644
index 5221799..60ab041 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -47,15 +47,13 @@ struct malloc_mutex_s {
@@ -52,9 +52,6 @@ struct malloc_mutex_s {
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
@ -102,19 +102,20 @@ index f051f29..561378f 100644
-# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
@@ -62,6 +59,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_first_thread(void);
bool mutex_boot(void);
bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 5880996..6e94e03 100644
index f2b6a55..69369c9 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -296,7 +296,6 @@ iralloct_realign
@@ -311,7 +311,6 @@ iralloct_realign
isalloc
isdalloct
isqalloc
@ -124,10 +125,10 @@ index 5880996..6e94e03 100644
jemalloc_postfork_child
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
index 0000000..433dab5
index 0000000..c58a8f3
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
@@ -0,0 +1,160 @@
@@ -0,0 +1,162 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */
@ -138,6 +139,8 @@ index 0000000..433dab5
+#define JEMALLOC_DEBUG
+#endif
+
+#undef JEMALLOC_DSS
+
+/*
+ * The following are architecture-dependent, so conditionally define them for
+ * each supported architecture.
@ -300,7 +303,7 @@ index f943891..47d032c 100755
+#include "jemalloc_FreeBSD.h"
EOF
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 0735376..a34b85c 100644
index 40eb2ea..666c49d 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -4,6 +4,10 @@
@ -314,7 +317,7 @@ index 0735376..a34b85c 100644
/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
@@ -2611,6 +2615,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
@@ -2673,6 +2677,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
*/
/******************************************************************************/
/*
@ -341,7 +344,7 @@ index 0735376..a34b85c 100644
+ if (p == NULL)
+ return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
+ *rsize = isalloc(p, config_prof);
+ *rsize = isalloc(tsdn_fetch(), p, config_prof);
+ *ptr = p;
+ return (ALLOCM_SUCCESS);
+}
@ -370,7 +373,7 @@ index 0735376..a34b85c 100644
+ } else
+ ret = ALLOCM_ERR_OOM;
+ if (rsize != NULL)
+ *rsize = isalloc(*ptr, config_prof);
+ *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
+ }
+ return (ret);
+}
@ -422,8 +425,8 @@ index 0735376..a34b85c 100644
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
@@ -2717,4 +2822,11 @@ jemalloc_postfork_child(void)
ctl_postfork_child();
@@ -2814,4 +2919,11 @@ jemalloc_postfork_child(void)
ctl_postfork_child(tsd_tsdn(tsd));
}
+void
@ -435,7 +438,7 @@ index 0735376..a34b85c 100644
+
/******************************************************************************/
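The hunk above restores FreeBSD's compatibility wrappers for the retired experimental allocm() interface. As a reminder of how a legacy consumer called that interface, here is a minimal sketch; the prototypes and the ALLOCM_SUCCESS value are assumed from the historical experimental API and are declared locally because current headers may no longer expose them.

/* Sketch only: prototypes assumed from the historical experimental API. */
#include <stdio.h>
#include <stdlib.h>

int allocm(void **ptr, size_t *rsize, size_t size, int flags);
int rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags);
#define	ALLOCM_SUCCESS	0	/* assumed historical value */

int
main(void)
{
	void *p;
	size_t rsize;

	/* Allocate 4 KiB; rsize receives the real (usable) size. */
	if (allocm(&p, &rsize, 4096, 0) != ALLOCM_SUCCESS)
		return (1);
	/* Resize to 8 KiB, allowing the allocation to move. */
	if (rallocm(&p, &rsize, 8192, 0, 0) != ALLOCM_SUCCESS) {
		free(p);
		return (1);
	}
	printf("usable size: %zu\n", rsize);
	free(p);	/* allocm() memory comes from the same heap as malloc() */
	return (0);
}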
diff --git a/src/mutex.c b/src/mutex.c
index 2d47af9..934d5aa 100644
index a1fac34..a24e420 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
@ -456,22 +459,22 @@ index 2d47af9..934d5aa 100644
#endif
bool
@@ -137,7 +148,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
@@ -140,7 +151,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
bool
-mutex_boot(void)
-malloc_mutex_boot(void)
+malloc_mutex_first_thread(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
@@ -151,3 +162,14 @@ mutex_boot(void)
@@ -154,3 +165,14 @@ malloc_mutex_boot(void)
#endif
return (false);
}
+
+bool
+mutex_boot(void)
+malloc_mutex_boot(void)
+{
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
@ -481,10 +484,10 @@ index 2d47af9..934d5aa 100644
+#endif
+}
diff --git a/src/util.c b/src/util.c
index 02673c7..116e981 100644
index a1c4a2a..04f9153 100644
--- a/src/util.c
+++ b/src/util.c
@@ -66,6 +66,22 @@ wrtmessage(void *cbopaque, const char *s)
@@ -67,6 +67,22 @@ wrtmessage(void *cbopaque, const char *s)
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);


@ -1 +1 @@
4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473
4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc


@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
.\" Date: 02/28/2016
.\" Date: 05/12/2016
.\" Manual: User Manual
.\" Source: jemalloc 4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473
.\" Source: jemalloc 4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc
.\" Language: English
.\"
.TH "JEMALLOC" "3" "02/28/2016" "jemalloc 4.1.0-1-g994da4232621" "User Manual"
.TH "JEMALLOC" "3" "05/12/2016" "jemalloc 4.2.0-1-gdc7ff6306d7a" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
This manual describes jemalloc 4\&.1\&.0\-1\-g994da4232621dd1210fcf39bdf0d6454cefda473\&. More information can be found at the
This manual describes jemalloc 4\&.2\&.0\-1\-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@ -461,7 +461,8 @@ Memory is conceptually broken into equal\-sized chunks, where the chunk size is
Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the
"opt\&.lg_chunk"
option), and huge size classes extend from the chunk size up to one size class less than the full address space size\&.
option), and huge size classes extend from the chunk size up to the largest size class that does not exceed
\fBPTRDIFF_MAX\fR\&.
.PP
Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
.PP
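To make the spacing described above concrete, the sketch below rounds a request up to a grid with four size classes per doubling, which is what bounds internal fragmentation near 20%. It is an illustrative model only, not jemalloc's actual size2index()/s2u() code; it ignores the tiny and quantum-spaced classes described above, round_to_class is a hypothetical name, and __builtin_clzll assumes GCC or Clang.

#include <stddef.h>

/*
 * Round size up to a grid with four classes per doubling: within
 * (2^lg, 2^(lg+1)] the classes are 2^lg + k * 2^(lg-2), k = 1..4.
 */
static size_t
round_to_class(size_t size)
{
	unsigned lg;
	size_t delta;

	if (size <= 16)
		return (16);
	lg = 63 - (unsigned)__builtin_clzll((unsigned long long)(size - 1));
	delta = (size_t)1 << (lg - 2);	/* one quarter of the doubling */
	return ((size + delta - 1) & ~(delta - 1));
}
/* Examples: 17..20 -> 20, 33..40 -> 40, 4097..5120 -> 5120 (waste <= ~20%). */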
@ -518,6 +519,8 @@ l r l
^ r l
^ r l
^ r l
^ r l
^ r l
^ r l.
T{
Small
@ -645,6 +648,16 @@ T}
T}:T{
\&.\&.\&.
T}
:T{
512 PiB
T}:T{
[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
T}
:T{
1 EiB
T}:T{
[5 EiB, 6 EiB, 7 EiB]
T}
.TE
.sp 1
.SH "MALLCTL NAMESPACE"
@ -841,7 +854,7 @@ function\&. If
is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore,
\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
\fBatexit\fR\fB\fR, so this option is not univerally usable (though the application can register its own
\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
\fBatexit\fR\fB\fR
function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
.RE
@ -1007,7 +1020,7 @@ is controlled by the
option\&. Note that
\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
\fBatexit\fR\fB\fR, so this option is not univerally usable (though the application can register its own
\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
\fBatexit\fR\fB\fR
function with equivalent functionality)\&. This option is disabled by default\&.
.RE
@ -1113,6 +1126,14 @@ Trigger decay\-based purging of unused dirty pages for arena <i>, or for all are
for details\&.
.RE
.PP
"arena\&.<i>\&.reset" (\fBvoid\fR) \-\-
.RS 4
Discard all of the arena\*(Aqs extant allocations\&. This interface can only be used with arenas created via
"arenas\&.extend"\&. None of the arena\*(Aqs discarded/cached allocations may accessed afterward\&. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand\&. This interface cannot be used if running inside Valgrind, nor if the
quarantine
size is non\-zero\&.
.RE
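A minimal sketch of exercising this new mallctl from C. It assumes the non-standard jemalloc interface declared in <malloc_np.h> on FreeBSD (mallctl(), mallocx()); error handling is elided, and MALLOCX_TCACHE_NONE is used so that no thread cache needs to be flushed before the reset.

#include <stdio.h>
#include <malloc_np.h>

/* Create a fresh arena, allocate from it, then discard everything at once. */
static void
reset_demo(void)
{
	unsigned arena;
	size_t sz = sizeof(arena);
	char name[64];
	void *p;

	mallctl("arenas.extend", &arena, &sz, NULL, 0);	/* new arena index */
	p = mallocx(1024, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
	(void)p;	/* ... use the allocation ... */

	/* Every allocation from this arena becomes invalid after the reset. */
	snprintf(name, sizeof(name), "arena.%u.reset", arena);
	mallctl(name, NULL, NULL, NULL, 0);
}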
.PP
"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
@ -1503,7 +1524,7 @@ Get the current sample rate (see
.PP
"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average number of bytes allocated between inverval\-based profile dumps\&. See the
Average number of bytes allocated between interval\-based profile dumps\&. See the
"opt\&.lg_prof_interval"
option for additional information\&.
.RE
@ -1547,6 +1568,15 @@ Total number of bytes in active chunks mapped by the allocator\&. This is a mult
"stats\&.resident"\&.
.RE
.PP
"stats\&.retained" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e\&.g\&.
\fBmunmap\fR(2)\&. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see
chunk hooks
for details)\&. Retained memory is excluded from mapped memory statistics, e\&.g\&.
"stats\&.mapped"\&.
.RE
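A sketch of reading this statistic, assuming the <malloc_np.h> interface as above and a build with statistics enabled (the default). The "epoch" mallctl must be bumped first so that cached statistics are refreshed.

#include <stdio.h>
#include <stdint.h>
#include <malloc_np.h>

static void
print_retained(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t mapped, retained;

	/* Refresh the cached statistics before reading them. */
	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	sz = sizeof(size_t);
	mallctl("stats.mapped", &mapped, &sz, NULL, 0);
	mallctl("stats.retained", &retained, &sz, NULL, 0);

	/* Retained bytes are excluded from stats.mapped, per the text above. */
	printf("mapped: %zu, retained: %zu\n", mapped, retained);
}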
.PP
"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
@ -1592,6 +1622,13 @@ or similar has not been called\&.
Number of mapped bytes\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.retained" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of retained bytes\&. See
"stats\&.retained"
for details\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.metadata\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&.


@ -36,6 +36,7 @@ typedef enum {
#define DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
@ -153,13 +154,13 @@ struct arena_runs_dirty_link_s {
*/
struct arena_chunk_map_misc_s {
/*
* Linkage for run trees. There are two disjoint uses:
* Linkage for run heaps. There are two disjoint uses:
*
* 1) arena_t's runs_avail tree.
* 1) arena_t's runs_avail heaps.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
rb_node(arena_chunk_map_misc_t) rb_link;
phn(arena_chunk_map_misc_t) ph_link;
union {
/* Linkage for list of dirty runs. */
@ -175,7 +176,7 @@ struct arena_chunk_map_misc_s {
arena_run_t run;
};
};
typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */
#ifdef JEMALLOC_ARENA_STRUCTS_B
@ -272,13 +273,13 @@ struct arena_bin_s {
arena_run_t *runcur;
/*
* Tree of non-full runs. This tree is used when looking for an
* Heap of non-full runs. This heap is used when looking for an
* existing run when runcur is no longer usable. We choose the
* non-full run that is lowest in memory; this policy tends to keep
* objects packed well, and it can also help reduce the number of
* almost-empty chunks.
*/
arena_run_tree_t runs;
arena_run_heap_t runs;
/* Bin statistics. */
malloc_bin_stats_t stats;
@ -289,10 +290,18 @@ struct arena_s {
unsigned ind;
/*
* Number of threads currently assigned to this arena. This field is
* synchronized via atomic operations.
* Number of threads currently assigned to this arena, synchronized via
* atomic operations. Each thread has two distinct assignments, one for
* application-serving allocation, and the other for internal metadata
* allocation. Internal metadata must not be allocated from arenas
* created via the arenas.extend mallctl, because the arena.<i>.reset
* mallctl indiscriminately discards all allocations for the affected
* arena.
*
* 0: Application allocation.
* 1: Internal metadata allocation.
*/
unsigned nthreads;
unsigned nthreads[2];
/*
* There are three classes of arena operations from a locking
@ -321,6 +330,10 @@ struct arena_s {
dss_prec_t dss_prec;
/* Extant arena chunks. */
ql_head(extent_node_t) achunks;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@ -457,10 +470,10 @@ struct arena_s {
arena_bin_t bins[NBINS];
/*
* Quantized address-ordered trees of this arena's available runs. The
* trees are used for first-best-fit run allocation.
* Quantized address-ordered heaps of this arena's available runs. The
* heaps are used for first-best-fit run allocation.
*/
arena_run_tree_t runs_avail[1]; /* Dynamically sized. */
arena_run_heap_t runs_avail[1]; /* Dynamically sized. */
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
@ -505,25 +518,28 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
bool cache);
extent_node_t *arena_node_alloc(arena_t *arena);
void arena_node_dalloc(arena_t *arena, extent_node_t *node);
void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero);
ssize_t arena_lg_dirty_mult_get(arena_t *arena);
bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
ssize_t arena_decay_time_get(arena_t *arena);
bool arena_decay_time_set(arena_t *arena, ssize_t decay_time);
void arena_maybe_purge(arena_t *arena);
void arena_purge(arena_t *arena, bool all);
void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes);
extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
size_t usize);
void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
void *chunk, size_t oldsize, size_t usize, bool *zero);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
ssize_t lg_dirty_mult);
ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@ -536,17 +552,18 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache);
void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
bool zero);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
@ -554,67 +571,80 @@ extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, void *ptr);
void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t size, size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
void arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
ssize_t *decay_time, size_t *nactive, size_t *ndirty);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty);
void arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
unsigned arena_nthreads_get(arena_t *arena);
void arena_nthreads_inc(arena_t *arena);
void arena_nthreads_dec(arena_t *arena);
arena_t *arena_new(unsigned ind);
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
bool arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
const arena_chunk_map_bits_t *arena_bitselm_get_const(
const arena_chunk_t *chunk, size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
size_t pageind);
const arena_chunk_map_misc_t *arena_miscelm_get_const(
const arena_chunk_t *chunk, size_t pageind);
size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(const size_t *mapbitsp);
size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
size_t pageind);
szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@ -634,29 +664,31 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void arena_prof_tctx_reset(const void *ptr, size_t usize,
prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsd_t *tsd, arena_t *arena);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(const void *ptr);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@ -665,8 +697,15 @@ arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map_bits[pageind-map_bias]);
}
JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
{
return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
}
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@ -676,6 +715,13 @@ arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
{
return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
@ -690,7 +736,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
}
JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@ -723,24 +769,31 @@ arena_run_to_miscelm(arena_run_t *run)
}
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
return (&arena_bitselm_get(chunk, pageind)->bits);
return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE const size_t *
arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
{
return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
arena_mapbitsp_read(const size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{
return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@ -760,7 +813,7 @@ arena_mapbits_size_decode(size_t mapbits)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -770,7 +823,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -781,7 +834,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -792,7 +845,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
szind_t binind;
@ -804,7 +857,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -815,7 +868,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -826,7 +879,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -837,7 +890,7 @@ arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -846,7 +899,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@ -882,7 +935,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@ -896,7 +949,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
@ -908,7 +961,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, flags);
@ -918,7 +971,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@ -933,7 +986,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
@ -947,7 +1000,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
@ -1004,7 +1057,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
}
JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
@ -1015,9 +1068,9 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
bool ret;
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
}
}
@ -1035,12 +1088,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t pageind;
size_t actual_mapbits;
size_t rpages_ind;
arena_run_t *run;
const arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
const arena_chunk_map_misc_t *miscelm;
const void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
@ -1053,7 +1106,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
pageind);
miscelm = arena_miscelm_get(chunk, rpages_ind);
miscelm = arena_miscelm_get_const(chunk, rpages_ind);
run = &miscelm->run;
run_binind = run->binind;
bin = &arena->bins[run_binind];
@ -1153,7 +1206,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
}
JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(const void *ptr)
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
@ -1169,18 +1222,19 @@ arena_prof_tctx_get(const void *ptr)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
pageind);
arena_chunk_map_misc_t *elm =
arena_miscelm_get_mutable(chunk, pageind);
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
ret = huge_prof_tctx_get(ptr);
ret = huge_prof_tctx_get(tsdn, ptr);
return (ret);
}
JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@ -1199,7 +1253,7 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
assert(arena_mapbits_large_get(chunk, pageind) != 0);
elm = arena_miscelm_get(chunk, pageind);
elm = arena_miscelm_get_mutable(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
@ -1211,12 +1265,12 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
huge_prof_tctx_set(ptr, tctx);
huge_prof_tctx_set(tsdn, ptr, tctx);
}
JEMALLOC_INLINE void
arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx)
{
cassert(config_prof);
@ -1235,56 +1289,59 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
0);
assert(arena_mapbits_large_get(chunk, pageind) != 0);
elm = arena_miscelm_get(chunk, pageind);
elm = arena_miscelm_get_mutable(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
huge_prof_tctx_reset(ptr);
huge_prof_tctx_reset(tsdn, ptr);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
tsd_t *tsd;
ticker_t *decay_ticker;
if (unlikely(tsd == NULL))
if (unlikely(tsdn_null(tsdn)))
return;
tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena->ind);
if (unlikely(decay_ticker == NULL))
return;
if (unlikely(ticker_ticks(decay_ticker, nticks)))
arena_purge(arena, false);
arena_purge(tsdn, arena, false);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsd_t *tsd, arena_t *arena)
arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{
arena_decay_ticks(tsd, arena, 1);
arena_decay_ticks(tsdn, arena, 1);
}
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path)
{
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
return (tcache_alloc_small(tsd, arena, tcache, size,
ind, zero, slow_path));
return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path));
}
if (likely(size <= tcache_maxclass)) {
return (tcache_alloc_large(tsd, arena, tcache, size,
ind, zero, slow_path));
return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path));
}
/* (size > tcache_maxclass) case falls through. */
assert(size > tcache_maxclass);
}
return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}
JEMALLOC_ALWAYS_INLINE arena_t *
@ -1301,7 +1358,7 @@ arena_aalloc(const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@ -1344,17 +1401,18 @@ arena_salloc(const void *ptr, bool demote)
ret = index2size(binind);
}
} else
ret = huge_salloc(ptr);
ret = huge_salloc(tsdn, ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@ -1367,11 +1425,12 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind,
slow_path);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
binind, slow_path);
} else {
arena_dalloc_small(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr, pageind);
arena_dalloc_small(tsdn,
extent_node_arena_get(&chunk->node), chunk,
ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk,
@ -1382,22 +1441,26 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
tcache_dalloc_large(tsd, tcache, ptr, size -
large_pad, slow_path);
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size - large_pad, slow_path);
} else {
arena_dalloc_large(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr);
arena_dalloc_large(tsdn,
extent_node_arena_get(&chunk->node), chunk,
ptr);
}
}
} else
huge_dalloc(tsd, ptr, tcache);
huge_dalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path)
{
arena_chunk_t *chunk;
assert(!tsdn_null(tsdn) || tcache == NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
@ -1414,34 +1477,36 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
pageind) - large_pad;
}
}
assert(s2u(size) == s2u(arena_salloc(ptr, false)));
assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind,
true);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
binind, slow_path);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr, pageind);
arena_dalloc_small(tsdn,
extent_node_arena_get(&chunk->node), chunk,
ptr, pageind);
}
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass) {
tcache_dalloc_large(tsd, tcache, ptr, size,
true);
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size, slow_path);
} else {
arena_dalloc_large(tsd, extent_node_arena_get(
&chunk->node), chunk, ptr);
arena_dalloc_large(tsdn,
extent_node_arena_get(&chunk->node), chunk,
ptr);
}
}
} else
huge_dalloc(tsd, ptr, tcache);
huge_dalloc(tsdn, ptr);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif


@ -9,12 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(size_t size);
void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
void *base_alloc(tsdn_t *tsdn, size_t size);
void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped);
bool base_boot(void);
void base_prefork(void);
void base_postfork_parent(void);
void base_postfork_child(void);
void base_prefork(tsdn_t *tsdn);
void base_postfork_parent(tsdn_t *tsdn);
void base_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/


@ -17,8 +17,8 @@ typedef unsigned long bitmap_t;
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
* force linear search, if we would have to call ffsl more than 2^3 times, use a
* tree instead.
* force linear search, if we would have to call ffs_lu() more than 2^3 times,
* use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
@ -223,7 +223,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
i++;
g = bitmap[i];
}
bit = (bit - 1) + (i << 6);
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
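The replaced line above hardcoded a shift of 6, which silently assumed 64-bit bitmap groups; on 64-bit Windows sizeof(long) is 4, so the shift must track the group width (this is the bitmap_sfu() fix noted in the ChangeLog). A standalone sketch of the corrected arithmetic, with hypothetical names:

typedef unsigned long group_t;			/* 32 bits on LLP64 Windows */
#define	LG_GROUP_NBITS	(sizeof(group_t) == 8 ? 6 : 5)

/*
 * Absolute bit index from a group index and a 1-based ffs() result within
 * that group.  Hardcoding "i << 6" works only when groups are 64 bits wide.
 */
static inline unsigned
group_bit_to_abs(unsigned i, unsigned ffs_result)
{

	return ((i << LG_GROUP_NBITS) + (ffs_result - 1));
}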


@ -48,32 +48,32 @@ extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
chunk_hooks_t chunk_hooks_get(arena_t *arena);
chunk_hooks_t chunk_hooks_set(arena_t *arena,
chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(const void *chunk, const extent_node_t *node);
bool chunk_register(tsdn_t *tsdn, const void *chunk,
const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero,
bool dalloc_node);
void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed);
void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed);
void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed);
bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool dalloc_node);
void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
bool committed);
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
void chunk_postfork_child(void);
void chunk_prefork(tsdn_t *tsdn);
void chunk_postfork_parent(tsdn_t *tsdn);
void chunk_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/


@ -21,15 +21,15 @@ extern const char *dss_prec_names[];
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
dss_prec_t chunk_dss_prec_get(tsdn_t *tsdn);
bool chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(tsdn_t *tsdn, void *chunk);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);
void chunk_dss_postfork_parent(void);
void chunk_dss_postfork_child(void);
void chunk_dss_prefork(tsdn_t *tsdn);
void chunk_dss_postfork_parent(tsdn_t *tsdn);
void chunk_dss_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/


@ -64,13 +64,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
bool ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
void ckh_delete(tsd_t *tsd, ckh_t *ckh);
void ckh_delete(tsdn_t *tsdn, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
bool ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);


@ -21,13 +21,14 @@ struct ctl_named_node_s {
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(const size_t *, size_t, void *, size_t *,
void *, size_t);
int (*ctl)(tsd_t *, const size_t *, size_t, void *,
size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
};
struct ctl_arena_stats_s {
@ -60,6 +61,7 @@ struct ctl_stats_s {
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@ -68,16 +70,17 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(void);
void ctl_postfork_parent(void);
void ctl_postfork_child(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \


@ -48,7 +48,7 @@ struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
/* Linkage for arena's huge and node_cache lists. */
/* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};


@ -53,7 +53,7 @@ hash_get_block_32(const uint32_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, &p[i], sizeof(uint32_t));
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
@ -68,7 +68,7 @@ hash_get_block_64(const uint64_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, &p[i], sizeof(uint64_t));
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
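The memcpy() form above is the portable idiom for reading through a possibly misaligned pointer; the crashes mentioned in the ChangeLog came from dereferencing such pointers directly on strict-alignment ARM targets. A generic sketch of the same idiom:

#include <stdint.h>
#include <string.h>

/*
 * Safe unaligned 32-bit load: compilers turn this memcpy() into a single
 * load on targets that permit unaligned access, and into byte loads on
 * strict-alignment targets, avoiding the faulting direct dereference.
 */
static inline uint32_t
load_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return (v);
}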


@ -9,24 +9,23 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
tcache_t *tcache);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache);
bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero);
bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
prof_tctx_t *huge_prof_tctx_get(const void *ptr);
void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void huge_prof_tctx_reset(const void *ptr);
size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/

View File

@ -158,6 +158,7 @@ static const bool config_cache_oblivious =
#include <malloc/malloc.h>
#endif
#include "jemalloc/internal/ph.h"
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
@ -367,6 +368,7 @@ typedef unsigned szind_t;
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
@ -398,6 +400,7 @@ typedef unsigned szind_t;
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@ -440,6 +443,9 @@ extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned narenas_auto;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@ -463,14 +469,14 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
unsigned narenas_total_get(void);
arena_t *arena_init(unsigned ind);
arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
@ -490,6 +496,7 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@ -521,8 +528,9 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@ -542,10 +550,12 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_t *arena_ichoose(tsdn_t *tsdn, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
arena_t *arena_get(unsigned ind, bool init_if_missing);
arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
@ -741,7 +751,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
if (usize + large_pad + alignment - PAGE <= arena_maxrun)
if (usize + large_pad + alignment <= arena_maxrun)
return (usize);
}
@ -771,7 +781,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the multi-chunk mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + alignment - PAGE < usize) {
if (usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
@ -780,19 +790,38 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
arena_t *ret;
if (arena != NULL)
return (arena);
if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
ret = arena_choose_hard(tsd);
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL))
ret = arena_choose_hard(tsd, internal);
return (ret);
}
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
return (arena_choose_impl(tsd, arena, false));
}
JEMALLOC_INLINE arena_t *
arena_ichoose(tsdn_t *tsdn, arena_t *arena)
{
assert(!tsdn_null(tsdn) || arena != NULL);
if (!tsdn_null(tsdn))
return (arena_choose_impl(tsdn_tsd(tsdn), NULL, true));
return (arena);
}
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
@ -819,7 +848,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
}
JEMALLOC_INLINE arena_t *
arena_get(unsigned ind, bool init_if_missing)
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
arena_t *ret;
@ -829,7 +858,7 @@ arena_get(unsigned ind, bool init_if_missing)
if (unlikely(ret == NULL)) {
ret = atomic_read_p((void *)&arenas[ind]);
if (init_if_missing && unlikely(ret == NULL))
ret = arena_init(ind);
ret = arena_init(tsdn, ind);
}
return (ret);
}
@ -863,30 +892,27 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
arena_t *arena);
void *imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
void *icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
arena_t *arena);
void *icalloc(tsd_t *tsd, size_t size, szind_t ind);
void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
bool slow_path);
void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
size_t p2rz(tsdn_t *tsdn, const void *ptr);
void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path);
void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
bool slow_path);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
@ -894,7 +920,7 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
bool ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero);
#endif
@ -910,102 +936,85 @@ iaalloc(const void *ptr)
/*
* Typical usage:
* tsdn_t *tsdn = [...]
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
* size_t sz = isalloc(tsdn, ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
return (arena_salloc(ptr, demote));
return (arena_salloc(tsdn, ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena, bool slow_path)
{
void *ret;
assert(size != 0);
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
arena_metadata_allocated_add(iaalloc(ret),
isalloc(tsdn, ret, config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{
return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path));
}
JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
{
return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
NULL, slow_path));
}
JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{
return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
}
JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size, szind_t ind)
{
return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
NULL, true));
}
JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
false, NULL));
return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
extent_node_t *node;
@ -1017,7 +1026,7 @@ ivsalloc(const void *ptr, bool demote)
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
return (isalloc(ptr, demote));
return (isalloc(tsdn, ptr, demote));
}
JEMALLOC_INLINE size_t
@ -1035,39 +1044,34 @@ u2rz(size_t usize)
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
p2rz(tsdn_t *tsdn, const void *ptr)
{
size_t usize = isalloc(ptr, false);
size_t usize = isalloc(tsdn, ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path)
{
assert(ptr != NULL);
assert(!is_metadata || tcache == NULL);
assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
config_prof));
}
arena_dalloc(tsd, ptr, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
idalloctm(tsd, ptr, tcache, false, true);
arena_dalloc(tsdn, ptr, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}
JEMALLOC_ALWAYS_INLINE void
@ -1077,24 +1081,25 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
idalloctm(tsd, ptr, tcache, false, slow_path);
idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
bool slow_path)
{
arena_sdalloc(tsd, ptr, size, tcache);
arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
{
if (config_fill && unlikely(opt_quarantine))
if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
isdalloct(tsd, ptr, size, tcache);
isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
@ -1107,7 +1112,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
usize = sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
@ -1115,7 +1120,8 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
usize = sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
arena);
if (p == NULL)
return (NULL);
}
@ -1125,7 +1131,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
isqalloc(tsd, ptr, oldsize, tcache, true);
return (p);
}
@ -1161,7 +1167,7 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
@ -1174,7 +1180,7 @@ ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
return (true);
}
return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
#endif

View File

@ -215,6 +215,15 @@
/* #undef JEMALLOC_ZONE */
/* #undef JEMALLOC_ZONE_VERSION */
/*
* Methods for determining whether the OS overcommits.
* JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
* /proc/sys/vm.overcommit_memory file.
* JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
*/
#define JEMALLOC_SYSCTL_VM_OVERCOMMIT
/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
/*
* Methods for purging unused pages differ between operating systems.
*
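These defines select how jemalloc detects whether the operating system overcommits memory. As a rough, standalone sketch of the FreeBSD mechanism named by JEMALLOC_SYSCTL_VM_OVERCOMMIT (not part of this diff), the program below reads the vm.overcommit sysctl with sysctlbyname(3); how jemalloc interprets the returned bits is left to the library itself.

/*
 * Illustrative sketch, not part of this diff: query the vm.overcommit
 * sysctl referenced by JEMALLOC_SYSCTL_VM_OVERCOMMIT.  On FreeBSD a
 * value of 0 (the default) leaves overcommit unrestricted.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int vm_overcommit;
	size_t sz = sizeof(vm_overcommit);

	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
		perror("sysctlbyname(\"vm.overcommit\")");
		return (1);
	}
	printf("vm.overcommit = %d\n", vm_overcommit);
	return (0);
}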

View File

@ -42,7 +42,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
#else
# else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
@ -52,7 +52,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
#endif
# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
@ -104,9 +104,9 @@ mb_write(void)
{
malloc_mutex_t mtx;
malloc_mutex_init(&mtx);
malloc_mutex_lock(&mtx);
malloc_mutex_unlock(&mtx);
malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
malloc_mutex_lock(NULL, &mtx);
malloc_mutex_unlock(NULL, &mtx);
}
#endif
#endif

View File

@ -6,17 +6,21 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# define MALLOC_MUTEX_INITIALIZER \
{PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
@ -39,6 +43,7 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
@ -49,28 +54,32 @@ struct malloc_mutex_s {
extern bool isthreaded;
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
witness_rank_t rank);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_first_thread(void);
bool mutex_boot(void);
bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
@ -82,14 +91,16 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
#else
pthread_mutex_lock(&mutex->lock);
#endif
witness_lock(tsdn, &mutex->witness);
}
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
@ -103,6 +114,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif
#endif /* JEMALLOC_H_INLINES */

View File

@ -1,7 +1,7 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
&& _POSIX_MONOTONIC_CLOCK >= 0
typedef struct nstime_s nstime_t;

View File

@ -9,13 +9,14 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *pages_map(void *addr, size_t size);
void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
size_t size);
size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/

View File

@ -0,0 +1,345 @@
/*
* A Pairing Heap implementation.
*
* "The Pairing Heap: A New Form of Self-Adjusting Heap"
* https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
*
 * With auxiliary twopass list, described in a follow-on paper.
*
* "Pairing Heaps: Experiments and Analysis"
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
*/
#ifndef PH_H_
#define PH_H_
/* Node structure. */
#define phn(a_type) \
struct { \
a_type *phn_prev; \
a_type *phn_next; \
a_type *phn_lchild; \
}
/* Root structure. */
#define ph(a_type) \
struct { \
a_type *ph_root; \
}
/* Internal utility macros. */
#define phn_lchild_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_lchild)
#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
a_phn->a_field.phn_lchild = a_lchild; \
} while (0)
#define phn_next_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_next)
#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
a_phn->a_field.phn_prev = a_prev; \
} while (0)
#define phn_prev_get(a_type, a_field, a_phn) \
(a_phn->a_field.phn_prev)
#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
a_phn->a_field.phn_next = a_next; \
} while (0)
#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
a_type *phn0child; \
\
assert(a_phn0 != NULL); \
assert(a_phn1 != NULL); \
assert(a_cmp(a_phn0, a_phn1) <= 0); \
\
phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
phn_next_set(a_type, a_field, a_phn1, phn0child); \
if (phn0child != NULL) \
phn_prev_set(a_type, a_field, phn0child, a_phn1); \
phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)
#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
if (a_phn0 == NULL) \
r_phn = a_phn1; \
else if (a_phn1 == NULL) \
r_phn = a_phn0; \
else if (a_cmp(a_phn0, a_phn1) < 0) { \
phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
a_cmp); \
r_phn = a_phn0; \
} else { \
phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
a_cmp); \
r_phn = a_phn1; \
} \
} while (0)
#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *head = NULL; \
a_type *tail = NULL; \
a_type *phn0 = a_phn; \
a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
\
/* \
* Multipass merge, wherein the first two elements of a FIFO \
* are repeatedly merged, and each result is appended to the \
* singly linked FIFO, until the FIFO contains only a single \
* element. We start with a sibling list but no reference to \
* its tail, so we do a single pass over the sibling list to \
* populate the FIFO. \
*/ \
if (phn1 != NULL) { \
a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
if (phnrest != NULL) \
phn_prev_set(a_type, a_field, phnrest, NULL); \
phn_prev_set(a_type, a_field, phn0, NULL); \
phn_next_set(a_type, a_field, phn0, NULL); \
phn_prev_set(a_type, a_field, phn1, NULL); \
phn_next_set(a_type, a_field, phn1, NULL); \
phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
head = tail = phn0; \
phn0 = phnrest; \
while (phn0 != NULL) { \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
phnrest = phn_next_get(a_type, a_field, \
phn1); \
if (phnrest != NULL) { \
phn_prev_set(a_type, a_field, \
phnrest, NULL); \
} \
phn_prev_set(a_type, a_field, phn0, \
NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
phn_prev_set(a_type, a_field, phn1, \
NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = phnrest; \
} else { \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = NULL; \
} \
} \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, phn0); \
if (phn1 != NULL) { \
while (true) { \
head = phn_next_get(a_type, a_field, \
phn1); \
assert(phn_prev_get(a_type, a_field, \
phn0) == NULL); \
phn_next_set(a_type, a_field, phn0, \
NULL); \
assert(phn_prev_get(a_type, a_field, \
phn1) == NULL); \
phn_next_set(a_type, a_field, phn1, \
NULL); \
phn_merge(a_type, a_field, phn0, phn1, \
a_cmp, phn0); \
if (head == NULL) \
break; \
phn_next_set(a_type, a_field, tail, \
phn0); \
tail = phn0; \
phn0 = head; \
phn1 = phn_next_get(a_type, a_field, \
phn0); \
} \
} \
} \
r_phn = phn0; \
} while (0)
#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
if (phn != NULL) { \
phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
phn_prev_set(a_type, a_field, phn, NULL); \
ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
assert(phn_next_get(a_type, a_field, phn) == NULL); \
phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
a_ph->ph_root); \
} \
} while (0)
#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
if (lchild == NULL) \
r_phn = NULL; \
else { \
ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
r_phn); \
} \
} while (0)
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
a_attr void a_prefix##new(a_ph_type *ph); \
a_attr bool a_prefix##empty(a_ph_type *ph); \
a_attr a_type *a_prefix##first(a_ph_type *ph); \
a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
/*
* The ph_gen() macro generates a type-specific pairing heap implementation,
* based on the above cpp macros.
*/
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
\
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
\
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
\
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \
* Treat the root as an aux list during insertion, and lazily \
* merge during a_prefix##remove_first(). For elements that \
* are inserted, then removed via a_prefix##remove() before the \
* aux list is ever processed, this makes insert/remove \
* constant-time, whereas eager merging would make insert \
* O(log n). \
*/ \
if (ph->ph_root == NULL) \
ph->ph_root = phn; \
else { \
phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
a_field, ph->ph_root)); \
if (phn_next_get(a_type, a_field, ph->ph_root) != \
NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, ph->ph_root), \
phn); \
} \
phn_prev_set(a_type, a_field, phn, ph->ph_root); \
phn_next_set(a_type, a_field, ph->ph_root, phn); \
} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_type *ret; \
\
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
ret = ph->ph_root; \
\
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_type *replace, *parent; \
\
/* \
* We can delete from aux list without merging it, but we need \
* to merge if we are dealing with the root node. \
*/ \
if (ph->ph_root == phn) { \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
if (ph->ph_root == phn) { \
ph_merge_children(a_type, a_field, ph->ph_root, \
a_cmp, ph->ph_root); \
return; \
} \
} \
\
/* Get parent (if phn is leftmost child) before mutating. */ \
if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
if (phn_lchild_get(a_type, a_field, parent) != phn) \
parent = NULL; \
} \
/* Find a possible replacement node, and link to parent. */ \
ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
/* Set next/prev for sibling linked list. */ \
if (replace != NULL) { \
if (parent != NULL) { \
phn_prev_set(a_type, a_field, replace, parent); \
phn_lchild_set(a_type, a_field, parent, \
replace); \
} else { \
phn_prev_set(a_type, a_field, replace, \
phn_prev_get(a_type, a_field, phn)); \
if (phn_prev_get(a_type, a_field, phn) != \
NULL) { \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
replace); \
} \
} \
phn_next_set(a_type, a_field, replace, \
phn_next_get(a_type, a_field, phn)); \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
replace); \
} \
} else { \
if (parent != NULL) { \
a_type *next = phn_next_get(a_type, a_field, \
phn); \
phn_lchild_set(a_type, a_field, parent, next); \
if (next != NULL) { \
phn_prev_set(a_type, a_field, next, \
parent); \
} \
} else { \
assert(phn_prev_get(a_type, a_field, phn) != \
NULL); \
phn_next_set(a_type, a_field, \
phn_prev_get(a_type, a_field, phn), \
phn_next_get(a_type, a_field, phn)); \
} \
if (phn_next_get(a_type, a_field, phn) != NULL) { \
phn_prev_set(a_type, a_field, \
phn_next_get(a_type, a_field, phn), \
phn_prev_get(a_type, a_field, phn)); \
} \
} \
}
#endif /* PH_H_ */
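Because ph.h is entirely macro-generated, a short standalone sketch may help show how ph()/phn()/ph_gen() are meant to be instantiated. Everything below (my_node_t, the link field, my_node_cmp, the my_heap_ prefix, and the include path) is invented for illustration and is not part of this diff.

/*
 * Illustrative sketch, not part of this diff: instantiating the pairing
 * heap for a hypothetical node type.  All names here are invented.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "jemalloc/internal/ph.h"	/* Include path depends on the build. */

typedef struct my_node_s my_node_t;
struct my_node_s {
	uint64_t	key;
	phn(my_node_t)	link;	/* Embedded pairing heap linkage. */
};
typedef ph(my_node_t) my_heap_t;

static int
my_node_cmp(const my_node_t *a, const my_node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

/*
 * Generates my_heap_new(), my_heap_empty(), my_heap_first(),
 * my_heap_insert(), my_heap_remove_first(), and my_heap_remove().
 */
ph_gen(static inline, my_heap_, my_heap_t, my_node_t, link, my_node_cmp)

int
main(void)
{
	my_heap_t heap;
	my_node_t a = {.key = 42};
	my_node_t b = {.key = 7};

	my_heap_new(&heap);
	my_heap_insert(&heap, &a);
	my_heap_insert(&heap, &b);		/* O(1): lands on the aux list. */
	assert(my_heap_first(&heap) == &b);	/* Smallest key first. */
	assert(my_heap_remove_first(&heap) == &b);
	assert(my_heap_remove_first(&heap) == &a);
	assert(my_heap_empty(&heap));
	return (0);
}

Note how insert() only links the node onto the root's auxiliary list, deferring the pairing merges to first()/remove_first(), as the comment inside a_prefix##insert() above describes.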

View File

@ -5,10 +5,12 @@
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_bitselm_get JEMALLOC_N(arena_bitselm_get)
#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_choose JEMALLOC_N(arena_choose)
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
@ -34,6 +36,7 @@
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_get JEMALLOC_N(arena_get)
#define arena_ichoose JEMALLOC_N(arena_ichoose)
#define arena_init JEMALLOC_N(arena_init)
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
@ -60,7 +63,8 @@
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_maxrun JEMALLOC_N(arena_maxrun)
@ -69,7 +73,8 @@
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
#define arena_migrate JEMALLOC_N(arena_migrate)
#define arena_miscelm_get JEMALLOC_N(arena_miscelm_get)
#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
#define arena_new JEMALLOC_N(arena_new)
@ -81,7 +86,10 @@
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prefork0 JEMALLOC_N(arena_prefork0)
#define arena_prefork1 JEMALLOC_N(arena_prefork1)
#define arena_prefork2 JEMALLOC_N(arena_prefork2)
#define arena_prefork3 JEMALLOC_N(arena_prefork3)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
@ -97,6 +105,7 @@
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_reset JEMALLOC_N(arena_reset)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
#define arena_salloc JEMALLOC_N(arena_salloc)
@ -123,6 +132,11 @@
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define atomic_write_p JEMALLOC_N(atomic_write_p)
#define atomic_write_u JEMALLOC_N(atomic_write_u)
#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
#define atomic_write_z JEMALLOC_N(atomic_write_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
@ -148,7 +162,6 @@
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dalloc_arena JEMALLOC_N(chunk_dalloc_arena)
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
@ -168,7 +181,6 @@
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_purge_arena JEMALLOC_N(chunk_purge_arena)
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
#define chunk_register JEMALLOC_N(chunk_register)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
@ -200,6 +212,8 @@
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
@ -210,6 +224,8 @@
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
@ -227,6 +243,8 @@
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_destroy JEMALLOC_N(extent_tree_szad_destroy)
#define extent_tree_szad_destroy_recurse JEMALLOC_N(extent_tree_szad_destroy_recurse)
#define extent_tree_szad_empty JEMALLOC_N(extent_tree_szad_empty)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
@ -273,14 +291,11 @@
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iaalloc JEMALLOC_N(iaalloc)
#define ialloc JEMALLOC_N(ialloc)
#define iallocztm JEMALLOC_N(iallocztm)
#define icalloc JEMALLOC_N(icalloc)
#define icalloct JEMALLOC_N(icalloct)
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
#define idalloc JEMALLOC_N(idalloc)
#define idalloct JEMALLOC_N(idalloct)
#define idalloctm JEMALLOC_N(idalloctm)
#define imalloc JEMALLOC_N(imalloc)
#define imalloct JEMALLOC_N(imalloct)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define index2size JEMALLOC_N(index2size)
#define index2size_compute JEMALLOC_N(index2size_compute)
@ -303,7 +318,11 @@
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define large_maxclass JEMALLOC_N(large_maxclass)
#define lg_floor JEMALLOC_N(lg_floor)
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
@ -325,11 +344,13 @@
#define map_bias JEMALLOC_N(map_bias)
#define map_misc_offset JEMALLOC_N(map_misc_offset)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define nhclasses JEMALLOC_N(nhclasses)
#define nlclasses JEMALLOC_N(nlclasses)
#define nstime_add JEMALLOC_N(nstime_add)
#define nstime_compare JEMALLOC_N(nstime_compare)
#define nstime_copy JEMALLOC_N(nstime_copy)
@ -372,6 +393,7 @@
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_boot JEMALLOC_N(pages_boot)
#define pages_commit JEMALLOC_N(pages_commit)
#define pages_decommit JEMALLOC_N(pages_decommit)
#define pages_map JEMALLOC_N(pages_map)
@ -383,6 +405,7 @@
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
#define prng_lg_range JEMALLOC_N(prng_lg_range)
#define prng_range JEMALLOC_N(prng_range)
#define prof_active JEMALLOC_N(prof_active)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
#define prof_active_set JEMALLOC_N(prof_active_set)
@ -392,6 +415,7 @@
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_dump_header JEMALLOC_N(prof_dump_header)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
@ -409,7 +433,8 @@
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_reset JEMALLOC_N(prof_reset)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@ -418,6 +443,7 @@
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
@ -469,8 +495,6 @@
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
@ -505,38 +529,83 @@
#define ticker_ticks JEMALLOC_N(ticker_ticks)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_get JEMALLOC_N(tsd_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
#define tsdn_null JEMALLOC_N(tsdn_null)
#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
#define witness_init JEMALLOC_N(witness_init)
#define witness_lock JEMALLOC_N(witness_lock)
#define witness_lock_error JEMALLOC_N(witness_lock_error)
#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
#define witness_owner_error JEMALLOC_N(witness_owner_error)
#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
#define witness_prefork JEMALLOC_N(witness_prefork)
#define witness_unlock JEMALLOC_N(witness_unlock)
#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)

View File

@ -281,7 +281,7 @@ extern uint64_t prof_interval;
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(const void *ptr, size_t usize,
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
@ -293,32 +293,33 @@ size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_reset(tsdn_t *tsdn, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
const char *prof_thread_name_get(void);
bool prof_active_get(void);
bool prof_active_set(bool active);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(void);
bool prof_thread_active_set(bool active);
bool prof_thread_active_init_get(void);
bool prof_thread_active_init_set(bool active_init);
bool prof_gdump_get(void);
bool prof_gdump_set(bool active);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
bool prof_boot2(tsdn_t *tsdn);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
@ -329,17 +330,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
@ -383,7 +384,7 @@ prof_tdata_get(tsd_t *tsd, bool create)
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tdata = prof_tdata_init(tsd_tsdn(tsd));
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
@ -397,34 +398,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(const void *ptr)
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
return (arena_prof_tctx_get(ptr));
return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(ptr, usize, tctx);
arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@ -479,17 +480,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
}
JEMALLOC_ALWAYS_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(ptr, true));
assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(ptr, usize, tctx);
prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@ -503,7 +504,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(ptr, true));
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@ -520,9 +521,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
prof_malloc_sample_object(ptr, usize, tctx);
prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
@ -531,10 +532,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
prof_tctx_t *tctx = prof_tctx_get(ptr);
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
assert(usize == isalloc(ptr, true));
assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);

View File

@ -15,9 +15,10 @@ typedef struct rtree_s rtree_t;
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
@ -111,22 +112,25 @@ unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
unsigned level);
unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
bool dependent);
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_INLINE unsigned
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
@ -140,7 +144,7 @@ rtree_start_level(rtree_t *rtree, uintptr_t key)
return (start_level);
}
JEMALLOC_INLINE uintptr_t
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
@ -149,37 +153,40 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
rtree->levels[level].bits) - 1));
}
JEMALLOC_INLINE bool
JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm)
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
rtree_node_elm_t *child;
/* Double-checked read (first read may be stale). */
child = elm->child;
if (!rtree_node_valid(child))
if (!dependent && !rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
bool dependent)
{
rtree_node_elm_t *child;
child = rtree_child_tryread(elm);
if (unlikely(!rtree_node_valid(child)))
child = rtree_child_tryread(elm, dependent);
if (!dependent && unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
assert(!dependent || child != NULL);
return (child);
}
JEMALLOC_INLINE extent_node_t *
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@ -208,54 +215,119 @@ rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
atomic_write_p(&elm->pun, val);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level)
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
if (!rtree_node_valid(subtree))
if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level)
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
subtree = rtree_subtree_tryread(rtree, level);
if (unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_tryread(rtree, level, dependent);
if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
assert(!dependent || subtree != NULL);
return (subtree);
}
JEMALLOC_INLINE extent_node_t *
JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
unsigned i, start_level;
rtree_node_elm_t *node, *child;
unsigned start_level;
rtree_node_elm_t *node;
start_level = rtree_start_level(rtree, key);
for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
/**/; i++, node = child) {
if (!dependent && unlikely(!rtree_node_valid(node)))
return (NULL);
subkey = rtree_subkey(rtree, key, i);
if (i == rtree->height - 1) {
/*
* node is a leaf, so it contains values rather than
* child pointers.
*/
return (rtree_val_read(rtree, &node[subkey],
dependent));
}
assert(i < rtree->height - 1);
child = rtree_child_tryread(&node[subkey]);
node = rtree_subtree_tryread(rtree, start_level, dependent);
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
switch (start_level + RTREE_GET_BIAS) {
#define RTREE_GET_SUBTREE(level) \
case level: \
assert(level < (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
node = rtree_child_tryread(&node[subkey], dependent); \
/* Fall through. */
#define RTREE_GET_LEAF(level) \
case level: \
assert(level == (RTREE_HEIGHT_MAX-1)); \
if (!dependent && unlikely(!rtree_node_valid(node))) \
return (NULL); \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
/* \
* node is a leaf, so it contains values rather than \
* child pointers. \
*/ \
return (rtree_val_read(rtree, &node[subkey], \
dependent));
#if RTREE_HEIGHT_MAX > 1
RTREE_GET_SUBTREE(0)
#endif
#if RTREE_HEIGHT_MAX > 2
RTREE_GET_SUBTREE(1)
#endif
#if RTREE_HEIGHT_MAX > 3
RTREE_GET_SUBTREE(2)
#endif
#if RTREE_HEIGHT_MAX > 4
RTREE_GET_SUBTREE(3)
#endif
#if RTREE_HEIGHT_MAX > 5
RTREE_GET_SUBTREE(4)
#endif
#if RTREE_HEIGHT_MAX > 6
RTREE_GET_SUBTREE(5)
#endif
#if RTREE_HEIGHT_MAX > 7
RTREE_GET_SUBTREE(6)
#endif
#if RTREE_HEIGHT_MAX > 8
RTREE_GET_SUBTREE(7)
#endif
#if RTREE_HEIGHT_MAX > 9
RTREE_GET_SUBTREE(8)
#endif
#if RTREE_HEIGHT_MAX > 10
RTREE_GET_SUBTREE(9)
#endif
#if RTREE_HEIGHT_MAX > 11
RTREE_GET_SUBTREE(10)
#endif
#if RTREE_HEIGHT_MAX > 12
RTREE_GET_SUBTREE(11)
#endif
#if RTREE_HEIGHT_MAX > 13
RTREE_GET_SUBTREE(12)
#endif
#if RTREE_HEIGHT_MAX > 14
RTREE_GET_SUBTREE(13)
#endif
#if RTREE_HEIGHT_MAX > 15
RTREE_GET_SUBTREE(14)
#endif
#if RTREE_HEIGHT_MAX > 16
# error Unsupported RTREE_HEIGHT_MAX
#endif
RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
#undef RTREE_GET_SUBTREE
#undef RTREE_GET_LEAF
default: not_reached();
}
#undef RTREE_GET_BIAS
not_reached();
}
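
To make the unrolled switch concrete, here is an illustrative trace of which cases execute, assuming RTREE_HEIGHT_MAX == 4 (as on a 64-bit build with 16 key bits per level) and an rtree of height 3, so RTREE_GET_BIAS == 1:

#include <stdio.h>

int
main(void)
{
	const unsigned height_max = 4;	/* RTREE_HEIGHT_MAX, 64-bit build */
	const unsigned height = 3;	/* rtree->height for this example */
	const unsigned bias = height_max - height;	/* RTREE_GET_BIAS */
	unsigned start_level = 0;	/* from rtree_start_level() */
	unsigned level;

	for (level = start_level + bias; level < height_max; level++) {
		printf("case %u: logical level %u (%s)\n", level,
		    level - bias, (level == height_max - 1) ?
		    "RTREE_GET_LEAF" : "RTREE_GET_SUBTREE");
	}
	return (0);
}

A lookup starting at logical level 0 enters the switch at case 1, falls through RTREE_GET_SUBTREE(1) and RTREE_GET_SUBTREE(2), and terminates in RTREE_GET_LEAF(3); the level - RTREE_GET_BIAS subtraction recovers the logical level for rtree_subkey().
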
@ -268,7 +340,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
start_level = rtree_start_level(rtree, key);
node = rtree_subtree_read(rtree, start_level);
node = rtree_subtree_read(rtree, start_level, false);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
@ -282,7 +354,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
return (false);
}
assert(i + 1 < rtree->height);
child = rtree_child_read(rtree, &node[subkey], i);
child = rtree_child_read(rtree, &node[subkey], i, false);
if (child == NULL)
return (true);
}

View File

@ -102,6 +102,14 @@ struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
/*
* Number of bytes currently retained as a side effect of munmap() being
* disabled/bypassed. Retained bytes are technically mapped (though
* always decommitted or purged), but they are excluded from the mapped
* statistic (above).
*/
size_t retained;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under

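
The retained counter added above is exported as the stats.retained and stats.arenas.<i>.retained mallctls. A minimal sketch of reading the global value (error handling trimmed; on FreeBSD the declarations come from <malloc_np.h>, with upstream jemalloc from <jemalloc/jemalloc.h>; stats.* values are snapshots that are refreshed by writing the epoch mallctl):

#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>	/* FreeBSD; <jemalloc/jemalloc.h> upstream */

int
main(void)
{
	uint64_t epoch = 1;
	size_t retained, sz;

	/* Refresh the cached statistics before reading them. */
	sz = sizeof(epoch);
	if (mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch)) != 0)
		return (1);

	sz = sizeof(retained);
	if (mallctl("stats.retained", &retained, &sz, NULL, 0) != 0)
		return (1);
	printf("retained: %zu bytes\n", retained);
	return (0);
}
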
View File

@ -130,27 +130,25 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
size_t tcache_salloc(const void *ptr);
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
arena_t *newarena);
void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *oldarena, arena_t *newarena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsdn_t *tsdn, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(void);
bool tcache_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@ -297,8 +295,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL))
return (NULL);
ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
&tcache_hard_success);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
tbin, binind, &tcache_hard_success);
if (tcache_hard_success == false)
return (NULL);
}
@ -310,7 +308,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
*/
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
usize = index2size(binind);
assert(tcache_salloc(ret) == usize);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
if (likely(!zero)) {
@ -358,7 +356,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL))
return (NULL);
ret = arena_malloc_large(tsd, arena, binind, zero);
ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
if (ret == NULL)
return (NULL);
} else {
@ -381,9 +379,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
if (unlikely(opt_junk_alloc)) {
memset(ret, JEMALLOC_ALLOC_JUNK,
usize);
} else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else
@ -406,7 +405,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
if (slow_path && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
@ -433,8 +432,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= tcache_maxclass);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
binind = size2index(size);
@ -458,8 +457,10 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
if (unlikely(elm->tcache == NULL))
elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
if (unlikely(elm->tcache == NULL)) {
elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
NULL));
}
return (elm->tcache);
}
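
tcaches_get() lazily materializes the tcache behind an explicit cache handle, which applications obtain and use through the public tcache.* mallctls and MALLOCX_TCACHE(). A usage sketch from the application side (FreeBSD: <malloc_np.h>; upstream: <jemalloc/jemalloc.h>; error handling trimmed):

#include <stddef.h>
#include <malloc_np.h>	/* FreeBSD; <jemalloc/jemalloc.h> upstream */

int
main(void)
{
	unsigned tci;
	size_t sz = sizeof(tci);
	void *p;

	/* Create an explicit cache; its index feeds MALLOCX_TCACHE(). */
	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
		return (1);

	p = mallocx(4096, MALLOCX_TCACHE(tci));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tci));

	/* Drop the cache (and any lazily created tcache_t) when done. */
	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
	return (0);
}
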
#endif

View File

@ -13,6 +13,9 @@ typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
@ -44,6 +47,7 @@ typedef enum {
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
* bool example_tsd_booted_get(void) {...}
* example_t *example_tsd_get() {...}
* void example_tsd_set(example_t *val) {...}
*
@ -98,6 +102,8 @@ a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
a_attr bool \
a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(void); \
a_attr void \
@ -201,6 +207,12 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@ -246,6 +258,12 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@ -368,6 +386,12 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@ -490,6 +514,12 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
\
return (a_name##tsd_booted); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@ -536,12 +566,15 @@ struct tsd_init_head_s {
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(narenas_tdata, unsigned) \
O(arenas_tdata_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
O(witnesses, witness_list_t) \
O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@ -551,10 +584,13 @@ struct tsd_init_head_s {
NULL, \
NULL, \
NULL, \
NULL, \
0, \
false, \
tcache_enabled_default, \
NULL \
NULL, \
ql_head_initializer(witnesses), \
false \
}
struct tsd_s {
@ -565,6 +601,15 @@ MALLOC_TSD
#undef O
};
/*
* Wrapper around tsd_t that makes it possible to avoid implicit conversion
* between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
* explicitly converted to tsd_t, which is non-nullable.
*/
struct tsdn_s {
tsd_t tsd;
};
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
@ -577,7 +622,7 @@ void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
bool malloc_tsd_boot0(void);
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
@ -595,6 +640,7 @@ void tsd_cleanup(void *arg);
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
@ -602,6 +648,9 @@ t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
tsdn_t *tsdn_fetch(void);
bool tsdn_null(const tsdn_t *tsdn);
tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
@ -628,6 +677,13 @@ tsd_fetch(void)
return (tsd);
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
return ((tsdn_t *)tsd);
}
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
@ -659,6 +715,32 @@ tsd_##n##_set(tsd_t *tsd, t n) \
}
MALLOC_TSD
#undef O
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
if (!tsd_booted_get())
return (NULL);
return (tsd_tsdn(tsd_fetch()));
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
return (tsdn == NULL);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
}
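
The calling convention for the nullable wrapper is: fetch a tsdn_t, test it with tsdn_null(), and only then convert with tsdn_tsd(); the witness functions later in this change follow exactly this shape. A standalone sketch of the pattern with illustrative example_* types (not jemalloc's definitions):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative mirrors of tsd_t/tsdn_t. */
typedef struct { int dummy; } example_tsd_t;
typedef struct { example_tsd_t tsd; } example_tsdn_t;

static bool
example_tsdn_null(const example_tsdn_t *tsdn)
{
	return (tsdn == NULL);
}

static example_tsd_t *
example_tsdn_tsd(example_tsdn_t *tsdn)
{
	return (&tsdn->tsd);
}

static int
example_use(example_tsdn_t *tsdn)
{
	if (example_tsdn_null(tsdn)) {
		/* TSD unavailable (e.g. during early boot); take a path
		 * that needs no thread-specific data. */
		return (-1);
	}
	/* Safe to convert now; never dereference before the NULL check. */
	return (example_tsdn_tsd(tsdn)->dummy);
}

int
main(void)
{
	example_tsdn_t t = { { 0 } };

	return (example_use(&t) == 0 && example_use(NULL) == -1 ? 0 : 1);
}
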
#endif
#endif /* JEMALLOC_H_INLINES */

View File

@ -40,6 +40,10 @@
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/* Junk fill patterns. */
#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
@ -73,12 +77,12 @@
JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
# define unreachable() __builtin_unreachable()
# else
# define unreachable()
# define unreachable() abort()
# endif
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
# define unreachable()
# define unreachable() abort()
#endif
#include "jemalloc/internal/assert.h"
@ -106,9 +110,9 @@ void malloc_write(const char *s);
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
int malloc_vsnprintf(char *str, size_t size, const char *format,
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);

View File

@ -30,15 +30,17 @@
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
if (unlikely(in_valgrind && cond)) { \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
zero); \
} \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do { \
if (unlikely(in_valgrind)) { \
size_t rzsize = p2rz(ptr); \
size_t rzsize = p2rz(tsdn, ptr); \
\
if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
@ -81,8 +83,8 @@
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)

View File

@ -0,0 +1,249 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct witness_s witness_t;
typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, const witness_t *);
/*
* Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
* the witness machinery.
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_ARENAS 2U
#define WITNESS_RANK_PROF_DUMP 3U
#define WITNESS_RANK_PROF_BT2GCTX 4U
#define WITNESS_RANK_PROF_TDATAS 5U
#define WITNESS_RANK_PROF_TDATA 6U
#define WITNESS_RANK_PROF_GCTX 7U
#define WITNESS_RANK_ARENA 8U
#define WITNESS_RANK_ARENA_CHUNKS 9U
#define WITNESS_RANK_ARENA_NODE_CACHE 10U
#define WITNESS_RANK_BASE 11U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct witness_s {
/* Name, used for printing lock order reversal messages. */
const char *name;
/*
* Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
* must be acquired in order of increasing rank.
*/
witness_rank_t rank;
/*
* If two witnesses are of equal rank and they have the same comp
* function pointer, it is called as a last attempt to differentiate
* between witnesses of equal rank.
*/
witness_comp_t *comp;
/* Linkage for thread's currently owned locks. */
ql_elm(witness_t) link;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp);
#ifdef JEMALLOC_JET
typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
extern witness_lock_error_t *witness_lock_error;
#else
void witness_lock_error(const witness_list_t *witnesses,
const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_owner_error_t)(const witness_t *);
extern witness_owner_error_t *witness_owner_error;
#else
void witness_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_not_owner_error_t)(const witness_t *);
extern witness_not_owner_error_t *witness_not_owner_error;
#else
void witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_lockless_error_t)(const witness_list_t *);
extern witness_lockless_error_t *witness_lockless_error;
#else
void witness_lockless_error(const witness_list_t *witnesses);
#endif
void witnesses_cleanup(tsd_t *tsd);
void witness_fork_cleanup(tsd_t *tsd);
void witness_prefork(tsd_t *tsd);
void witness_postfork_parent(tsd_t *tsd);
void witness_postfork_child(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
return;
}
witness_owner_error(witness);
}
JEMALLOC_INLINE void
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
witness_not_owner_error(witness);
}
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL)
witness_lockless_error(witnesses);
}
JEMALLOC_INLINE void
witness_lock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
witness_t *w;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witness_assert_not_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w == NULL) {
/* No other locks; do nothing. */
} else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
/* Forking, and relaxed ranking satisfied. */
} else if (w->rank > witness->rank) {
/* Not forking, rank order reversal. */
witness_lock_error(witnesses, witness);
} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
witness->comp || w->comp(w, witness) > 0)) {
/*
* Missing/incompatible comparison function, or comparison
* function indicates rank order reversal.
*/
witness_lock_error(witnesses, witness);
}
ql_elm_new(witness, link);
ql_tail_insert(witnesses, witness, link);
}
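
The effect of the rank check above is that a lock-order reversal fails deterministically in debug builds instead of deadlocking rarely in production. A heavily simplified, standalone sketch of the idea (hypothetical example_* names; it tracks only the highest rank acquired so far rather than jemalloc's full per-thread witness list, and it ignores release):

#include <assert.h>

typedef struct {
	const char	*name;
	unsigned	rank;
} example_witness_t;

static _Thread_local unsigned example_held_rank = 0;

static void
example_witness_lock(const example_witness_t *w)
{
	/* A rank order reversal would trip this check immediately. */
	assert(w->rank >= example_held_rank);
	example_held_rank = w->rank;
}

int
main(void)
{
	example_witness_t arenas = { "arenas", 2 };
	example_witness_t arena = { "arena", 8 };

	example_witness_lock(&arenas);	/* rank 2 */
	example_witness_lock(&arena);	/* rank 8: nondecreasing, OK */
	/* Locking "arenas" again here would assert, since 2 < 8. */
	return (0);
}
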
JEMALLOC_INLINE void
witness_unlock(tsdn_t *tsdn, witness_t *witness)
{
tsd_t *tsd;
witness_list_t *witnesses;
if (!config_debug)
return;
if (tsdn_null(tsdn))
return;
tsd = tsdn_tsd(tsdn);
if (witness->rank == WITNESS_RANK_OMIT)
return;
witness_assert_owner(tsdn, witness);
witnesses = tsd_witnessesp_get(tsd);
ql_remove(witnesses, witness, link);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -87,20 +87,20 @@ extern "C" {
#include <limits.h>
#include <strings.h>
#define JEMALLOC_VERSION "4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473"
#define JEMALLOC_VERSION "4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc"
#define JEMALLOC_VERSION_MAJOR 4
#define JEMALLOC_VERSION_MINOR 1
#define JEMALLOC_VERSION_MINOR 2
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 1
#define JEMALLOC_VERSION_GID "994da4232621dd1210fcf39bdf0d6454cefda473"
#define JEMALLOC_VERSION_GID "dc7ff6306d7a15b53479e2fb8e5546404b82e6fc"
# define MALLOCX_LG_ALIGN(la) ((int)(la))
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) ((int)(ffs(a)-1))
# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
# else
# define MALLOCX_ALIGN(a) \
((int)(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)((a)>>32))+31))
((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
ffs((int)(((size_t)(a))>>32))+31))
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
@ -112,7 +112,7 @@ extern "C" {
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))
# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
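
These encoders are part of the public non-standard allocation API; a short usage sketch combining them (assumes <malloc_np.h> on FreeBSD or <jemalloc/jemalloc.h> upstream, and that arenas.extend is used to obtain an explicit arena index):

#include <stddef.h>
#include <malloc_np.h>	/* FreeBSD; <jemalloc/jemalloc.h> upstream */

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* Create a fresh arena, then allocate 64-byte aligned, zeroed
	 * memory bound to it. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);
	p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
	    MALLOCX_ARENA(arena_ind));
	if (p == NULL)
		return (1);
	dallocx(p, 0);
	return (0);
}
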
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()

File diff suppressed because it is too large.

View File

@ -13,12 +13,13 @@ static size_t base_mapped;
/******************************************************************************/
/* base_mtx must be held. */
static extent_node_t *
base_node_try_alloc(void)
base_node_try_alloc(tsdn_t *tsdn)
{
extent_node_t *node;
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL)
return (NULL);
node = base_nodes;
@ -27,33 +28,34 @@ base_node_try_alloc(void)
return (node);
}
/* base_mtx must be held. */
static void
base_node_dalloc(extent_node_t *node)
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{
malloc_mutex_assert_owner(tsdn, &base_mtx);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
base_nodes = node;
}
/* base_mtx must be held. */
static extent_node_t *
base_chunk_alloc(size_t minsize)
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_node_t *node;
size_t csize, nsize;
void *addr;
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
node = base_node_try_alloc();
node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
base_node_dalloc(node);
base_node_dalloc(tsdn, node);
return (NULL);
}
base_mapped += csize;
@ -76,7 +78,7 @@ base_chunk_alloc(size_t minsize)
* physical memory usage.
*/
void *
base_alloc(size_t size)
base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
@ -91,14 +93,14 @@ base_alloc(size_t size)
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(&base_mtx);
malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(csize);
node = base_chunk_alloc(tsdn, csize);
}
if (node == NULL) {
ret = NULL;
@ -111,7 +113,7 @@ base_alloc(size_t size)
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
} else
base_node_dalloc(node);
base_node_dalloc(tsdn, node);
if (config_stats) {
base_allocated += csize;
/*
@ -123,28 +125,29 @@ base_alloc(size_t size)
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
malloc_mutex_unlock(&base_mtx);
malloc_mutex_unlock(tsdn, &base_mtx);
return (ret);
}
void
base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
size_t *mapped)
{
malloc_mutex_lock(&base_mtx);
malloc_mutex_lock(tsdn, &base_mtx);
assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped);
*allocated = base_allocated;
*resident = base_resident;
*mapped = base_mapped;
malloc_mutex_unlock(&base_mtx);
malloc_mutex_unlock(tsdn, &base_mtx);
}
bool
base_boot(void)
{
if (malloc_mutex_init(&base_mtx))
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
base_nodes = NULL;
@ -153,22 +156,22 @@ base_boot(void)
}
void
base_prefork(void)
base_prefork(tsdn_t *tsdn)
{
malloc_mutex_prefork(&base_mtx);
malloc_mutex_prefork(tsdn, &base_mtx);
}
void
base_postfork_parent(void)
base_postfork_parent(tsdn_t *tsdn)
{
malloc_mutex_postfork_parent(&base_mtx);
malloc_mutex_postfork_parent(tsdn, &base_mtx);
}
void
base_postfork_child(void)
base_postfork_child(tsdn_t *tsdn)
{
malloc_mutex_postfork_child(&base_mtx);
malloc_mutex_postfork_child(tsdn, &base_mtx);
}

View File

@ -74,15 +74,11 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
size_t i;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
i = nbits >> LG_BITMAP_GROUP_NBITS;
if (nbits % BITMAP_GROUP_NBITS != 0)
i++;
binfo->ngroups = i;
binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
@ -99,9 +95,10 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
size_t extra;
memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (binfo->nbits % (binfo->ngroups * BITMAP_GROUP_NBITS));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
bitmap[binfo->ngroups - 1] >>= (BITMAP_GROUP_NBITS - extra);
bitmap[binfo->ngroups - 1] >>= extra;
}
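
A worked example of the new trailing-group arithmetic (assuming 64-bit bitmap groups, i.e. LG_BITMAP_GROUP_NBITS == 6): for nbits == 70 the bitmap needs two groups, and the last group must be shifted right by 58 so that only its low 6 bits remain usable; when nbits is an exact multiple of the group size, the computed shift is 0.

#include <assert.h>
#include <stddef.h>

/* Illustrative 64-bit groups. */
#define EX_GROUP_NBITS		64
#define EX_GROUP_NBITS_MASK	(EX_GROUP_NBITS - 1)
#define EX_BITS2GROUPS(nbits)	(((nbits) + EX_GROUP_NBITS_MASK) >> 6)

int
main(void)
{
	size_t nbits = 70;
	size_t extra = (EX_GROUP_NBITS - (nbits & EX_GROUP_NBITS_MASK)) &
	    EX_GROUP_NBITS_MASK;

	assert(EX_BITS2GROUPS(nbits) == 2);
	/* The last group keeps only its low 70 - 64 == 6 bits. */
	assert(extra == 58);
	/* An exact multiple of the group size needs no trailing shift. */
	assert(((EX_GROUP_NBITS - (128 & EX_GROUP_NBITS_MASK)) &
	    EX_GROUP_NBITS_MASK) == 0);
	return (0);
}
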
#endif /* USE_TREE */

View File

@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed);
static void chunk_record(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
bool committed);
/******************************************************************************/
@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
chunk_hooks_get(arena_t *arena)
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
chunk_hooks_t chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
malloc_mutex_lock(&arena->chunks_mtx);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (old_chunk_hooks);
}
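
chunk_hooks_set() is what backs the writable arena.<i>.chunk_hooks mallctl, so applications can interpose on chunk management. A sketch of wrapping just the alloc hook (assumes the public chunk_hooks_t definition from <jemalloc/jemalloc.h>; error handling trimmed):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static chunk_hooks_t	old_hooks;

/* Log chunk requests, then forward to the previously installed hook. */
static void *
logging_chunk_alloc(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, unsigned arena_ind)
{
	fprintf(stderr, "chunk alloc: %zu bytes for arena %u\n", size,
	    arena_ind);
	return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
	    arena_ind));
}

int
main(void)
{
	chunk_hooks_t new_hooks;
	size_t sz = sizeof(old_hooks);

	/* Fetch the current hooks for arena 0, then swap in the wrapper. */
	if (mallctl("arena.0.chunk_hooks", &old_hooks, &sz, NULL, 0) != 0)
		return (1);
	new_hooks = old_hooks;
	new_hooks.alloc = logging_chunk_alloc;
	if (mallctl("arena.0.chunk_hooks", NULL, NULL, &new_hooks,
	    sizeof(new_hooks)) != 0)
		return (1);
	return (0);
}
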
static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool locked)
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
chunk_hooks_get(arena);
chunk_hooks_get(tsdn, arena);
}
}
static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}
static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}
bool
chunk_register(const void *chunk, const extent_node_t *node)
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
prof_gdump();
prof_gdump(tsdn);
}
return (false);
@ -197,7 +199,7 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
}
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
@ -219,8 +221,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
@ -232,7 +234,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@ -251,7 +253,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
@ -271,20 +273,21 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
cache, ret, size + trailsize, zeroed, committed);
arena_node_dalloc(tsdn, arena, node);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize, zeroed,
committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(arena);
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize,
zeroed, committed);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szad, chunks_ad, cache, ret, size +
trailsize, zeroed, committed);
return (NULL);
}
}
@ -296,16 +299,16 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(&arena->chunks_mtx);
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
ret, size, zeroed, committed);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
cache, ret, size, zeroed, committed);
return (NULL);
}
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(arena, node);
arena_node_dalloc(tsdn, arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@ -328,8 +331,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit, dss_prec_t dss_prec)
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
@ -340,8 +343,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL)
return (ret);
/* mmap. */
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
@ -349,8 +352,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL)
return (ret);
/* All strategies for allocation failed. */
@ -380,8 +383,8 @@ chunk_alloc_base(size_t size)
}
void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool dalloc_node)
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
bool commit;
@ -392,9 +395,9 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
assert((alignment & chunksize_mask) == 0);
commit = true;
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
&commit, dalloc_node);
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, zero, &commit, dalloc_node);
if (ret == NULL)
return (NULL);
assert(commit);
@ -404,11 +407,11 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
}
static arena_t *
chunk_arena_get(unsigned arena_ind)
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
arena_t *arena;
arena = arena_get(arena_ind, false);
arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@ -422,10 +425,12 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
void *ret;
tsdn_t *tsdn;
arena_t *arena;
arena = chunk_arena_get(arena_ind);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
tsdn = tsdn_fetch();
arena = chunk_arena_get(tsdn, arena_ind);
ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
@ -436,29 +441,35 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
}
static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
commit, true));
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
new_addr, size, alignment, zero, commit, true);
if (config_stats && ret != NULL)
arena->stats.retained -= size;
return (ret);
}
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
if (ret == NULL) {
ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
@ -473,7 +484,7 @@ chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
}
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
{
@ -485,8 +496,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
@ -511,7 +522,7 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(arena);
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@ -520,8 +531,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
chunk_purge_wrapper(arena, chunk_hooks, chunk,
size, 0, size);
chunk_purge_wrapper(tsdn, arena, chunk_hooks,
chunk, size, 0, size);
}
goto label_return;
}
@ -557,16 +568,16 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
arena_node_dalloc(arena, prev);
arena_node_dalloc(tsdn, arena, prev);
}
label_return:
malloc_mutex_unlock(&arena->chunks_mtx);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed)
{
assert(chunk != NULL);
@ -574,14 +585,24 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
arena_maybe_purge(arena);
arena_maybe_purge(tsdn, arena);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool zeroed, bool committed)
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed)
{
assert(chunk != NULL);
@ -589,7 +610,7 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
@ -600,29 +621,11 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
if (config_stats)
arena->stats.retained += size;
}
static bool
@ -643,8 +646,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
assert(chunk != NULL);
@ -657,21 +661,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
length));
}
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}
bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, size_t offset, size_t length)
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t offset, size_t length)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@ -692,8 +687,11 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
if (!maps_coalesce)
return (true);
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
return (true);
if (have_dss) {
tsdn_t *tsdn = tsdn_fetch();
if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
return (true);
}
return (false);
}
@ -702,7 +700,7 @@ static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
return ((rtree_node_elm_t *)base_alloc(nelms *
return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
sizeof(rtree_node_elm_t)));
}
@ -749,22 +747,22 @@ chunk_boot(void)
}
void
chunk_prefork(void)
chunk_prefork(tsdn_t *tsdn)
{
chunk_dss_prefork();
chunk_dss_prefork(tsdn);
}
void
chunk_postfork_parent(void)
chunk_postfork_parent(tsdn_t *tsdn)
{
chunk_dss_postfork_parent();
chunk_dss_postfork_parent(tsdn);
}
void
chunk_postfork_child(void)
chunk_postfork_child(tsdn_t *tsdn)
{
chunk_dss_postfork_child();
chunk_dss_postfork_child(tsdn);
}

View File

@ -41,33 +41,33 @@ chunk_dss_sbrk(intptr_t increment)
}
dss_prec_t
chunk_dss_prec_get(void)
chunk_dss_prec_get(tsdn_t *tsdn)
{
dss_prec_t ret;
if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (false);
}
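
The precedence managed here is exposed per arena through the arena.<i>.dss mallctl (and globally as opt.dss). A usage sketch (FreeBSD: <malloc_np.h>; upstream: <jemalloc/jemalloc.h>):

#include <stdio.h>
#include <malloc_np.h>	/* FreeBSD; <jemalloc/jemalloc.h> upstream */

int
main(void)
{
	const char *dss;
	size_t sz = sizeof(dss);

	/* Current precedence for arena 0: "disabled", "primary" or
	 * "secondary". */
	if (mallctl("arena.0.dss", &dss, &sz, NULL, 0) != 0)
		return (1);
	printf("arena.0.dss: %s\n", dss);

	/* Prefer sbrk(2)-backed chunks over mmap(2) for this arena. */
	dss = "primary";
	return (mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(dss)) != 0);
}
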
void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit)
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit)
{
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
@ -80,7 +80,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if ((intptr_t)size < 0)
return (NULL);
malloc_mutex_lock(&dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
if (dss_prev != (void *)-1) {
/*
@ -122,7 +122,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
@ -130,13 +130,13 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(arena,
chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
true);
false, true);
}
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
@ -149,25 +149,25 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
}
} while (dss_prev != (void *)-1);
}
malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (NULL);
}
bool
chunk_in_dss(void *chunk)
chunk_in_dss(tsdn_t *tsdn, void *chunk)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
malloc_mutex_lock(tsdn, &dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(tsdn, &dss_mtx);
return (ret);
}
@ -178,7 +178,7 @@ chunk_dss_boot(void)
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
if (malloc_mutex_init(&dss_mtx, "dss", WITNESS_RANK_DSS))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
@ -188,27 +188,27 @@ chunk_dss_boot(void)
}
void
chunk_dss_prefork(void)
chunk_dss_prefork(tsdn_t *tsdn)
{
if (have_dss)
malloc_mutex_prefork(&dss_mtx);
malloc_mutex_prefork(tsdn, &dss_mtx);
}
void
chunk_dss_postfork_parent(void)
chunk_dss_postfork_parent(tsdn_t *tsdn)
{
if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
malloc_mutex_postfork_parent(tsdn, &dss_mtx);
}
void
chunk_dss_postfork_child(void)
chunk_dss_postfork_child(tsdn_t *tsdn)
{
if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
malloc_mutex_postfork_child(tsdn, &dss_mtx);
}
/******************************************************************************/

View File

@ -9,25 +9,23 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
void *ret;
size_t alloc_size;
alloc_size = size + alignment - PAGE;
alloc_size = size + alignment;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
void *pages;
size_t leadsize;
pages = pages_map(NULL, alloc_size);
pages = pages_map(NULL, alloc_size, commit);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
ret = pages_trim(pages, alloc_size, leadsize, size);
ret = pages_trim(pages, alloc_size, leadsize, size, commit);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}
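
A worked example of the over-allocate-then-trim arithmetic above (illustrative numbers; EX_ALIGNMENT_CEILING mirrors jemalloc's ALIGNMENT_CEILING for power-of-two alignments): requesting 2 MiB at 2 MiB alignment now maps 4 MiB, and if the mapping lands 64 KiB past an aligned boundary, pages_trim() discards 2 MiB - 64 KiB of lead plus the trailing excess.

#include <assert.h>
#include <stdint.h>

#define EX_ALIGNMENT_CEILING(s, a)	(((s) + ((a) - 1)) & ~((a) - 1))

int
main(void)
{
	const uintptr_t alignment = (uintptr_t)2 << 20;	/* 2 MiB chunks */
	const uintptr_t size = (uintptr_t)2 << 20;
	/* Pretend mmap() returned an address 64 KiB past a 2 MiB boundary. */
	const uintptr_t pages = ((uintptr_t)1 << 30) + ((uintptr_t)64 << 10);
	uintptr_t alloc_size = size + alignment;	/* 4 MiB mapped */
	uintptr_t leadsize = EX_ALIGNMENT_CEILING(pages, alignment) - pages;

	assert(leadsize == alignment - ((uintptr_t)64 << 10));
	/* pages_trim() keeps [pages+leadsize, pages+leadsize+size) and
	 * unmaps the leading and trailing excess of alloc_size. */
	assert(leadsize + size <= alloc_size);
	return (0);
}
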
@ -54,7 +52,7 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
ret = pages_map(new_addr, size);
ret = pages_map(new_addr, size, commit);
if (ret == NULL || ret == new_addr)
return (ret);
assert(new_addr == NULL);
@ -66,8 +64,6 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(ret != NULL);
*zero = true;
if (!*commit)
*commit = pages_decommit(ret, size);
return (ret);
}

View File

@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
static bool ckh_grow(tsdn_t *tsdn, ckh_t *ckh);
static void ckh_shrink(tsdn_t *tsdn, ckh_t *ckh);
/******************************************************************************/
@ -244,7 +244,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh_grow(tsdn_t *tsdn, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
@ -270,8 +270,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ret = true;
goto label_return;
}
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
true, NULL);
tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
true, arena_ichoose(tsdn, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@ -283,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true, true);
idalloctm(tsdn, tab, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
idalloctm(tsdn, ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@ -299,7 +299,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
}
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t usize;
@ -314,8 +314,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return;
tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL);
tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL, true,
arena_ichoose(tsdn, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@ -330,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
idalloctm(tsd, tab, tcache_get(tsd, false), true, true);
idalloctm(tsdn, tab, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@ -338,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
idalloctm(tsdn, ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@ -347,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
bool
ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp)
{
bool ret;
@ -391,8 +391,8 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = true;
goto label_return;
}
ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
NULL);
ckh->tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
true, arena_ichoose(tsdn, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@ -404,7 +404,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
}
void
ckh_delete(tsd_t *tsd, ckh_t *ckh)
ckh_delete(tsdn_t *tsdn, ckh_t *ckh)
{
assert(ckh != NULL);
@ -421,9 +421,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
idalloctm(tsdn, ckh->tab, NULL, true, true);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
size_t
@ -456,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
bool
ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data)
{
bool ret;
@ -468,7 +468,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(tsd, ckh)) {
if (ckh_grow(tsdn, ckh)) {
ret = true;
goto label_return;
}
@ -480,7 +480,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
}
bool
ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
void **data)
{
size_t cell;
@ -502,7 +502,7 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
ckh_shrink(tsd, ckh);
ckh_shrink(tsdn, ckh);
}
return (false);

File diff suppressed because it is too large.

View File

@ -15,12 +15,21 @@ huge_node_get(const void *ptr)
}
static bool
huge_node_set(const void *ptr, extent_node_t *node)
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(ptr, node));
return (chunk_register(tsdn, ptr, node));
}
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
bool err;
err = huge_node_set(tsdn, ptr, node);
assert(!err);
}
static void
@ -31,18 +40,17 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
}
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
tcache_t *tcache)
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{
assert(usize == s2u(usize));
return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
size_t ausize;
@ -51,14 +59,16 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
/* Allocate one or more contiguous chunks for this request. */
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
return (NULL);
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, tcache, true, arena);
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
if (node == NULL)
return (NULL);
@ -67,34 +77,35 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsd, node, tcache, true, true);
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, is_zeroed, true);
if (huge_node_set(ret, node)) {
arena_chunk_dalloc_huge(arena, ret, usize);
idalloctm(tsd, node, tcache, true, true);
if (huge_node_set(tsdn, ret, node)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
/* Insert node into huge. */
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
memset(ret, 0, usize);
} else if (config_fill && unlikely(opt_junk_alloc))
memset(ret, 0xa5, usize);
memset(ret, JEMALLOC_ALLOC_JUNK, usize);
arena_decay_tick(tsd, arena);
arena_decay_tick(tsdn, arena);
return (ret);
}
@ -103,7 +114,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk_free)) {
@ -111,8 +122,8 @@ huge_dalloc_junk(void *ptr, size_t usize)
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
}
#ifdef JEMALLOC_JET
@ -122,8 +133,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
@ -147,24 +158,28 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
memset((void *)((uintptr_t)ptr + usize),
JEMALLOC_FREE_JUNK, sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
ptr, CHUNK_CEILING(oldsize), usize, sdiff);
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
&chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
@ -174,14 +189,15 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
memset((void *)((uintptr_t)ptr + oldsize),
JEMALLOC_ALLOC_JUNK, usize - oldsize);
}
}
}
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize)
{
extent_node_t *node;
arena_t *arena;
@ -192,7 +208,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
chunk_hooks = chunk_hooks_get(arena);
chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
@ -205,42 +221,45 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize),
sdiff);
post_zeroed = false;
} else {
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
CHUNK_CEILING(oldsize),
post_zeroed = !chunk_purge_wrapper(tsdn, arena,
&chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
usize), CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
post_zeroed = pre_zeroed;
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
return (false);
}
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
@ -248,14 +267,16 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
*/
is_zeroed_chunk = zero;
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
malloc_mutex_unlock(&arena->huge_mtx);
huge_node_reset(tsdn, ptr, node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
@ -268,15 +289,15 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
oldsize);
memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
usize - oldsize);
}
return (false);
}
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
@ -290,16 +311,16 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
zero)) {
arena_decay_tick(tsd, huge_aalloc(ptr));
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
oldsize, usize_min, zero)) {
arena_decay_tick(tsd, huge_aalloc(ptr));
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
ptr, oldsize, usize_min, zero)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
@ -310,16 +331,17 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
zero);
arena_decay_tick(tsd, huge_aalloc(ptr));
huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
usize_max, zero);
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
/* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
arena_decay_tick(tsd, huge_aalloc(ptr));
if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
usize_max)) {
arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
@ -327,18 +349,18 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
}
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero)
{
if (alignment <= chunksize)
return (huge_malloc(tsd, arena, usize, zero, tcache));
return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
return (huge_malloc(tsdn, arena, usize, zero));
return (huge_palloc(tsdn, arena, usize, alignment, zero));
}
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
@ -347,7 +369,8 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Try to avoid moving the allocation. */
if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
zero))
return (ptr);
/*
@ -355,19 +378,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
tcache);
ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
zero);
if (ret == NULL)
return (NULL);
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
isqalloc(tsd, ptr, oldsize, tcache, true);
return (ret);
}
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
extent_node_t *node;
arena_t *arena;
@ -375,17 +398,17 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
huge_dalloc_junk(extent_node_addr_get(node),
huge_dalloc_junk(tsdn, extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(extent_node_arena_get(node),
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsd, node, tcache, true, true);
idalloctm(tsdn, node, NULL, true, true);
arena_decay_tick(tsd, arena);
arena_decay_tick(tsdn, arena);
}
arena_t *
@ -396,7 +419,7 @@ huge_aalloc(const void *ptr)
}
size_t
huge_salloc(const void *ptr)
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
size_t size;
extent_node_t *node;
@ -404,15 +427,15 @@ huge_salloc(const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
size = extent_node_size_get(node);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (size);
}
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
@ -420,29 +443,29 @@ huge_prof_tctx_get(const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (tctx);
}
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
malloc_mutex_lock(&arena->huge_mtx);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
malloc_mutex_unlock(&arena->huge_mtx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}
void
huge_prof_tctx_reset(const void *ptr)
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{
huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}
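
In huge.c the literal junk bytes 0xa5 (filled on allocation) and 0x5a (filled on free) are replaced by the JEMALLOC_ALLOC_JUNK and JEMALLOC_FREE_JUNK macros. The macro definitions are not part of these hunks; the sketch below assumes the conventional byte values and only illustrates why both fills exist.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Assumed values; the diff introduces the macros but not their definitions. */
#define ALLOC_JUNK ((uint8_t)0xa5)
#define FREE_JUNK  ((uint8_t)0x5a)

int main(void)
{
	size_t usize = 16;
	uint8_t *p = malloc(usize);
	if (p == NULL)
		return (1);

	/* Junk-fill on allocation so reads of uninitialized memory stand out. */
	memset(p, ALLOC_JUNK, usize);
	printf("after alloc: 0x%02x\n", p[0]);

	/* Junk-fill on deallocation so use-after-free reads stand out. */
	memset(p, FREE_JUNK, usize);
	free(p);
	return (0);
}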

File diff suppressed because it is too large

View File

@ -80,7 +80,7 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif
bool
malloc_mutex_init(malloc_mutex_t *mutex)
malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
{
#ifdef _WIN32
@ -114,31 +114,34 @@ malloc_mutex_init(malloc_mutex_t *mutex)
}
pthread_mutexattr_destroy(&attr);
#endif
if (config_debug)
witness_init(&mutex->witness, name, rank, NULL);
return (false);
}
void
malloc_mutex_prefork(malloc_mutex_t *mutex)
malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
malloc_mutex_lock(mutex);
malloc_mutex_lock(tsdn, mutex);
}
void
malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
malloc_mutex_unlock(mutex);
malloc_mutex_unlock(tsdn, mutex);
}
void
malloc_mutex_postfork_child(malloc_mutex_t *mutex)
malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
malloc_mutex_unlock(mutex);
malloc_mutex_unlock(tsdn, mutex);
#else
if (malloc_mutex_init(mutex)) {
if (malloc_mutex_init(mutex, mutex->witness.name,
mutex->witness.rank)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
@ -164,7 +167,7 @@ malloc_mutex_first_thread(void)
}
bool
mutex_boot(void)
malloc_mutex_boot(void)
{
#ifndef JEMALLOC_MUTEX_INIT_CB
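
mutex.c now threads a tsdn_t * through lock/unlock and, in debug builds, registers a witness (name plus rank) for each mutex; the prefork/postfork trio keeps the usual fork(2) discipline of locking every allocator mutex before fork and releasing (or, where unlocking in the child is not reliable, re-initializing) it afterwards. A standalone sketch of that discipline using pthread_atfork(), independent of jemalloc's types:

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&big_lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&big_lock); }
/* A default mutex tolerates this unlock in the child; the diff re-initializes
 * instead on configurations without JEMALLOC_MUTEX_INIT_CB. */
static void postfork_child(void)  { pthread_mutex_unlock(&big_lock); }

int main(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);

	pid_t pid = fork();
	if (pid == 0) {
		/* The child sees a consistent, unlocked mutex. */
		pthread_mutex_lock(&big_lock);
		printf("child: lock usable after fork\n");
		pthread_mutex_unlock(&big_lock);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	pthread_mutex_lock(&big_lock);
	printf("parent: lock usable after fork\n");
	pthread_mutex_unlock(&big_lock);
	return (0);
}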

View File

@ -99,7 +99,7 @@ nstime_divide(const nstime_t *time, const nstime_t *divisor)
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(nstime_update_impl)
#define nstime_update JEMALLOC_N(n_nstime_update)
#endif
bool
nstime_update(nstime_t *time)
@ -144,5 +144,5 @@ nstime_update(nstime_t *time)
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(nstime_update)
nstime_update_t *nstime_update = JEMALLOC_N(nstime_update_impl);
nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
#endif
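
The nstime.c hunk only renames the JEMALLOC_JET alias of nstime_update(). The underlying technique: in test builds the real implementation is compiled under a private name and exported through a writable function pointer, so tests can interpose a mock clock. A small sketch of that indirection with hypothetical names:

#include <stdio.h>

/* Real implementation, compiled under a private name in test builds. */
static int time_update_impl(long *t)
{
	*t += 1;		/* pretend we read a clock */
	return (0);
}

/* Writable indirection; tests may point this at a mock. */
static int (*time_update)(long *) = time_update_impl;

static int time_update_mock(long *t)
{
	*t = 12345;		/* deterministic value for a test */
	return (0);
}

int main(void)
{
	long t = 0;

	time_update(&t);
	printf("real: %ld\n", t);

	time_update = time_update_mock;	/* interpose, as a JET test would */
	time_update(&t);
	printf("mock: %ld\n", t);
	return (0);
}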

View File

@ -1,29 +1,49 @@
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#endif
/******************************************************************************/
/* Data. */
#ifndef _WIN32
# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
# define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;
/******************************************************************************/
void *
pages_map(void *addr, size_t size)
pages_map(void *addr, size_t size, bool *commit)
{
void *ret;
assert(size != 0);
if (os_overcommits)
*commit = true;
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-1, 0);
{
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
ret = mmap(addr, size, prot, mmap_flags, -1, 0);
}
assert(ret != NULL);
if (ret == MAP_FAILED)
@ -67,7 +87,8 @@ pages_unmap(void *addr, size_t size)
}
void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
bool *commit)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
@ -77,7 +98,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
void *new_addr;
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size);
new_addr = pages_map(ret, size, commit);
if (new_addr == ret)
return (ret);
if (new_addr)
@ -101,17 +122,17 @@ static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{
#ifndef _WIN32
/*
* The following decommit/commit implementation is functional, but
* always disabled because it doesn't add value beyond improved
* debugging (at the cost of extra system calls) on systems that
* overcommit.
*/
if (false) {
int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
MAP_FIXED, -1, 0);
if (os_overcommits)
return (true);
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
-1, 0);
if (result == MAP_FAILED)
return (true);
if (result != addr) {
@ -125,7 +146,6 @@ pages_commit_impl(void *addr, size_t size, bool commit)
return (false);
}
#endif
return (true);
}
bool
@ -171,3 +191,63 @@ pages_purge(void *addr, size_t size)
return (unzeroed);
}
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void)
{
int vm_overcommit;
size_t sz;
sz = sizeof(vm_overcommit);
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
return (false); /* Error. */
return ((vm_overcommit & 0x3) == 0);
}
#endif
#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
static bool
os_overcommits_proc(void)
{
int fd;
char buf[1];
ssize_t nread;
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd == -1)
return (false); /* Error. */
nread = read(fd, &buf, sizeof(buf));
if (nread < 1)
return (false); /* Error. */
/*
* /proc/sys/vm/overcommit_memory meanings:
* 0: Heuristic overcommit.
* 1: Always overcommit.
* 2: Never overcommit.
*/
return (buf[0] == '0' || buf[0] == '1');
}
#endif
void
pages_boot(void)
{
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
os_overcommits = os_overcommits_proc();
# ifdef MAP_NORESERVE
if (os_overcommits)
mmap_flags |= MAP_NORESERVE;
# endif
#else
os_overcommits = false;
#endif
}
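
pages.c now decides at boot whether the OS overcommits (sysctlbyname("vm.overcommit") where available, otherwise /proc/sys/vm/overcommit_memory), maps memory PROT_NONE and commits it explicitly when it does not, and adds MAP_NORESERVE under Linux-style heuristic overcommit. A standalone, Linux-only sketch of the probe and the resulting mmap() flag selection (error handling and the sysctl path are omitted):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Returns nonzero if /proc/sys/vm/overcommit_memory reads 0 (heuristic) or
 * 1 (always); mirrors the probe added in pages.c. Linux-specific. */
static int os_overcommits(void)
{
	char c;
	int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
	if (fd == -1)
		return (0);
	ssize_t n = read(fd, &c, 1);
	close(fd);
	return (n == 1 && (c == '0' || c == '1'));
}

int main(void)
{
	int over = os_overcommits();
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef MAP_NORESERVE
	if (over)
		flags |= MAP_NORESERVE;
#endif
	/* Commit up front when the OS overcommits; otherwise only reserve. */
	int prot = over ? (PROT_READ | PROT_WRITE) : PROT_NONE;
	void *p = mmap(NULL, 1 << 20, prot, flags, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	printf("overcommit=%d, mapped 1 MiB with prot=%d\n", over, prot);
	munmap(p, 1 << 20);
	return (0);
}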

File diff suppressed because it is too large

View File

@ -13,24 +13,22 @@
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
size_t upper_bound);
/******************************************************************************/
static quarantine_t *
quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
{
quarantine_t *quarantine;
size_t size;
assert(tsd_nominal(tsd));
size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
sizeof(quarantine_obj_t));
quarantine = (quarantine_t *)iallocztm(tsd, size, size2index(size),
false, tcache_get(tsd, true), true, NULL, true);
quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@ -49,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd))
return;
quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
@ -57,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
}
static quarantine_t *
@ -65,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
if (ret == NULL) {
quarantine_drain_one(tsd, quarantine);
quarantine_drain_one(tsd_tsdn(tsd), quarantine);
return (quarantine);
}
@ -89,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
idalloctm(tsd, obj->ptr, NULL, false, true);
assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
idalloctm(tsdn, obj->ptr, NULL, false, true);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@ -108,24 +106,24 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
}
static void
quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
quarantine_drain_one(tsd, quarantine);
quarantine_drain_one(tsdn, quarantine);
}
void
quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
idalloctm(tsd, ptr, NULL, false, true);
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
return;
}
/*
@ -135,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(tsd, quarantine, upper_bound);
quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@ -160,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
memset(ptr, 0x5a, usize);
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
} else {
assert(quarantine->curbytes == 0);
idalloctm(tsd, ptr, NULL, false, true);
idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
}
}
@ -178,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
quarantine_drain(tsd, quarantine, 0);
idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
tsd_quarantine_set(tsd, NULL);
}
}
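
The quarantine keeps recently freed objects in a ring buffer bounded by a byte budget (the curbytes/curobjs/first fields above) and drains the oldest entries when a new object would exceed the budget. A self-contained sketch of that eviction policy with a fixed-capacity ring and made-up sizes:

#include <stdio.h>
#include <stdlib.h>

#define QCAP 8			/* ring capacity (power of two) */
#define QBUDGET 100		/* byte budget, analogous to opt.quarantine */

typedef struct { void *ptr; size_t usize; } qobj_t;

static qobj_t objs[QCAP];
static size_t first, curobjs, curbytes;

static void drain_one(void)
{
	qobj_t *o = &objs[first];
	free(o->ptr);				/* really release the oldest object */
	curbytes -= o->usize;
	curobjs--;
	first = (first + 1) & (QCAP - 1);
}

static void quarantine(void *ptr, size_t usize)
{
	while (curbytes + usize > QBUDGET && curobjs > 0)
		drain_one();			/* evict oldest until it fits */
	if (usize <= QBUDGET && curobjs < QCAP) {
		objs[(first + curobjs) & (QCAP - 1)] = (qobj_t){ptr, usize};
		curobjs++;
		curbytes += usize;
	} else
		free(ptr);			/* too big or ring full: free now */
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		quarantine(malloc(16), 16);
	printf("held: %zu objects, %zu bytes\n", curobjs, curbytes);
	while (curobjs > 0)
		drain_one();
	return (0);
}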

View File

@ -15,6 +15,8 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
{
unsigned bits_in_leaf, height, i;
assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
RTREE_BITS_PER_LEVEL));
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL

View File

@ -259,7 +259,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult, decay_time;
size_t page, pactive, pdirty, mapped;
size_t page, pactive, pdirty, mapped, retained;
size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
@ -349,6 +349,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"mapped: %12zu\n", mapped);
CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
malloc_cprintf(write_cb, cbopaque,
"retained: %12zu\n", retained);
CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
size_t);
CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
@ -468,7 +471,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
#define OPT_WRITE_UNSIGNED(n) \
if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
" opt."#n": %u\n", uv); \
}
#define OPT_WRITE_SIZE_T(n) \
if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
@ -597,7 +600,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (config_stats) {
size_t *cactive;
size_t allocated, active, metadata, resident, mapped;
size_t allocated, active, metadata, resident, mapped, retained;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
@ -605,10 +608,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu,"
" resident: %zu, mapped: %zu\n",
allocated, active, metadata, resident, mapped);
" resident: %zu, mapped: %zu, retained: %zu\n",
allocated, active, metadata, resident, mapped, retained);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n",
atomic_read_z(cactive));
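
stats.c gains the new retained counter alongside mapped, and fixes OPT_WRITE_UNSIGNED to print the unsigned value with %u instead of the size_t variable with %zu. From an application the new counter is read through the usual mallctl() interface; a hedged example, assuming jemalloc 4.2.0 or later built with statistics and the unprefixed mallctl() entry point (<malloc_np.h> on FreeBSD):

#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>

int main(void)
{
	/* Stats are cached; bump the epoch so the following reads are current. */
	uint64_t epoch = 1;
	size_t len = sizeof(epoch);
	mallctl("epoch", &epoch, &len, &epoch, len);

	size_t mapped, retained, sz = sizeof(size_t);
	if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0 &&
	    mallctl("stats.retained", &retained, &sz, NULL, 0) == 0)
		printf("mapped: %zu, retained: %zu\n", mapped, retained);
	return (0);
}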

View File

@ -23,10 +23,11 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
return (arena_salloc(ptr, false));
return (arena_salloc(tsdn, ptr, false));
}
void
@ -70,12 +71,12 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
}
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
void *ret;
arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
@ -106,12 +107,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
if (arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@ -128,9 +130,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
arena_bitselm_get(chunk, pageind);
arena_dalloc_bin_junked_locked(bin_arena, chunk,
ptr, bitselm);
arena_bitselm_get_mutable(chunk, pageind);
arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
bin_arena, chunk, ptr, bitselm);
} else {
/*
* This object was allocated via a different
@ -142,8 +144,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
}
if (config_stats && !merged_stats) {
/*
@ -151,11 +153,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@ -188,7 +190,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
if (config_prof)
idump = false;
malloc_mutex_lock(&locked_arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@ -211,8 +213,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
arena_dalloc_large_junked_locked(locked_arena,
chunk, ptr);
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, ptr);
} else {
/*
* This object was allocated via a different
@ -224,22 +226,23 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
ndeferred++;
}
}
malloc_mutex_unlock(&locked_arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
if (config_prof && idump)
prof_idump();
arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
prof_idump(tsd_tsdn(tsd));
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@ -249,34 +252,26 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
tbin->low_water = tbin->ncached;
}
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
static void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsdn, &arena->lock);
}
}
void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{
tcache_arena_dissociate(tcache, oldarena);
tcache_arena_associate(tcache, newarena);
}
void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsdn, &arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@ -289,11 +284,20 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
tcache_stats_merge(tcache, arena);
malloc_mutex_unlock(&arena->lock);
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
}
}
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
arena_t *newarena)
{
tcache_arena_dissociate(tsdn, tcache, oldarena);
tcache_arena_associate(tsdn, tcache, newarena);
}
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
@ -307,11 +311,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
return (tcache_create(tsd, arena));
return (tcache_create(tsd_tsdn(tsd), arena));
}
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
tcache_create(tsdn_t *tsdn, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@ -325,12 +329,12 @@ tcache_create(tsd_t *tsd, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
arena_get(0, false));
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
if (tcache == NULL)
return (NULL);
tcache_arena_associate(tcache, arena);
tcache_arena_associate(tsdn, tcache, arena);
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
@ -357,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
tcache_arena_dissociate(tcache, arena);
tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@ -365,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
}
@ -376,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
malloc_mutex_lock(&arena->lock);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
prof_idump(tsd_tsdn(tsd));
idalloctm(tsd, tcache, false, true, true);
idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
}
void
@ -412,21 +416,22 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
malloc_mutex_assert_owner(tsdn, &arena->lock);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
@ -440,13 +445,14 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
tcaches_create(tsdn_t *tsdn, unsigned *r_ind)
{
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
tcaches = base_alloc(sizeof(tcache_t *) *
tcaches = base_alloc(tsdn, sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@ -454,7 +460,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
tcache = tcache_create(tsd, arena_get(0, false));
arena = arena_ichoose(tsdn, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsdn, arena);
if (tcache == NULL)
return (true);
@ -500,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
tcache_boot(void)
tcache_boot(tsdn_t *tsdn)
{
unsigned i;
@ -518,7 +527,7 @@ tcache_boot(void)
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
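
A pattern repeated through the tcache flush paths above: request counters accumulate locklessly in the thread cache bin and are folded into the shared arena bin statistics under bin->lock when a flush visits that arena, with a manual merge afterwards if the flush loop never reached the thread's own arena. A stripped-down sketch of the same idea (per-thread counters merged under a mutex; the names are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bin_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long bin_nrequests;	/* shared stats, guarded by bin_lock */

static void *worker(void *arg)
{
	unsigned long local_nrequests = 0;	/* tcache-style local counter */
	(void)arg;

	for (int i = 0; i < 100000; i++)
		local_nrequests++;		/* hot path: no lock taken */

	/* Flush/merge: fold the local counter into shared stats, then reset. */
	pthread_mutex_lock(&bin_lock);
	bin_nrequests += local_nrequests;
	pthread_mutex_unlock(&bin_lock);
	return (NULL);
}

int main(void)
{
	pthread_t t[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("merged nrequests: %lu\n", bin_nrequests);
	return (0);
}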

View File

@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */
break;
case tsd_state_nominal:
#define O(n, t) \
#define O(n, t) \
n##_cleanup(tsd);
MALLOC_TSD
#undef O
@ -106,15 +106,17 @@ MALLOC_TSD
}
}
bool
tsd_t *
malloc_tsd_boot0(void)
{
tsd_t *tsd;
ncleanups = 0;
if (tsd_boot0())
return (true);
*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = true;
return (false);
return (NULL);
tsd = tsd_fetch();
*tsd_arenas_tdata_bypassp_get(tsd) = true;
return (tsd);
}
void
@ -169,10 +171,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
malloc_mutex_lock(&head->lock);
malloc_mutex_lock(NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(&head->lock);
malloc_mutex_unlock(NULL, &head->lock);
return (iter->data);
}
}
@ -180,7 +182,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
malloc_mutex_unlock(NULL, &head->lock);
return (NULL);
}
@ -188,8 +190,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
malloc_mutex_lock(&head->lock);
malloc_mutex_lock(NULL, &head->lock);
ql_remove(&head->blocks, block, link);
malloc_mutex_unlock(&head->lock);
malloc_mutex_unlock(NULL, &head->lock);
}
#endif
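
tsd.c's init path guards against recursive initialization: each thread registers an in-progress record in a list protected by a mutex, and a second entry by the same thread is detected and short-circuited. A simplified standalone sketch of that check (a fixed-size table instead of jemalloc's linked list, and without the un-registration step that tsd_init_finish() performs):

#include <pthread.h>
#include <stdio.h>

#define MAX_INIT 16
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t in_progress[MAX_INIT];
static int n_in_progress;

/* Returns 1 if this thread is already initializing (recursion); otherwise
 * registers it and returns 0. Loosely mirrors tsd_init_check_recursion(). */
static int init_check_recursion(void)
{
	pthread_t self = pthread_self();
	pthread_mutex_lock(&init_lock);
	for (int i = 0; i < n_in_progress; i++) {
		if (pthread_equal(in_progress[i], self)) {
			pthread_mutex_unlock(&init_lock);
			return (1);
		}
	}
	if (n_in_progress < MAX_INIT)
		in_progress[n_in_progress++] = self;
	pthread_mutex_unlock(&init_lock);
	return (0);
}

static void tsd_init(int depth)
{
	if (init_check_recursion()) {
		printf("recursive init detected at depth %d\n", depth);
		return;
	}
	if (depth < 1)
		tsd_init(depth + 1);	/* simulate allocation re-entering init */
}

int main(void)
{
	tsd_init(0);
	return (0);
}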

View File

@ -14,6 +14,7 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
unreachable(); \
} while (0)
#define not_implemented() do { \
@ -330,10 +331,9 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s);
}
int
size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
int ret;
size_t i;
const char *f;
@ -424,6 +424,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1;
int width = -1;
unsigned char len = '?';
char *s;
size_t slen;
f++;
/* Flags. */
@ -514,8 +516,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}
/* Conversion specifier. */
switch (*f) {
char *s;
size_t slen;
case '%':
/* %% */
APPEND_C(*f);
@ -601,21 +601,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0';
else
str[size - 1] = '\0';
assert(i < INT_MAX);
ret = (int)i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (ret);
return (i);
}
JEMALLOC_FORMAT_PRINTF(3, 4)
int
size_t
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
size_t ret;
va_list ap;
va_start(ap, format);

View File

@ -0,0 +1,136 @@
#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_internal.h"
void
witness_init(witness_t *witness, const char *name, witness_rank_t rank,
witness_comp_t *comp)
{
witness->name = name;
witness->rank = rank;
witness->comp = comp;
}
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
#endif
void
witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
{
witness_t *w;
malloc_printf("<jemalloc>: Lock rank order reversal:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lock_error
#define witness_lock_error JEMALLOC_N(witness_lock_error)
witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
#endif
void
witness_owner_error(const witness_t *witness)
{
malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_owner_error
#define witness_owner_error JEMALLOC_N(witness_owner_error)
witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
#endif
void
witness_not_owner_error(const witness_t *witness)
{
malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
witness->rank);
abort();
}
#ifdef JEMALLOC_JET
#undef witness_not_owner_error
#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
witness_not_owner_error_t *witness_not_owner_error =
JEMALLOC_N(n_witness_not_owner_error);
#endif
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
#endif
void
witness_lockless_error(const witness_list_t *witnesses)
{
witness_t *w;
malloc_printf("<jemalloc>: Should not own any locks:");
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
malloc_printf("\n");
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
witness_lockless_error_t *witness_lockless_error =
JEMALLOC_N(n_witness_lockless_error);
#endif
void
witnesses_cleanup(tsd_t *tsd)
{
witness_assert_lockless(tsd_tsdn(tsd));
/* Do nothing. */
}
void
witness_fork_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
void
witness_prefork(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, true);
}
void
witness_postfork_parent(tsd_t *tsd)
{
tsd_witness_fork_set(tsd, false);
}
void
witness_postfork_child(tsd_t *tsd)
{
#ifndef JEMALLOC_MUTEX_INIT_CB
witness_list_t *witnesses;
witnesses = tsd_witnessesp_get(tsd);
ql_new(witnesses);
#endif
tsd_witness_fork_set(tsd, false);
}
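
witness.c is new in this release: every mutex carries a witness with a name and a numeric rank, and in debug builds acquiring a lock whose rank does not exceed the rank of a lock already held triggers witness_lock_error(), which prints the held ranks and aborts. A single-threaded standalone sketch of the rank check (jemalloc keeps the held list per thread and permits equal ranks with a comparator; both are omitted here, and the names and ranks below are invented):

#include <stdio.h>
#include <stdlib.h>

typedef struct witness {
	const char *name;
	unsigned rank;
	struct witness *next;	/* link in the held-locks list */
} witness_t;

static witness_t *held;		/* per-thread in jemalloc; one thread here */

static void witness_lock(witness_t *w)
{
	/* Rank must strictly increase along the acquisition path. */
	for (witness_t *h = held; h != NULL; h = h->next) {
		if (w->rank <= h->rank) {
			fprintf(stderr, "lock rank order reversal: held %s(%u),"
			    " acquiring %s(%u)\n", h->name, h->rank, w->name,
			    w->rank);
			abort();
		}
	}
	w->next = held;
	held = w;
}

static void witness_unlock(witness_t *w)
{
	held = w->next;		/* assumes LIFO release, as used below */
	w->next = NULL;
}

int main(void)
{
	witness_t arenas = { "arenas", 10, NULL };
	witness_t bin = { "arena_bin", 20, NULL };

	witness_lock(&arenas);
	witness_lock(&bin);		/* ok: 20 > 10 */
	witness_unlock(&bin);
	witness_unlock(&arenas);

	witness_lock(&bin);
	witness_lock(&arenas);		/* rank reversal: reports and aborts */
	return (0);
}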

View File

@ -5,7 +5,7 @@
JEMALLOCSRCS:= jemalloc.c arena.c atomic.c base.c bitmap.c chunk.c \
chunk_dss.c chunk_mmap.c ckh.c ctl.c extent.c hash.c huge.c mb.c \
mutex.c nstime.c pages.c prng.c prof.c quarantine.c rtree.c stats.c \
tcache.c ticker.c tsd.c util.c
tcache.c ticker.c tsd.c util.c witness.c
SYM_MAPS+=${LIBC_SRCTOP}/stdlib/jemalloc/Symbol.map