Update jemalloc to 4.4.0.
commit 7fa7f12ff8
parent 435da98564
@@ -4,6 +4,33 @@ brevity. Much more detail can be found in the git revision history:
 
     https://github.com/jemalloc/jemalloc
 
+* 4.4.0 (December 3, 2016)
+
+  New features:
+  - Add configure support for *-*-linux-android.  (@cferris1000, @jasone)
+  - Add the --disable-syscall configure option, for use on systems that place
+    security-motivated limitations on syscall(2).  (@jasone)
+  - Add support for Debian GNU/kFreeBSD.  (@thesam)
+
+  Optimizations:
+  - Add extent serial numbers and use them where appropriate as a sort key that
+    is higher priority than address, so that the allocation policy prefers older
+    extents.  This tends to improve locality (decrease fragmentation) when
+    memory grows downward.  (@jasone)
+  - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
+    on Linux 4.5 and newer.  (@jasone)
+  - Mark partially purged arena chunks as non-huge-page.  This improves
+    interaction with Linux's transparent huge page functionality.  (@jasone)
+
+  Bug fixes:
+  - Fix size class computations for edge conditions involving extremely large
+    allocations.  This regression was first released in 4.0.0.  (@jasone,
+    @ingvarha)
+  - Remove overly restrictive assertions related to the cactive statistic.  This
+    regression was first released in 4.1.0.  (@jasone)
+  - Implement a more reliable detection scheme for os_unfair_lock on macOS.
+    (@jszakmeister)
+
 * 4.3.1 (November 7, 2016)
 
   Bug fixes:
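The serial-number optimization described above changes the free-extent sort key from (size, address) to (size, serial number, address), so allocation prefers the oldest extent among equally good fits. The following self-contained C sketch is illustrative only (the struct and names are hypothetical, not jemalloc's internals; the real code appears in the extent_tree_szsnad_* and arena_snad_comp hunks later in this diff):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical extent record: size, serial number (sn), base address. */
    typedef struct {
        size_t    size;
        size_t    sn;    /* Lower sn == backing memory obtained earlier. */
        uintptr_t addr;
    } extent_rec_t;

    /* Branchless three-way compare: -1, 0, or 1. */
    static int
    cmp_zu(size_t a, size_t b)
    {
        return ((a > b) - (a < b));
    }

    /*
     * Size first (best fit), then serial number (prefer older), then
     * address as a final tie breaker so the ordering stays total even
     * when serial numbers collide (e.g. after splitting an extent).
     */
    static int
    extent_szsnad_cmp(const extent_rec_t *a, const extent_rec_t *b)
    {
        int ret = cmp_zu(a->size, b->size);
        if (ret == 0)
            ret = cmp_zu(a->sn, b->sn);
        if (ret == 0)
            ret = cmp_zu((size_t)a->addr, (size_t)b->addr);
        return (ret);
    }

    int
    main(void)
    {
        extent_rec_t old = {4096, 1, 0x200000};
        extent_rec_t young = {4096, 7, 0x100000};

        /* Equal sizes: the older (lower sn) extent sorts first, even */
        /* though the younger one has the lower address. */
        assert(extent_szsnad_cmp(&old, &young) < 0);
        return (0);
    }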
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 3d2e721..b361db2 100644
+index d9c8345..9898c3c 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -53,11 +53,23 @@
@@ -47,10 +47,10 @@ index 3d2e721..b361db2 100644
 + </refsect1>
 </refentry>
 diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
-index f39ce54..a3ba55d 100644
+index ce4e602..35360b6 100644
 --- a/include/jemalloc/internal/arena.h
 +++ b/include/jemalloc/internal/arena.h
-@@ -719,8 +719,13 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+@@ -730,8 +730,13 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
 arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
 {
@@ -64,7 +64,7 @@ index f39ce54..a3ba55d 100644
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-@@ -779,8 +784,13 @@ arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
+@@ -790,8 +795,13 @@ arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
 JEMALLOC_ALWAYS_INLINE const size_t *
 arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
 {
@@ -79,7 +79,7 @@ index f39ce54..a3ba55d 100644
 
 JEMALLOC_ALWAYS_INLINE size_t
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index fdc8fef..56a35a4 100644
+index e7ace7d..d86c61d 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -8,6 +8,9 @@
@@ -144,10 +144,10 @@ index b442d2d..76518db 100644
 
 #endif /* JEMALLOC_H_EXTERNS */
 diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 87c8c9b..df576f6 100644
+index c1c6c40..c6395fd 100644
 --- a/include/jemalloc/internal/private_symbols.txt
 +++ b/include/jemalloc/internal/private_symbols.txt
-@@ -307,7 +306,6 @@ iralloct_realign
+@@ -310,7 +309,6 @@ iralloct_realign
 isalloc
 isdalloct
 isqalloc
@@ -335,7 +335,7 @@ index f943891..47d032c 100755
 +#include "jemalloc_FreeBSD.h"
 EOF
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 38650ff..f659b55 100644
+index baead66..8a49f26 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -4,6 +4,10 @@
@@ -349,7 +349,7 @@ index 38650ff..f659b55 100644
 /* Runtime configuration options. */
 const char *je_malloc_conf
 #ifndef _WIN32
-@@ -2756,6 +2760,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+@@ -2775,6 +2779,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 */
 /******************************************************************************/
 /*
@@ -457,7 +457,7 @@ index 38650ff..f659b55 100644
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */
-@@ -2894,4 +2999,11 @@ jemalloc_postfork_child(void)
+@@ -2913,4 +3018,11 @@ jemalloc_postfork_child(void)
 ctl_postfork_child(tsd_tsdn(tsd));
 }
 
@@ -516,7 +516,7 @@ index 6333e73..13f8d79 100644
 +#endif
 +}
 diff --git a/src/util.c b/src/util.c
-index 7905267..bee1c77 100644
+index dd8c236..a4ff287 100755
 --- a/src/util.c
 +++ b/src/util.c
 @@ -67,6 +67,22 @@ wrtmessage(void *cbopaque, const char *s)
@@ -1 +1 @@
-4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2
+4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc
@@ -2,12 +2,12 @@
 .\" Title: JEMALLOC
 .\" Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 11/08/2016
+.\" Date: 12/04/2016
 .\" Manual: User Manual
-.\" Source: jemalloc 4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2
+.\" Source: jemalloc 4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc
 .\" Language: English
 .\"
-.TH "JEMALLOC" "3" "11/08/2016" "jemalloc 4.3.1-0-g0110fa8451af" "User Manual"
+.TH "JEMALLOC" "3" "12/04/2016" "jemalloc 4.4.0-0-gf1f76357313e" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 4\&.3\&.1\-0\-g0110fa8451af905affd77c3bea0d545fee2251b2\&. More information can be found at the
+This manual describes jemalloc 4\&.4\&.0\-0\-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -365,7 +365,7 @@ for (i = 0; i < nbins; i++) {
 
 mib[2] = i;
 len = sizeof(bin_size);
-mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
+mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
 /* Do something with bin_size\&.\&.\&. */
 }
 .fi
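The man-page change above only adds an explicit (void *) cast on the oldp argument of mallctlbymib(3). For context, the mallctl*() pattern that example documents looks like this as a complete program (a sketch; on systems where jemalloc is not the libc allocator, link with -ljemalloc):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned nbins, i;
        size_t mib[4], miblen, len;

        len = sizeof(nbins);
        mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0);

        /* Translate the name once, then reuse the MIB for each bin. */
        miblen = sizeof(mib) / sizeof(mib[0]);
        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
        for (i = 0; i < nbins; i++) {
            size_t bin_size;

            mib[2] = i;
            len = sizeof(bin_size);
            mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
            printf("bin %u: %zu bytes\n", i, bin_size);
        }
        return (0);
    }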
@@ -190,6 +190,14 @@ struct arena_chunk_s {
 */
 extent_node_t node;
 
+/*
+* True if memory could be backed by transparent huge pages.  This is
+* only directly relevant to Linux, since it is the only supported
+* platform on which jemalloc interacts with explicit transparent huge
+* page controls.
+*/
+bool hugepage;
+
 /*
 * Map of pages within chunk that keeps track of free/large/small. The
 * first map_bias entries are omitted, since the chunk header does not
@@ -374,10 +382,12 @@ struct arena_s {
 
 dss_prec_t dss_prec;
 
 /* Extant arena chunks. */
 ql_head(extent_node_t) achunks;
 
+/* Extent serial number generator state. */
+size_t extent_sn_next;
+
 /*
 * In order to avoid rapid chunk allocation/deallocation when an arena
 * oscillates right on the cusp of needing a new chunk, cache the most
@@ -453,9 +463,9 @@ struct arena_s {
 * orderings are needed, which is why there are two trees with the same
 * contents.
 */
-extent_tree_t chunks_szad_cached;
+extent_tree_t chunks_szsnad_cached;
 extent_tree_t chunks_ad_cached;
-extent_tree_t chunks_szad_retained;
+extent_tree_t chunks_szsnad_retained;
 extent_tree_t chunks_ad_retained;
 
 malloc_mutex_t chunks_mtx;
@@ -522,13 +532,13 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
 extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
 void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
 void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-size_t alignment, bool *zero);
+size_t alignment, size_t *sn, bool *zero);
 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
-size_t usize);
+size_t usize, size_t sn);
 void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
 void *chunk, size_t oldsize, size_t usize);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
-void *chunk, size_t oldsize, size_t usize);
+void *chunk, size_t oldsize, size_t usize, size_t sn);
 bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
 void *chunk, size_t oldsize, size_t usize, bool *zero);
 ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
@@ -601,6 +611,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 unsigned arena_nthreads_get(arena_t *arena, bool internal);
 void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
+size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
 void arena_boot(void);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
@@ -58,15 +58,16 @@ void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
 chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-bool *zero, bool *commit, bool dalloc_node);
+size_t *sn, bool *zero, bool *commit, bool dalloc_node);
 void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
 chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-bool *zero, bool *commit);
+size_t *sn, bool *zero, bool *commit);
 void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
 bool committed);
+void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
+bool zeroed, bool committed);
 bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
 chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
 size_t length);
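These prototypes thread a size_t *sn out-parameter through chunk allocation so each chunk's serial number travels with it into the trees and back to deallocation. The minting itself is simple in principle; the sketch below assumes a hypothetical per-arena counter and C11 atomics purely for illustration (jemalloc's arena_extent_sn_next, declared earlier in this diff, advances its counter under the arena lock instead):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical per-arena state: the next serial number to hand out. */
    typedef struct {
        atomic_size_t extent_sn_next;
    } arena_sn_state_t;

    /*
     * Mint a serial number: the counter only moves forward, so a lower
     * number always means the extent's backing memory is older.
     */
    static size_t
    sn_next(arena_sn_state_t *arena)
    {
        return (atomic_fetch_add(&arena->extent_sn_next, 1));
    }

    int
    main(void)
    {
        arena_sn_state_t arena = {0};
        size_t first = sn_next(&arena), second = sn_next(&arena);

        printf("older sn %zu < newer sn %zu\n", first, second);
        return (0);
    }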
@@ -18,6 +18,20 @@ struct extent_node_s {
 /* Total region size. */
 size_t en_size;
 
+/*
+* Serial number (potentially non-unique).
+*
+* In principle serial numbers can wrap around on 32-bit systems if
+* JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+* back on address comparison for equal serial numbers, stable (if
+* imperfect) ordering is maintained.
+*
+* Serial numbers may not be unique even in the absence of wrap-around,
+* e.g. when splitting an extent and assigning the same serial number to
+* both resulting adjacent extents.
+*/
+size_t en_sn;
+
 /*
 * The zeroed flag is used by chunk recycling code to track whether
 * memory is zero-filled.
|
|||||||
qr(extent_node_t) cc_link;
|
qr(extent_node_t) cc_link;
|
||||||
|
|
||||||
union {
|
union {
|
||||||
/* Linkage for the size/address-ordered tree. */
|
/* Linkage for the size/sn/address-ordered tree. */
|
||||||
rb_node(extent_node_t) szad_link;
|
rb_node(extent_node_t) szsnad_link;
|
||||||
|
|
||||||
/* Linkage for arena's achunks, huge, and node_cache lists. */
|
/* Linkage for arena's achunks, huge, and node_cache lists. */
|
||||||
ql_elm(extent_node_t) ql_link;
|
ql_elm(extent_node_t) ql_link;
|
||||||
@@ -61,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 
@@ -73,6 +87,7 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 arena_t *extent_node_arena_get(const extent_node_t *node);
 void *extent_node_addr_get(const extent_node_t *node);
 size_t extent_node_size_get(const extent_node_t *node);
+size_t extent_node_sn_get(const extent_node_t *node);
 bool extent_node_zeroed_get(const extent_node_t *node);
 bool extent_node_committed_get(const extent_node_t *node);
 bool extent_node_achunk_get(const extent_node_t *node);
@@ -80,12 +95,13 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
 void extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void extent_node_addr_set(extent_node_t *node, void *addr);
 void extent_node_size_set(extent_node_t *node, size_t size);
+void extent_node_sn_set(extent_node_t *node, size_t sn);
 void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void extent_node_committed_set(extent_node_t *node, bool committed);
 void extent_node_achunk_set(extent_node_t *node, bool achunk);
 void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-size_t size, bool zeroed, bool committed);
+size_t size, size_t sn, bool zeroed, bool committed);
 void extent_node_dirty_linkage_init(extent_node_t *node);
 void extent_node_dirty_insert(extent_node_t *node,
 arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -114,6 +130,13 @@ extent_node_size_get(const extent_node_t *node)
 return (node->en_size);
 }
 
+JEMALLOC_INLINE size_t
+extent_node_sn_get(const extent_node_t *node)
+{
+
+return (node->en_sn);
+}
+
 JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
@@ -164,6 +187,13 @@ extent_node_size_set(extent_node_t *node, size_t size)
 node->en_size = size;
 }
 
+JEMALLOC_INLINE void
+extent_node_sn_set(extent_node_t *node, size_t sn)
+{
+
+node->en_sn = sn;
+}
+
 JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
@@ -194,12 +224,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
 
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-bool zeroed, bool committed)
+size_t sn, bool zeroed, bool committed)
 {
 
 extent_node_arena_set(node, arena);
 extent_node_addr_set(node, addr);
 extent_node_size_set(node, size);
+extent_node_sn_set(node, sn);
 extent_node_zeroed_set(node, zeroed);
 extent_node_committed_set(node, committed);
 extent_node_achunk_set(node, false);
@@ -334,7 +334,7 @@ typedef unsigned szind_t;
 
 /* Return the nearest aligned address at or below a. */
 #define ALIGNMENT_ADDR2BASE(a, alignment) \
-((void *)((uintptr_t)(a) & (-(alignment))))
+((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
 
 /* Return the offset between a and the nearest aligned address at or below a. */
 #define ALIGNMENT_ADDR2OFFSET(a, alignment) \
@@ -342,7 +342,7 @@ typedef unsigned szind_t;
 
 /* Return the smallest alignment multiple that is >= s. */
 #define ALIGNMENT_CEILING(s, alignment) \
-(((s) + (alignment - 1)) & (-(alignment)))
+(((s) + (alignment - 1)) & ((~(alignment)) + 1))
 
 /* Declare a variable-length array. */
 #if __STDC_VERSION__ < 199901L
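Both ALIGNMENT_* hunks above swap -(alignment) for (~(alignment)) + 1. Under two's-complement unsigned arithmetic these produce identical bit patterns, so the computed masks do not change; the new spelling merely avoids negating an unsigned expression, which some compilers flag. A small self-check, assuming a power-of-two alignment as the macros already require:

    #include <assert.h>
    #include <stddef.h>

    #define ALIGNMENT_CEILING(s, alignment) \
        (((s) + (alignment - 1)) & ((~(alignment)) + 1))

    int
    main(void)
    {
        size_t alignment = 64; /* Must be a power of two. */

        /* For unsigned types, -x and ~x + 1 are the same value. */
        assert((size_t)(0 - alignment) == (~alignment) + 1);

        /* Round 100 up to the next multiple of 64... */
        assert(ALIGNMENT_CEILING((size_t)100, alignment) == 128);
        /* ...and leave already-aligned values unchanged. */
        assert(ALIGNMENT_CEILING((size_t)128, alignment) == 128);
        return (0);
    }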
@@ -56,11 +56,6 @@
 */
 #define JEMALLOC_HAVE_BUILTIN_CLZ
 
-/*
-* Defined if madvise(2) is available.
-*/
-#define JEMALLOC_HAVE_MADVISE
-
 /*
 * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
 */
@@ -72,8 +67,8 @@
 */
 /* #undef JEMALLOC_OSSPIN */
 
-/* Defined if syscall(2) is available. */
-#define JEMALLOC_HAVE_SYSCALL
+/* Defined if syscall(2) is usable. */
+#define JEMALLOC_USE_SYSCALL
 
 /*
 * Defined if secure_getenv(3) is available.
|
|||||||
*/
|
*/
|
||||||
#define JEMALLOC_HAVE_ISSETUGID
|
#define JEMALLOC_HAVE_ISSETUGID
|
||||||
|
|
||||||
|
/* Defined if pthread_atfork(3) is available. */
|
||||||
|
#define JEMALLOC_HAVE_PTHREAD_ATFORK
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
|
||||||
*/
|
*/
|
||||||
@@ -253,18 +251,26 @@
 #define JEMALLOC_SYSCTL_VM_OVERCOMMIT
 /* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
 
+/* Defined if madvise(2) is available. */
+#define JEMALLOC_HAVE_MADVISE
+
 /*
 * Methods for purging unused pages differ between operating systems.
 *
-* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
-* such that new pages will be demand-zeroed if
-* the address region is later touched.
-* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
-* unused, such that they will be discarded rather
-* than swapped out.
+* madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+* will be discarded rather than swapped out.
+* madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
+* new pages will be demand-zeroed if the
+* address region is later touched.
 */
-/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
 #define JEMALLOC_PURGE_MADVISE_FREE
+#define JEMALLOC_PURGE_MADVISE_DONTNEED
+
+/*
+* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+* arguments to madvise(2).
+*/
+/* #undef JEMALLOC_THP */
 
 /* Define if operating system has alloca.h header. */
 /* #undef JEMALLOC_HAS_ALLOCA_H */
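With both JEMALLOC_PURGE_MADVISE_FREE and JEMALLOC_PURGE_MADVISE_DONTNEED defined after this change, purging can prefer the cheaper MADV_FREE hint and fall back to MADV_DONTNEED. A hedged standalone sketch of that selection (the function body is illustrative, not the library's exact implementation):

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    #define JEMALLOC_PURGE_MADVISE_FREE
    #define JEMALLOC_PURGE_MADVISE_DONTNEED

    /*
     * Hint that [addr, addr+size) holds no useful data.  MADV_FREE lets
     * the kernel reclaim lazily (cheap); MADV_DONTNEED discards pages
     * immediately.  Returns true on error, matching jemalloc's
     * bool-as-error convention.
     */
    static bool
    pages_purge(void *addr, size_t size)
    {
    #if defined(JEMALLOC_PURGE_MADVISE_FREE) && defined(MADV_FREE)
        return (madvise(addr, size, MADV_FREE) != 0);
    #elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && defined(MADV_DONTNEED)
        return (madvise(addr, size, MADV_DONTNEED) != 0);
    #else
        return (true); /* No purging mechanism available. */
    #endif
    }

    int
    main(void)
    {
        size_t sz = 1 << 20;
        void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);

        return (p == MAP_FAILED || pages_purge(p, sz));
    }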
@@ -16,6 +16,8 @@ void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
 bool pages_commit(void *addr, size_t size);
 bool pages_decommit(void *addr, size_t size);
 bool pages_purge(void *addr, size_t size);
+bool pages_huge(void *addr, size_t size);
+bool pages_nohuge(void *addr, size_t size);
 void pages_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
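pages_huge() and pages_nohuge() are the new primitives behind the chunk->hugepage flag seen in the arena hunks below: they opt a region into or out of transparent huge pages. A minimal sketch of such wrappers, assuming Linux-style MADV_[NO]HUGEPAGE as the config comment above describes (illustrative only, not the exact implementation):

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    /* Ask the kernel to back [addr, addr+size) with transparent huge pages. */
    static bool
    pages_huge(void *addr, size_t size)
    {
    #ifdef MADV_HUGEPAGE
        return (madvise(addr, size, MADV_HUGEPAGE) != 0);
    #else
        (void)addr; (void)size;
        return (true); /* Unsupported: report failure. */
    #endif
    }

    /* Forbid transparent huge pages for the region, e.g. once it has been
     * partially purged and a 2 MiB backing page would waste memory. */
    static bool
    pages_nohuge(void *addr, size_t size)
    {
    #ifdef MADV_NOHUGEPAGE
        return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
    #else
        (void)addr; (void)size;
        return (true);
    #endif
    }

    int
    main(void)
    {
        size_t sz = 2 << 20; /* One huge-page-sized region. */
        void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);

        if (p == MAP_FAILED)
            return (1);
        pages_huge(p, sz);   /* Opt in while the chunk is fully live. */
        pages_nohuge(p, sz); /* Opt back out after partial purging. */
        return (0);
    }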
@@ -36,6 +36,7 @@
 #define arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
 #define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
 #define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
+#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
 #define arena_get JEMALLOC_N(arena_get)
 #define arena_ichoose JEMALLOC_N(arena_ichoose)
 #define arena_init JEMALLOC_N(arena_init)
@@ -218,6 +219,8 @@
 #define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
 #define extent_node_size_get JEMALLOC_N(extent_node_size_get)
 #define extent_node_size_set JEMALLOC_N(extent_node_size_set)
+#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
+#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
 #define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
 #define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
 #define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
@@ -239,25 +242,25 @@
 #define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
 #define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
 #define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
-#define extent_tree_szad_destroy JEMALLOC_N(extent_tree_szad_destroy)
-#define extent_tree_szad_destroy_recurse JEMALLOC_N(extent_tree_szad_destroy_recurse)
-#define extent_tree_szad_empty JEMALLOC_N(extent_tree_szad_empty)
-#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
-#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
-#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
-#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
-#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
-#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
-#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
-#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
-#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
-#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
-#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
-#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
-#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
-#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
-#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
-#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
+#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
+#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
+#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
+#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
+#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
+#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
+#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
+#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
+#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
+#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
+#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
+#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
+#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
+#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
+#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
+#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
+#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
+#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
 #define ffs_llu JEMALLOC_N(ffs_llu)
 #define ffs_lu JEMALLOC_N(ffs_lu)
 #define ffs_u JEMALLOC_N(ffs_u)
@@ -393,7 +396,9 @@
 #define pages_boot JEMALLOC_N(pages_boot)
 #define pages_commit JEMALLOC_N(pages_commit)
 #define pages_decommit JEMALLOC_N(pages_decommit)
+#define pages_huge JEMALLOC_N(pages_huge)
 #define pages_map JEMALLOC_N(pages_map)
+#define pages_nohuge JEMALLOC_N(pages_nohuge)
 #define pages_purge JEMALLOC_N(pages_purge)
 #define pages_trim JEMALLOC_N(pages_trim)
 #define pages_unmap JEMALLOC_N(pages_unmap)
@@ -175,25 +175,21 @@ stats_cactive_get(void)
 JEMALLOC_INLINE void
 stats_cactive_add(size_t size)
 {
-UNUSED size_t cactive;
 
 assert(size > 0);
 assert((size & chunksize_mask) == 0);
 
-cactive = atomic_add_z(&stats_cactive, size);
-assert(cactive - size < cactive);
+atomic_add_z(&stats_cactive, size);
 }
 
 JEMALLOC_INLINE void
 stats_cactive_sub(size_t size)
 {
-UNUSED size_t cactive;
 
 assert(size > 0);
 assert((size & chunksize_mask) == 0);
 
-cactive = atomic_sub_z(&stats_cactive, size);
-assert(cactive + size > cactive);
+atomic_sub_z(&stats_cactive, size);
 }
 #endif
 
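This hunk is the "overly restrictive cactive assertions" fix from the ChangeLog. Each add or subtract is itself atomic, but the snapshot it returns can already include other threads' concurrent updates, so asserting that this thread's delta is still visible in the snapshot could fail spuriously. A compilable sketch of the corrected shape, using C11 atomics in place of jemalloc's atomic_*_z wrappers (illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    static atomic_size_t stats_cactive;

    /*
     * The update is atomic; only the old code's *assertion* raced.
     * Between this thread's add and its check, another thread's
     * subtraction could make the observed total smaller than the amount
     * just added, so assert(cactive - size < cactive) underflowed and
     * fired even though every update was correct.  The fixed shape
     * updates the counter and does not assert on a racy snapshot.
     */
    static void
    stats_cactive_add(size_t size)
    {
        atomic_fetch_add(&stats_cactive, size);
    }

    static void
    stats_cactive_sub(size_t size)
    {
        atomic_fetch_sub(&stats_cactive, size);
    }

    int
    main(void)
    {
        stats_cactive_add(1 << 21);
        stats_cactive_sub(1 << 21);
        return ((int)atomic_load(&stats_cactive));
    }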
@@ -41,8 +41,12 @@
 #define MALLOC_PRINTF_BUFSIZE 4096
 
 /* Junk fill patterns. */
-#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
-#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+#ifndef JEMALLOC_ALLOC_JUNK
+# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#endif
+#ifndef JEMALLOC_FREE_JUNK
+# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+#endif
 
 /*
 * Wrap a cpp argument that contains commas such that it isn't broken up into
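Guarding the junk-fill bytes with #ifndef makes them overridable from the build system instead of by patching the header. A hypothetical override looks like this:

    /* Build with: cc -DJEMALLOC_ALLOC_JUNK='((uint8_t)0xaa)' ... to override. */
    #include <stdint.h>
    #include <stdio.h>

    #ifndef JEMALLOC_ALLOC_JUNK
    # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) /* Default fill byte. */
    #endif

    int
    main(void)
    {
        printf("alloc junk byte: 0x%02x\n", JEMALLOC_ALLOC_JUNK);
        return (0);
    }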
@@ -36,13 +36,25 @@
 zero); \
 } \
 } while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
-ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
-zero) do { \
+#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
+(false)
+#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
+((ptr) != (old_ptr))
+#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
+(false)
+#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
+(ptr == NULL)
+#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
+(false)
+#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
+(old_ptr == NULL)
+#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
+old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
 if (unlikely(in_valgrind)) { \
 size_t rzsize = p2rz(tsdn, ptr); \
 \
-if (!maybe_moved || ptr == old_ptr) { \
+if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
+old_ptr)) { \
 VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
 usize, rzsize); \
 if (zero && old_usize < usize) { \
@@ -51,11 +63,13 @@
 old_usize), usize - old_usize); \
 } \
 } else { \
-if (!old_ptr_maybe_null || old_ptr != NULL) { \
+if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
+old_ptr_null(old_ptr)) { \
 valgrind_freelike_block(old_ptr, \
 old_rzsize); \
 } \
-if (!ptr_maybe_null || ptr != NULL) { \
+if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
+ptr_null(ptr)) { \
 size_t copy_size = (old_usize < usize) \
 ? old_usize : usize; \
 size_t tail_size = usize - copy_size; \
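The two hunks above replace runtime-looking boolean macro parameters with token pasting: callers pass the literal token no or maybe, and ## selects a helper macro, so statically impossible checks disappear during preprocessing rather than relying on dead-code elimination. A tiny standalone demonstration of the same dispatch pattern (hypothetical names):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Helper pair: the pasted suffix picks the predicate at preprocessing time. */
    #define PTR_NULL_no(ptr) (false)
    #define PTR_NULL_maybe(ptr) ((ptr) == NULL)

    /* Callers pass the literal token `no` or `maybe`, not a runtime bool. */
    #define CHECK_PTR_NULL(null_kind, ptr) PTR_NULL_##null_kind(ptr)

    int
    main(void)
    {
        int x = 0, *p = &x, *q = NULL;

        assert(!CHECK_PTR_NULL(no, q));    /* Expands to (false): no test emitted. */
        assert(!CHECK_PTR_NULL(maybe, p)); /* Expands to ((p) == NULL). */
        assert(CHECK_PTR_NULL(maybe, q));
        return (0);
    }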
@@ -87,12 +87,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define JEMALLOC_VERSION "4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2"
+#define JEMALLOC_VERSION "4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc"
 #define JEMALLOC_VERSION_MAJOR 4
-#define JEMALLOC_VERSION_MINOR 3
-#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_MINOR 4
+#define JEMALLOC_VERSION_BUGFIX 0
 #define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "0110fa8451af905affd77c3bea0d545fee2251b2"
+#define JEMALLOC_VERSION_GID "f1f76357313e7dcad7262f17a48ff0a2e005fcdc"
 
 # define MALLOCX_LG_ALIGN(la) ((int)(la))
 # if LG_SIZEOF_PTR == 2
@@ -38,8 +38,8 @@ static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
 bool dirty, bool cleaned, bool decommitted);
 static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
 arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
-arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+arena_bin_t *bin);
 
 /******************************************************************************/
 
@@ -55,8 +55,31 @@ arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
 return (arena_mapbits_size_decode(mapbits));
 }
 
+JEMALLOC_INLINE_C const extent_node_t *
+arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+{
+arena_chunk_t *chunk;
+
+chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+return (&chunk->node);
+}
+
 JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+{
+size_t a_sn, b_sn;
+
+assert(a != NULL);
+assert(b != NULL);
+
+a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
+b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+
+return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
+JEMALLOC_INLINE_C int
+arena_ad_comp(const arena_chunk_map_misc_t *a,
 const arena_chunk_map_misc_t *b)
 {
 uintptr_t a_miscelm = (uintptr_t)a;
@@ -68,9 +91,26 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a,
 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
 }
 
+JEMALLOC_INLINE_C int
+arena_snad_comp(const arena_chunk_map_misc_t *a,
+const arena_chunk_map_misc_t *b)
+{
+int ret;
+
+assert(a != NULL);
+assert(b != NULL);
+
+ret = arena_sn_comp(a, b);
+if (ret != 0)
+return (ret);
+
+ret = arena_ad_comp(a, b);
+return (ret);
+}
+
 /* Generate pairing heap functions. */
 ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
-ph_link, arena_run_addr_comp)
+ph_link, arena_snad_comp)
 
 #ifdef JEMALLOC_JET
 #undef run_quantize_floor
@@ -529,7 +569,7 @@ arena_chunk_init_spare(arena_t *arena)
 
 static bool
 arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-bool zero)
+size_t sn, bool zero)
 {
 
 /*
@@ -538,7 +578,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
 * of runs is tracked individually, and upon chunk deallocation the
 * entire chunk is in a consistent commit state.
 */
-extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
 extent_node_achunk_set(&chunk->node, true);
 return (chunk_register(tsdn, chunk, &chunk->node));
 }
@@ -548,28 +588,30 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
 chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
 {
 arena_chunk_t *chunk;
+size_t sn;
 
 malloc_mutex_unlock(tsdn, &arena->lock);
 
 chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
-NULL, chunksize, chunksize, zero, commit);
+NULL, chunksize, chunksize, &sn, zero, commit);
 if (chunk != NULL && !*commit) {
 /* Commit header. */
 if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
 LG_PAGE, arena->ind)) {
 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-(void *)chunk, chunksize, *zero, *commit);
+(void *)chunk, chunksize, sn, *zero, *commit);
 chunk = NULL;
 }
 }
-if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
+*zero)) {
 if (!*commit) {
 /* Undo commit of header. */
 chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
 LG_PAGE, arena->ind);
 }
 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
-chunksize, *zero, *commit);
+chunksize, sn, *zero, *commit);
 chunk = NULL;
 }
 
@@ -583,13 +625,14 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
 {
 arena_chunk_t *chunk;
 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+size_t sn;
 
 chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
-chunksize, zero, commit, true);
+chunksize, &sn, zero, commit, true);
 if (chunk != NULL) {
-if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
-chunksize, true);
+chunksize, sn, true);
 return (NULL);
 }
 }
@@ -621,6 +664,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
 if (chunk == NULL)
 return (NULL);
 
+chunk->hugepage = true;
+
 /*
 * Initialize the map to contain one maximal free untouched run. Mark
 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
@@ -684,11 +729,14 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
 static void
 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 {
+size_t sn, hugepage;
 bool committed;
 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
 chunk_deregister(chunk, &chunk->node);
 
+sn = extent_node_sn_get(&chunk->node);
+hugepage = chunk->hugepage;
 committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
 if (!committed) {
 /*
@@ -701,9 +749,17 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
 arena->ind);
 }
+if (!hugepage) {
+/*
+* Convert chunk back to the default state, so that all
+* subsequent chunk allocations start out with chunks that can
+* be backed by transparent huge pages.
+*/
+pages_huge(chunk, chunksize);
+}
 
 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
-committed);
+sn, committed);
 
 if (config_stats) {
 arena->stats.mapped -= chunksize;
@@ -859,14 +915,14 @@ arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
 
 static void *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
-chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
-size_t csize)
+chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
+bool *zero, size_t csize)
 {
 void *ret;
 bool commit = true;
 
 ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
-alignment, zero, &commit);
+alignment, sn, zero, &commit);
 if (ret == NULL) {
 /* Revert optimistic stats updates. */
 malloc_mutex_lock(tsdn, &arena->lock);
@@ -883,7 +939,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
 
 void *
 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-size_t alignment, bool *zero)
+size_t alignment, size_t *sn, bool *zero)
 {
 void *ret;
 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -900,18 +956,19 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 arena_nactive_add(arena, usize >> LG_PAGE);
 
 ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
-alignment, zero, &commit, true);
+alignment, sn, zero, &commit, true);
 malloc_mutex_unlock(tsdn, &arena->lock);
 if (ret == NULL) {
 ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
-usize, alignment, zero, csize);
+usize, alignment, sn, zero, csize);
 }
 
 return (ret);
 }
 
 void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
+size_t sn)
 {
 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 size_t csize;
@@ -924,7 +981,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
 }
 arena_nactive_sub(arena, usize >> LG_PAGE);
 
-chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
 malloc_mutex_unlock(tsdn, &arena->lock);
 }
 
@@ -948,7 +1005,7 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
 
 void
 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
-size_t oldsize, size_t usize)
+size_t oldsize, size_t usize, size_t sn)
 {
 size_t udiff = oldsize - usize;
 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -967,7 +1024,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
 CHUNK_CEILING(usize));
 
 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-true);
+sn, true);
 }
 malloc_mutex_unlock(tsdn, &arena->lock);
 }
@@ -975,13 +1032,13 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
 static bool
 arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
 chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
-bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
 {
 bool err;
 bool commit = true;
 
 err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-chunksize, zero, &commit) == NULL);
+chunksize, sn, zero, &commit) == NULL);
 if (err) {
 /* Revert optimistic stats updates. */
 malloc_mutex_lock(tsdn, &arena->lock);
@@ -995,7 +1052,7 @@ arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 cdiff, true, arena->ind)) {
 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-*zero, true);
+*sn, *zero, true);
 err = true;
 }
 return (err);
@@ -1010,6 +1067,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
 size_t udiff = usize - oldsize;
 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+size_t sn;
 bool commit = true;
 
 malloc_mutex_lock(tsdn, &arena->lock);
@@ -1022,16 +1080,16 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
 arena_nactive_add(arena, udiff >> LG_PAGE);
 
 err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-chunksize, zero, &commit, true) == NULL);
+chunksize, &sn, zero, &commit, true) == NULL);
 malloc_mutex_unlock(tsdn, &arena->lock);
 if (err) {
 err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
-&chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
-cdiff);
+&chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
+udiff, cdiff);
 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 cdiff, true, arena->ind)) {
 chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-*zero, true);
+sn, *zero, true);
 err = true;
 }
 
@ -1519,6 +1577,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
|
|
||||||
if (rdelm == &chunkselm->rd) {
|
if (rdelm == &chunkselm->rd) {
|
||||||
extent_node_t *chunkselm_next;
|
extent_node_t *chunkselm_next;
|
||||||
|
size_t sn;
|
||||||
bool zero, commit;
|
bool zero, commit;
|
||||||
UNUSED void *chunk;
|
UNUSED void *chunk;
|
||||||
|
|
||||||
@ -1536,8 +1595,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
commit = false;
|
commit = false;
|
||||||
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
|
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
|
||||||
extent_node_addr_get(chunkselm),
|
extent_node_addr_get(chunkselm),
|
||||||
extent_node_size_get(chunkselm), chunksize, &zero,
|
extent_node_size_get(chunkselm), chunksize, &sn,
|
||||||
&commit, false);
|
&zero, &commit, false);
|
||||||
assert(chunk == extent_node_addr_get(chunkselm));
|
assert(chunk == extent_node_addr_get(chunkselm));
|
||||||
assert(zero == extent_node_zeroed_get(chunkselm));
|
assert(zero == extent_node_zeroed_get(chunkselm));
|
||||||
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
|
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
|
||||||
@ -1634,6 +1693,17 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
run_size = arena_mapbits_large_size_get(chunk, pageind);
|
run_size = arena_mapbits_large_size_get(chunk, pageind);
|
||||||
npages = run_size >> LG_PAGE;
|
npages = run_size >> LG_PAGE;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If this is the first run purged within chunk, mark
|
||||||
|
* the chunk as non-huge. This will prevent all use of
|
||||||
|
* transparent huge pages for this chunk until the chunk
|
||||||
|
* as a whole is deallocated.
|
||||||
|
*/
|
||||||
|
if (chunk->hugepage) {
|
||||||
|
pages_nohuge(chunk, chunksize);
|
||||||
|
chunk->hugepage = false;
|
||||||
|
}
|
||||||
|
|
||||||
assert(pageind + npages <= chunk_npages);
|
assert(pageind + npages <= chunk_npages);
|
||||||
assert(!arena_mapbits_decommitted_get(chunk, pageind));
|
assert(!arena_mapbits_decommitted_get(chunk, pageind));
|
||||||
assert(!arena_mapbits_decommitted_get(chunk,
|
assert(!arena_mapbits_decommitted_get(chunk,
|
||||||
@ -1703,13 +1773,14 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
cc_link);
|
cc_link);
|
||||||
void *addr = extent_node_addr_get(chunkselm);
|
void *addr = extent_node_addr_get(chunkselm);
|
||||||
size_t size = extent_node_size_get(chunkselm);
|
size_t size = extent_node_size_get(chunkselm);
|
||||||
|
size_t sn = extent_node_sn_get(chunkselm);
|
||||||
bool zeroed = extent_node_zeroed_get(chunkselm);
|
bool zeroed = extent_node_zeroed_get(chunkselm);
|
||||||
bool committed = extent_node_committed_get(chunkselm);
|
bool committed = extent_node_committed_get(chunkselm);
|
||||||
extent_node_dirty_remove(chunkselm);
|
extent_node_dirty_remove(chunkselm);
|
||||||
arena_node_dalloc(tsdn, arena, chunkselm);
|
arena_node_dalloc(tsdn, arena, chunkselm);
|
||||||
chunkselm = chunkselm_next;
|
chunkselm = chunkselm_next;
|
||||||
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
|
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
|
||||||
size, zeroed, committed);
|
size, sn, zeroed, committed);
|
||||||
} else {
|
} else {
|
||||||
arena_chunk_t *chunk =
|
arena_chunk_t *chunk =
|
||||||
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
|
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
|
||||||
@ -2315,7 +2386,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
|
|||||||
arena_dalloc_bin_run(tsdn, arena, chunk, run,
|
arena_dalloc_bin_run(tsdn, arena, chunk, run,
|
||||||
bin);
|
bin);
|
||||||
} else
|
} else
|
||||||
arena_bin_lower_run(arena, chunk, run, bin);
|
arena_bin_lower_run(arena, run, bin);
|
||||||
}
|
}
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
@ -2820,16 +2891,18 @@ arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
|
arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
|
||||||
arena_bin_t *bin)
|
|
||||||
{
|
{
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Make sure that if bin->runcur is non-NULL, it refers to the lowest
|
* Make sure that if bin->runcur is non-NULL, it refers to the
|
||||||
* non-full run. It is okay to NULL runcur out rather than proactively
|
* oldest/lowest non-full run. It is okay to NULL runcur out rather
|
||||||
* keeping it pointing at the lowest non-full run.
|
* than proactively keeping it pointing at the oldest/lowest non-full
|
||||||
|
* run.
|
||||||
*/
|
*/
|
||||||
if ((uintptr_t)run < (uintptr_t)bin->runcur) {
|
if (bin->runcur != NULL &&
|
||||||
|
arena_snad_comp(arena_run_to_miscelm(bin->runcur),
|
||||||
|
arena_run_to_miscelm(run)) > 0) {
|
||||||
/* Switch runcur. */
|
/* Switch runcur. */
|
||||||
if (bin->runcur->nfree > 0)
|
if (bin->runcur->nfree > 0)
|
||||||
arena_bin_runs_insert(bin, bin->runcur);
|
arena_bin_runs_insert(bin, bin->runcur);
|
||||||
@ -2865,7 +2938,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
|
|||||||
arena_dissociate_bin_run(chunk, run, bin);
|
arena_dissociate_bin_run(chunk, run, bin);
|
||||||
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
|
arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
|
||||||
} else if (run->nfree == 1 && run != bin->runcur)
|
} else if (run->nfree == 1 && run != bin->runcur)
|
||||||
arena_bin_lower_run(arena, chunk, run, bin);
|
arena_bin_lower_run(arena, run, bin);
|
||||||
|
|
||||||
if (config_stats) {
|
if (config_stats) {
|
||||||
bin->stats.ndalloc++;
|
bin->stats.ndalloc++;
|
||||||
@ -3452,6 +3525,13 @@ arena_nthreads_dec(arena_t *arena, bool internal)
|
|||||||
atomic_sub_u(&arena->nthreads[internal], 1);
|
atomic_sub_u(&arena->nthreads[internal], 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
size_t
|
||||||
|
arena_extent_sn_next(arena_t *arena)
|
||||||
|
{
|
||||||
|
|
||||||
|
return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
|
||||||
|
}
|
||||||
|
|
||||||
arena_t *
|
arena_t *
|
||||||
arena_new(tsdn_t *tsdn, unsigned ind)
|
arena_new(tsdn_t *tsdn, unsigned ind)
|
||||||
{
|
{
|
||||||
@ -3511,6 +3591,8 @@ arena_new(tsdn_t *tsdn, unsigned ind)
|
|||||||
|
|
||||||
ql_new(&arena->achunks);
|
ql_new(&arena->achunks);
|
||||||
|
|
||||||
|
arena->extent_sn_next = 0;
|
||||||
|
|
||||||
arena->spare = NULL;
|
arena->spare = NULL;
|
||||||
|
|
||||||
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
|
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
|
||||||
@ -3532,9 +3614,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
|
|||||||
WITNESS_RANK_ARENA_HUGE))
|
WITNESS_RANK_ARENA_HUGE))
|
||||||
return (NULL);
|
return (NULL);
|
||||||
|
|
||||||
extent_tree_szad_new(&arena->chunks_szad_cached);
|
extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
|
||||||
extent_tree_ad_new(&arena->chunks_ad_cached);
|
extent_tree_ad_new(&arena->chunks_ad_cached);
|
||||||
extent_tree_szad_new(&arena->chunks_szad_retained);
|
extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
|
||||||
extent_tree_ad_new(&arena->chunks_ad_retained);
|
extent_tree_ad_new(&arena->chunks_ad_retained);
|
||||||
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
|
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
|
||||||
WITNESS_RANK_ARENA_CHUNKS))
|
WITNESS_RANK_ARENA_CHUNKS))
|
||||||
|
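The arena hunks above stop comparing candidate runs by raw address in arena_bin_lower_run() and instead consult arena_snad_comp(), so bin->runcur tracks the oldest/lowest non-full run. A minimal sketch of that ordering rule follows, assuming (as the surrounding comments suggest) that the comparison is by serial number first and address second; the struct and names here are illustrative stand-ins, not jemalloc's internals.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-run metadata consulted here. */
struct run_key {
	size_t		sn;	/* Extent serial number: lower == older. */
	uintptr_t	addr;	/* Run base address. */
};

static int
snad_comp(const struct run_key *a, const struct run_key *b)
{
	int ret = (a->sn > b->sn) - (a->sn < b->sn);

	if (ret != 0)
		return (ret);	/* Older run sorts first. */
	return ((a->addr > b->addr) - (a->addr < b->addr));
}

int
main(void)
{
	struct run_key old = {3, 0x7000}, young = {8, 0x1000};

	/* Negative: the older run wins despite its higher address. */
	printf("%d\n", snad_comp(&old, &young));
	return (0);
}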
@@ -5,7 +5,8 @@
 /* Data. */

 static malloc_mutex_t	base_mtx;
-static extent_tree_t	base_avail_szad;
+static size_t		base_extent_sn_next;
+static extent_tree_t	base_avail_szsnad;
 static extent_node_t	*base_nodes;
 static size_t		base_allocated;
 static size_t		base_resident;
@@ -39,6 +40,14 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
 	base_nodes = node;
 }

+static void
+base_extent_node_init(extent_node_t *node, void *addr, size_t size)
+{
+	size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
+
+	extent_node_init(node, NULL, addr, size, sn, true, true);
+}
+
 static extent_node_t *
 base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 {
@@ -68,7 +77,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 			base_resident += PAGE_CEILING(nsize);
 		}
 	}
-	extent_node_init(node, NULL, addr, csize, true, true);
+	base_extent_node_init(node, addr, csize);
 	return (node);
 }

@@ -92,12 +101,12 @@ base_alloc(tsdn_t *tsdn, size_t size)
 	csize = CACHELINE_CEILING(size);

 	usize = s2u(csize);
-	extent_node_init(&key, NULL, NULL, usize, false, false);
+	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
 	malloc_mutex_lock(tsdn, &base_mtx);
-	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
 	if (node != NULL) {
 		/* Use existing space. */
-		extent_tree_szad_remove(&base_avail_szad, node);
+		extent_tree_szsnad_remove(&base_avail_szsnad, node);
 	} else {
 		/* Try to allocate more space. */
 		node = base_chunk_alloc(tsdn, csize);
@@ -111,7 +120,7 @@ base_alloc(tsdn_t *tsdn, size_t size)
 	if (extent_node_size_get(node) > csize) {
 		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
 		extent_node_size_set(node, extent_node_size_get(node) - csize);
-		extent_tree_szad_insert(&base_avail_szad, node);
+		extent_tree_szsnad_insert(&base_avail_szsnad, node);
 	} else
 		base_node_dalloc(tsdn, node);
 	if (config_stats) {
@@ -149,7 +158,8 @@ base_boot(void)

 	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
 		return (true);
-	extent_tree_szad_new(&base_avail_szad);
+	base_extent_sn_next = 0;
+	extent_tree_szsnad_new(&base_avail_szsnad);
 	base_nodes = NULL;

 	return (false);
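base_extent_node_init() above stamps each new base extent with a value drawn from a monotonically increasing counter via an atomic fetch-and-add. A sketch of the same pattern using C11 atomics (jemalloc itself goes through its internal atomic_add_z() wrapper; names below are illustrative):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_size_t sn_next;	/* Analogous to base_extent_sn_next. */

static size_t
sn_alloc(void)
{
	/*
	 * fetch_add returns the pre-increment value, so concurrent
	 * callers each receive a distinct, increasing serial number.
	 */
	return (atomic_fetch_add(&sn_next, 1));
}

int
main(void)
{
	size_t a = sn_alloc();
	size_t b = sn_alloc();

	printf("%zu %zu\n", a, b);	/* 0 1 */
	return (0);
}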
@@ -50,9 +50,9 @@ const chunk_hooks_t chunk_hooks_default = {
  */

 static void	chunk_record(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
-    bool committed);
+    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
+    bool zeroed, bool committed);

 /******************************************************************************/

@@ -183,33 +183,35 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
 }

 /*
- * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
- * fits.
+ * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
+ * best fits.
  */
 static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
 {
 	extent_node_t key;

 	assert(size == CHUNK_CEILING(size));

-	extent_node_init(&key, arena, NULL, size, false, false);
-	return (extent_tree_szad_nsearch(chunks_szad, &key));
+	extent_node_init(&key, arena, NULL, size, 0, false, false);
+	return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
 }

 static void *
 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_node)
+    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit, bool dalloc_node)
 {
 	void *ret;
 	extent_node_t *node;
 	size_t alloc_size, leadsize, trailsize;
 	bool zeroed, committed;

+	assert(CHUNK_CEILING(size) == size);
+	assert(alignment > 0);
 	assert(new_addr == NULL || alignment == chunksize);
+	assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
 	/*
 	 * Cached chunks use the node linkage embedded in their headers, in
 	 * which case dalloc_node is true, and new_addr is non-NULL because
@@ -217,7 +219,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	 */
 	assert(dalloc_node || new_addr != NULL);

-	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+	alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
@@ -225,12 +227,11 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 	if (new_addr != NULL) {
 		extent_node_t key;
-		extent_node_init(&key, arena, new_addr, alloc_size, false,
+		extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
 		    false);
 		node = extent_tree_ad_search(chunks_ad, &key);
 	} else {
-		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
-		    alloc_size);
+		node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
 	}
 	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
 	    size)) {
@@ -243,6 +244,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(extent_node_size_get(node) >= leadsize + size);
 	trailsize = extent_node_size_get(node) - leadsize - size;
 	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+	*sn = extent_node_sn_get(node);
 	zeroed = extent_node_zeroed_get(node);
 	if (zeroed)
 		*zero = true;
@@ -257,13 +259,13 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		return (NULL);
 	}
 	/* Remove node from the tree. */
-	extent_tree_szad_remove(chunks_szad, node);
+	extent_tree_szsnad_remove(chunks_szsnad, node);
 	extent_tree_ad_remove(chunks_ad, node);
 	arena_chunk_cache_maybe_remove(arena, node, cache);
 	if (leadsize != 0) {
 		/* Insert the leading space as a smaller chunk. */
 		extent_node_size_set(node, leadsize);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 		node = NULL;
@@ -275,9 +277,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			if (dalloc_node && node != NULL)
 				arena_node_dalloc(tsdn, arena, node);
 			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-			chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
-			    chunks_ad, cache, ret, size + trailsize, zeroed,
-			    committed);
+			chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
+			    chunks_ad, cache, ret, size + trailsize, *sn,
+			    zeroed, committed);
 			return (NULL);
 		}
 		/* Insert the trailing space as a smaller chunk. */
@@ -286,22 +288,22 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			if (node == NULL) {
 				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 				chunk_record(tsdn, arena, chunk_hooks,
-				    chunks_szad, chunks_ad, cache, ret, size +
-				    trailsize, zeroed, committed);
+				    chunks_szsnad, chunks_ad, cache, ret, size
+				    + trailsize, *sn, zeroed, committed);
 				return (NULL);
 			}
 		}
 		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
-		    trailsize, zeroed, committed);
-		extent_tree_szad_insert(chunks_szad, node);
+		    trailsize, *sn, zeroed, committed);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		extent_tree_ad_insert(chunks_ad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 		node = NULL;
 	}
 	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
 		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-		chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
-		    cache, ret, size, zeroed, committed);
+		chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
+		    cache, ret, size, *sn, zeroed, committed);
 		return (NULL);
 	}
 	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -385,8 +387,8 @@ chunk_alloc_base(size_t size)

 void *
 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_node)
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit, bool dalloc_node)
 {
 	void *ret;

@@ -396,8 +398,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert((alignment & chunksize_mask) == 0);

 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
-	    &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
-	    new_addr, size, alignment, zero, commit, dalloc_node);
+	    &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
+	    new_addr, size, alignment, sn, zero, commit, dalloc_node);
 	if (ret == NULL)
 		return (NULL);
 	if (config_valgrind)
@@ -451,7 +453,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,

 static void *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit)
 {
 	void *ret;

@@ -461,8 +464,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert((alignment & chunksize_mask) == 0);

 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
-	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
-	    new_addr, size, alignment, zero, commit, true);
+	    &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
+	    new_addr, size, alignment, sn, zero, commit, true);

 	if (config_stats && ret != NULL)
 		arena->stats.retained -= size;
@@ -472,14 +475,15 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

 void *
 chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+    bool *commit)
 {
 	void *ret;

 	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

 	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
-	    alignment, zero, commit);
+	    alignment, sn, zero, commit);
 	if (ret == NULL) {
 		if (chunk_hooks->alloc == chunk_alloc_default) {
 			/* Call directly to propagate tsdn. */
@@ -493,6 +497,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		if (ret == NULL)
 			return (NULL);

+		*sn = arena_extent_sn_next(arena);
+
 		if (config_valgrind && chunk_hooks->alloc !=
 		    chunk_alloc_default)
 			JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
@@ -503,8 +509,8 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

 static void
 chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed)
+    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
 {
 	bool unzeroed;
 	extent_node_t *node, *prev;
@@ -516,7 +522,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

 	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
-	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
 	    false, false);
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
 	/* Try to coalesce forward. */
@@ -528,15 +534,17 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		/*
 		 * Coalesce chunk with the following address range.  This does
 		 * not change the position within chunks_ad, so only
-		 * remove/insert from/into chunks_szad.
+		 * remove/insert from/into chunks_szsnad.
 		 */
-		extent_tree_szad_remove(chunks_szad, node);
+		extent_tree_szsnad_remove(chunks_szsnad, node);
 		arena_chunk_cache_maybe_remove(arena, node, cache);
 		extent_node_addr_set(node, chunk);
 		extent_node_size_set(node, size + extent_node_size_get(node));
+		if (sn < extent_node_sn_get(node))
+			extent_node_sn_set(node, sn);
 		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
 		    !unzeroed);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
@@ -554,10 +562,10 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			}
 			goto label_return;
 		}
-		extent_node_init(node, arena, chunk, size, !unzeroed,
+		extent_node_init(node, arena, chunk, size, sn, !unzeroed,
 		    committed);
 		extent_tree_ad_insert(chunks_ad, node);
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	}

@@ -571,19 +579,21 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		/*
 		 * Coalesce chunk with the previous address range.  This does
 		 * not change the position within chunks_ad, so only
-		 * remove/insert node from/into chunks_szad.
+		 * remove/insert node from/into chunks_szsnad.
 		 */
-		extent_tree_szad_remove(chunks_szad, prev);
+		extent_tree_szsnad_remove(chunks_szsnad, prev);
 		extent_tree_ad_remove(chunks_ad, prev);
 		arena_chunk_cache_maybe_remove(arena, prev, cache);
-		extent_tree_szad_remove(chunks_szad, node);
+		extent_tree_szsnad_remove(chunks_szsnad, node);
 		arena_chunk_cache_maybe_remove(arena, node, cache);
 		extent_node_addr_set(node, extent_node_addr_get(prev));
 		extent_node_size_set(node, extent_node_size_get(prev) +
 		    extent_node_size_get(node));
+		if (extent_node_sn_get(prev) < extent_node_sn_get(node))
+			extent_node_sn_set(node, extent_node_sn_get(prev));
 		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
 		    extent_node_zeroed_get(node));
-		extent_tree_szad_insert(chunks_szad, node);
+		extent_tree_szsnad_insert(chunks_szsnad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);

 		arena_node_dalloc(tsdn, arena, prev);
@@ -595,7 +605,7 @@ label_return:

 void
 chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool committed)
+    void *chunk, size_t size, size_t sn, bool committed)
 {

 	assert(chunk != NULL);
@@ -603,8 +613,9 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);

-	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
-	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
+	    &arena->chunks_ad_cached, true, chunk, size, sn, false,
+	    committed);
 	arena_maybe_purge(tsdn, arena);
 }

@@ -627,7 +638,7 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,

 void
 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool zeroed, bool committed)
+    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
 {
 	bool err;

@@ -653,8 +664,9 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	}
 	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
 	    arena->ind);
-	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
-	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
+	    &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
+	    committed);

 	if (config_stats)
 		arena->stats.retained += size;
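In chunk_record() above, a coalesced extent inherits the smaller of the two serial numbers, so merging never makes memory look younger than its oldest part. A toy illustration of that rule (the extent_t below is a stand-in, not jemalloc's type):

#include <stddef.h>
#include <stdio.h>

typedef struct {
	size_t	sn;	/* Serial number: lower == older. */
	size_t	size;
} extent_t;

/* Fold `from` into `into`, keeping the oldest serial number. */
static void
extent_merge(extent_t *into, const extent_t *from)
{
	into->size += from->size;
	if (from->sn < into->sn)
		into->sn = from->sn;
}

int
main(void)
{
	extent_t a = {9, 4096}, b = {2, 4096};

	extent_merge(&a, &b);
	printf("sn=%zu size=%zu\n", a.sn, a.size);	/* sn=2 size=8192 */
	return (0);
}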
@@ -162,7 +162,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 				    CHUNK_HOOKS_INITIALIZER;
 				chunk_dalloc_wrapper(tsdn, arena,
 				    &chunk_hooks, cpad, cpad_size,
-				    false, true);
+				    arena_extent_sn_next(arena), false,
+				    true);
 			}
 			if (*zero) {
 				JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
@@ -3,42 +3,45 @@

 /******************************************************************************/

+/*
+ * Round down to the nearest chunk size that can actually be requested during
+ * normal huge allocation.
+ */
 JEMALLOC_INLINE_C size_t
 extent_quantize(size_t size)
 {
+	size_t ret;
+	szind_t ind;

-	/*
-	 * Round down to the nearest chunk size that can actually be requested
-	 * during normal huge allocation.
-	 */
-	return (index2size(size2index(size + 1) - 1));
-}
+	assert(size > 0);

-JEMALLOC_INLINE_C int
-extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
-{
-	int ret;
-	size_t a_qsize = extent_quantize(extent_node_size_get(a));
-	size_t b_qsize = extent_quantize(extent_node_size_get(b));
-
-	/*
-	 * Compare based on quantized size rather than size, in order to sort
-	 * equally useful extents only by address.
-	 */
-	ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
-	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
-		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
-
-		ret = (a_addr > b_addr) - (a_addr < b_addr);
+	ind = size2index(size + 1);
+	if (ind == 0) {
+		/* Avoid underflow. */
+		return (index2size(0));
 	}
-
+	ret = index2size(ind - 1);
+	assert(ret <= size);
 	return (ret);
 }

-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
-    extent_szad_comp)
+JEMALLOC_INLINE_C int
+extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
+{
+	size_t a_qsize = extent_quantize(extent_node_size_get(a));
+	size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+	return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
+}
+
+JEMALLOC_INLINE_C int
+extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
+{
+	size_t a_sn = extent_node_sn_get(a);
+	size_t b_sn = extent_node_sn_get(b);
+
+	return ((a_sn > b_sn) - (a_sn < b_sn));
+}

 JEMALLOC_INLINE_C int
 extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
@@ -49,5 +52,26 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }

+JEMALLOC_INLINE_C int
+extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+	int ret;
+
+	ret = extent_sz_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = extent_sn_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = extent_ad_comp(a, b);
+	return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
+    extent_szsnad_comp)
+
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
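extent_szsnad_comp() above chains three total orders: quantized size, then serial number, then address. That composition is what makes first-best-fit return the oldest/lowest extent among equally good fits. A runnable sketch of the same composition with illustrative stand-in types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	size_t		qsize;	/* Quantized size. */
	size_t		sn;	/* Serial number. */
	uintptr_t	addr;	/* Base address. */
} ext_t;

#define	CMP(a, b)	(((a) > (b)) - ((a) < (b)))

static int
szsnad_comp(const ext_t *a, const ext_t *b)
{
	int ret = CMP(a->qsize, b->qsize);

	if (ret == 0)
		ret = CMP(a->sn, b->sn);
	if (ret == 0)
		ret = CMP(a->addr, b->addr);
	return (ret);
}

int
main(void)
{
	ext_t old = {64, 1, 0x9000}, young = {64, 5, 0x1000};

	/*
	 * Equal sizes: age decides before address, so the older extent
	 * sorts first even though its address is higher.
	 */
	printf("%d\n", szsnad_comp(&old, &young));	/* -1 */
	return (0);
}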
@@ -56,6 +56,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	size_t ausize;
 	arena_t *iarena;
 	extent_node_t *node;
+	size_t sn;
 	bool is_zeroed;

 	/* Allocate one or more contiguous chunks for this request. */
@@ -68,7 +69,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	assert(ausize >= chunksize);

 	/* Allocate an extent node with which to track the chunk. */
-	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get();
+	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
+	    a0get();
 	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
 	    CACHELINE, false, NULL, true, iarena);
 	if (node == NULL)
@@ -82,15 +84,15 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	if (likely(!tsdn_null(tsdn)))
 		arena = arena_choose(tsdn_tsd(tsdn), arena);
 	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
-	    arena, usize, alignment, &is_zeroed)) == NULL) {
+	    arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
 		idalloctm(tsdn, node, NULL, true, true);
 		return (NULL);
 	}

-	extent_node_init(node, arena, ret, usize, is_zeroed, true);
+	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);

 	if (huge_node_set(tsdn, ret, node)) {
-		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
+		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
 		idalloctm(tsdn, node, NULL, true, true);
 		return (NULL);
 	}
@@ -245,7 +247,8 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

 	/* Zap the excess chunks. */
-	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
+	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
+	    extent_node_sn_get(node));

 	return (false);
 }
@@ -407,7 +410,8 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
 	huge_dalloc_junk(extent_node_addr_get(node),
 	    extent_node_size_get(node));
 	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
-	    extent_node_addr_get(node), extent_node_size_get(node));
+	    extent_node_addr_get(node), extent_node_size_get(node),
+	    extent_node_sn_get(node));
 	idalloctm(tsdn, node, NULL, true, true);

 	arena_decay_tick(tsdn, arena);
@@ -1060,7 +1060,11 @@ malloc_conf_init(void)
 				if (cont)				\
 					continue;			\
 			}
-#define	CONF_HANDLE_T_U(t, o, n, min, max, clip)			\
+#define	CONF_MIN_no(um, min)	false
+#define	CONF_MIN_yes(um, min)	((um) < (min))
+#define	CONF_MAX_no(um, max)	false
+#define	CONF_MAX_yes(um, max)	((um) > (max))
+#define	CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
 			if (CONF_MATCH(n)) {				\
 				uintmax_t um;				\
 				char *end;				\
@@ -1073,15 +1077,19 @@ malloc_conf_init(void)
 					    "Invalid conf value",	\
 					    k, klen, v, vlen);		\
 				} else if (clip) {			\
-					if ((min) != 0 && um < (min))	\
+					if (CONF_MIN_##check_min(um,	\
+					    (min)))			\
 						o = (t)(min);		\
-					else if (um > (max))		\
+					else if (CONF_MAX_##check_max(	\
+					    um, (max)))			\
 						o = (t)(max);		\
 					else				\
 						o = (t)um;		\
 				} else {				\
-					if (((min) != 0 && um < (min))	\
-					    || um > (max)) {		\
+					if (CONF_MIN_##check_min(um,	\
+					    (min)) ||			\
+					    CONF_MAX_##check_max(um,	\
+					    (max))) {			\
 						malloc_conf_error(	\
 						    "Out-of-range "	\
 						    "conf value",	\
@@ -1091,10 +1099,13 @@ malloc_conf_init(void)
 					}				\
 					continue;			\
 				}
-#define	CONF_HANDLE_UNSIGNED(o, n, min, max, clip)			\
-	CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
-#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
-	CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
+#define	CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
+    clip)								\
+	CONF_HANDLE_T_U(unsigned, o, n, min, max,			\
+	    check_min, check_max, clip)
+#define	CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
+	CONF_HANDLE_T_U(size_t, o, n, min, max,			\
+	    check_min, check_max, clip)
 #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
 			if (CONF_MATCH(n)) {				\
 				long l;					\
@@ -1137,7 +1148,7 @@ malloc_conf_init(void)
 			 */
 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
 			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
-			    (sizeof(size_t) << 3) - 1, true)
+			    (sizeof(size_t) << 3) - 1, yes, yes, true)
 			if (strncmp("dss", k, klen) == 0) {
 				int i;
 				bool match = false;
@@ -1163,7 +1174,7 @@ malloc_conf_init(void)
 				continue;
 			}
 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
-			    UINT_MAX, false)
+			    UINT_MAX, yes, no, false)
 			if (strncmp("purge", k, klen) == 0) {
 				int i;
 				bool match = false;
@@ -1234,7 +1245,7 @@ malloc_conf_init(void)
 				continue;
 			}
 			CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
-			    0, SIZE_T_MAX, false)
+			    0, SIZE_T_MAX, no, no, false)
 			CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
 			CONF_HANDLE_BOOL(opt_zero, "zero", true)
 		}
@@ -1271,8 +1282,8 @@ malloc_conf_init(void)
 			CONF_HANDLE_BOOL(opt_prof_thread_active_init,
 			    "prof_thread_active_init", true)
 			CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
-			    "lg_prof_sample", 0,
-			    (sizeof(uint64_t) << 3) - 1, true)
+			    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
+			    - 1, no, yes, true)
 			CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
 			    true)
 			CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1288,7 +1299,14 @@ malloc_conf_init(void)
 			malloc_conf_error("Invalid conf pair", k, klen, v,
 			    vlen);
 #undef CONF_MATCH
+#undef CONF_MATCH_VALUE
 #undef CONF_HANDLE_BOOL
+#undef CONF_MIN_no
+#undef CONF_MIN_yes
+#undef CONF_MAX_no
+#undef CONF_MAX_yes
+#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_UNSIGNED
 #undef CONF_HANDLE_SIZE_T
 #undef CONF_HANDLE_SSIZE_T
 #undef CONF_HANDLE_CHAR_P
@@ -1397,8 +1415,9 @@ malloc_init_hard_recursible(void)

 	ncpus = malloc_ncpus();

-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
-    && !defined(_WIN32) && !defined(__native_client__))
+#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
+    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
+    !defined(__native_client__))
 	/* LinuxThreads' pthread_atfork() allocates. */
 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 	    jemalloc_postfork_child) != 0) {
@@ -1977,8 +1996,8 @@ je_realloc(void *ptr, size_t size)
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, ret);
-	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
-	    old_rzsize, true, false);
+	JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
+	    old_usize, old_rzsize, maybe, false);
 	witness_assert_lockless(tsdn);
 	return (ret);
 }
@@ -2404,8 +2423,8 @@ je_rallocx(void *ptr, size_t size, int flags)
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, p);
-	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
+	JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
+	    old_usize, old_rzsize, no, zero);
 	witness_assert_lockless(tsd_tsdn(tsd));
 	return (p);
 label_oom:
@@ -2547,8 +2566,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		*tsd_thread_allocatedp_get(tsd) += usize;
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
-	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
+	JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
+	    old_usize, old_rzsize, no, zero);
 label_not_resized:
 	UTRACE(ptr, size, ptr);
 	witness_assert_lockless(tsd_tsdn(tsd));
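The CONF_MIN_##check_min / CONF_MAX_##check_max pattern above selects a bound check by token pasting: callers pass the literal tokens yes or no, and the no variants expand to a constant false, which avoids tautological comparisons such as testing an unsigned um < 0. A small sketch of the same trick (the names below are illustrative, not jemalloc's):

#include <stdbool.h>
#include <stdio.h>

#define	CHECK_MIN_no(um, min)	false
#define	CHECK_MIN_yes(um, min)	((um) < (min))
/* `which` must be the literal token yes or no. */
#define	BELOW_MIN(um, min, which)	CHECK_MIN_##which(um, min)

int
main(void)
{
	unsigned um = 0;

	/* yes: real check fires; no: expands to false at compile time. */
	printf("%d %d\n", BELOW_MIN(um, 1, yes), BELOW_MIN(um, 1, no));
	/* Prints "1 0". */
	return (0);
}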
@@ -170,15 +170,16 @@ pages_purge(void *addr, size_t size)
 #ifdef _WIN32
 	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
 	unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
-#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#    define JEMALLOC_MADV_ZEROS true
-#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
+    defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
+#  if defined(JEMALLOC_PURGE_MADVISE_FREE)
 #    define JEMALLOC_MADV_PURGE MADV_FREE
 #    define JEMALLOC_MADV_ZEROS false
+#  elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#    define JEMALLOC_MADV_ZEROS true
 #  else
-#    error "No madvise(2) flag defined for purging unused dirty pages."
+#    error No madvise(2) flag defined for purging unused dirty pages
 #  endif
 	int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
 	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
@@ -191,6 +192,34 @@ pages_purge(void *addr, size_t size)
 	return (unzeroed);
 }

+bool
+pages_huge(void *addr, size_t size)
+{
+
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+	return (false);
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size)
+{
+
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+	return (false);
+#endif
+}
+
 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 static bool
 os_overcommits_sysctl(void)
@@ -219,7 +248,7 @@ os_overcommits_proc(void)
 	char buf[1];
 	ssize_t nread;

-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_open)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
 	fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
 #else
 	fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
@@ -227,13 +256,13 @@ os_overcommits_proc(void)
 	if (fd == -1)
 		return (false); /* Error. */

-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_read)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
 	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
 #else
 	nread = read(fd, &buf, sizeof(buf));
 #endif

-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_close)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
 	syscall(SYS_close, fd);
 #else
 	close(fd);
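pages_huge() and pages_nohuge() above are thin wrappers over madvise(2) with MADV_HUGEPAGE and MADV_NOHUGEPAGE when the build detects THP support. A standalone sketch of the same calls on Linux; error handling is deliberately minimal and the constants are Linux-specific:

#ifdef __linux__
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	size_t size = 2 << 20;	/* One 2 MiB huge-page-sized range. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	/* Opt the range in to transparent huge pages... */
	if (madvise(p, size, MADV_HUGEPAGE) != 0)
		perror("madvise(MADV_HUGEPAGE)");
	/* ...and back out, as done when a chunk is partially purged. */
	if (madvise(p, size, MADV_NOHUGEPAGE) != 0)
		perror("madvise(MADV_NOHUGEPAGE)");
	munmap(p, size);
	return (0);
}
#else
int main(void) { return (0); }
#endif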
@@ -3,7 +3,7 @@

 #define	CTL_GET(n, v, t) do {						\
 	size_t sz = sizeof(t);						\
-	xmallctl(n, v, &sz, NULL, 0);					\
+	xmallctl(n, (void *)v, &sz, NULL, 0);				\
 } while (0)

 #define	CTL_M2_GET(n, i, v, t) do {					\
@@ -12,7 +12,7 @@
 	size_t sz = sizeof(t);						\
 	xmallctlnametomib(n, mib, &miblen);				\
 	mib[2] = (i);							\
-	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
 } while (0)

 #define	CTL_M2_M4_GET(n, i, j, v, t) do {				\
@@ -22,7 +22,7 @@
 	xmallctlnametomib(n, mib, &miblen);				\
 	mib[2] = (i);							\
 	mib[4] = (j);							\
-	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
+	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
 } while (0)

 /******************************************************************************/
@@ -647,7 +647,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
 #define	OPT_WRITE_BOOL_MUTABLE(n, m, c) {				\
 	bool bv2;							\
 	if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 &&	\
-	    je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) {			\
+	    je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) {		\
 		if (json) {						\
 			malloc_cprintf(write_cb, cbopaque,		\
 			    "\t\t\t\""#n"\": %s%s\n", bv ? "true" :	\
@@ -692,7 +692,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
 #define	OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) {				\
 	ssize_t ssv2;							\
 	if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 &&	\
-	    je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) {		\
+	    je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) {	\
 		if (json) {						\
 			malloc_cprintf(write_cb, cbopaque,		\
 			    "\t\t\t\""#n"\": %zd%s\n", ssv, (c));	\
@@ -1084,7 +1084,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	 * */
 	epoch = 1;
 	u64sz = sizeof(uint64_t);
-	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+	    sizeof(uint64_t));
 	if (err != 0) {
 		if (err == EAGAIN) {
 			malloc_write("<jemalloc>: Memory allocation failure in "
@@ -517,12 +517,12 @@ tcache_boot(tsdn_t *tsdn)
 	 * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
 	 * known.
 	 */
-	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
 		tcache_maxclass = SMALL_MAXCLASS;
-	else if ((1U << opt_lg_tcache_max) > large_maxclass)
+	else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
 		tcache_maxclass = large_maxclass;
 	else
-		tcache_maxclass = (1U << opt_lg_tcache_max);
+		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);

 	nhbins = size2index(tcache_maxclass) + 1;

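The tcache_boot() hunk above swaps 1U for ZU(1) because opt_lg_tcache_max can meet or exceed 32 on LP64 targets, and shifting a 32-bit 1U that far is undefined behavior; a size_t-wide operand keeps the shift in range. ZU() is jemalloc's size_t-literal macro; a stand-in is defined in this sketch:

#include <stddef.h>
#include <stdio.h>

#define	ZU(z)	((size_t)(z))	/* Stand-in for jemalloc's ZU(). */

int
main(void)
{
	unsigned lg = 33;

	if (lg < sizeof(size_t) * 8) {
		/*
		 * (1U << 33) would be undefined; the size_t-wide shift
		 * is well defined on 64-bit targets.
		 */
		printf("%zu\n", ZU(1) << lg);	/* 8589934592 */
	}
	return (0);
}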
@@ -49,7 +49,7 @@ static void
 wrtmessage(void *cbopaque, const char *s)
 {

-#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_write)
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
 	/*
 	 * Use syscall(2) rather than write(2) when possible in order to avoid
 	 * the possibility of memory allocation within libc.  This is necessary
@@ -216,7 +216,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
 		p++;
 	}
 	if (neg)
-		ret = -ret;
+		ret = (uintmax_t)(-((intmax_t)ret));

 	if (p == ns) {
 		/* No conversion performed. */
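The malloc_strtoumax() change above spells the negation out as a signed negate plus conversions. Plain `ret = -ret;` on a uintmax_t is well defined (modular arithmetic) but draws sign-conversion warnings from some compilers; the cast form documents that the wraparound, which strtoumax(3) requires for negative inputs, is intentional. A tiny demonstration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintmax_t ret = 42;

	/*
	 * Negate as intmax_t, then convert back: same bits as -ret,
	 * but the intent is explicit.
	 */
	ret = (uintmax_t)(-((intmax_t)ret));
	printf("%" PRIuMAX "\n", ret);	/* UINTMAX_MAX - 41 */
	return (0);
}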