Import jemalloc a8f8d7540d66ddee7337db80c92890916e1063ca (dev branch,
prior to 3.0.0 release).  This fixes several bugs related to memory
initialization.

Mangle __jemalloc_a0{malloc,calloc,free}() just like all the other
library-internal symbols in jemalloc, and adjust the TLS allocation code in
libc to use the mangled names.
commit b378da045b
parent 0f83f74ce1
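A note on the mangling scheme, for readers unfamiliar with it: jemalloc's
private_namespace.h rewrites every library-internal symbol through the
JEMALLOC_N() macro, which in FreeBSD's build prepends a __jemalloc_ prefix, so
a consumer outside the library (such as libc's TLS code) has to declare and
call the prefixed names.  The following is a minimal standalone sketch of the
pattern, not the actual jemalloc headers:

	#include <stddef.h>

	/* Same shape as jemalloc's JEMALLOC_N(); the prefix is illustrative. */
	#define JEMALLOC_N(n)	__jemalloc_##n
	#define a0malloc	JEMALLOC_N(a0malloc)

	/* After preprocessing, this defines __jemalloc_a0malloc(). */
	void *
	a0malloc(size_t size)
	{

		return (NULL);	/* stub; the real function allocates from arena 0 */
	}

	int
	main(void)
	{
		/* External callers must use the mangled name directly. */
		void *p = __jemalloc_a0malloc(16);

		(void)p;
		return (0);
	}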
@@ -70,6 +70,8 @@ found in the git revision history:
     invalid statistics and crashes.
   - Work around TLS deallocation via free() on Linux.  This bug could cause
     write-after-free memory corruption.
+  - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
+    cause memory corruption and crashes with --enable-dss specified.
   - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
   - Fix realloc(p, 0) to act like free(p).
   - Do not enforce minimum alignment in memalign().
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index f78f423..ce6df80 100644
+index e8a5722..cec85b5 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -51,12 +51,23 @@
@@ -82,17 +82,10 @@ index 8837ef5..d7133f4 100644
 
  bool	malloc_mutex_init(malloc_mutex_t *mutex);
 diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index 15fe3c5..be94eb8 100644
+index bb1b63e..00eb169 100644
 --- a/include/jemalloc/internal/private_namespace.h
 +++ b/include/jemalloc/internal/private_namespace.h
-@@ -1,6 +1,3 @@
--#define a0calloc JEMALLOC_N(a0calloc)
--#define a0free JEMALLOC_N(a0free)
--#define a0malloc JEMALLOC_N(a0malloc)
- #define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
- #define arena_bin_index JEMALLOC_N(arena_bin_index)
- #define arena_bin_info JEMALLOC_N(arena_bin_info)
-@@ -167,7 +164,6 @@
+@@ -165,7 +165,6 @@
  #define iqalloc JEMALLOC_N(iqalloc)
  #define iralloc JEMALLOC_N(iralloc)
  #define isalloc JEMALLOC_N(isalloc)
@@ -195,7 +188,7 @@ index 0000000..2c5797f
 +#define pthread_mutex_lock _pthread_mutex_lock
 +#define pthread_mutex_unlock _pthread_mutex_unlock
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 00c2b23..729f4e1 100644
+index f9c8916..8e24a5a 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -8,6 +8,9 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -1 +1 @@
-1.0.0-283-g606f1fdc3cdbc700717133ca56685313caea24bb
+1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca
@@ -2,12 +2,12 @@
 .\" Title: JEMALLOC
 .\" Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 04/20/2012
+.\" Date: 04/21/2012
 .\" Manual: User Manual
-.\" Source: jemalloc 1.0.0-283-g606f1fdc3cdbc700717133ca56685313caea24bb
+.\" Source: jemalloc 1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca
 .\" Language: English
 .\"
-.TH "JEMALLOC" "3" "04/20/2012" "jemalloc 1.0.0-283-g606f1fdc3c" "User Manual"
+.TH "JEMALLOC" "3" "04/21/2012" "jemalloc 1.0.0-286-ga8f8d7540d" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 1\&.0\&.0\-283\-g606f1fdc3cdbc700717133ca56685313caea24bb\&. More information can be found at the
+This manual describes jemalloc 1\&.0\&.0\-286\-ga8f8d7540d66ddee7337db80c92890916e1063ca\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -404,9 +404,9 @@ Traditionally, allocators have used
 to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If
 \fB\-\-enable\-dss\fR
 is specified during configuration, this allocator uses both
-\fBsbrk\fR(2)
+\fBmmap\fR(2)
 and
-\fBmmap\fR(2), in that order of preference; otherwise only
+\fBsbrk\fR(2), in that order of preference; otherwise only
 \fBmmap\fR(2)
 is used\&.
 .PP
@@ -44,8 +44,7 @@ extern size_t	arena_maxclass; /* Max size class for arenas. */
 
 void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void	chunk_dealloc(void *chunk, size_t size, bool unmap);
-bool	chunk_boot0(void);
-bool	chunk_boot1(void);
+bool	chunk_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -11,11 +11,9 @@
 
 void	pages_purge(void *addr, size_t length);
 
-void	*chunk_alloc_mmap(size_t size, size_t alignment);
+void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dealloc_mmap(void *chunk, size_t size);
 
-bool	chunk_mmap_boot(void);
-
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
@@ -1,3 +1,6 @@
+#define a0calloc JEMALLOC_N(a0calloc)
+#define a0free JEMALLOC_N(a0free)
+#define a0malloc JEMALLOC_N(a0malloc)
 #define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
 #define arena_bin_index JEMALLOC_N(arena_bin_index)
 #define arena_bin_info JEMALLOC_N(arena_bin_info)
@@ -71,8 +74,7 @@
 #define chunk_alloc JEMALLOC_N(chunk_alloc)
 #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
 #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
-#define chunk_boot0 JEMALLOC_N(chunk_boot0)
-#define chunk_boot1 JEMALLOC_N(chunk_boot1)
+#define chunk_boot JEMALLOC_N(chunk_boot)
 #define chunk_dealloc JEMALLOC_N(chunk_dealloc)
 #define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
 #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
@@ -80,7 +82,6 @@
 #define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
 #define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
 #define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
 #define chunk_npages JEMALLOC_N(chunk_npages)
 #define chunks_mtx JEMALLOC_N(chunks_mtx)
 #define chunks_rtree JEMALLOC_N(chunks_rtree)
@@ -188,10 +189,6 @@
 #define malloc_write JEMALLOC_N(malloc_write)
 #define map_bias JEMALLOC_N(map_bias)
 #define mb_write JEMALLOC_N(mb_write)
-#define mmap_unaligned_tsd_boot JEMALLOC_N(mmap_unaligned_tsd_boot)
-#define mmap_unaligned_tsd_cleanup_wrapper JEMALLOC_N(mmap_unaligned_tsd_cleanup_wrapper)
-#define mmap_unaligned_tsd_get JEMALLOC_N(mmap_unaligned_tsd_get)
-#define mmap_unaligned_tsd_set JEMALLOC_N(mmap_unaligned_tsd_set)
 #define mutex_boot JEMALLOC_N(mutex_boot)
 #define narenas JEMALLOC_N(narenas)
 #define ncpus JEMALLOC_N(ncpus)
@@ -111,7 +111,7 @@ a_name##_tsd_cleanup_wrapper(void)					\
 									\
 	if (a_name##_initialized) {					\
 		a_name##_initialized = false;				\
-		a_cleanup(&a_name##_tls);			\
+		a_cleanup(&a_name##_tls);				\
 	}								\
 	return (a_name##_initialized);					\
 }									\
@@ -7,12 +7,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define JEMALLOC_VERSION "1.0.0-283-g606f1fdc3cdbc700717133ca56685313caea24bb"
+#define JEMALLOC_VERSION "1.0.0-286-ga8f8d7540d66ddee7337db80c92890916e1063ca"
 #define JEMALLOC_VERSION_MAJOR 1
 #define JEMALLOC_VERSION_MINOR 0
 #define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 283
-#define JEMALLOC_VERSION_GID "606f1fdc3cdbc700717133ca56685313caea24bb"
+#define JEMALLOC_VERSION_NREV 286
+#define JEMALLOC_VERSION_GID "a8f8d7540d66ddee7337db80c92890916e1063ca"
 
 #include "jemalloc_defs.h"
 #include "jemalloc_FreeBSD.h"
@@ -98,7 +98,10 @@ chunk_recycle(size_t size, size_t alignment, bool *zero)
 
 	if (node != NULL)
 		base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_FREE
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+	/* Pages are zeroed as a side effect of pages_purge(). */
+	*zero = true;
+#else
 	if (*zero) {
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
@@ -125,16 +128,16 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 	ret = chunk_recycle(size, alignment, zero);
 	if (ret != NULL)
 		goto label_return;
+
+	ret = chunk_alloc_mmap(size, alignment, zero);
+	if (ret != NULL)
+		goto label_return;
+
 	if (config_dss) {
 		ret = chunk_alloc_dss(size, alignment, zero);
 		if (ret != NULL)
 			goto label_return;
 	}
-	ret = chunk_alloc_mmap(size, alignment);
-	if (ret != NULL) {
-		*zero = true;
-		goto label_return;
-	}
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
@@ -161,7 +164,13 @@ label_return:
 		if (config_prof && opt_prof && opt_prof_gdump && gdump)
 			prof_gdump();
 	}
+	if (config_debug && *zero && ret != NULL) {
+		size_t i;
+		size_t *p = (size_t *)(uintptr_t)ret;
+
+		for (i = 0; i < size / sizeof(size_t); i++)
+			assert(p[i] == 0);
+	}
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
@@ -258,14 +267,14 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	}
 
 	if (unmap) {
-		if (chunk_dealloc_mmap(chunk, size) == false)
-			return;
-		chunk_record(chunk, size);
+		if ((config_dss && chunk_in_dss(chunk)) ||
+		    chunk_dealloc_mmap(chunk, size))
+			chunk_record(chunk, size);
 	}
 }
 
 bool
-chunk_boot0(void)
+chunk_boot(void)
 {
 
 	/* Set variables according to the value of opt_lg_chunk. */
@@ -292,13 +301,3 @@ chunk_boot0(void)
 
 	return (false);
 }
-
-bool
-chunk_boot1(void)
-{
-
-	if (chunk_mmap_boot())
-		return (true);
-
-	return (false);
-}
@@ -89,7 +89,10 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
-			*zero = true;
+			if (*zero) {
+				VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+				memset(ret, 0, size);
+			}
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);
@@ -1,24 +1,13 @@
 #define	JEMALLOC_CHUNK_MMAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-malloc_tsd_data(static, mmap_unaligned, bool, false)
-malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
-    malloc_tsd_no_cleanup)
-
-/******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
 static void	*pages_map(void *addr, size_t size);
 static void	pages_unmap(void *addr, size_t size);
 static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
-    bool unaligned);
+    bool unaligned, bool *zero);
 
 /******************************************************************************/
@@ -87,7 +76,7 @@ pages_purge(void *addr, size_t length)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
 {
 	void *ret, *pages;
 	size_t alloc_size, leadsize, trailsize;
@@ -112,23 +101,16 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
 	if (trailsize != 0)
 		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
 
-	/*
-	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
-	 * the next chunk_alloc_mmap() execution tries the fast allocation
-	 * method.
-	 */
-	if (unaligned == false && mmap_unaligned_booted) {
-		bool mu = false;
-		mmap_unaligned_tsd_set(&mu);
-	}
-
 	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
+	size_t offset;
 
 	/*
 	 * Ideally, there would be a way to specify alignment to mmap() (like
@@ -150,45 +132,37 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 	 *
 	 * Another possible confounding factor is address space layout
 	 * randomization (ASLR), which causes mmap(2) to disregard the
-	 * requested address.  mmap_unaligned tracks whether the previous
-	 * chunk_alloc_mmap() execution received any unaligned or relocated
-	 * mappings, and if so, the current execution will immediately fall
-	 * back to the slow method.  However, we keep track of whether the fast
-	 * method would have succeeded, and if so, we make a note to try the
-	 * fast method next time.
+	 * requested address.  As such, repeatedly trying to extend unaligned
+	 * mappings could result in an infinite loop, so if extension fails,
+	 * immediately fall back to the reliable method of over-allocation
+	 * followed by trimming.
 	 */
 
-	if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
-		size_t offset;
-
-		ret = pages_map(NULL, size);
-		if (ret == NULL)
-			return (NULL);
-
-		offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
-		if (offset != 0) {
-			bool mu = true;
-			mmap_unaligned_tsd_set(&mu);
-			/* Try to extend chunk boundary. */
-			if (pages_map((void *)((uintptr_t)ret + size),
-			    chunksize - offset) == NULL) {
-				/*
-				 * Extension failed.  Clean up, then revert to
-				 * the reliable-but-expensive method.
-				 */
-				pages_unmap(ret, size);
-				ret = chunk_alloc_mmap_slow(size, alignment,
-				    true);
-			} else {
-				/* Clean up unneeded leading space. */
-				pages_unmap(ret, chunksize - offset);
-				ret = (void *)((uintptr_t)ret + (chunksize -
-				    offset));
-			}
-		} else
-			ret = chunk_alloc_mmap_slow(size, alignment, false);
-	}
+	ret = pages_map(NULL, size);
+	if (ret == NULL)
+		return (NULL);
+
+	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+	if (offset != 0) {
+		/* Try to extend chunk boundary. */
+		if (pages_map((void *)((uintptr_t)ret + size), chunksize -
+		    offset) == NULL) {
+			/*
+			 * Extension failed.  Clean up, then fall back to the
+			 * reliable-but-expensive method.
+			 */
+			pages_unmap(ret, size);
+			return (chunk_alloc_mmap_slow(size, alignment, true,
+			    zero));
+		} else {
+			/* Clean up unneeded leading space. */
+			pages_unmap(ret, chunksize - offset);
+			ret = (void *)((uintptr_t)ret + (chunksize - offset));
+		}
+	}
 
 	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
@@ -201,21 +175,3 @@ chunk_dealloc_mmap(void *chunk, size_t size)
 
 	return (config_munmap == false);
 }
-
-bool
-chunk_mmap_boot(void)
-{
-
-	/*
-	 * XXX For the non-TLS implementation of tsd, the first access from
-	 * each thread causes memory allocation.  The result is a bootstrapping
-	 * problem for this particular use case, so for now just disable it by
-	 * leaving it in an unbooted state.
-	 */
-#ifdef JEMALLOC_TLS
-	if (mmap_unaligned_tsd_boot())
-		return (true);
-#endif
-
-	return (false);
-}
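The slow path that remains after this change gets alignment by over-allocating
and trimming, which is what makes it reliable under ASLR.  Below is a rough
standalone sketch of that strategy, not jemalloc's code: map_aligned() is an
illustrative name, alignment is assumed to be a power of two and a multiple of
the page size, and error handling is minimal.

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>

	/* Map size bytes at an alignment-aligned address by over-allocating. */
	static void *
	map_aligned(size_t size, size_t alignment)
	{
		size_t alloc_size = size + alignment;	/* worst-case padding */
		size_t leadsize, trailsize;
		char *pages, *ret;

		pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (pages == MAP_FAILED)
			return (NULL);
		/* Distance from pages up to the first aligned address. */
		leadsize = (size_t)((alignment -
		    ((uintptr_t)pages & (alignment - 1))) & (alignment - 1));
		ret = pages + leadsize;
		trailsize = alloc_size - leadsize - size;
		/* Trim the excess on both ends; the kernel keeps the middle. */
		if (leadsize != 0)
			munmap(pages, leadsize);
		if (trailsize != 0)
			munmap(ret + size, trailsize);
		return (ret);
	}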
@@ -28,6 +28,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	void *ret;
 	size_t csize;
 	extent_node_t *node;
+	bool is_zeroed;
 
 	/* Allocate one or more contiguous chunks for this request. */
@@ -42,7 +43,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (node == NULL)
 		return (NULL);
 
-	ret = chunk_alloc(csize, alignment, false, &zero);
+	/*
+	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+	 * it is possible to make correct junk/zero fill decisions below.
+	 */
+	is_zeroed = zero;
+	ret = chunk_alloc(csize, alignment, false, &is_zeroed);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -64,7 +70,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
-		else if (opt_zero)
+		else if (opt_zero && is_zeroed == false)
 			memset(ret, 0, csize);
 	}
 
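The huge_palloc() hunks above are the consumer side of the zero-propagation
contract: chunk_alloc() reports through its bool *zero out-parameter whether
the memory it returns is already known to be zeroed, so the caller must keep
its own zero request separate from that report.  A condensed sketch of the
pattern with stand-in names (alloc_chunk() and huge_alloc() are illustrative,
not the real functions):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	/*
	 * On input *zero is the caller's request; on output it reports whether
	 * the returned memory is known to be zeroed.
	 */
	static void *
	alloc_chunk(size_t size, bool *zero)
	{
		void *ret = malloc(size);	/* stand-in for the mmap()/sbrk() paths */

		if (ret != NULL && *zero)
			memset(ret, 0, size);	/* now known zeroed; *zero stays true */
		return (ret);
	}

	void *
	huge_alloc(size_t size, bool zero)
	{
		bool is_zeroed = zero;	/* copy, so the request itself survives */
		void *ret = alloc_chunk(size, &is_zeroed);

		if (ret == NULL)
			return (NULL);
		/* Zero-fill only when requested and not already done. */
		if (zero && is_zeroed == false)
			memset(ret, 0, size);
		return (ret);
	}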
@@ -638,7 +638,7 @@ malloc_init_hard(void)
 		return (true);
 	}
 
-	if (chunk_boot0()) {
+	if (chunk_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
@@ -715,11 +715,6 @@ malloc_init_hard(void)
 	ncpus = malloc_ncpus();
 	malloc_mutex_lock(&init_lock);
 
-	if (chunk_boot1()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
-
 	if (mutex_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
@@ -40,9 +40,9 @@
 #include "libc_private.h"
 
 /* Provided by jemalloc to avoid bootstrapping issues. */
-void	*a0malloc(size_t size);
-void	*a0calloc(size_t num, size_t size);
-void	a0free(void *ptr);
+void	*__jemalloc_a0malloc(size_t size);
+void	*__jemalloc_a0calloc(size_t num, size_t size);
+void	__jemalloc_a0free(void *ptr);
 
 __weak_reference(__libc_allocate_tls, _rtld_allocate_tls);
 __weak_reference(__libc_free_tls, _rtld_free_tls);
@@ -125,8 +125,8 @@ __libc_free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
 
 	tls = (Elf_Addr **)((Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE);
 	dtv = tls[0];
-	a0free(dtv);
-	a0free(tcb);
+	__jemalloc_a0free(dtv);
+	__jemalloc_a0free(tcb);
 }
 
 /*
@@ -142,18 +142,18 @@ __libc_allocate_tls(void *oldtcb, size_t tcbsize, size_t tcbalign __unused)
 	if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
 		return (oldtcb);
 
-	tcb = a0calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE);
+	tcb = __jemalloc_a0calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE);
 	tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);
 
 	if (oldtcb != NULL) {
 		memcpy(tls, oldtcb, tls_static_space);
-		a0free(oldtcb);
+		__jemalloc_a0free(oldtcb);
 
 		/* Adjust the DTV. */
 		dtv = tls[0];
 		dtv[2] = (Elf_Addr)tls + TLS_TCB_SIZE;
 	} else {
-		dtv = a0malloc(3 * sizeof(Elf_Addr));
+		dtv = __jemalloc_a0malloc(3 * sizeof(Elf_Addr));
 		tls[0] = dtv;
 		dtv[0] = 1;
 		dtv[1] = 1;
@@ -194,8 +194,8 @@ __libc_free_tls(void *tcb, size_t tcbsize __unused, size_t tcbalign)
 	dtv = ((Elf_Addr**)tcb)[1];
 	tlsend = (Elf_Addr) tcb;
 	tlsstart = tlsend - size;
-	a0free((void*) tlsstart);
-	a0free(dtv);
+	__jemalloc_a0free((void*) tlsstart);
+	__jemalloc_a0free(dtv);
 }
 
 /*
@@ -213,8 +213,8 @@ __libc_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
 
 	if (tcbsize < 2 * sizeof(Elf_Addr))
 		tcbsize = 2 * sizeof(Elf_Addr);
-	tls = a0calloc(1, size + tcbsize);
-	dtv = a0malloc(3 * sizeof(Elf_Addr));
+	tls = __jemalloc_a0calloc(1, size + tcbsize);
+	dtv = __jemalloc_a0malloc(3 * sizeof(Elf_Addr));
 
 	segbase = (Elf_Addr)(tls + size);
 	((Elf_Addr*)segbase)[0] = segbase;