Import jemalloc 3.0.0. This fixes memory zeroing bugs that manifested as
jemalloc assertion failures in debug builds, or as calloc() sometimes returning non-zeroed memory in production builds.
commit 35dad07381
parent 9a4e738a44

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=235322
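The bug class named in the message is a violation of calloc()'s basic contract: every byte of the returned block must read as zero. A minimal standalone check of that contract (illustrative only, not part of this commit) might look like:

#include <assert.h>
#include <stdlib.h>

/*
 * Sketch: verify the calloc() contract this import repairs.  A large
 * request is the interesting case, since the bug lived in the chunk
 * map's per-page "unzeroed" flags used for runs of pages.
 */
int
main(void)
{
    size_t size = 2 * 1024 * 1024;
    unsigned char *p = calloc(1, size);

    assert(p != NULL);
    for (size_t i = 0; i < size; i++)
        assert(p[i] == 0);
    free(p);
    return (0);
}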
@@ -6,7 +6,7 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
-* 3.0.0 (XXX not yet released)
+* 3.0.0 (May 11, 2012)
 
   Although this version adds some major new features, the primary focus is on
   internal code cleanup that facilitates maintainability and portability, most
@@ -23,6 +23,7 @@ found in the git revision history:
     + FreeBSD
     + Mac OS X Lion
     + MinGW
+    + Windows (no support yet for replacing the system malloc)
   - Add support for additional architectures:
     + MIPS
     + SH4
@@ -31,12 +32,13 @@ found in the git revision history:
   - Add nallocm(), which rounds a request size up to the nearest size class
     without actually allocating.
   - Implement aligned_alloc() (blame C11).
-  - Add the --disable-munmap option, and make it the default on Linux.
-  - Add the --with-mangling option.
-  - Add the --disable-experimental option.
   - Add the "thread.tcache.enabled" mallctl.
   - Add the "opt.prof_final" mallctl.
   - Update pprof (from gperftools 2.0).
+  - Add the --with-mangling option.
+  - Add the --disable-experimental option.
+  - Add the --disable-munmap option, and make it the default on Linux.
+  - Add the --enable-mremap option, which disables use of mremap(2) by default.
 
   Incompatible changes:
   - Enable stats by default.
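An aside on nallocm() from the feature list above: it reports the size class a request would be rounded to, without allocating, so callers can account for jemalloc's real per-allocation footprint. A usage sketch under stated assumptions — on FreeBSD the non-standard entry points are declared in malloc_np.h (upstream builds use jemalloc/jemalloc.h), and ALLOCM_SUCCESS is the experimental API's zero return code:

#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
    size_t rsize;

    /* Round a 100-byte request up to its size class; no allocation. */
    if (nallocm(&rsize, 100, 0) == ALLOCM_SUCCESS)
        printf("a 100-byte request actually occupies %zu bytes\n", rsize);
    return (0);
}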
@@ -71,6 +73,7 @@ found in the git revision history:
     write-after-free memory corruption.
   - Fix a potential deadlock that could occur during interval- and
     growth-triggered heap profile dumps.
+  - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
   - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
     cause memory corruption and crashes with --enable-dss specified.
   - Fix fork-related bugs that could cause deadlock in children between fork
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 93c16dc..b5c5595 100644
+index 877c500..7d659a7 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -51,12 +51,23 @@
@@ -192,7 +192,7 @@ index 0000000..9efab93
 +#define pthread_mutex_lock _pthread_mutex_lock
 +#define pthread_mutex_unlock _pthread_mutex_unlock
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index d42e91d..cdf6222 100644
+index bc54cd7..fa9fcf0 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -1 +1 @@
-1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754
+3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
@@ -2,12 +2,12 @@
 .\" Title: JEMALLOC
 .\" Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 05/09/2012
+.\" Date: 05/11/2012
 .\" Manual: User Manual
-.\" Source: jemalloc 1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754
+.\" Source: jemalloc 3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
 .\" Language: English
 .\"
-.TH "JEMALLOC" "3" "05/09/2012" "jemalloc 1.0.0-335-g37b6f95dcd" "User Manual"
+.TH "JEMALLOC" "3" "05/11/2012" "jemalloc 3.0.0-0-gfc9b1dbf69f5" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 1\&.0\&.0\-335\-g37b6f95dcd866f51c91488531a2efc3ed4c2b754\&. More information can be found at the
+This manual describes jemalloc 3\&.0\&.0\-0\-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -1003,12 +1003,12 @@ Total number of bytes in active pages allocated by the application\&. This is a
 "stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
 .RS 4
 Total number of bytes in chunks mapped on behalf of the application\&. This is a multiple of the chunk size, and is at least as large as
-"stats\&.active"\&. This does not include inactive chunks embedded in the DSS\&.
+"stats\&.active"\&. This does not include inactive chunks\&.
 .RE
 .PP
 "stats\&.chunks\&.current" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
 .RS 4
-Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks embedded in the DSS\&.
+Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks\&.
 .RE
 .PP
 "stats\&.chunks\&.total" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
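The relationship stated above — stats.mapped is a multiple of the chunk size and at least as large as stats.active — can be observed through the mallctl() interface. A sketch assuming a stats-enabled build (the default as of 3.0.0); statistics are snapshots, so a consumer first writes to the "epoch" mallctl to refresh them:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t esz = sizeof(epoch);
    size_t active, mapped, sz = sizeof(size_t);

    /* Refresh the statistics snapshot, then read the two values. */
    mallctl("epoch", &epoch, &esz, &epoch, esz);
    mallctl("stats.active", &active, &sz, NULL, 0);
    mallctl("stats.mapped", &mapped, &sz, NULL, 0);
    assert(mapped >= active);   /* the documented relationship */
    printf("active: %zu, mapped: %zu\n", active, mapped);
    return (0);
}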
@@ -93,13 +93,13 @@ struct arena_chunk_map_s {
 	 * Run address (or size) and various flags are stored together.  The bit
 	 * layout looks like (assuming 32-bit system):
 	 *
-	 *   ???????? ???????? ????---- ----dula
+	 *   ???????? ???????? ????nnnn nnnndula
 	 *
 	 * ? : Unallocated: Run address for first/last pages, unset for internal
 	 *     pages.
 	 *     Small: Run page offset.
 	 *     Large: Run size for first page, unset for trailing pages.
-	 * - : Unused.
+	 * n : binind for small size class, BININD_INVALID for large size class.
 	 * d : dirty?
 	 * u : unzeroed?
 	 * l : large?
@@ -118,14 +118,14 @@ struct arena_chunk_map_s {
 	 * [dula] : bit unset
 	 *
 	 *   Unallocated (clean):
-	 *     ssssssss ssssssss ssss1111 1111du-a
+	 *     ssssssss ssssssss ssss++++ ++++du-a
 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
-	 *     ssssssss ssssssss ssss1111 1111dU-a
+	 *     ssssssss ssssssss ssss++++ ++++dU-a
 	 *
 	 *   Unallocated (dirty):
-	 *     ssssssss ssssssss ssss1111 1111D--a
+	 *     ssssssss ssssssss ssss++++ ++++D--a
 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-	 *     ssssssss ssssssss ssss1111 1111D--a
+	 *     ssssssss ssssssss ssss++++ ++++D--a
 	 *
 	 *   Small:
 	 *     pppppppp pppppppp ppppnnnn nnnnd--A
@@ -133,15 +133,15 @@ struct arena_chunk_map_s {
 	 *     pppppppp pppppppp ppppnnnn nnnnd--A
 	 *
 	 *   Large:
-	 *     ssssssss ssssssss ssss1111 1111D-LA
+	 *     ssssssss ssssssss ssss++++ ++++D-LA
 	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-	 *     -------- -------- ----1111 1111D-LA
+	 *     -------- -------- ----++++ ++++D-LA
 	 *
 	 *   Large (sampled, size <= PAGE):
 	 *     ssssssss ssssssss ssssnnnn nnnnD-LA
 	 *
 	 *   Large (not sampled, size == PAGE):
-	 *     ssssssss ssssssss ssss1111 1111D-LA
+	 *     ssssssss ssssssss ssss++++ ++++D-LA
 	 */
 	size_t bits;
 #define CHUNK_MAP_BININD_SHIFT 4
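To make the layout comment concrete: with CHUNK_MAP_BININD_SHIFT at 4, bits 4..11 of each map word carry the size-class index (all ones, BININD_INVALID, for large runs) and the low nibble carries the dirty/unzeroed/large/allocated flags. A decoding sketch — the constants mirror this header's definitions, the helper itself is hypothetical:

#include <stddef.h>

#define CHUNK_MAP_BININD_SHIFT  4
#define CHUNK_MAP_BININD_MASK   ((size_t)0xff0U)
#define CHUNK_MAP_DIRTY         ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED      ((size_t)0x4U)
#define CHUNK_MAP_LARGE         ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED     ((size_t)0x1U)

/* Hypothetical helper: unpack the fields of one chunk map word. */
static void
mapbits_decode(size_t bits, size_t *binind, int *dirty, int *unzeroed,
    int *large, int *allocated)
{
    *binind = (bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    *dirty = (bits & CHUNK_MAP_DIRTY) != 0;
    *unzeroed = (bits & CHUNK_MAP_UNZEROED) != 0;
    *large = (bits & CHUNK_MAP_LARGE) != 0;
    *allocated = (bits & CHUNK_MAP_ALLOCATED) != 0;
}

int
main(void)
{
    size_t binind;
    int d, u, l, a;

    /* BININD_INVALID | LARGE | ALLOCATED: a clean large-run header word. */
    mapbits_decode(CHUNK_MAP_BININD_MASK | CHUNK_MAP_LARGE |
        CHUNK_MAP_ALLOCATED, &binind, &d, &u, &l, &a);
    return (binind == 0xff && l && a && !d && !u) ? 0 : 1;
}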
@@ -591,6 +591,7 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
 	mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	assert((size & PAGE_MASK) == 0);
 	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
 	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
 }
 
@@ -611,12 +612,14 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
 	size_t *mapbitsp;
+	size_t unzeroed;
 
 	mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	assert((size & PAGE_MASK) == 0);
-	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
-	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
-	    CHUNK_MAP_ALLOCATED;
+	assert((flags & CHUNK_MAP_DIRTY) == flags);
+	unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
+	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
 }
 
 JEMALLOC_INLINE void
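This hunk is the core of the calloc() fix: the old arena_mapbits_large_set() rebuilt the map word from scratch, silently dropping CHUNK_MAP_UNZEROED, so a page still holding stale data could later be handed out as if pre-zeroed. A toy model of the before/after behavior (flag values as in arena.h; both functions are illustrative, not the real ones):

#include <assert.h>
#include <stddef.h>

#define CHUNK_MAP_UNZEROED  ((size_t)0x4U)
#define CHUNK_MAP_LARGE     ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)

/* Old behavior: rebuilds the word, dropping the unzeroed flag. */
static void
large_set_buggy(size_t *mapbitsp, size_t size, size_t flags)
{
    *mapbitsp = size | flags | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
}

/* Fixed behavior: carries the existing unzeroed flag forward. */
static void
large_set_fixed(size_t *mapbitsp, size_t size, size_t flags)
{
    size_t unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED;

    *mapbitsp = size | flags | unzeroed | CHUNK_MAP_LARGE |
        CHUNK_MAP_ALLOCATED;
}

int
main(void)
{
    size_t mapbits = CHUNK_MAP_UNZEROED;    /* page holds stale data */

    large_set_buggy(&mapbits, 4096, 0);
    assert((mapbits & CHUNK_MAP_UNZEROED) == 0);    /* flag lost! */

    mapbits = CHUNK_MAP_UNZEROED;
    large_set_fixed(&mapbits, 4096, 0);
    assert(mapbits & CHUNK_MAP_UNZEROED);   /* flag preserved */
    return (0);
}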
@@ -637,13 +640,15 @@ arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
     size_t binind, size_t flags)
 {
 	size_t *mapbitsp;
+	size_t unzeroed;
 
 	assert(binind < BININD_INVALID);
 	mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	assert(pageind - runind >= map_bias);
-	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
+	assert((flags & CHUNK_MAP_DIRTY) == flags);
+	unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
 	*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
-	    flags | CHUNK_MAP_ALLOCATED;
+	    flags | unzeroed | CHUNK_MAP_ALLOCATED;
 }
 
 JEMALLOC_INLINE void
@@ -7,12 +7,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define JEMALLOC_VERSION "1.0.0-335-g37b6f95dcd866f51c91488531a2efc3ed4c2b754"
-#define JEMALLOC_VERSION_MAJOR 1
+#define JEMALLOC_VERSION "3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046"
+#define JEMALLOC_VERSION_MAJOR 3
 #define JEMALLOC_VERSION_MINOR 0
 #define JEMALLOC_VERSION_BUGFIX 0
-#define JEMALLOC_VERSION_NREV 335
-#define JEMALLOC_VERSION_GID "37b6f95dcd866f51c91488531a2efc3ed4c2b754"
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "fc9b1dbf69f59d7ecfc4ac68da9847e017e1d046"
 
 #include "jemalloc_defs.h"
 #include "jemalloc_FreeBSD.h"
@@ -44,6 +44,8 @@ static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
     bool large, size_t binind, bool zero);
 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
+    bool large, size_t binind, bool zero);
 static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
     size_t binind, bool zero);
 static void arena_purge(arena_t *arena, bool all);
@@ -309,8 +311,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		 * small run, so that arena_dalloc_bin_run() has the ability to
 		 * conditionally trim clean pages.
 		 */
-		arena_mapbits_small_set(chunk, run_ind, 0, binind,
-		    arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty);
+		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
 		/*
 		 * The first page will always be dirtied during small run
 		 * initialization, so a validation failure here would not
@@ -320,16 +321,13 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
 			arena_chunk_validate_zeroed(chunk, run_ind);
 		for (i = 1; i < need_pages - 1; i++) {
-			arena_mapbits_small_set(chunk, run_ind+i, i,
-			    binind, arena_mapbits_unzeroed_get(chunk,
-			    run_ind+i));
+			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
 			if (config_debug && flag_dirty == 0 &&
 			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
 				arena_chunk_validate_zeroed(chunk, run_ind+i);
 		}
 		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
-		    need_pages-1, binind, arena_mapbits_unzeroed_get(chunk,
-		    run_ind+need_pages-1) | flag_dirty);
+		    need_pages-1, binind, flag_dirty);
 		if (config_debug && flag_dirty == 0 &&
 		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
 		    0) {
@@ -351,17 +349,20 @@ arena_chunk_alloc(arena_t *arena)
 		chunk = arena->spare;
 		arena->spare = NULL;
 
-		/* Insert the run into the appropriate runs_avail_* tree. */
-		if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
-			runs_avail = &arena->runs_avail_clean;
-		else
-			runs_avail = &arena->runs_avail_dirty;
+		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
 		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
 		    arena_maxclass);
 		assert(arena_mapbits_unallocated_size_get(chunk,
 		    chunk_npages-1) == arena_maxclass);
 		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
 		    arena_mapbits_dirty_get(chunk, chunk_npages-1));
+
+		/* Insert the run into the appropriate runs_avail_* tree. */
+		if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
+			runs_avail = &arena->runs_avail_clean;
+		else
+			runs_avail = &arena->runs_avail_dirty;
 		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
 		    map_bias));
 	} else {
@@ -425,6 +426,15 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 	arena_avail_tree_t *runs_avail;
 
+	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+	    arena_maxclass);
+	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+	    arena_maxclass);
+	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
+
 	/*
 	 * Remove run from the appropriate runs_avail_* tree, so that the arena
 	 * does not use it.
@@ -454,19 +464,12 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 }
 
 static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
+arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
     bool zero)
 {
-	arena_chunk_t *chunk;
 	arena_run_t *run;
 	arena_chunk_map_t *mapelm, key;
 
-	assert(size <= arena_maxclass);
-	assert((size & PAGE_MASK) == 0);
-	assert((large && binind == BININD_INVALID) || (large == false && binind
-	    != BININD_INVALID));
-
 	/* Search the arena's chunks for the lowest best fit. */
 	key.bits = size | CHUNK_MAP_KEY;
 	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
 	if (mapelm != NULL) {
@@ -493,6 +496,26 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
 		return (run);
 	}
 
+	return (NULL);
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
+    bool zero)
+{
+	arena_chunk_t *chunk;
+	arena_run_t *run;
+
+	assert(size <= arena_maxclass);
+	assert((size & PAGE_MASK) == 0);
+	assert((large && binind == BININD_INVALID) || (large == false && binind
+	    != BININD_INVALID));
+
+	/* Search the arena's chunks for the lowest best fit. */
+	run = arena_run_alloc_helper(arena, size, large, binind, zero);
+	if (run != NULL)
+		return (run);
+
 	/*
 	 * No usable runs.  Create a new chunk from which to allocate the run.
 	 */
@@ -508,32 +531,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
 	 * sufficient memory available while this one dropped arena->lock in
 	 * arena_chunk_alloc(), so search one more time.
 	 */
-	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
-	if (mapelm != NULL) {
-		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
-		size_t pageind = (((uintptr_t)mapelm -
-		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
-		    + map_bias;
-
-		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
-		    LG_PAGE));
-		arena_run_split(arena, run, size, large, binind, zero);
-		return (run);
-	}
-	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
-	if (mapelm != NULL) {
-		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
-		size_t pageind = (((uintptr_t)mapelm -
-		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
-		    + map_bias;
-
-		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
-		    LG_PAGE));
-		arena_run_split(arena, run, size, large, binind, zero);
-		return (run);
-	}
-
-	return (NULL);
+	return (arena_run_alloc_helper(arena, size, large, binind, zero));
 }
 
 static inline void
@@ -588,6 +586,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 	 */
 	if (chunk == arena->spare) {
+		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
+		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
 
 		arena_chunk_alloc(arena);
 	}
 
@@ -600,14 +600,18 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 			npages = arena_mapbits_unallocated_size_get(chunk,
 			    pageind) >> LG_PAGE;
 			assert(pageind + npages <= chunk_npages);
-			if (arena_mapbits_dirty_get(chunk, pageind)) {
+			assert(arena_mapbits_dirty_get(chunk, pageind) ==
+			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
+			if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
 				size_t i;
 
 				arena_avail_tree_remove(
 				    &arena->runs_avail_dirty, mapelm);
 
 				arena_mapbits_unzeroed_set(chunk, pageind,
 				    flag_unzeroed);
 				arena_mapbits_large_set(chunk, pageind,
-				    (npages << LG_PAGE), flag_unzeroed);
+				    (npages << LG_PAGE), 0);
 				/*
 				 * Update internal elements in the page map, so
 				 * that CHUNK_MAP_UNZEROED is properly set.
@@ -617,8 +621,10 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 					    pageind+i, flag_unzeroed);
 				}
 				if (npages > 1) {
+					arena_mapbits_unzeroed_set(chunk,
+					    pageind+npages-1, flag_unzeroed);
 					arena_mapbits_large_set(chunk,
-					    pageind+npages-1, 0, flag_unzeroed);
+					    pageind+npages-1, 0, 0);
 				}
 
 			if (config_stats) {
@@ -842,6 +848,8 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	 * The run is dirty if the caller claims to have dirtied it, as well as
 	 * if it was already dirty before being allocated.
 	 */
+	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
 	if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
 		dirty = true;
 	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
@@ -941,9 +949,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	if (size == arena_maxclass) {
 		assert(run_ind == map_bias);
 		assert(run_pages == (arena_maxclass >> LG_PAGE));
-		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
-		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
-		    arena_maxclass);
 		arena_chunk_dealloc(arena, chunk);
 	}
 
@@ -974,10 +979,8 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	 * run first, in case of single-page runs.
 	 */
 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
-	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
-	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind));
+	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
+	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
 
 	if (config_debug) {
 		UNUSED size_t tail_npages = newsize >> LG_PAGE;
@@ -986,8 +989,8 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 		assert(arena_mapbits_dirty_get(chunk,
 		    pageind+head_npages+tail_npages-1) == flag_dirty);
 	}
-	arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty
-	    | arena_mapbits_unzeroed_get(chunk, pageind+head_npages));
+	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
+	    flag_dirty);
 
 	arena_run_dalloc(arena, run, false);
 }
@@ -1008,10 +1011,8 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	 * run first, in case of single-page runs.
 	 */
 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
-	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
-	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
-	    arena_mapbits_unzeroed_get(chunk, pageind));
+	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
+	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
 
 	if (config_debug) {
 		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
@@ -1021,8 +1022,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 		    pageind+head_npages+tail_npages-1) == flag_dirty);
 	}
 	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
-	    flag_dirty | arena_mapbits_unzeroed_get(chunk,
-	    pageind+head_npages));
+	    flag_dirty);
 
 	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
 	    dirty);
@@ -1524,16 +1524,14 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 	 * trim the clean pages before deallocating the dirty portion of the
 	 * run.
 	 */
+	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
 	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
 	    npages) {
-		/*
-		 * Trim clean pages.  Convert to large run beforehand.  Set the
-		 * last map element first, in case this is a one-page run.
-		 */
-		arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
-		    arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
-		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
-		    arena_mapbits_unzeroed_get(chunk, run_ind));
+		/* Trim clean pages.  Convert to large run beforehand. */
+		assert(npages > 0);
+		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
+		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
 		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
 		    ((past - run_ind) << LG_PAGE), false);
 		/* npages = past - run_ind; */
@@ -1626,6 +1626,12 @@ _malloc_prefork(void)
 {
 	unsigned i;
 
+#ifdef JEMALLOC_MUTEX_INIT_CB
+	if (malloc_initialized == false)
+		return;
+#endif
+	assert(malloc_initialized);
+
 	/* Acquire all mutexes in a safe order. */
 	malloc_mutex_prefork(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
@@ -1647,6 +1653,12 @@ _malloc_postfork(void)
 {
 	unsigned i;
 
+#ifdef JEMALLOC_MUTEX_INIT_CB
+	if (malloc_initialized == false)
+		return;
+#endif
+	assert(malloc_initialized);
+
 	/* Release all mutexes, now that fork() has completed. */
 	chunk_dss_postfork_parent();
 	huge_postfork_parent();
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
assert(malloc_initialized);
|
||||
|
||||
/* Release all mutexes, now that fork() has completed. */
|
||||
chunk_dss_postfork_child();
|
||||
huge_postfork_child();
|
||||
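The three fork hunks above follow the usual allocator fork discipline: take every malloc mutex before fork() so the child never inherits a lock held by a thread that no longer exists, then release (or re-initialize) on both sides; the JEMALLOC_MUTEX_INIT_CB early return covers FreeBSD's libc, where these hooks can run before malloc is initialized. A generic sketch of the pattern with pthread_atfork() (handler names hypothetical):

#include <pthread.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire all allocator locks in a fixed order before fork(). */
static void
prefork(void)
{
    pthread_mutex_lock(&alloc_lock);
}

/* Release them in both the parent and the child afterward. */
static void
postfork(void)
{
    pthread_mutex_unlock(&alloc_lock);
}

int
main(void)
{
    /* Registered once; the handlers run around every fork(). */
    pthread_atfork(prefork, postfork, postfork);
    return (0);
}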