Avoid re-zeroing memory in calloc() when possible.

Jason Evans 2007-11-27 03:12:15 +00:00
parent 1bbd1b8613
commit bcd3523138
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=173965


@@ -374,16 +374,26 @@ typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_chunk_map_s arena_chunk_map_t;
 struct arena_chunk_map_s {
-	/* Number of pages in run. */
+	/*
+	 * Number of pages in run.  For a free run that has never been touched,
+	 * this is NPAGES_EMPTY for the central pages, which allows us to avoid
+	 * zero-filling untouched pages for calloc().
+	 */
+#define NPAGES_EMPTY ((uint32_t)0x0U)
 	uint32_t	npages;
 	/*
-	 * Position within run.  For a free run, this is POS_FREE for the first
-	 * and last pages.  The POS_FREE special value makes it possible to
-	 * quickly coalesce free runs.
+	 * Position within run.  For a free run, this is POS_EMPTY/POS_FREE for
+	 * the first and last pages.  The special values make it possible to
+	 * quickly coalesce free runs.  POS_EMPTY indicates that the run has
+	 * never been touched, which allows us to avoid zero-filling untouched
+	 * pages for calloc().
 	 *
 	 * This is the limiting factor for chunksize; there can be at most 2^31
 	 * pages in a run.
+	 *
+	 * POS_EMPTY is assumed by arena_run_dalloc() to be less than POS_FREE.
 	 */
+#define POS_EMPTY ((uint32_t)0xfffffffeU)
 #define POS_FREE ((uint32_t)0xffffffffU)
 	uint32_t	pos;
 };
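
To make the new encoding concrete, here is a small standalone sketch (illustrative only, not part of the commit; the struct and helper names are invented, and only the three constants mirror the definitions above). It builds the page map of a never-touched 6-page free run and shows which field each kind of page uses to advertise that it is still untouched, which is what later lets calloc() skip the memset().

#include <assert.h>
#include <stdint.h>

#define NPAGES_EMPTY ((uint32_t)0x0U)
#define POS_EMPTY ((uint32_t)0xfffffffeU)
#define POS_FREE ((uint32_t)0xffffffffU)

struct map_entry { uint32_t npages; uint32_t pos; };

/* Mark a never-touched free run of len pages (len >= 2). */
static void
mark_untouched_run(struct map_entry *map, uint32_t len)
{
	uint32_t i;

	/* Boundary pages carry the run length and POS_EMPTY... */
	map[0].npages = len;
	map[0].pos = POS_EMPTY;
	map[len - 1].npages = len;
	map[len - 1].pos = POS_EMPTY;
	/* ...while central pages are tagged NPAGES_EMPTY. */
	for (i = 1; i < len - 1; i++)
		map[i].npages = NPAGES_EMPTY;
}

int
main(void)
{
	struct map_entry map[6];

	mark_untouched_run(map, 6);
	assert(map[0].pos == POS_EMPTY && map[5].pos == POS_EMPTY);
	assert(map[2].npages == NPAGES_EMPTY);
	/* A free but dirty run would use POS_FREE on its boundary pages instead. */
	return (0);
}
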
@@ -729,15 +739,16 @@ static void	chunk_dealloc(void *chunk, size_t size);
 #ifndef NO_TLS
 static arena_t	*choose_arena_hard(void);
 #endif
-static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
+static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+    bool zero);
 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
 static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
+static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool zero);
 static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
 static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
-static void	*arena_malloc(arena_t *arena, size_t size);
+static void	*arena_malloc(arena_t *arena, size_t size, bool zero);
 static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
     size_t alloc_size);
 static size_t	arena_salloc(const void *ptr);
@@ -745,7 +756,7 @@ static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
 static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 static bool	arena_new(arena_t *arena);
 static arena_t	*arenas_extend(unsigned ind);
-static void	*huge_malloc(size_t size);
+static void	*huge_malloc(size_t size, bool zero);
 static void	*huge_palloc(size_t alignment, size_t size);
 static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
 static void	huge_dalloc(void *ptr);
@@ -1202,6 +1213,11 @@ chunk_alloc(size_t size)
 		    && (uintptr_t)chunk < (uintptr_t)brk_max) {
 			/* Re-use a previously freed brk chunk. */
 			ret = chunk;
+			/*
+			 * Maintain invariant that all newly allocated
+			 * chunks are untouched or zero-filled.
+			 */
+			memset(ret, 0, size);
 			goto RETURN;
 		}
 #endif
@@ -1727,35 +1743,76 @@ arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
 }
 
 static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool zero)
 {
 	arena_chunk_t *chunk;
 	unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
 	unsigned i;
+	uint32_t pos_beg, pos_end;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
 	    >> pagesize_2pow);
 	total_pages = chunk->map[run_ind].npages;
 	need_pages = (size >> pagesize_2pow);
+	assert(need_pages > 0);
 	assert(need_pages <= total_pages);
 	rem_pages = total_pages - need_pages;
 
 	/* Split enough pages from the front of run to fit allocation size. */
 	map_offset = run_ind;
-	for (i = 0; i < need_pages; i++) {
-		chunk->map[map_offset + i].npages = need_pages;
-		chunk->map[map_offset + i].pos = i;
+	pos_beg = chunk->map[map_offset].pos;
+	pos_end = chunk->map[map_offset + total_pages - 1].pos;
+	if (zero == false) {
+		for (i = 0; i < need_pages; i++) {
+			chunk->map[map_offset + i].npages = need_pages;
+			chunk->map[map_offset + i].pos = i;
+		}
+	} else {
+		/*
+		 * Handle first page specially, since we need to look for
+		 * POS_EMPTY rather than NPAGES_EMPTY.
+		 */
+		i = 0;
+		if (chunk->map[map_offset + i].pos != POS_EMPTY) {
+			memset((void *)((uintptr_t)chunk + ((map_offset + i) <<
+			    pagesize_2pow)), 0, pagesize);
+		}
+		chunk->map[map_offset + i].npages = need_pages;
+		chunk->map[map_offset + i].pos = i;
+
+		/* Handle central pages. */
+		for (i++; i < need_pages - 1; i++) {
+			if (chunk->map[map_offset + i].npages != NPAGES_EMPTY) {
+				memset((void *)((uintptr_t)chunk + ((map_offset
+				    + i) << pagesize_2pow)), 0, pagesize);
+			}
+			chunk->map[map_offset + i].npages = need_pages;
+			chunk->map[map_offset + i].pos = i;
+		}
+
+		/*
+		 * Handle last page specially, since we need to look for
+		 * POS_EMPTY rather than NPAGES_EMPTY.
+		 */
+		if (i < need_pages) {
+			if (chunk->map[map_offset + i].npages != POS_EMPTY) {
+				memset((void *)((uintptr_t)chunk + ((map_offset
+				    + i) << pagesize_2pow)), 0, pagesize);
+			}
+			chunk->map[map_offset + i].npages = need_pages;
+			chunk->map[map_offset + i].pos = i;
+		}
 	}
 
 	/* Keep track of trailing unused pages for later use. */
 	if (rem_pages > 0) {
 		/* Update map for trailing pages. */
 		map_offset += need_pages;
 		chunk->map[map_offset].npages = rem_pages;
-		chunk->map[map_offset].pos = POS_FREE;
+		chunk->map[map_offset].pos = pos_beg;
 		chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
-		chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
+		chunk->map[map_offset + rem_pages - 1].pos = pos_end;
 	}
 
 	chunk->pages_used += need_pages;
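
The pos_beg/pos_end bookkeeping above is what keeps an untouched remainder untouched after a split. A minimal sketch of just that bookkeeping, with the zero-filling and the rest of the allocator omitted (names are illustrative and assume the same map encoding as in the sketch after the first hunk):

#include <assert.h>
#include <stdint.h>

#define NPAGES_EMPTY ((uint32_t)0x0U)
#define POS_EMPTY ((uint32_t)0xfffffffeU)

struct map_entry { uint32_t npages; uint32_t pos; };

/*
 * Carve `need` pages off the front of a free run of `total` pages,
 * preserving the original boundary pos values so an untouched remainder
 * stays marked untouched.
 */
static void
split_front(struct map_entry *map, uint32_t total, uint32_t need)
{
	uint32_t i;
	uint32_t pos_beg = map[0].pos;
	uint32_t pos_end = map[total - 1].pos;

	for (i = 0; i < need; i++) {
		map[i].npages = need;	/* allocated run length */
		map[i].pos = i;		/* offset back to the run start */
	}
	if (total > need) {
		map[need].npages = total - need;
		map[need].pos = pos_beg;
		map[total - 1].npages = total - need;
		map[total - 1].pos = pos_end;
	}
}

int
main(void)
{
	/* An 8-page free run that has never been touched. */
	struct map_entry map[8] = {
		[0] = { 8, POS_EMPTY },
		[7] = { 8, POS_EMPTY },
		/* central entries default to npages == NPAGES_EMPTY (0) */
	};

	split_front(map, 8, 3);
	assert(map[0].pos == 0 && map[2].pos == 2);
	/* The 5-page remainder is still advertised as untouched. */
	assert(map[3].pos == POS_EMPTY && map[7].pos == POS_EMPTY);
	return (0);
}

Had the original run already been dirtied (boundary pos of POS_FREE), the remainder would simply inherit POS_FREE the same way.
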
@@ -1772,6 +1829,8 @@ arena_chunk_alloc(arena_t *arena)
 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
 	} else {
+		unsigned i;
+
 		chunk = (arena_chunk_t *)chunk_alloc(chunksize);
 		if (chunk == NULL)
 			return (NULL);
@@ -1796,12 +1855,20 @@ arena_chunk_alloc(arena_t *arena)
 		/*
 		 * Initialize enough of the map to support one maximal free run.
 		 */
-		chunk->map[arena_chunk_header_npages].npages = chunk_npages -
-		    arena_chunk_header_npages;
-		chunk->map[arena_chunk_header_npages].pos = POS_FREE;
-		chunk->map[chunk_npages - 1].npages = chunk_npages -
-		    arena_chunk_header_npages;
-		chunk->map[chunk_npages - 1].pos = POS_FREE;
+		i = arena_chunk_header_npages;
+		chunk->map[i].npages = chunk_npages - arena_chunk_header_npages;
+		chunk->map[i].pos = POS_EMPTY;
+
+		/* Mark the free run's central pages as untouched. */
+		for (i++; i < chunk_npages - 1; i++)
+			chunk->map[i].npages = NPAGES_EMPTY;
+
+		/* Take care when (chunk_npages == 2). */
+		if (i < chunk_npages) {
+			chunk->map[i].npages = chunk_npages -
+			    arena_chunk_header_npages;
+			chunk->map[i].pos = POS_EMPTY;
+		}
 	}
 
 	return (chunk);
@@ -1835,7 +1902,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 }
 
 static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size)
+arena_run_alloc(arena_t *arena, size_t size, bool zero)
 {
 	arena_chunk_t *chunk;
 	arena_run_t *run;
@@ -1869,14 +1936,14 @@ arena_run_alloc(arena_t *arena, size_t size)
 	    arena_chunk_header_npages);
 	for (i = chunk->min_frun_ind; i < chunk_npages;) {
 		mapelm = &chunk->map[i];
-		if (mapelm->pos == POS_FREE) {
+		if (mapelm->pos >= POS_EMPTY) {
 			if (mapelm->npages >= need_npages) {
 				run = (arena_run_t *)
 				    ((uintptr_t)chunk + (i <<
 				    pagesize_2pow));
 				/* Update page map. */
 				arena_run_split(arena, run,
-				    size);
+				    size, zero);
 				return (run);
 			}
 			if (mapelm->npages >
@@ -1910,7 +1977,7 @@ arena_run_alloc(arena_t *arena, size_t size)
 	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
 	    pagesize_2pow));
 	/* Update page map. */
-	arena_run_split(arena, run, size);
+	arena_run_split(arena, run, size, zero);
 	return (run);
 }
@@ -1947,37 +2014,51 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
 	/* Try to coalesce with neighboring runs. */
 	if (run_ind > arena_chunk_header_npages &&
-	    chunk->map[run_ind - 1].pos == POS_FREE) {
+	    chunk->map[run_ind - 1].pos >= POS_EMPTY) {
 		unsigned prev_npages;
 
 		/* Coalesce with previous run. */
 		prev_npages = chunk->map[run_ind - 1].npages;
+		/*
+		 * The way run allocation currently works (lowest first fit),
+		 * it is impossible for a free run to have empty (untouched)
+		 * pages followed by dirty pages.  If the run allocation policy
+		 * changes, then we will need to account for it here.
+		 */
+		assert(chunk->map[run_ind - 1].pos != POS_EMPTY);
+#if 0
+		if (prev_npages > 1 && chunk->map[run_ind - 1].pos == POS_EMPTY)
+			chunk->map[run_ind - 1].npages = NPAGES_EMPTY;
+#endif
 		run_ind -= prev_npages;
 		assert(chunk->map[run_ind].npages == prev_npages);
-		assert(chunk->map[run_ind].pos == POS_FREE);
+		assert(chunk->map[run_ind].pos >= POS_EMPTY);
 		run_pages += prev_npages;
 
 		chunk->map[run_ind].npages = run_pages;
-		assert(chunk->map[run_ind].pos == POS_FREE);
+		assert(chunk->map[run_ind].pos >= POS_EMPTY);
 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
-		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
+		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
 	}
 
 	if (run_ind + run_pages < chunk_npages &&
-	    chunk->map[run_ind + run_pages].pos == POS_FREE) {
+	    chunk->map[run_ind + run_pages].pos >= POS_EMPTY) {
 		unsigned next_npages;
 
 		/* Coalesce with next run. */
 		next_npages = chunk->map[run_ind + run_pages].npages;
+		if (next_npages > 1 && chunk->map[run_ind + run_pages].pos ==
+		    POS_EMPTY)
+			chunk->map[run_ind + run_pages].npages = NPAGES_EMPTY;
 		run_pages += next_npages;
 		assert(chunk->map[run_ind + run_pages - 1].npages ==
 		    next_npages);
-		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
+		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
 
 		chunk->map[run_ind].npages = run_pages;
-		chunk->map[run_ind].pos = POS_FREE;
+		assert(chunk->map[run_ind].pos >= POS_EMPTY);
 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
-		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
+		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
 	}
 
 	if (chunk->map[run_ind].npages > chunk->max_frun_npages)
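
The forward-coalescing case above demotes the absorbed run's old first page from boundary to central status, so the knowledge that it is untouched is not lost inside the merged run. A small worked example (standalone and illustrative only; the constants and the demotion test mirror the diff, everything else is scaffolding):

#include <assert.h>
#include <stdint.h>

#define NPAGES_EMPTY ((uint32_t)0x0U)
#define POS_EMPTY ((uint32_t)0xfffffffeU)
#define POS_FREE ((uint32_t)0xffffffffU)

struct map_entry { uint32_t npages; uint32_t pos; };

int
main(void)
{
	/*
	 * Pages 0-1: a just-freed (dirty) run.  Pages 2-5: a free run that
	 * has never been touched.  Freeing pages 0-1 coalesces forward.
	 */
	struct map_entry map[6] = {
		[0] = { 2, POS_FREE },  [1] = { 2, POS_FREE },
		[2] = { 4, POS_EMPTY }, [5] = { 4, POS_EMPTY },
	};
	uint32_t run_ind = 0, run_pages = 2, next_npages;

	next_npages = map[run_ind + run_pages].npages;
	/*
	 * Page 2 stops being a run boundary, so demote it to NPAGES_EMPTY;
	 * a later split then still recognizes it as an untouched central page.
	 */
	if (next_npages > 1 && map[run_ind + run_pages].pos == POS_EMPTY)
		map[run_ind + run_pages].npages = NPAGES_EMPTY;
	run_pages += next_npages;
	map[run_ind].npages = run_pages;
	map[run_ind + run_pages - 1].npages = run_pages;

	assert(map[2].npages == NPAGES_EMPTY && map[2].pos == POS_EMPTY);
	assert(map[0].npages == 6 && map[5].npages == 6);
	return (0);
}

The mirror-image demotion for backward coalescing is the code left under #if 0 above: as the comment explains, the lowest-first-fit run allocation policy makes that case impossible today, so it is only asserted.
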
@@ -2008,7 +2089,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 	/* No existing runs have any space available. */
 
 	/* Allocate a new run. */
-	run = arena_run_alloc(arena, bin->run_size);
+	run = arena_run_alloc(arena, bin->run_size, false);
 	if (run == NULL)
 		return (NULL);
 
@@ -2156,7 +2237,7 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 }
 
 static void *
-arena_malloc(arena_t *arena, size_t size)
+arena_malloc(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
 
@@ -2214,11 +2295,20 @@ arena_malloc(arena_t *arena, size_t size)
 		arena->stats.nmalloc_small++;
 		arena->stats.allocated_small += size;
 #endif
+		malloc_mutex_unlock(&arena->mtx);
+
+		if (zero == false) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		} else
+			memset(ret, 0, size);
 	} else {
 		/* Large allocation. */
 		size = PAGE_CEILING(size);
 		malloc_mutex_lock(&arena->mtx);
-		ret = (void *)arena_run_alloc(arena, size);
+		ret = (void *)arena_run_alloc(arena, size, true); // XXX zero?
 		if (ret == NULL) {
 			malloc_mutex_unlock(&arena->mtx);
 			return (NULL);
@@ -2227,14 +2317,16 @@ arena_malloc(arena_t *arena, size_t size)
 		arena->stats.nmalloc_large++;
 		arena->stats.allocated_large += size;
 #endif
-	}
-
-	malloc_mutex_unlock(&arena->mtx);
-
-	if (opt_junk)
-		memset(ret, 0xa5, size);
-	else if (opt_zero)
-		memset(ret, 0, size);
+		malloc_mutex_unlock(&arena->mtx);
+
+		if (zero == false) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		}
+	}
 
 	return (ret);
 }
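
For reference, the debug-fill logic that both branches of arena_malloc() now guard with zero == false amounts to a helper like the following (hypothetical name; opt_junk and opt_zero keep the semantics used throughout the diff). When zero is true it is skipped entirely: the small-allocation path memsets the region directly, while the large-allocation path relies on arena_run_alloc()'s page-map-aware zeroing.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool opt_junk;	/* fill newly allocated memory with 0xa5 */
static bool opt_zero;	/* fill newly allocated memory with zeros */

/* Debug fill, applied only when the caller did not ask for zeroed memory. */
static void
debug_fill(void *ret, size_t size)
{

	if (opt_junk)
		memset(ret, 0xa5, size);
	else if (opt_zero)
		memset(ret, 0, size);
}
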
@@ -2273,7 +2365,7 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 	npages = size >> pagesize_2pow;
 
 	malloc_mutex_lock(&arena->mtx);
-	ret = (void *)arena_run_alloc(arena, alloc_size);
+	ret = (void *)arena_run_alloc(arena, alloc_size, false);
 	if (ret == NULL) {
 		malloc_mutex_unlock(&arena->mtx);
 		return (NULL);
@@ -2402,7 +2494,7 @@ arena_ralloc(void *ptr, size_t size, size_t oldsize)
 	 * need to use a different size class.  In that case, fall back to
 	 * allocating new space and copying.
 	 */
-	ret = arena_malloc(choose_arena(), size);
+	ret = arena_malloc(choose_arena(), size, false);
 	if (ret == NULL)
 		return (NULL);
 
@@ -2421,32 +2513,17 @@ arena_ralloc(void *ptr, size_t size, size_t oldsize)
 	return (ptr);
 }
 
-static void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+static inline void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    unsigned pageind, arena_chunk_map_t *mapelm)
 {
-	unsigned pageind;
-	arena_chunk_map_t *mapelm;
+	arena_run_t *run;
+	arena_bin_t *bin;
 	size_t size;
 
-	assert(arena != NULL);
-	assert(arena->magic == ARENA_MAGIC);
-	assert(chunk->arena == arena);
-	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
-	mapelm = &chunk->map[pageind];
-	if (mapelm->pos != 0 || ptr != (void *)((uintptr_t)chunk) + (pageind <<
-	    pagesize_2pow)) {
-		arena_run_t *run;
-		arena_bin_t *bin;
-
-		/* Small allocation. */
-		pageind -= mapelm->pos;
-		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
-		    pagesize_2pow));
-		assert(run->magic == ARENA_RUN_MAGIC);
-		bin = run->bin;
-		size = bin->reg_size;
+	pageind -= mapelm->pos;
+
+	run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
+	assert(run->magic == ARENA_RUN_MAGIC);
+	bin = run->bin;
+	size = bin->reg_size;
@@ -2454,7 +2531,6 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 	if (opt_junk)
 		memset(ptr, 0x5a, size);
 
-	malloc_mutex_lock(&arena->mtx);
 	arena_run_reg_dalloc(run, bin, ptr, size);
 	run->nfree++;
 
@@ -2464,10 +2540,9 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 		bin->runcur = NULL;
 	else if (bin->nregs != 1) {
 		/*
-		 * This block's conditional is necessary because
-		 * if the run only contains one region, then it
-		 * never gets inserted into the non-full runs
-		 * tree.
+		 * This block's conditional is necessary because if the
+		 * run only contains one region, then it never gets
+		 * inserted into the non-full runs tree.
 		 */
 		RB_REMOVE(arena_run_tree_s, &bin->runs, run);
 	}
@@ -2480,8 +2555,8 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 #endif
 	} else if (run->nfree == 1 && run != bin->runcur) {
 		/*
-		 * Make sure that bin->runcur always refers to the
-		 * lowest non-full run, if one exists.
+		 * Make sure that bin->runcur always refers to the lowest
+		 * non-full run, if one exists.
 		 */
 		if (bin->runcur == NULL)
 			bin->runcur = run;
@@ -2500,7 +2575,31 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 		arena->stats.allocated_small -= size;
 		arena->stats.ndalloc_small++;
 #endif
+}
+
+static void
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+	unsigned pageind;
+	arena_chunk_map_t *mapelm;
+
+	assert(arena != NULL);
+	assert(arena->magic == ARENA_MAGIC);
+	assert(chunk->arena == arena);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+	mapelm = &chunk->map[pageind];
+	if (mapelm->pos != 0 || ptr != (void *)((uintptr_t)chunk) + (pageind <<
+	    pagesize_2pow)) {
+		/* Small allocation. */
+		malloc_mutex_lock(&arena->mtx);
+		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
+		malloc_mutex_unlock(&arena->mtx);
 	} else {
+		size_t size;
+
 		/* Large allocation. */
 		size = mapelm->npages << pagesize_2pow;
@@ -2515,10 +2614,9 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 		arena->stats.allocated_large -= size;
 		arena->stats.ndalloc_large++;
 #endif
-	}
-
-	malloc_mutex_unlock(&arena->mtx);
+		malloc_mutex_unlock(&arena->mtx);
+	}
 }
 
 static bool
 arena_new(arena_t *arena)
@@ -2631,7 +2729,7 @@ arenas_extend(unsigned ind)
  */
 static void *
-huge_malloc(size_t size)
+huge_malloc(size_t size, bool zero)
 {
 	void *ret;
 	size_t csize;
 
@@ -2668,10 +2766,12 @@ huge_malloc(size_t size)
 #endif
 	malloc_mutex_unlock(&chunks_mtx);
 
-	if (opt_junk)
-		memset(ret, 0xa5, csize);
-	else if (opt_zero)
-		memset(ret, 0, csize);
+	if (zero == false) {
+		if (opt_junk)
+			memset(ret, 0xa5, csize);
+		else if (opt_zero)
+			memset(ret, 0, csize);
+	}
 
 	return (ret);
 }
@@ -2779,7 +2879,7 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
 	 * need to use a different size class.  In that case, fall back to
 	 * allocating new space and copying.
 	 */
-	ret = huge_malloc(size);
+	ret = huge_malloc(size, false);
 	if (ret == NULL)
 		return (NULL);
 
@@ -2838,9 +2938,9 @@ imalloc(size_t size)
 	assert(size != 0);
 
 	if (size <= arena_maxclass)
-		ret = arena_malloc(choose_arena(), size);
+		ret = arena_malloc(choose_arena(), size, false);
 	else
-		ret = huge_malloc(size);
+		ret = huge_malloc(size, false);
 
 	return (ret);
 }
@@ -2881,7 +2981,7 @@ ipalloc(size_t alignment, size_t size)
 	if (ceil_size <= pagesize || (alignment <= pagesize
 	    && ceil_size <= arena_maxclass))
-		ret = arena_malloc(choose_arena(), ceil_size);
+		ret = arena_malloc(choose_arena(), ceil_size, false);
 	else {
 		size_t run_size;
 
@@ -2931,7 +3031,7 @@ ipalloc(size_t alignment, size_t size)
 			ret = arena_palloc(choose_arena(), alignment, ceil_size,
 			    run_size);
 		} else if (alignment <= chunksize)
-			ret = huge_malloc(ceil_size);
+			ret = huge_malloc(ceil_size, false);
 		else
 			ret = huge_palloc(alignment, ceil_size);
 	}
@@ -2945,35 +3045,10 @@ icalloc(size_t size)
 {
 	void *ret;
 
-	if (size <= arena_maxclass) {
-		ret = arena_malloc(choose_arena(), size);
-		if (ret == NULL)
-			return (NULL);
-		memset(ret, 0, size);
-	} else {
-		/*
-		 * The virtual memory system provides zero-filled pages, so
-		 * there is no need to do so manually, unless opt_junk is
-		 * enabled, in which case huge_malloc() fills huge allocations
-		 * with junk.
-		 */
-		ret = huge_malloc(size);
-		if (ret == NULL)
-			return (NULL);
-
-		if (opt_junk)
-			memset(ret, 0, size);
-#ifdef USE_BRK
-		else if ((uintptr_t)ret >= (uintptr_t)brk_base
-		    && (uintptr_t)ret < (uintptr_t)brk_max) {
-			/*
-			 * This may be a re-used brk chunk.  Therefore, zero
-			 * the memory.
-			 */
-			memset(ret, 0, size);
-		}
-#endif
-	}
+	if (size <= arena_maxclass)
+		ret = arena_malloc(choose_arena(), size, true);
+	else
+		ret = huge_malloc(size, true);
 
 	return (ret);
 }
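
With icalloc() reduced to passing zero = true down the arena_malloc() and huge_malloc() paths, the remaining link is the calloc() entry point itself, which is not part of this diff. A hedged sketch of that wiring (heavily simplified; the real function also handles initialization, tracing, statistics and error reporting, and the size == 0 quirks are omitted):

#include <errno.h>
#include <stddef.h>

void	*icalloc(size_t size);	/* as modified above */

void *
calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (num != 0 && num_size / num != size) {
		/* num * size overflowed size_t. */
		errno = ENOMEM;
		return (NULL);
	}
	return (icalloc(num_size));
}

The payoff is that icalloc() no longer memsets whole allocations after the fact: huge allocations and freshly mapped arena chunks use the kernel's zero-filled pages as they are, and only pages the page map says have been dirtied get zeroed.
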