Add init_lock, and use it to protect against allocator initialization
races.  This isn't currently necessary for libpthread or libthr, but
without it external threads libraries like the linuxthreads port are
not safe to use.

Reported by:	ganbold@micom.mng.net
jasone 2006-04-04 19:46:28 +00:00
parent c95a267038
commit b2f560b56d

@@ -350,8 +350,12 @@ typedef struct {
 	spinlock_t	lock;
 } malloc_mutex_t;
 
+/* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
+/* Used to avoid initialization races. */
+static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
+
 /******************************************************************************/
 /*
  * Statistics data structures.
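
init_lock is statically initialized because it guards the code that bootstraps the allocator itself; there is no earlier point at which malloc_mutex_init() could safely be called on it. A minimal sketch of the same idea using a POSIX mutex in place of libc's internal spinlock-based malloc_mutex_t (guard_lock is a hypothetical name, not part of this change):

#include <pthread.h>

/* Usable from the very first call; needs no runtime initialization step. */
static pthread_mutex_t guard_lock = PTHREAD_MUTEX_INITIALIZER;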
@@ -2969,12 +2973,6 @@ static inline bool
 malloc_init(void)
 {
 
-	/*
-	 * We always initialize before threads are created, since any thread
-	 * creation first triggers allocations.
-	 */
-	assert(__isthreaded == 0 || malloc_initialized);
-
 	if (malloc_initialized == false)
 		return (malloc_init_hard());
 
@@ -2989,6 +2987,16 @@ malloc_init_hard(void)
 	char buf[PATH_MAX + 1];
 	const char *opts;
 
+	malloc_mutex_lock(&init_lock);
+	if (malloc_initialized) {
+		/*
+		 * Another thread initialized the allocator before this one
+		 * acquired init_lock.
+		 */
+		malloc_mutex_unlock(&init_lock);
+		return (false);
+	}
+
 	/* Get number of CPUs. */
 	{
 		int mib[2];
@@ -3339,8 +3347,10 @@ malloc_init_hard(void)
 	/* Allocate and initialize arenas. */
 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
-	if (arenas == NULL)
+	if (arenas == NULL) {
+		malloc_mutex_unlock(&init_lock);
 		return (true);
+	}
 
 	/*
 	 * Zero the array.  In practice, this should always be pre-zeroed,
 	 * since it was just mmap()ed, but let's be sure.
@@ -3352,12 +3362,15 @@ malloc_init_hard(void)
 	 * arena_choose_hard().
 	 */
 	arenas_extend(0);
-	if (arenas[0] == NULL)
+	if (arenas[0] == NULL) {
+		malloc_mutex_unlock(&init_lock);
 		return (true);
+	}
 
 	malloc_mutex_init(&arenas_mtx);
 
 	malloc_initialized = true;
+	malloc_mutex_unlock(&init_lock);
 
 	return (false);
 }
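
Taken together, the hunks implement a lock-protected lazy initialization: an unlocked fast-path check in malloc_init(), a re-check under init_lock in malloc_init_hard() so that a losing racer simply unlocks and reports success, and an unlock on every return path, including both error returns. A self-contained sketch of the pattern, again using POSIX primitives rather than the spinlock-based malloc_mutex_t (init, init_hard, do_setup, and the variable names are hypothetical stand-ins, not the libc code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized = false;

/* Stand-in for the body of malloc_init_hard(); returns true on error,
 * matching malloc_init_hard()'s convention. */
static bool
do_setup(void)
{

	/* ... expensive one-time setup ... */
	return (false);
}

static bool
init_hard(void)
{

	pthread_mutex_lock(&init_lock);
	if (initialized) {
		/* Another thread initialized before we acquired the lock. */
		pthread_mutex_unlock(&init_lock);
		return (false);
	}
	if (do_setup()) {
		/*
		 * Error paths must unlock too; otherwise a later caller
		 * retrying initialization would deadlock.
		 */
		pthread_mutex_unlock(&init_lock);
		return (true);
	}
	initialized = true;
	pthread_mutex_unlock(&init_lock);
	return (false);
}

static inline bool
init(void)
{

	/* Unlocked fast path, as in malloc_init(); threads that race past
	 * the check serialize inside init_hard(). */
	if (initialized == false)
		return (init_hard());
	return (false);
}

Note that the fast path reads initialized without holding the lock, exactly as malloc_init() does; a thread that sees a stale false pays only the cost of acquiring init_lock and re-checking.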