Import new libcxxrt into vendor branch.

David Chisnall 2013-07-10 10:48:22 +00:00
parent af04c9e2c0
commit c7b3fb9657
7 changed files with 138 additions and 121 deletions


@@ -27,3 +27,4 @@
#define ATOMIC_LOAD(addr)\
(__sync_synchronize(), *addr)
#endif


@@ -65,3 +65,13 @@ extern "C" void __cxa_pure_virtual()
abort();
}
/**
* Compilers may (but are not required to) set any deleted-virtual function's
* vtable entry to this function. This makes debugging slightly easier, as
* users can add a breakpoint on this function to tell if they've accidentally
* called a deleted-virtual function.
*/
extern "C" void __cxa_deleted_virtual()
{
abort();
}
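For context, a minimal sketch (not part of this diff; the class names are made up) of how the sibling handler __cxa_pure_virtual is reached at run time: during a base-class constructor the vtable still points pure-virtual slots at the handler, so an indirect virtual call aborts through it, and a breakpoint on the handler catches it.

struct Base
{
    Base() { call(); }           // virtual dispatch while the object is still a Base
    void call() { handler(); }   // out-of-line call so it is not devirtualised
    virtual void handler() = 0;  // slot currently filled with __cxa_pure_virtual
    virtual ~Base() {}
};

struct Derived : Base
{
    void handler() {}
};

int main()
{
    Derived d;                   // aborts inside the Base constructor
    return 0;
}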


@@ -193,6 +193,8 @@ __cxa_eh_globals *__cxa_get_globals(void);
*/
__cxa_eh_globals *__cxa_get_globals_fast(void);
std::type_info * __cxa_current_exception_type();
/**
* Throws an exception returned by __cxa_current_primary_exception(). This
* exception may have been caught in another thread.


@@ -57,6 +57,8 @@ typedef unsigned char *dw_eh_ptr_t;
/// DWARF data encoding types.
enum dwarf_data_encoding
{
+/// Absolute pointer value
+DW_EH_PE_absptr = 0x00,
/// Unsigned, little-endian, base 128-encoded (variable length).
DW_EH_PE_uleb128 = 0x01,
/// Unsigned 16-bit integer.
@@ -95,8 +97,6 @@ enum dwarf_data_relative
{
/// Value is omitted
DW_EH_PE_omit = 0xff,
-/// Absolute pointer value
-DW_EH_PE_absptr = 0x00,
/// Value relative to program counter
DW_EH_PE_pcrel = 0x10,
/// Value relative to the text segment
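As a hedged sketch (the helper name is hypothetical and this is not code from the diff), a DW_EH_PE_uleb128 value decodes as follows: seven payload bits per byte, least-significant group first, with the high bit marking continuation.

#include <stdint.h>

static uint64_t read_uleb128_example(const unsigned char **p)
{
    uint64_t value = 0;
    unsigned shift = 0;
    unsigned char byte;
    do
    {
        byte = *(*p)++;                              // consume one byte
        value |= (uint64_t)(byte & 0x7f) << shift;   // low seven bits are payload
        shift += 7;
    } while (byte & 0x80);                           // high bit set means more bytes follow
    return value;                                    // e.g. 0xE5 0x8E 0x26 decodes to 624485
}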


@@ -39,6 +39,24 @@
#pragma weak pthread_setspecific
#pragma weak pthread_getspecific
#pragma weak pthread_once
#ifdef LIBCXXRT_WEAK_LOCKS
#pragma weak pthread_mutex_lock
#define pthread_mutex_lock(mtx) do {\
if (pthread_mutex_lock) pthread_mutex_lock(mtx);\
} while(0)
#pragma weak pthread_mutex_unlock
#define pthread_mutex_unlock(mtx) do {\
if (pthread_mutex_unlock) pthread_mutex_unlock(mtx);\
} while(0)
#pragma weak pthread_cond_signal
#define pthread_cond_signal(cv) do {\
if (pthread_cond_signal) pthread_cond_signal(cv);\
} while(0)
#pragma weak pthread_cond_wait
#define pthread_cond_wait(cv, mtx) do {\
if (pthread_cond_wait) pthread_cond_wait(cv, mtx);\
} while(0)
#endif
using namespace ABI_NAMESPACE;
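A minimal sketch of the same weak-reference idiom (the helper name is made up; this is not part of the diff): declaring a pthread symbol weak lets the runtime test its address and skip thread machinery entirely when libpthread was never linked.

#include <pthread.h>

#pragma weak pthread_key_create

static int create_key_if_threaded(pthread_key_t *key)
{
    if (pthread_key_create)                  // address is null when libpthread is absent
    {
        return pthread_key_create(key, 0);   // normal multi-threaded path
    }
    return 0;                                // single-threaded fallback: nothing to do
}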
@@ -214,8 +232,6 @@ namespace std
}
extern "C" std::type_info *__cxa_current_exception_type();
/**
* Class of exceptions to distinguish between this and other exception types.
*

guard.cc

@@ -41,37 +41,90 @@
* initialised.
*/
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <assert.h>
#include "atomic.h"
// Older GCC doesn't define __LITTLE_ENDIAN__
#ifndef __LITTLE_ENDIAN__
// If __BYTE_ORDER__ is defined, use that instead
# ifdef __BYTE_ORDER__
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __LITTLE_ENDIAN__
# endif
// x86 and ARM are the most common little-endian CPUs, so let's have a
// special case for them (ARM is already special cased). Assume everything
// else is big endian.
# elif defined(__x86_64) || defined(__i386)
# define __LITTLE_ENDIAN__
# endif
#endif
/*
* The least significant bit of the guard variable indicates that the object
* has been initialised, the most significant bit is used for a spinlock.
*/
#ifdef __arm__
// ARM ABI - 32-bit guards.
typedef uint32_t guard_t;
static const uint32_t LOCKED = ((guard_t)1) << 31;
static const uint32_t INITIALISED = 1;
#else
typedef uint64_t guard_t;
# if defined(__LITTLE_ENDIAN__)
static const guard_t LOCKED = ((guard_t)1) << 63;
static const guard_t INITIALISED = 1;
# else
static const guard_t LOCKED = 1;
static const guard_t INITIALISED = ((guard_t)1) << 56;
# endif
#endif
/**
* Acquires a lock on a guard, returning 0 if the object has already been
* initialised, and 1 if it has not. If the object is already constructed then
* this function just needs to read a byte from memory and return.
*/
extern "C" int __cxa_guard_acquire(volatile int32_t *guard_object)
extern "C" int __cxa_guard_acquire(volatile guard_t *guard_object)
{
if ((1<<31) == *guard_object) { return 0; }
// If we can atomically move the value from 0 -> 1, then this is
// uninitialised.
if (__sync_bool_compare_and_swap(guard_object, 0, 1))
// Not an atomic read, doesn't establish a happens-before relationship, but
// if one is already established and we end up seeing an initialised state
// then it's a fast path, otherwise we'll do something more expensive than
// this test anyway...
if ((INITIALISED == *guard_object)) { return 0; }
// Spin trying to do the initialisation
while (1)
{
return 1;
}
// If the value is not 0, some other thread was initialising this. Spin
// until it's finished.
while (__sync_bool_compare_and_swap(guard_object, (1<<31), (1<<31)))
{
// If the other thread aborted, then we grab the lock
if (__sync_bool_compare_and_swap(guard_object, 0, 1))
// Loop trying to move the value of the guard from 0 (not
// locked, not initialised) to the locked-uninitialised
// position.
switch (__sync_val_compare_and_swap(guard_object, 0, LOCKED))
{
return 1;
// If the old value was 0, we succeeded, so continue
// initialising
case 0:
return 1;
// If this was already initialised, return and let the caller skip
// initialising it again.
case INITIALISED:
return 0;
// If it is locked by another thread, relinquish the CPU and try
// again later.
case LOCKED:
case LOCKED | INITIALISED:
sched_yield();
break;
// If it is some other value, then something has gone badly wrong.
// Give up.
default:
fprintf(stderr, "Invalid state detected attempting to lock static initialiser.\n");
abort();
}
sched_yield();
}
//__builtin_unreachable();
return 0;
}
@@ -79,86 +132,21 @@ extern "C" int __cxa_guard_acquire(volatile int32_t *guard_object)
* Releases the lock without marking the object as initialised. This function
* is called if initialising a static causes an exception to be thrown.
*/
extern "C" void __cxa_guard_abort(int32_t *guard_object)
extern "C" void __cxa_guard_abort(volatile guard_t *guard_object)
{
assert(__sync_bool_compare_and_swap(guard_object, 1, 0));
__attribute__((unused))
bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, 0);
assert(reset);
}
/**
* Releases the guard and marks the object as initialised. This function is
* called after successful initialisation of a static.
*/
extern "C" void __cxa_guard_release(int32_t *guard_object)
extern "C" void __cxa_guard_release(volatile guard_t *guard_object)
{
assert(__sync_bool_compare_and_swap(guard_object, 1, (1<<31)));
__attribute__((unused))
bool reset = __sync_bool_compare_and_swap(guard_object, LOCKED, INITIALISED);
assert(reset);
}
-#else
-// Itanium ABI: 64-bit guards
-/**
-* Returns a pointer to the low 32 bits in a 64-bit value, respecting the
-* platform's byte order.
-*/
-static int32_t *low_32_bits(volatile int64_t *ptr)
-{
-int32_t *low= (int32_t*)ptr;
-// Test if the machine is big endian - constant propagation at compile time
-// should eliminate this completely.
-int one = 1;
-if (*(char*)&one != 1)
-{
-low++;
-}
-return low;
-}
-/**
-* Acquires a lock on a guard, returning 0 if the object has already been
-* initialised, and 1 if it has not. If the object is already constructed then
-* this function just needs to read a byte from memory and return.
-*/
-extern "C" int __cxa_guard_acquire(volatile int64_t *guard_object)
-{
-char first_byte = (*guard_object) >> 56;
-if (1 == first_byte) { return 0; }
-int32_t *lock = low_32_bits(guard_object);
-// Simple spin lock using the low 32 bits. We assume that concurrent
-// attempts to initialize statics are very rare, so we don't need to
-// optimise for the case where we have lots of threads trying to acquire
-// the lock at the same time.
-while (!__sync_bool_compare_and_swap_4(lock, 0, 1))
-{
-if (1 == ((*guard_object) >> 56))
-{
-break;
-}
-sched_yield();
-}
-// We have to test the guard again, in case another thread has performed
-// the initialisation while we were trying to acquire the lock.
-first_byte = (*guard_object) >> 56;
-return (1 != first_byte);
-}
-/**
-* Releases the lock without marking the object as initialised. This function
-* is called if initialising a static causes an exception to be thrown.
-*/
-extern "C" void __cxa_guard_abort(int64_t *guard_object)
-{
-int32_t *lock = low_32_bits(guard_object);
-*lock = 0;
-}
-/**
-* Releases the guard and marks the object as initialised. This function is
-* called after successful initialisation of a static.
-*/
-extern "C" void __cxa_guard_release(int64_t *guard_object)
-{
-// Set the first byte to 1
-*guard_object |= ((int64_t)1) << 56;
-__cxa_guard_abort(guard_object);
-}
-#endif
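To show how the three entry points fit together, here is a hedged sketch of roughly what a compiler emits for a function-local static. Widget, widget_guard, and get_widget are made-up names, real compilers test only the guard's initialised byte on the fast path, and alignment of the storage is glossed over.

#include <new>

struct Widget { int value; Widget() : value(42) {} };

static guard_t widget_guard;                        // zero: unlocked, uninitialised
static char widget_storage[sizeof(Widget)];

Widget &get_widget()
{
    if (!(widget_guard & INITIALISED))              // fast path: already constructed?
    {
        if (__cxa_guard_acquire(&widget_guard))     // returns 1 if we must initialise
        {
            try
            {
                new (widget_storage) Widget();      // run the constructor exactly once
                __cxa_guard_release(&widget_guard); // set INITIALISED, drop the lock
            }
            catch (...)
            {
                __cxa_guard_abort(&widget_guard);   // drop the lock, stay uninitialised
                throw;
            }
        }
    }
    return *reinterpret_cast<Widget *>(widget_storage);
}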


@@ -99,40 +99,21 @@ void* operator new(size_t size)
__attribute__((weak))
void* operator new(size_t size, const std::nothrow_t &) throw()
{
-if (0 == size)
-{
-size = 1;
-}
-void *mem = malloc(size);
-while (0 == mem)
-{
-new_handler h = std::get_new_handler();
-if (0 != h)
-{
-try
-{
-h();
-}
-catch (...)
-{
-// nothrow operator new should return NULL in case of
-// std::bad_alloc exception in new handler
-return NULL;
-}
-}
-else
-{
-return NULL;
-}
-mem = malloc(size);
-}
-return mem;
+try {
+return :: operator new(size);
+} catch (...) {
+// nothrow operator new should return NULL in case of
+// std::bad_alloc exception in new handler
+return NULL;
+}
}
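A short usage sketch (not part of the diff) of what the nothrow overload provides to callers: with std::nothrow, allocation failure comes back as a null pointer rather than a std::bad_alloc exception.

#include <new>
#include <cstdio>

int main()
{
    int *p = new (std::nothrow) int;   // routed through operator new(size, nothrow)
    if (p == NULL)
    {
        std::puts("allocation failed"); // no exception to catch
        return 1;
    }
    delete p;
    return 0;
}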
__attribute__((weak))
void operator delete(void * ptr)
#if __cplusplus < 201000L
throw()
#endif
{
free(ptr);
}
@@ -140,13 +121,32 @@ void operator delete(void * ptr)
__attribute__((weak))
void * operator new[](size_t size)
#if __cplusplus < 201000L
throw(std::bad_alloc)
#endif
{
return ::operator new(size);
}
__attribute__((weak))
-void operator delete[](void * ptr) throw()
+void * operator new[](size_t size, const std::nothrow_t &) throw()
+{
+try {
+return ::operator new[](size);
+} catch (...) {
+// nothrow operator new should return NULL in case of
+// std::bad_alloc exception in new handler
+return NULL;
+}
+}
+__attribute__((weak))
+void operator delete[](void * ptr)
+#if __cplusplus < 201000L
+throw()
+#endif
{
::operator delete(ptr);
}
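Finally, a hedged sketch (standard-library behaviour, not code from this diff) of the new-handler protocol that the removed hand-written loop implemented and that ::operator new still performs: on failure the installed handler runs and the allocation is retried, and a handler that throws std::bad_alloc makes the nothrow overloads above return NULL.

#include <new>

static void give_up()
{
    // A new handler must free memory, throw std::bad_alloc, or terminate.
    throw std::bad_alloc();
}

int main()
{
    std::set_new_handler(give_up);
    // NULL only if the allocation fails and give_up() then throws.
    void *p = ::operator new(sizeof(int), std::nothrow);
    ::operator delete(p);   // deleting a null pointer is a no-op
    return 0;
}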