Import libcompiler_rt into HEAD and add Makefiles.

Obtained from:	user/ed/compiler-rt
Ed Schouten 2010-11-11 15:13:11 +00:00
commit a3cf0ef5a2
210 changed files with 12568 additions and 0 deletions


@@ -0,0 +1,59 @@
/*
* Block.h
*
* Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _BLOCK_H_
#define _BLOCK_H_
#if !defined(BLOCK_EXPORT)
# if defined(__cplusplus)
# define BLOCK_EXPORT extern "C"
# else
# define BLOCK_EXPORT extern
# endif
#endif
#if defined(__cplusplus)
extern "C" {
#endif
/* Create a heap based copy of a Block or simply add a reference to an existing one.
* This must be paired with Block_release to recover memory, even when running
* under Objective-C Garbage Collection.
*/
BLOCK_EXPORT void *_Block_copy(const void *aBlock);
/* Lose the reference, and if heap based and last reference, recover the memory. */
BLOCK_EXPORT void _Block_release(const void *aBlock);
#if defined(__cplusplus)
}
#endif
/* Type correct macros. */
#define Block_copy(...) ((__typeof(__VA_ARGS__))_Block_copy((const void *)(__VA_ARGS__)))
#define Block_release(...) _Block_release((const void *)(__VA_ARGS__))
#endif
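
A minimal usage sketch of this API (not part of the header), assuming a blocks-capable compiler such as clang with -fblocks and linking against this runtime; the block and variable names are purely illustrative:

#include <Block.h>

int main(void) {
    int base = 40;
    int (^add)(int) = ^(int x) { return base + x; };  /* stack block */
    int (^heapAdd)(int) = Block_copy(add);            /* heap copy via the type-correct macro */
    int r = heapAdd(2);                               /* r == 42 */
    Block_release(heapAdd);                           /* balance the copy */
    return r == 42 ? 0 : 1;
}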


@@ -0,0 +1,179 @@
/*
* Block_private.h
*
* Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _BLOCK_PRIVATE_H_
#define _BLOCK_PRIVATE_H_
#if !defined(BLOCK_EXPORT)
# if defined(__cplusplus)
# define BLOCK_EXPORT extern "C"
# else
# define BLOCK_EXPORT extern
# endif
#endif
#ifndef _MSC_VER
#include <stdbool.h>
#else
/* MSVC doesn't have <stdbool.h>. Compensate. */
typedef char bool;
#define true (bool)1
#define false (bool)0
#endif
#if defined(__cplusplus)
extern "C" {
#endif
enum {
BLOCK_REFCOUNT_MASK = (0xffff),
BLOCK_NEEDS_FREE = (1 << 24),
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CTOR = (1 << 26), /* Helpers have C++ code. */
BLOCK_IS_GC = (1 << 27),
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_HAS_DESCRIPTOR = (1 << 29)
};
/* Revised new layout. */
struct Block_descriptor {
unsigned long int reserved;
unsigned long int size;
void (*copy)(void *dst, void *src);
void (*dispose)(void *);
};
struct Block_layout {
void *isa;
int flags;
int reserved;
void (*invoke)(void *, ...);
struct Block_descriptor *descriptor;
/* Imported variables. */
};
struct Block_byref {
void *isa;
struct Block_byref *forwarding;
int flags; /* refcount; */
int size;
void (*byref_keep)(struct Block_byref *dst, struct Block_byref *src);
void (*byref_destroy)(struct Block_byref *);
/* long shared[0]; */
};
struct Block_byref_header {
void *isa;
struct Block_byref *forwarding;
int flags;
int size;
};
/* Runtime support functions used by compiler when generating copy/dispose helpers. */
enum {
/* See function implementation for a more complete description of these fields and combinations */
BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), block, ... */
BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block variable */
BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy helpers */
BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose support routines. */
};
/* Runtime entry point called by compiler when assigning objects inside copy helper routines */
BLOCK_EXPORT void _Block_object_assign(void *destAddr, const void *object, const int flags);
/* BLOCK_FIELD_IS_BYREF is only used from within block copy helpers */
/* runtime entry point called by the compiler when disposing of objects inside dispose helper routine */
BLOCK_EXPORT void _Block_object_dispose(const void *object, const int flags);
/* Other support functions */
/* Runtime entry to get total size of a closure */
BLOCK_EXPORT unsigned long int Block_size(void *block_basic);
/* the raw data space for runtime classes for blocks */
/* class+meta used for stack, malloc, and collectable based blocks */
BLOCK_EXPORT void * _NSConcreteStackBlock[32];
BLOCK_EXPORT void * _NSConcreteMallocBlock[32];
BLOCK_EXPORT void * _NSConcreteAutoBlock[32];
BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32];
BLOCK_EXPORT void * _NSConcreteGlobalBlock[32];
BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32];
/* the intercept routines that must be used under GC */
BLOCK_EXPORT void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
void (*setHasRefcount)(const void *, const bool),
void (*gc_assign_strong)(void *, void **),
void (*gc_assign_weak)(const void *, void *),
void (*gc_memmove)(void *, void *, unsigned long));
/* earlier version, now simply transitional */
BLOCK_EXPORT void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
void (*setHasRefcount)(const void *, const bool),
void (*gc_assign_strong)(void *, void **),
void (*gc_assign_weak)(const void *, void *));
BLOCK_EXPORT void _Block_use_RR( void (*retain)(const void *),
void (*release)(const void *));
/* make a collectable GC heap based Block. Not useful under non-GC. */
BLOCK_EXPORT void *_Block_copy_collectable(const void *aBlock);
/* thread-unsafe diagnostic */
BLOCK_EXPORT const char *_Block_dump(const void *block);
/* Obsolete */
/* first layout */
struct Block_basic {
void *isa;
int Block_flags; /* int32_t */
int Block_size; /* XXX should be packed into Block_flags */
void (*Block_invoke)(void *);
void (*Block_copy)(void *dst, void *src); /* iff BLOCK_HAS_COPY_DISPOSE */
void (*Block_dispose)(void *); /* iff BLOCK_HAS_COPY_DISPOSE */
/* long params[0]; // where const imports, __block storage references, etc. get laid down */
};
#if defined(__cplusplus)
}
#endif
#endif /* _BLOCK_PRIVATE_H_ */
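
For orientation, here is a sketch (an assumption drawn from the blocks ABI, not text from this header) of what the compiler lowers a "__block int x;" declaration to; the structure tag is hypothetical, and the byref_keep/byref_destroy slots of struct Block_byref are only emitted when the variable needs copy/dispose helpers:

struct _hypothetical_byref_x {
    void *isa;                                 /* NULL while the variable lives on the stack */
    struct _hypothetical_byref_x *forwarding;  /* points at itself until copied to the heap */
    int flags;
    int size;
    int x;                                     /* the __block variable itself */
};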


@@ -0,0 +1,41 @@
/*
* data.c
*
* Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
/********************
NSBlock support
We allocate space and export a symbol to be used as the Class for the on-stack and malloc'ed copies until ObjC arrives on the scene. These data areas are set up by Foundation to link in as real classes post facto.
We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block.
**********************/
void * _NSConcreteStackBlock[32] = { 0 };
void * _NSConcreteMallocBlock[32] = { 0 };
void * _NSConcreteAutoBlock[32] = { 0 };
void * _NSConcreteFinalizingBlock[32] = { 0 };
void * _NSConcreteGlobalBlock[32] = { 0 };
void * _NSConcreteWeakBlockVariable[32] = { 0 };
void _Block_copy_error(void) {
}


@@ -0,0 +1,700 @@
/*
* runtime.c
*
* Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
* to any person obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "config.h"
#ifdef HAVE_AVAILABILITY_MACROS_H
#include <AvailabilityMacros.h>
#endif /* HAVE_AVAILABILITY_MACROS_H */
#ifdef HAVE_TARGET_CONDITIONALS_H
#include <TargetConditionals.h>
#endif /* HAVE_TARGET_CONDITIONALS_H */
#if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)
#ifdef HAVE_LIBKERN_OSATOMIC_H
#include <libkern/OSAtomic.h>
#endif /* HAVE_LIBKERN_OSATOMIC_H */
#elif defined(__WIN32__) || defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
/* fixme barrier is overkill -- see objc-os.h */
long original = InterlockedCompareExchange(dst, newl, oldl);
return (original == oldl);
}
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
/* fixme barrier is overkill -- see objc-os.h */
int original = InterlockedCompareExchange(dst, newi, oldi);
return (original == oldi);
}
/*
* Check to see if the GCC atomic built-ins are available. If we're on
* a 64-bit system, make sure we have an 8-byte atomic function
* available.
*
*/
#elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
return __sync_bool_compare_and_swap(dst, oldl, newl);
}
static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
return __sync_bool_compare_and_swap(dst, oldi, newi);
}
#else
#error unknown atomic compare-and-swap primitive
#endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
/*
* Globals:
*/
static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;
static const int WANTS_ONE = (1 << 16);
static bool isGC = false;
/*
* Internal Utilities:
*/
#if 0
static unsigned long int latching_incr_long(unsigned long int *where) {
while (1) {
unsigned long int old_value = *(volatile unsigned long int *)where;
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
return BLOCK_REFCOUNT_MASK;
}
if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
return old_value+1;
}
}
}
#endif /* if 0 */
static int latching_incr_int(int *where) {
while (1) {
int old_value = *(volatile int *)where;
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
return BLOCK_REFCOUNT_MASK;
}
if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
return old_value+1;
}
}
}
#if 0
static int latching_decr_long(unsigned long int *where) {
while (1) {
unsigned long int old_value = *(volatile unsigned long int *)where;
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
return BLOCK_REFCOUNT_MASK;
}
if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
return 0;
}
if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
return old_value-1;
}
}
}
#endif /* if 0 */
static int latching_decr_int(int *where) {
while (1) {
int old_value = *(volatile int *)where;
if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
return BLOCK_REFCOUNT_MASK;
}
if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
return 0;
}
if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
return old_value-1;
}
}
}
/*
* GC support stub routines:
*/
#if 0
#pragma mark GC Support Routines
#endif /* if 0 */
static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
return malloc(size);
}
static void _Block_assign_default(void *value, void **destptr) {
*destptr = value;
}
static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}
static void _Block_do_nothing(const void *aBlock) { }
static void _Block_retain_object_default(const void *ptr) {
if (!ptr) return;
}
static void _Block_release_object_default(const void *ptr) {
if (!ptr) return;
}
static void _Block_assign_weak_default(const void *ptr, void *dest) {
*(void **)dest = (void *)ptr;
}
static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
memmove(dst, src, (size_t)size);
}
static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
void **destp = (void **)dest;
void **srcp = (void **)src;
while (size) {
_Block_assign_default(*srcp, destp);
destp++;
srcp++;
size -= sizeof(void *);
}
}
/*
* GC support callout functions - initially set to stub routines:
*/
static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
/*
* GC support SPI functions - called from ObjC runtime and CoreFoundation:
*/
/* Public SPI
* Called from objc-auto to turn on GC.
* version 3, 4 arg, but changed 1st arg
*/
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
void (*setHasRefcount)(const void *, const bool),
void (*gc_assign)(void *, void **),
void (*gc_assign_weak)(const void *, void *),
void (*gc_memmove)(void *, void *, unsigned long)) {
isGC = true;
_Block_allocator = alloc;
_Block_deallocator = _Block_do_nothing;
_Block_assign = gc_assign;
_Block_copy_flag = BLOCK_IS_GC;
_Block_copy_class = _NSConcreteAutoBlock;
/* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
_Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
_Block_setHasRefcount = setHasRefcount;
_Byref_flag_initial_value = BLOCK_IS_GC; // no refcount
_Block_retain_object = _Block_do_nothing;
_Block_release_object = _Block_do_nothing;
_Block_assign_weak = gc_assign_weak;
_Block_memmove = gc_memmove;
}
/* transitional */
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
void (*setHasRefcount)(const void *, const bool),
void (*gc_assign)(void *, void **),
void (*gc_assign_weak)(const void *, void *)) {
/* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
_Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}
/*
* Called from objc-auto to alternatively turn on retain/release.
* Prior to this the only "object" support we can provide is for those
* super special objects that live in libSystem, namely dispatch queues.
* Blocks and Block_byrefs have their own special entry points.
*
*/
void _Block_use_RR( void (*retain)(const void *),
void (*release)(const void *)) {
_Block_retain_object = retain;
_Block_release_object = release;
}
/*
* Internal Support routines for copying:
*/
#if 0
#pragma mark Copy/Release support
#endif /* if 0 */
/* Copy, or bump refcount, of a block. If really copying, call the copy helper if present. */
static void *_Block_copy_internal(const void *arg, const int flags) {
struct Block_layout *aBlock;
const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;
//printf("_Block_copy_internal(%p, %x)\n", arg, flags);
if (!arg) return NULL;
// The following would be better done as a switch statement
aBlock = (struct Block_layout *)arg;
if (aBlock->flags & BLOCK_NEEDS_FREE) {
// latches on high
latching_incr_int(&aBlock->flags);
return aBlock;
}
else if (aBlock->flags & BLOCK_IS_GC) {
// GC refcounting is expensive so do most refcounting here.
if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
// Tell collector to hang on to this - it will bump the GC refcount version
_Block_setHasRefcount(aBlock, true);
}
return aBlock;
}
else if (aBlock->flags & BLOCK_IS_GLOBAL) {
return aBlock;
}
// It's a stack block. Make a copy.
if (!isGC) {
struct Block_layout *result = malloc(aBlock->descriptor->size);
if (!result) return (void *)0;
memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
// reset refcount
result->flags &= ~(BLOCK_REFCOUNT_MASK); // XXX not needed
result->flags |= BLOCK_NEEDS_FREE | 1;
result->isa = _NSConcreteMallocBlock;
if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
//printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
(*aBlock->descriptor->copy)(result, aBlock); // do fixup
}
return result;
}
else {
// Under GC want allocation with refcount 1 so we ask for "true" if wantsOne
// This allows the copy helper routines to make non-refcounted block copies under GC
unsigned long int flags = aBlock->flags;
bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
if (!result) return (void *)0;
memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
// reset refcount
// if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK); // XXX not needed
if (wantsOne)
flags |= BLOCK_IS_GC | 1;
else
flags |= BLOCK_IS_GC;
result->flags = flags;
if (flags & BLOCK_HAS_COPY_DISPOSE) {
//printf("calling block copy helper...\n");
(*aBlock->descriptor->copy)(result, aBlock); // do fixup
}
if (hasCTOR) {
result->isa = _NSConcreteFinalizingBlock;
}
else {
result->isa = _NSConcreteAutoBlock;
}
return result;
}
}
/*
* Runtime entry points for maintaining the sharing knowledge of byref data blocks.
*
* A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
* Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
* We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
* Otherwise we need to copy it and update the stack forwarding pointer
* XXX We need to account for weak/nonretained read-write barriers.
*/
static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
struct Block_byref **destp = (struct Block_byref **)dest;
struct Block_byref *src = (struct Block_byref *)arg;
//printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
//printf("src dump: %s\n", _Block_byref_dump(src));
if (src->forwarding->flags & BLOCK_IS_GC) {
; // don't need to do any more work
}
else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
//printf("making copy\n");
// src points to stack
bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
// if it's weak, ask for an object (only matters under GC)
struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
src->forwarding = copy; // patch stack to point to heap copy
copy->size = src->size;
if (isWeak) {
copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning
}
if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
// Trust copy helper to copy everything of interest
// If more than one field shows up in a byref block this is wrong XXX
copy->byref_keep = src->byref_keep;
copy->byref_destroy = src->byref_destroy;
(*src->byref_keep)(copy, src);
}
else {
// just bits. Blast 'em using _Block_memmove in case they're __strong
_Block_memmove(
(void *)&copy->byref_keep,
(void *)&src->byref_keep,
src->size - sizeof(struct Block_byref_header));
}
}
// already copied to heap
else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
latching_incr_int(&src->forwarding->flags);
}
// assign byref data block pointer into new Block
_Block_assign(src->forwarding, (void **)destp);
}
// Old compiler SPI
static void _Block_byref_release(const void *arg) {
struct Block_byref *shared_struct = (struct Block_byref *)arg;
int refcount;
// dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
shared_struct = shared_struct->forwarding;
//printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
// To support C++ destructors under GC we arrange for there to be a finalizer for this
// by using an isa that directs the code to a finalizer that calls the byref_destroy method.
if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
return; // stack or GC or global
}
refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
if (refcount <= 0) {
printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
}
else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
//printf("disposing of heap based byref block\n");
if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
//printf("calling out to helper\n");
(*shared_struct->byref_destroy)(shared_struct);
}
_Block_deallocator((struct Block_layout *)shared_struct);
}
}
/*
*
* API supporting SPI
* _Block_copy, _Block_release, and (old) _Block_destroy
*
*/
#if 0
#pragma mark SPI/API
#endif /* if 0 */
void *_Block_copy(const void *arg) {
return _Block_copy_internal(arg, WANTS_ONE);
}
// API entry point to release a copied Block
void _Block_release(void *arg) {
struct Block_layout *aBlock = (struct Block_layout *)arg;
int32_t newCount;
if (!aBlock) return;
newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
if (newCount > 0) return;
// Hit zero
if (aBlock->flags & BLOCK_IS_GC) {
// Tell GC we no longer have our own refcounts. GC will decr its refcount
// and unless someone has done a CFRetain or marked it uncollectable it will
// now be subject to GC reclamation.
_Block_setHasRefcount(aBlock, false);
}
else if (aBlock->flags & BLOCK_NEEDS_FREE) {
if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)(*aBlock->descriptor->dispose)(aBlock);
_Block_deallocator(aBlock);
}
else if (aBlock->flags & BLOCK_IS_GLOBAL) {
;
}
else {
printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
}
}
// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
static void _Block_destroy(const void *arg) {
struct Block_layout *aBlock;
if (!arg) return;
aBlock = (struct Block_layout *)arg;
if (aBlock->flags & BLOCK_IS_GC) {
// assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
return; // ignore, we are being called because of a DTOR
}
_Block_release(aBlock);
}
/*
*
* SPI used by other layers
*
*/
// SPI, also internal. Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
return _Block_copy_internal(aBlock, 0);
}
// SPI
unsigned long int Block_size(void *arg) {
return ((struct Block_layout *)arg)->descriptor->size;
}
#if 0
#pragma mark Compiler SPI entry points
#endif /* if 0 */
/*******************************************************
Entry points used by the compiler - the real API!
A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables
In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
The flags parameter of _Block_object_assign and _Block_object_dispose is set to
* BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
* BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
* BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16).
So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities:
__block id 128+3
__weak block id 128+3+16
__block (^Block) 128+7
__weak __block (^Block) 128+7+16
The implementation of the two routines would be improved by switch statements enumerating the eight cases.
********************************************************/
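#if 0
/* Illustrative sketch only, not part of this file: for a Block that captures an
 * Objective-C object pointer, the compiler would synthesize copy/dispose helpers
 * roughly like the ones below, passing BLOCK_FIELD_IS_OBJECT (3). The struct and
 * function names here are hypothetical. */
struct __example_block {
    struct Block_layout base;
    void *captured; /* the imported object variable */
};
static void __example_block_copy_helper(void *dst, void *src) {
    _Block_object_assign(&((struct __example_block *)dst)->captured,
                         ((struct __example_block *)src)->captured,
                         BLOCK_FIELD_IS_OBJECT);
}
static void __example_block_dispose_helper(void *src) {
    _Block_object_dispose(((struct __example_block *)src)->captured,
                          BLOCK_FIELD_IS_OBJECT);
}
#endif /* illustrative sketch */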
/*
* When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
* to do the assignment.
*/
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
//printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
_Block_assign_weak(object, destAddr);
}
else {
// do *not* retain or *copy* __block variables whatever they are
_Block_assign((void *)object, destAddr);
}
}
else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF) {
// copying a __block reference from the stack Block to the heap
// flags will indicate if it holds a __weak reference and needs a special isa
_Block_byref_assign_copy(destAddr, object, flags);
}
// (this test must be before next one)
else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
// copying a Block declared variable from the stack Block to the heap
_Block_assign(_Block_copy_internal(object, flags), destAddr);
}
// (this test must be after previous one)
else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
//printf("retaining object at %p\n", object);
_Block_retain_object(object);
//printf("done retaining object at %p\n", object);
_Block_assign((void *)object, destAddr);
}
}
// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
//printf("_Block_object_dispose(%p, %x)\n", object, flags);
if (flags & BLOCK_FIELD_IS_BYREF) {
// get rid of the __block data structure held in a Block
_Block_byref_release(object);
}
else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
// get rid of a referenced Block held by this Block
// (ignore __block Block variables, compiler doesn't need to call us)
_Block_destroy(object);
}
else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
// get rid of a referenced object held by this Block
// (ignore __block object variables, compiler doesn't need to call us)
_Block_release_object(object);
}
}
/*
* Debugging support:
*/
#if 0
#pragma mark Debugging
#endif /* if 0 */
const char *_Block_dump(const void *block) {
struct Block_layout *closure = (struct Block_layout *)block;
static char buffer[512];
char *cp = buffer;
if (closure == NULL) {
sprintf(cp, "NULL passed to _Block_dump\n");
return buffer;
}
if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
exit(1);
}
cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
if (closure->isa == NULL) {
cp += sprintf(cp, "isa: NULL\n");
}
else if (closure->isa == _NSConcreteStackBlock) {
cp += sprintf(cp, "isa: stack Block\n");
}
else if (closure->isa == _NSConcreteMallocBlock) {
cp += sprintf(cp, "isa: malloc heap Block\n");
}
else if (closure->isa == _NSConcreteAutoBlock) {
cp += sprintf(cp, "isa: GC heap Block\n");
}
else if (closure->isa == _NSConcreteGlobalBlock) {
cp += sprintf(cp, "isa: global Block\n");
}
else if (closure->isa == _NSConcreteFinalizingBlock) {
cp += sprintf(cp, "isa: finalizing Block\n");
}
else {
cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
}
cp += sprintf(cp, "flags:");
if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
cp += sprintf(cp, " HASDESCRIPTOR");
}
if (closure->flags & BLOCK_NEEDS_FREE) {
cp += sprintf(cp, " FREEME");
}
if (closure->flags & BLOCK_IS_GC) {
cp += sprintf(cp, " ISGC");
}
if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
cp += sprintf(cp, " HASHELP");
}
if (closure->flags & BLOCK_HAS_CTOR) {
cp += sprintf(cp, " HASCTOR");
}
cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
{
struct Block_descriptor *dp = closure->descriptor;
cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);
if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
}
}
return buffer;
}
const char *_Block_byref_dump(struct Block_byref *src) {
static char buffer[256];
char *cp = buffer;
cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
cp += sprintf(cp, " forwarding: %p\n", (void *)src->forwarding);
cp += sprintf(cp, " flags: 0x%x\n", src->flags);
cp += sprintf(cp, " size: %d\n", src->size);
if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
cp += sprintf(cp, " copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
cp += sprintf(cp, " dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
}
return buffer;
}


@@ -0,0 +1,21 @@
This file is a partial list of people who have contributed to the LLVM/CompilerRT
project. If you have contributed a patch or made some other contribution to
LLVM/CompilerRT, please submit a patch to this file to add yourself, and it will be
done!
The list is sorted by surname and formatted to allow easy grepping and
beautification by scripts. The fields are: name (N), email (E), web-address
(W), PGP key ID and fingerprint (P), description (D), and snail-mail address
(S).
N: Craig van Vliet
E: cvanvliet@auroraux.org
W: http://www.auroraux.org
D: Code style and Readability fixes.
N: Edward O'Callaghan
E: eocallaghan@auroraux.org
W: http://www.auroraux.org
D: CMake'ify Compiler-RT build system
D: Maintain Solaris & AuroraUX ports of Compiler-RT


@@ -0,0 +1,63 @@
==============================================================================
LLVM Release License
==============================================================================
University of Illinois/NCSA
Open Source License
Copyright (c) 2003-2009 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
LLVM Team
University of Illinois at Urbana-Champaign
http://llvm.org
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal with
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.
* Neither the names of the LLVM Team, University of Illinois at
Urbana-Champaign, nor the names of its contributors may be used to
endorse or promote products derived from this Software without specific
prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
SOFTWARE.
==============================================================================
Copyrights and Licenses for Third Party Software Distributed with LLVM:
==============================================================================
The LLVM software contains code written by third parties. Such software will
have its own individual LICENSE.TXT file in the directory in which it appears.
This file will describe the copyrights, license, and restrictions which apply
to that code.
The disclaimer of warranty in the University of Illinois Open Source License
applies to all code in the LLVM Distribution, and nothing in any of the
other licenses gives permission to use the names of the LLVM Team or the
University of Illinois to endorse or promote products derived from this
Software.
The following pieces of software have additional or alternate copyrights,
licenses, and/or restrictions:
Program Directory
------- ---------


@@ -0,0 +1,330 @@
Compiler-RT
================================
This directory and its subdirectories contain source code for the compiler
support routines.
Compiler-RT is open source software. You may freely distribute it under the
terms of the license agreement found in LICENSE.txt.
================================
This is a replacement library for libgcc. Each function is contained
in its own file. Each function has a corresponding unit test under
test/Unit.
A rudimentary script to test each file is in the file called
test/Unit/test.
Here is the specification for this library:
http://gcc.gnu.org/onlinedocs/gccint/Libgcc.html#Libgcc
Here is a synopsis of the contents of this library:
typedef int si_int;
typedef unsigned su_int;
typedef long long di_int;
typedef unsigned long long du_int;
// Integral bit manipulation
di_int __ashldi3(di_int a, si_int b); // a << b
ti_int __ashlti3(ti_int a, si_int b); // a << b
di_int __ashrdi3(di_int a, si_int b); // a >> b arithmetic (sign fill)
ti_int __ashrti3(ti_int a, si_int b); // a >> b arithmetic (sign fill)
di_int __lshrdi3(di_int a, si_int b); // a >> b logical (zero fill)
ti_int __lshrti3(ti_int a, si_int b); // a >> b logical (zero fill)
si_int __clzsi2(si_int a); // count leading zeros
si_int __clzdi2(di_int a); // count leading zeros
si_int __clzti2(ti_int a); // count leading zeros
si_int __ctzsi2(si_int a); // count trailing zeros
si_int __ctzdi2(di_int a); // count trailing zeros
si_int __ctzti2(ti_int a); // count trailing zeros
si_int __ffsdi2(di_int a); // find least significant 1 bit
si_int __ffsti2(ti_int a); // find least significant 1 bit
si_int __paritysi2(si_int a); // bit parity
si_int __paritydi2(di_int a); // bit parity
si_int __parityti2(ti_int a); // bit parity
si_int __popcountsi2(si_int a); // bit population
si_int __popcountdi2(di_int a); // bit population
si_int __popcountti2(ti_int a); // bit population
uint32_t __bswapsi2(uint32_t a); // a byteswapped, arm only
uint64_t __bswapdi2(uint64_t a); // a byteswapped, arm only
// Integral arithmetic
di_int __negdi2 (di_int a); // -a
ti_int __negti2 (ti_int a); // -a
di_int __muldi3 (di_int a, di_int b); // a * b
ti_int __multi3 (ti_int a, ti_int b); // a * b
si_int __divsi3 (si_int a, si_int b); // a / b signed
di_int __divdi3 (di_int a, di_int b); // a / b signed
ti_int __divti3 (ti_int a, ti_int b); // a / b signed
su_int __udivsi3 (su_int n, su_int d); // a / b unsigned
du_int __udivdi3 (du_int a, du_int b); // a / b unsigned
tu_int __udivti3 (tu_int a, tu_int b); // a / b unsigned
si_int __modsi3 (si_int a, si_int b); // a % b signed
di_int __moddi3 (di_int a, di_int b); // a % b signed
ti_int __modti3 (ti_int a, ti_int b); // a % b signed
su_int __umodsi3 (su_int a, su_int b); // a % b unsigned
du_int __umoddi3 (du_int a, du_int b); // a % b unsigned
tu_int __umodti3 (tu_int a, tu_int b); // a % b unsigned
du_int __udivmoddi4(du_int a, du_int b, du_int* rem); // a / b, *rem = a % b
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem); // a / b, *rem = a % b
// Integral arithmetic with trapping overflow
si_int __absvsi2(si_int a); // abs(a)
di_int __absvdi2(di_int a); // abs(a)
ti_int __absvti2(ti_int a); // abs(a)
si_int __negvsi2(si_int a); // -a
di_int __negvdi2(di_int a); // -a
ti_int __negvti2(ti_int a); // -a
si_int __addvsi3(si_int a, si_int b); // a + b
di_int __addvdi3(di_int a, di_int b); // a + b
ti_int __addvti3(ti_int a, ti_int b); // a + b
si_int __subvsi3(si_int a, si_int b); // a - b
di_int __subvdi3(di_int a, di_int b); // a - b
ti_int __subvti3(ti_int a, ti_int b); // a - b
si_int __mulvsi3(si_int a, si_int b); // a * b
di_int __mulvdi3(di_int a, di_int b); // a * b
ti_int __mulvti3(ti_int a, ti_int b); // a * b
// Integral comparison: a < b -> 0
// a == b -> 1
// a > b -> 2
si_int __cmpdi2 (di_int a, di_int b);
si_int __cmpti2 (ti_int a, ti_int b);
si_int __ucmpdi2(du_int a, du_int b);
si_int __ucmpti2(tu_int a, tu_int b);
// Integral / floating point conversion
di_int __fixsfdi( float a);
di_int __fixdfdi( double a);
di_int __fixxfdi(long double a);
ti_int __fixsfti( float a);
ti_int __fixdfti( double a);
ti_int __fixxfti(long double a);
uint64_t __fixtfdi(long double input); // ppc only, doesn't match documentation
su_int __fixunssfsi( float a);
su_int __fixunsdfsi( double a);
su_int __fixunsxfsi(long double a);
du_int __fixunssfdi( float a);
du_int __fixunsdfdi( double a);
du_int __fixunsxfdi(long double a);
tu_int __fixunssfti( float a);
tu_int __fixunsdfti( double a);
tu_int __fixunsxfti(long double a);
uint64_t __fixunstfdi(long double input); // ppc only
float __floatdisf(di_int a);
double __floatdidf(di_int a);
long double __floatdixf(di_int a);
long double __floatditf(int64_t a); // ppc only
float __floattisf(ti_int a);
double __floattidf(ti_int a);
long double __floattixf(ti_int a);
float __floatundisf(du_int a);
double __floatundidf(du_int a);
long double __floatundixf(du_int a);
long double __floatunditf(uint64_t a); // ppc only
float __floatuntisf(tu_int a);
double __floatuntidf(tu_int a);
long double __floatuntixf(tu_int a);
// Floating point raised to integer power
float __powisf2( float a, si_int b); // a ^ b
double __powidf2( double a, si_int b); // a ^ b
long double __powixf2(long double a, si_int b); // a ^ b
long double __powitf2(long double a, si_int b); // ppc only, a ^ b
// Complex arithmetic
// (a + ib) * (c + id)
float _Complex __mulsc3( float a, float b, float c, float d);
double _Complex __muldc3(double a, double b, double c, double d);
long double _Complex __mulxc3(long double a, long double b,
long double c, long double d);
long double _Complex __multc3(long double a, long double b,
long double c, long double d); // ppc only
// (a + ib) / (c + id)
float _Complex __divsc3( float a, float b, float c, float d);
double _Complex __divdc3(double a, double b, double c, double d);
long double _Complex __divxc3(long double a, long double b,
long double c, long double d);
long double _Complex __divtc3(long double a, long double b,
long double c, long double d); // ppc only
// Runtime support
// __clear_cache() is used to tell process that new instructions have been
// written to an address range. Necessary on processors that do not have
// a unified instruction and data cache.
void __clear_cache(void* start, void* end);
// __enable_execute_stack() is used with nested functions when a trampoline
// function is written onto the stack and that page range needs to be made
// executable.
void __enable_execute_stack(void* addr);
// __gcc_personality_v0() is normally only called by the system unwinder.
// C code (as opposed to C++) normally does not need a personality function
// because there are no catch clauses or destructors to be run. But there
// is a C language extension __attribute__((cleanup(func))) which marks local
// variables as needing the cleanup function "func" to be run when the
// variable goes out of scope. That includes when an exception is thrown,
// so a personality handler is needed.
_Unwind_Reason_Code __gcc_personality_v0(int version, _Unwind_Action actions,
uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
_Unwind_Context_t context);
// for use with some implementations of assert() in <assert.h>
void __eprintf(const char* format, const char* assertion_expression,
const char* line, const char* file);
// Power PC specific functions
// There is no C interface to the saveFP/restFP functions. They are helper
// functions called by the prolog and epilog of functions that need to save
// a number of non-volatile floating point registers.
saveFP
restFP
// PowerPC has a standard template for trampoline functions. This function
// generates a custom trampoline function with the specific realFunc
// and localsPtr values.
void __trampoline_setup(uint32_t* trampOnStack, int trampSizeAllocated,
const void* realFunc, void* localsPtr);
// adds two 128-bit double-double precision values ( x + y )
long double __gcc_qadd(long double x, long double y);
// subtracts two 128-bit double-double precision values ( x - y )
long double __gcc_qsub(long double x, long double y);
// multiplies two 128-bit double-double precision values ( x * y )
long double __gcc_qmul(long double x, long double y);
// divides two 128-bit double-double precision values ( x / y )
long double __gcc_qdiv(long double a, long double b);
// ARM specific functions
// There is no C interface to the switch* functions. These helper functions
// are only needed by Thumb1 code for efficient switch table generation.
switch16
switch32
switch8
switchu8
// There is no C interface to the *_vfp_d8_d15_regs functions. They are
// called in the prolog and epilog of Thumb1 functions. When the C++ ABI uses
// SJLJ for exceptions, each function with a catch clause or destructors needs
// to save and restore all registers in its prolog and epilog. But there is
// no way to access vector and high float registers from thumb1 code, so the
// compiler must add call outs to these helper functions in the prolog and
// epilog.
restore_vfp_d8_d15_regs
save_vfp_d8_d15_regs
// Note: long ago ARM processors did not have floating point hardware support.
// Floating point was done in software and floating point parameters were
// passed in integer registers. When hardware support was added for floating
// point, new *vfp functions were added to do the same operations but with
// floating point parameters in floating point registers.
// Undocumented functions
float __addsf3vfp(float a, float b); // Appears to return a + b
double __adddf3vfp(double a, double b); // Appears to return a + b
float __divsf3vfp(float a, float b); // Appears to return a / b
double __divdf3vfp(double a, double b); // Appears to return a / b
int __eqsf2vfp(float a, float b); // Appears to return one
// iff a == b and neither is NaN.
int __eqdf2vfp(double a, double b); // Appears to return one
// iff a == b and neither is NaN.
double __extendsfdf2vfp(float a); // Appears to convert from
// float to double.
int __fixdfsivfp(double a); // Appears to convert from
// double to int.
int __fixsfsivfp(float a); // Appears to convert from
// float to int.
unsigned int __fixunssfsivfp(float a); // Appears to convert from
// float to unsigned int.
unsigned int __fixunsdfsivfp(double a); // Appears to convert from
// double to unsigned int.
double __floatsidfvfp(int a); // Appears to convert from
// int to double.
float __floatsisfvfp(int a); // Appears to convert from
// int to float.
double __floatunssidfvfp(unsigned int a); // Appears to convert from
// unsigned int to double.
float __floatunssisfvfp(unsigned int a); // Appears to convert from
// unsigned int to float.
int __gedf2vfp(double a, double b); // Appears to return __gedf2
// (a >= b)
int __gesf2vfp(float a, float b); // Appears to return __gesf2
// (a >= b)
int __gtdf2vfp(double a, double b); // Appears to return __gtdf2
// (a > b)
int __gtsf2vfp(float a, float b); // Appears to return __gtsf2
// (a > b)
int __ledf2vfp(double a, double b); // Appears to return __ledf2
// (a <= b)
int __lesf2vfp(float a, float b); // Appears to return __lesf2
// (a <= b)
int __ltdf2vfp(double a, double b); // Appears to return __ltdf2
// (a < b)
int __ltsf2vfp(float a, float b); // Appears to return __ltsf2
// (a < b)
double __muldf3vfp(double a, double b); // Appears to return a * b
float __mulsf3vfp(float a, float b); // Appears to return a * b
int __nedf2vfp(double a, double b); // Appears to return __nedf2
// (a != b)
double __negdf2vfp(double a); // Appears to return -a
float __negsf2vfp(float a); // Appears to return -a
double __subdf3vfp(double a, double b); // Appears to return a - b
float __subsf3vfp(float a, float b); // Appears to return a - b
float __truncdfsf2vfp(double a); // Appears to convert from
// double to float.
int __unorddf2vfp(double a, double b); // Appears to return __unorddf2
int __unordsf2vfp(float a, float b); // Appears to return __unordsf2
Preconditions are listed for each function at the definition when there are any.
Any preconditions reflect the specification at
http://gcc.gnu.org/onlinedocs/gccint/Libgcc.html#Libgcc.
Assumptions are listed in "int_lib.h", and in individual files. Where possible
assumptions are checked at compile time.
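
To make the synopsis concrete, here is a small, hedged example of calling a few of these routines directly (normally the compiler emits such calls itself); the declarations simply restate the typedefs and prototypes from the synopsis above:

#include <stdio.h>

typedef int si_int;
typedef long long di_int;
typedef unsigned long long du_int;

di_int __ashldi3(di_int a, si_int b);
du_int __udivmoddi4(du_int a, du_int b, du_int *rem);
si_int __cmpdi2(di_int a, di_int b);

int main(void) {
    du_int rem;
    printf("%lld\n", __ashldi3(1, 40));                        /* 1 << 40 */
    printf("%llu r %llu\n", __udivmoddi4(100, 7, &rem), rem);  /* 14 r 2 */
    printf("%d\n", (int)__cmpdi2(3, 7));                       /* 0 means a < b */
    return 0;
}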


@@ -0,0 +1,30 @@
/*===-- absvdi2.c - Implement __absvdi2 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
*===----------------------------------------------------------------------===
*
* This file implements __absvdi2 for the compiler_rt library.
*
*===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdlib.h>
/* Returns: absolute value */
/* Effects: aborts if abs(x) < 0 */
di_int
__absvdi2(di_int a)
{
const int N = (int)(sizeof(di_int) * CHAR_BIT);
if (a == ((di_int)1 << (N-1)))
compilerrt_abort();
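/* With an arithmetic right shift, t is all ones when a is negative and zero */
/* otherwise, so (a ^ t) - t negates a exactly when it is negative. */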
const di_int t = a >> (N - 1);
return (a ^ t) - t;
}


@@ -0,0 +1,30 @@
/* ===-- absvsi2.c - Implement __absvsi2 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __absvsi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdlib.h>
/* Returns: absolute value */
/* Effects: aborts if abs(x) < 0 */
si_int
__absvsi2(si_int a)
{
const int N = (int)(sizeof(si_int) * CHAR_BIT);
if (a == (1 << (N-1)))
compilerrt_abort();
const si_int t = a >> (N - 1);
return (a ^ t) - t;
}


@@ -0,0 +1,34 @@
/* ===-- absvti2.c - Implement __absvti2 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __absvti2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
#include <stdlib.h>
/* Returns: absolute value */
/* Effects: aborts if abs(x) < 0 */
ti_int
__absvti2(ti_int a)
{
const int N = (int)(sizeof(ti_int) * CHAR_BIT);
if (a == ((ti_int)1 << (N-1)))
compilerrt_abort();
const ti_int s = a >> (N - 1);
return (a ^ s) - s;
}
#endif


@@ -0,0 +1,154 @@
//===-- lib/adddf3.c - Double-precision addition and subtraction --*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements double-precision soft-float addition and subtraction
// with the IEEE-754 default rounding (to nearest, ties to even).
//
//===----------------------------------------------------------------------===//
#define DOUBLE_PRECISION
#include "fp_lib.h"
fp_t __adddf3(fp_t a, fp_t b) {
rep_t aRep = toRep(a);
rep_t bRep = toRep(b);
const rep_t aAbs = aRep & absMask;
const rep_t bAbs = bRep & absMask;
// Detect if a or b is zero, infinity, or NaN.
if (aAbs - 1U >= infRep - 1U || bAbs - 1U >= infRep - 1U) {
// NaN + anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
// anything + NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// +/-infinity + -/+infinity = qNaN
if ((toRep(a) ^ toRep(b)) == signBit) return fromRep(qnanRep);
// +/-infinity + anything remaining = +/- infinity
else return a;
}
// anything remaining + +/-infinity = +/-infinity
if (bAbs == infRep) return b;
// zero + anything = anything
if (!aAbs) {
// but we need to get the sign right for zero + zero
if (!bAbs) return fromRep(toRep(a) & toRep(b));
else return b;
}
// anything + zero = anything
if (!bAbs) return a;
}
// Swap a and b if necessary so that a has the larger absolute value.
if (bAbs > aAbs) {
const rep_t temp = aRep;
aRep = bRep;
bRep = temp;
}
// Extract the exponent and significand from the (possibly swapped) a and b.
int aExponent = aRep >> significandBits & maxExponent;
int bExponent = bRep >> significandBits & maxExponent;
rep_t aSignificand = aRep & significandMask;
rep_t bSignificand = bRep & significandMask;
// Normalize any denormals, and adjust the exponent accordingly.
if (aExponent == 0) aExponent = normalize(&aSignificand);
if (bExponent == 0) bExponent = normalize(&bSignificand);
// The sign of the result is the sign of the larger operand, a. If they
// have opposite signs, we are performing a subtraction; otherwise addition.
const rep_t resultSign = aRep & signBit;
const bool subtraction = (aRep ^ bRep) & signBit;
// Shift the significands to give us round, guard and sticky, and or in the
// implicit significand bit. (If we fell through from the denormal path it
// was already set by normalize( ), but setting it twice won't hurt
// anything.)
aSignificand = (aSignificand | implicitBit) << 3;
bSignificand = (bSignificand | implicitBit) << 3;
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
const int align = aExponent - bExponent;
if (align) {
if (align < typeWidth) {
const bool sticky = bSignificand << (typeWidth - align);
bSignificand = bSignificand >> align | sticky;
} else {
bSignificand = 1; // sticky; b is known to be non-zero.
}
}
if (subtraction) {
aSignificand -= bSignificand;
// If a == -b, return +zero.
if (aSignificand == 0) return fromRep(0);
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const int shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
aSignificand <<= shift;
aExponent -= shift;
}
}
else /* addition */ {
aSignificand += bSignificand;
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
if (aSignificand & implicitBit << 4) {
const bool sticky = aSignificand & 1;
aSignificand = aSignificand >> 1 | sticky;
aExponent += 1;
}
}
// If we have overflowed the type, return +/- infinity:
if (aExponent >= maxExponent) return fromRep(infRep | resultSign);
if (aExponent <= 0) {
// Result is denormal before rounding; the exponent is zero and we
// need to shift the significand.
const int shift = 1 - aExponent;
const bool sticky = aSignificand << (typeWidth - shift);
aSignificand = aSignificand >> shift | sticky;
aExponent = 0;
}
// Low three bits are round, guard, and sticky.
const int roundGuardSticky = aSignificand & 0x7;
// Shift the significand into place, and mask off the implicit bit.
rep_t result = aSignificand >> 3 & significandMask;
// Insert the exponent and sign.
result |= (rep_t)aExponent << significandBits;
result |= resultSign;
// Final rounding. The result may overflow to infinity, but that is the
// correct result in that case.
if (roundGuardSticky > 0x4) result++;
if (roundGuardSticky == 0x4) result += result & 1;
return fromRep(result);
}
// Subtraction; flip the sign bit of b and add.
fp_t __subdf3(fp_t a, fp_t b) {
return __adddf3(a, fromRep(toRep(b) ^ signBit));
}
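
A quick sanity check one might write against these two entry points (the unit tests under test/Unit are the authoritative versions; this sketch assumes fp_t is an IEEE-754 double, which DOUBLE_PRECISION selects):

#include <assert.h>

double __adddf3(double a, double b);
double __subdf3(double a, double b);

int main(void) {
    assert(__adddf3(1.5, 2.25) == 3.75);   /* both values are exact in binary */
    assert(__subdf3(1.5, 2.25) == -0.75);
    return 0;
}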


@@ -0,0 +1,164 @@
//===-- lib/addsf3.c - Single-precision addition and subtraction --*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements single-precision soft-float addition and subtraction
// with the IEEE-754 default rounding (to nearest, ties to even).
//
//===----------------------------------------------------------------------===//
#define SINGLE_PRECISION
#include "fp_lib.h"
fp_t __addsf3(fp_t a, fp_t b) {
rep_t aRep = toRep(a);
rep_t bRep = toRep(b);
const rep_t aAbs = aRep & absMask;
const rep_t bAbs = bRep & absMask;
// Detect if a or b is zero, infinity, or NaN.
if (aAbs - 1U >= infRep - 1U || bAbs - 1U >= infRep - 1U) {
// NaN + anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
// anything + NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// +/-infinity + -/+infinity = qNaN
if ((toRep(a) ^ toRep(b)) == signBit) return fromRep(qnanRep);
// +/-infinity + anything remaining = +/- infinity
else return a;
}
// anything remaining + +/-infinity = +/-infinity
if (bAbs == infRep) return b;
// zero + anything = anything
if (!aAbs) {
// but we need to get the sign right for zero + zero
if (!bAbs) return fromRep(toRep(a) & toRep(b));
else return b;
}
// anything + zero = anything
if (!bAbs) return a;
}
// Swap a and b if necessary so that a has the larger absolute value.
if (bAbs > aAbs) {
const rep_t temp = aRep;
aRep = bRep;
bRep = temp;
}
// Extract the exponent and significand from the (possibly swapped) a and b.
int aExponent = aRep >> significandBits & maxExponent;
int bExponent = bRep >> significandBits & maxExponent;
rep_t aSignificand = aRep & significandMask;
rep_t bSignificand = bRep & significandMask;
// Normalize any denormals, and adjust the exponent accordingly.
if (aExponent == 0) aExponent = normalize(&aSignificand);
if (bExponent == 0) bExponent = normalize(&bSignificand);
// The sign of the result is the sign of the larger operand, a. If they
// have opposite signs, we are performing a subtraction; otherwise addition.
const rep_t resultSign = aRep & signBit;
const bool subtraction = (aRep ^ bRep) & signBit;
// Shift the significands to give us round, guard and sticky, and or in the
// implicit significand bit. (If we fell through from the denormal path it
// was already set by normalize( ), but setting it twice won't hurt
// anything.)
aSignificand = (aSignificand | implicitBit) << 3;
bSignificand = (bSignificand | implicitBit) << 3;
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
const int align = aExponent - bExponent;
if (align) {
if (align < typeWidth) {
const bool sticky = bSignificand << (typeWidth - align);
bSignificand = bSignificand >> align | sticky;
} else {
bSignificand = 1; // sticky; b is known to be non-zero.
}
}
if (subtraction) {
aSignificand -= bSignificand;
// If a == -b, return +zero.
if (aSignificand == 0) return fromRep(0);
        // If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const int shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
aSignificand <<= shift;
aExponent -= shift;
}
}
else /* addition */ {
aSignificand += bSignificand;
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
if (aSignificand & implicitBit << 4) {
const bool sticky = aSignificand & 1;
aSignificand = aSignificand >> 1 | sticky;
aExponent += 1;
}
}
// If we have overflowed the type, return +/- infinity:
if (aExponent >= maxExponent) return fromRep(infRep | resultSign);
if (aExponent <= 0) {
// Result is denormal before rounding; the exponent is zero and we
// need to shift the significand.
const int shift = 1 - aExponent;
const bool sticky = aSignificand << (typeWidth - shift);
aSignificand = aSignificand >> shift | sticky;
aExponent = 0;
}
// Low three bits are round, guard, and sticky.
const int roundGuardSticky = aSignificand & 0x7;
// Shift the significand into place, and mask off the implicit bit.
rep_t result = aSignificand >> 3 & significandMask;
// Insert the exponent and sign.
result |= (rep_t)aExponent << significandBits;
result |= resultSign;
// Final rounding. The result may overflow to infinity, but that is the
// correct result in that case.
if (roundGuardSticky > 0x4) result++;
if (roundGuardSticky == 0x4) result += result & 1;
return fromRep(result);
}
// Subtraction; flip the sign bit of b and add.
fp_t __subsf3(fp_t a, fp_t b) {
return __addsf3(a, fromRep(toRep(b) ^ signBit));
}

View File

@ -0,0 +1,37 @@
/* ===-- addvdi3.c - Implement __addvdi3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __addvdi3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdlib.h>
/* Returns: a + b */
/* Effects: aborts if a + b overflows */
di_int
__addvdi3(di_int a, di_int b)
{
di_int s = a + b;
if (b >= 0)
{
if (s < a)
compilerrt_abort();
}
else
{
if (s >= a)
compilerrt_abort();
}
return s;
}
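
__addvdi3 and its si/ti siblings are the trapping-arithmetic helpers: a compiler building with checked signed arithmetic (for example GCC's -ftrapv) emits calls to them instead of plain adds, and they abort on overflow rather than wrap. The test is the classic one: when b >= 0 the true sum can only overflow upward, in which case the wrapped sum is smaller than a; when b < 0 it can only overflow downward, in which case the wrapped sum is not smaller than a. A sketch of the same predicate, phrased over unsigned arithmetic so the check itself is well defined (the helper name is made up for illustration and assumes two's-complement conversion behaviour):

#include <limits.h>
#include <stdbool.h>

/* Hypothetical helper, not part of compiler_rt: returns true when
 * __addvdi3(a, b) would call compilerrt_abort(). */
static bool addvdi3_would_abort(long long a, long long b)
{
    /* Wrapped two's-complement sum, computed without signed overflow. */
    long long s = (long long)((unsigned long long)a + (unsigned long long)b);
    return (b >= 0) ? (s < a) : (s >= a);   /* same test as __addvdi3 */
}

int main(void)
{
    return (addvdi3_would_abort(LLONG_MAX, 1)         /* overflows upward   */
            && addvdi3_would_abort(LLONG_MIN, -1)     /* overflows downward */
            && !addvdi3_would_abort(LLONG_MAX, LLONG_MIN)) ? 0 : 1;
}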

View File

@ -0,0 +1,37 @@
/* ===-- addvsi3.c - Implement __addvsi3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __addvsi3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdlib.h>
/* Returns: a + b */
/* Effects: aborts if a + b overflows */
si_int
__addvsi3(si_int a, si_int b)
{
si_int s = a + b;
if (b >= 0)
{
if (s < a)
compilerrt_abort();
}
else
{
if (s >= a)
compilerrt_abort();
}
return s;
}

View File

@ -0,0 +1,41 @@
/* ===-- addvti3.c - Implement __addvti3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __addvti3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
#include <stdlib.h>
/* Returns: a + b */
/* Effects: aborts if a + b overflows */
ti_int
__addvti3(ti_int a, ti_int b)
{
ti_int s = a + b;
if (b >= 0)
{
if (s < a)
compilerrt_abort();
}
else
{
if (s >= a)
compilerrt_abort();
}
return s;
}
#endif

View File

@ -0,0 +1,150 @@
/* ===-- apple_versioning.c - Adds versioning symbols for ld ---------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#if __APPLE__
#if __arm__
#define NOT_HERE_BEFORE_10_6(sym)
#elif __ppc__
#define NOT_HERE_BEFORE_10_6(sym) \
extern const char sym##_tmp3 __asm("$ld$hide$os10.3$_" #sym ); \
__attribute__((visibility("default"))) const char sym##_tmp3 = 0; \
extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \
__attribute__((visibility("default"))) const char sym##_tmp4 = 0; \
extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \
__attribute__((visibility("default"))) const char sym##_tmp5 = 0;
#else
#define NOT_HERE_BEFORE_10_6(sym) \
extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym ); \
__attribute__((visibility("default"))) const char sym##_tmp4 = 0; \
extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \
__attribute__((visibility("default"))) const char sym##_tmp5 = 0;
#endif /* __ppc__ */
/* Symbols in libSystem.dylib in 10.6 and later,
* but are in libgcc_s.dylib in earlier versions
*/
NOT_HERE_BEFORE_10_6(__absvdi2)
NOT_HERE_BEFORE_10_6(__absvsi2)
NOT_HERE_BEFORE_10_6(__absvti2)
NOT_HERE_BEFORE_10_6(__addvdi3)
NOT_HERE_BEFORE_10_6(__addvsi3)
NOT_HERE_BEFORE_10_6(__addvti3)
NOT_HERE_BEFORE_10_6(__ashldi3)
NOT_HERE_BEFORE_10_6(__ashlti3)
NOT_HERE_BEFORE_10_6(__ashrdi3)
NOT_HERE_BEFORE_10_6(__ashrti3)
NOT_HERE_BEFORE_10_6(__clear_cache)
NOT_HERE_BEFORE_10_6(__clzdi2)
NOT_HERE_BEFORE_10_6(__clzsi2)
NOT_HERE_BEFORE_10_6(__clzti2)
NOT_HERE_BEFORE_10_6(__cmpdi2)
NOT_HERE_BEFORE_10_6(__cmpti2)
NOT_HERE_BEFORE_10_6(__ctzdi2)
NOT_HERE_BEFORE_10_6(__ctzsi2)
NOT_HERE_BEFORE_10_6(__ctzti2)
NOT_HERE_BEFORE_10_6(__divdc3)
NOT_HERE_BEFORE_10_6(__divdi3)
NOT_HERE_BEFORE_10_6(__divsc3)
NOT_HERE_BEFORE_10_6(__divtc3)
NOT_HERE_BEFORE_10_6(__divti3)
NOT_HERE_BEFORE_10_6(__divxc3)
NOT_HERE_BEFORE_10_6(__enable_execute_stack)
NOT_HERE_BEFORE_10_6(__ffsdi2)
NOT_HERE_BEFORE_10_6(__ffsti2)
NOT_HERE_BEFORE_10_6(__fixdfdi)
NOT_HERE_BEFORE_10_6(__fixdfti)
NOT_HERE_BEFORE_10_6(__fixsfdi)
NOT_HERE_BEFORE_10_6(__fixsfti)
NOT_HERE_BEFORE_10_6(__fixtfdi)
NOT_HERE_BEFORE_10_6(__fixunsdfdi)
NOT_HERE_BEFORE_10_6(__fixunsdfsi)
NOT_HERE_BEFORE_10_6(__fixunsdfti)
NOT_HERE_BEFORE_10_6(__fixunssfdi)
NOT_HERE_BEFORE_10_6(__fixunssfsi)
NOT_HERE_BEFORE_10_6(__fixunssfti)
NOT_HERE_BEFORE_10_6(__fixunstfdi)
NOT_HERE_BEFORE_10_6(__fixunsxfdi)
NOT_HERE_BEFORE_10_6(__fixunsxfsi)
NOT_HERE_BEFORE_10_6(__fixunsxfti)
NOT_HERE_BEFORE_10_6(__fixxfdi)
NOT_HERE_BEFORE_10_6(__fixxfti)
NOT_HERE_BEFORE_10_6(__floatdidf)
NOT_HERE_BEFORE_10_6(__floatdisf)
NOT_HERE_BEFORE_10_6(__floatditf)
NOT_HERE_BEFORE_10_6(__floatdixf)
NOT_HERE_BEFORE_10_6(__floattidf)
NOT_HERE_BEFORE_10_6(__floattisf)
NOT_HERE_BEFORE_10_6(__floattixf)
NOT_HERE_BEFORE_10_6(__floatundidf)
NOT_HERE_BEFORE_10_6(__floatundisf)
NOT_HERE_BEFORE_10_6(__floatunditf)
NOT_HERE_BEFORE_10_6(__floatundixf)
NOT_HERE_BEFORE_10_6(__floatuntidf)
NOT_HERE_BEFORE_10_6(__floatuntisf)
NOT_HERE_BEFORE_10_6(__floatuntixf)
NOT_HERE_BEFORE_10_6(__gcc_personality_v0)
NOT_HERE_BEFORE_10_6(__lshrdi3)
NOT_HERE_BEFORE_10_6(__lshrti3)
NOT_HERE_BEFORE_10_6(__moddi3)
NOT_HERE_BEFORE_10_6(__modti3)
NOT_HERE_BEFORE_10_6(__muldc3)
NOT_HERE_BEFORE_10_6(__muldi3)
NOT_HERE_BEFORE_10_6(__mulsc3)
NOT_HERE_BEFORE_10_6(__multc3)
NOT_HERE_BEFORE_10_6(__multi3)
NOT_HERE_BEFORE_10_6(__mulvdi3)
NOT_HERE_BEFORE_10_6(__mulvsi3)
NOT_HERE_BEFORE_10_6(__mulvti3)
NOT_HERE_BEFORE_10_6(__mulxc3)
NOT_HERE_BEFORE_10_6(__negdi2)
NOT_HERE_BEFORE_10_6(__negti2)
NOT_HERE_BEFORE_10_6(__negvdi2)
NOT_HERE_BEFORE_10_6(__negvsi2)
NOT_HERE_BEFORE_10_6(__negvti2)
NOT_HERE_BEFORE_10_6(__paritydi2)
NOT_HERE_BEFORE_10_6(__paritysi2)
NOT_HERE_BEFORE_10_6(__parityti2)
NOT_HERE_BEFORE_10_6(__popcountdi2)
NOT_HERE_BEFORE_10_6(__popcountsi2)
NOT_HERE_BEFORE_10_6(__popcountti2)
NOT_HERE_BEFORE_10_6(__powidf2)
NOT_HERE_BEFORE_10_6(__powisf2)
NOT_HERE_BEFORE_10_6(__powitf2)
NOT_HERE_BEFORE_10_6(__powixf2)
NOT_HERE_BEFORE_10_6(__subvdi3)
NOT_HERE_BEFORE_10_6(__subvsi3)
NOT_HERE_BEFORE_10_6(__subvti3)
NOT_HERE_BEFORE_10_6(__ucmpdi2)
NOT_HERE_BEFORE_10_6(__ucmpti2)
NOT_HERE_BEFORE_10_6(__udivdi3)
NOT_HERE_BEFORE_10_6(__udivmoddi4)
NOT_HERE_BEFORE_10_6(__udivmodti4)
NOT_HERE_BEFORE_10_6(__udivti3)
NOT_HERE_BEFORE_10_6(__umoddi3)
NOT_HERE_BEFORE_10_6(__umodti3)
#if __ppc__
NOT_HERE_BEFORE_10_6(__gcc_qadd)
NOT_HERE_BEFORE_10_6(__gcc_qdiv)
NOT_HERE_BEFORE_10_6(__gcc_qmul)
NOT_HERE_BEFORE_10_6(__gcc_qsub)
NOT_HERE_BEFORE_10_6(__trampoline_setup)
#endif /* __ppc__ */
#else /* !__APPLE__ */
extern int avoid_empty_file;
#endif /* !__APPLE__*/

View File

@ -0,0 +1,24 @@
//===-- adddf3vfp.S - Implement adddf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// double __adddf3vfp(double a, double b) { return a + b; }
//
// Adds two double precision floating point numbers using the Darwin
// calling convention where double arguments are passed in GPR pairs
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
fmdrr d6, r0, r1 // move first param from r0/r1 pair into d6
fmdrr d7, r2, r3 // move second param from r2/r3 pair into d7
faddd d6, d6, d7
fmrrd r0, r1, d6 // move result back to r0/r1 pair
bx lr

View File

@ -0,0 +1,24 @@
//===-- addsf3vfp.S - Implement addsf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __addsf3vfp(float a, float b);
//
// Adds two single precision floating point numbers using the Darwin
// calling convention where single arguments are passed in GPRs
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
fmsr s14, r0 // move first param from r0 into float register
fmsr s15, r1 // move second param from r1 into float register
fadds s14, s14, s15
fmrs r0, s14 // move result back to r0
bx lr

View File

@ -0,0 +1,36 @@
//===------- bswapdi2 - Implement bswapdi2 --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern uint64_t __bswapdi2(uint64_t);
//
// Reverse all the bytes in a 64-bit integer.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__bswapdi2)
#if __ARM_ARCH_5TEJ__ || __ARM_ARCH_4T__
    // architectures before armv6 do not have the "rev" instruction
// r2 = rev(r0)
eor r2, r0, r0, ror #16
bic r2, r2, #0xff0000
mov r2, r2, lsr #8
eor r2, r2, r0, ror #8
// r0 = rev(r1)
eor r0, r1, r1, ror #16
bic r0, r0, #0xff0000
mov r0, r0, lsr #8
eor r0, r0, r1, ror #8
#else
rev r2, r0 // r2 = rev(r0)
rev r0, r1 // r0 = rev(r1)
#endif
mov r1, r2 // r1 = r2 = rev(r0)
bx lr

View File

@ -0,0 +1,28 @@
//===------- bswapsi2 - Implement bswapsi2 --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern uint32_t __bswapsi2(uint32_t);
//
// Reverse all the bytes in a 32-bit integer.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__bswapsi2)
#if __ARM_ARCH_5TEJ__ || __ARM_ARCH_4T__
    // architectures before armv6 do not have the "rev" instruction
eor r1, r0, r0, ror #16
bic r1, r1, #0xff0000
mov r1, r1, lsr #8
eor r0, r1, r0, ror #8
#else
rev r0, r0
#endif
bx lr
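
For pre-armv6 cores the byte reversal is done with the classic eor/bic/shift trick instead of "rev". Written out in C, a model of the instruction sequence above (assuming 32-bit unsigned arithmetic; the rotates are spelled as shift pairs):

#include <stdint.h>

/* C model of the pre-armv6 path of __bswapsi2 above. */
static uint32_t bswapsi2_model(uint32_t x)
{
    uint32_t t = x ^ ((x >> 16) | (x << 16)); /* eor r1, r0, r0, ror #16 */
    t &= ~UINT32_C(0x00ff0000);               /* bic r1, r1, #0xff0000   */
    t >>= 8;                                  /* mov r1, r1, lsr #8      */
    return t ^ ((x >> 8) | (x << 24));        /* eor r0, r1, r0, ror #8  */
}
/* bswapsi2_model(0x11223344) == 0x44332211 */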

View File

@ -0,0 +1,130 @@
//===-- comparesf2.S - Implement single-precision soft-float comparisons --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the following soft-float comparison routines:
//
// __eqsf2 __gesf2 __unordsf2
// __lesf2 __gtsf2
// __ltsf2
// __nesf2
//
// The semantics of the routines grouped in each column are identical, so there
// is a single implementation for each, with multiple names.
//
// The routines behave as follows:
//
// __lesf2(a,b) returns -1 if a < b
// 0 if a == b
// 1 if a > b
// 1 if either a or b is NaN
//
// __gesf2(a,b) returns -1 if a < b
// 0 if a == b
// 1 if a > b
// -1 if either a or b is NaN
//
// __unordsf2(a,b) returns 0 if both a and b are numbers
// 1 if either a or b is NaN
//
// Note that __lesf2( ) and __gesf2( ) are identical except in their handling of
// NaN values.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
.syntax unified
.align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
DEFINE_COMPILERRT_FUNCTION(__lesf2)
DEFINE_COMPILERRT_FUNCTION(__ltsf2)
DEFINE_COMPILERRT_FUNCTION(__nesf2)
// Make copies of a and b with the sign bit shifted off the top. These will
// be used to detect zeros and NaNs.
mov r2, r0, lsl #1
mov r3, r1, lsl #1
// We do the comparison in three stages (ignoring NaN values for the time
// being). First, we orr the absolute values of a and b; this sets the Z
// flag if both a and b are zero (of either sign). The shift of r3 doesn't
// effect this at all, but it *does* make sure that the C flag is clear for
// the subsequent operations.
orrs r12, r2, r3, lsr #1
// Next, we check if a and b have the same or different signs. If they have
// opposite signs, this eor will set the N flag.
eorsne r12, r0, r1
// If a and b are equal (either both zeros or bit identical; again, we're
// ignoring NaNs for now), this subtract will zero out r0. If they have the
// same sign, the flags are updated as they would be for a comparison of the
// absolute values of a and b.
subspl r0, r2, r3
// If a is smaller in magnitude than b and both have the same sign, place
// the negation of the sign of b in r0. Thus, if both are negative and
// a > b, this sets r0 to 0; if both are positive and a < b, this sets
// r0 to -1.
//
// This is also done if a and b have opposite signs and are not both zero,
// because in that case the subtract was not performed and the C flag is
// still clear from the shift argument in orrs; if a is positive and b
// negative, this places 0 in r0; if a is negative and b positive, -1 is
// placed in r0.
mvnlo r0, r1, asr #31
// If a is greater in magnitude than b and both have the same sign, place
// the sign of b in r0. Thus, if both are negative and a < b, -1 is placed
// in r0, which is the desired result. Conversely, if both are positive
// and a > b, zero is placed in r0.
movhi r0, r1, asr #31
// If you've been keeping track, at this point r0 contains -1 if a < b and
// 0 if a >= b. All that remains to be done is to set it to 1 if a > b.
// If a == b, then the Z flag is set, so we can get the correct final value
// into r0 by simply or'ing with 1 if Z is clear.
orrne r0, r0, #1
// Finally, we need to deal with NaNs. If either argument is NaN, replace
// the value in r0 with 1.
cmp r2, #0xff000000
cmpls r3, #0xff000000
movhi r0, #1
bx lr
.align 2
DEFINE_COMPILERRT_FUNCTION(__gesf2)
DEFINE_COMPILERRT_FUNCTION(__gtsf2)
    // Identical to the preceding, except that we return -1 for NaN values.
// Given that the two paths share so much code, one might be tempted to
// unify them; however, the extra code needed to do so makes the code size
// to performance tradeoff very hard to justify for such small functions.
mov r2, r0, lsl #1
mov r3, r1, lsl #1
orrs r12, r2, r3, lsr #1
eorsne r12, r0, r1
subspl r0, r2, r3
mvnlo r0, r1, asr #31
movhi r0, r1, asr #31
orrne r0, r0, #1
cmp r2, #0xff000000
cmpls r3, #0xff000000
movhi r0, #-1
bx lr
.align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2)
// Return 1 for NaN values, 0 otherwise.
mov r2, r0, lsl #1
mov r3, r1, lsl #1
mov r0, #0
cmp r2, #0xff000000
cmpls r3, #0xff000000
movhi r0, #1
bx lr
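
The return-value table above can be restated as portable C. This is only a reference model of the documented semantics, not the conditional-execution ARM sequence used in the file:

#include <math.h>

/* Reference semantics for the grouped entry points above. */
static int lesf2_ref(float a, float b)   /* also __eqsf2/__ltsf2/__nesf2 */
{
    if (isnan(a) || isnan(b)) return 1;
    if (a < b)  return -1;
    if (a == b) return 0;
    return 1;
}

static int gesf2_ref(float a, float b)   /* also __gtsf2 */
{
    if (isnan(a) || isnan(b)) return -1;
    if (a < b)  return -1;
    if (a == b) return 0;
    return 1;
}

static int unordsf2_ref(float a, float b)
{
    return (isnan(a) || isnan(b)) ? 1 : 0;
}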

View File

@ -0,0 +1,24 @@
//===-- divdf3vfp.S - Implement divdf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __divdf3vfp(double a, double b);
//
// Divides two double precision floating point numbers using the Darwin
// calling convention where double arguments are passed in GPR pairs
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__divdf3vfp)
fmdrr d6, r0, r1 // move first param from r0/r1 pair into d6
fmdrr d7, r2, r3 // move second param from r2/r3 pair into d7
fdivd d5, d6, d7
fmrrd r0, r1, d5 // move result back to r0/r1 pair
bx lr

View File

@ -0,0 +1,24 @@
//===-- divsf3vfp.S - Implement divsf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __divsf3vfp(float a, float b);
//
// Divides two single precision floating point numbers using the Darwin
// calling convention where single arguments are passed like 32-bit ints.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__divsf3vfp)
fmsr s14, r0 // move first param from r0 into float register
fmsr s15, r1 // move second param from r1 into float register
fdivs s13, s14, s15
fmrs r0, s13 // move result back to r0
bx lr

View File

@ -0,0 +1,27 @@
//===-- eqdf2vfp.S - Implement eqdf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __eqdf2vfp(double a, double b);
//
// Returns one iff a == b and neither is NaN.
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
moveq r0, #1 // set result register to 1 if equal
movne r0, #0
bx lr

View File

@ -0,0 +1,28 @@
//===-- eqsf2vfp.S - Implement eqsf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __eqsf2vfp(float a, float b);
//
// Returns one iff a == b and neither is NaN.
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
moveq r0, #1 // set result register to 1 if equal
movne r0, #0
bx lr

View File

@ -0,0 +1,24 @@
//===-- extendsfdf2vfp.S - Implement extendsfdf2vfp -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __extendsfdf2vfp(float a);
//
// Converts single precision float to double precision result.
// Uses Darwin calling convention where a single precision parameter is
// passed in a GPR and a double precision result is returned in R0/R1 pair.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__extendsfdf2vfp)
fmsr s15, r0 // load float register from R0
fcvtds d7, s15 // convert single to double
fmrrd r0, r1, d7 // return result in r0/r1 pair
bx lr

View File

@ -0,0 +1,24 @@
//===-- fixdfsivfp.S - Implement fixdfsivfp -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __fixdfsivfp(double a);
//
// Converts double precision float to a 32-bit int rounding towards zero.
// Uses Darwin calling convention where a double precision parameter is
// passed in GPR register pair.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__fixdfsivfp)
fmdrr d7, r0, r1 // load double register from R0/R1
ftosizd s15, d7 // convert double to 32-bit int into s15
fmrs r0, s15 // move s15 to result register
bx lr

View File

@ -0,0 +1,24 @@
//===-- fixsfsivfp.S - Implement fixsfsivfp -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __fixsfsivfp(float a);
//
// Converts single precision float to a 32-bit int rounding towards zero.
// Uses Darwin calling convention where a single precision parameter is
// passed in a GPR.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__fixsfsivfp)
fmsr s15, r0 // load float register from R0
ftosizs s15, s15 // convert single to 32-bit int into s15
fmrs r0, s15 // move s15 to result register
bx lr

View File

@ -0,0 +1,25 @@
//===-- fixunsdfsivfp.S - Implement fixunsdfsivfp -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern unsigned int __fixunsdfsivfp(double a);
//
// Converts double precision float to a 32-bit unsigned int rounding towards
// zero. All negative values become zero.
// Uses Darwin calling convention where a double precision parameter is
// passed in GPR register pair.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__fixunsdfsivfp)
fmdrr d7, r0, r1 // load double register from R0/R1
ftouizd s15, d7 // convert double to 32-bit int into s15
fmrs r0, s15 // move s15 to result register
bx lr

View File

@ -0,0 +1,25 @@
//===-- fixunssfsivfp.S - Implement fixunssfsivfp -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern unsigned int __fixunssfsivfp(float a);
//
// Converts single precision float to a 32-bit unsigned int rounding towards
// zero. All negative values become zero.
// Uses Darwin calling convention where a single precision parameter is
// passed in a GPR.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__fixunssfsivfp)
fmsr s15, r0 // load float register from R0
ftouizs s15, s15 // convert single to 32-bit unsigned into s15
fmrs r0, s15 // move s15 to result register
bx lr

View File

@ -0,0 +1,24 @@
//===-- floatsidfvfp.S - Implement floatsidfvfp ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __floatsidfvfp(int a);
//
// Converts a 32-bit int to a double precision float.
// Uses Darwin calling convention where a double precision result is
// returned in a GPR register pair.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__floatsidfvfp)
fmsr s15, r0 // move int to float register s15
fsitod d7, s15 // convert 32-bit int in s15 to double in d7
fmrrd r0, r1, d7 // move d7 to result register pair r0/r1
bx lr

View File

@ -0,0 +1,24 @@
//===-- floatsisfvfp.S - Implement floatsisfvfp ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __floatsisfvfp(int a);
//
// Converts a 32-bit int to a single precision float.
// Uses Darwin calling convention where a single precision result is
// returned in a GPR.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__floatsisfvfp)
fmsr s15, r0 // move int to float register s15
fsitos s15, s15 // convert 32-bit int in s15 to float in s15
fmrs r0, s15 // move s15 to result register
bx lr

View File

@ -0,0 +1,24 @@
//===-- floatunssidfvfp.S - Implement floatunssidfvfp ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __floatunssidfvfp(unsigned int a);
//
// Converts a 32-bit unsigned int to a double precision float.
// Uses Darwin calling convention where a double precision result is
// returned in a GPR register pair.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssidfvfp)
fmsr s15, r0 // move int to float register s15
fuitod d7, s15 // convert 32-bit int in s15 to double in d7
fmrrd r0, r1, d7 // move d7 to result register pair r0/r1
bx lr

View File

@ -0,0 +1,24 @@
//===-- floatunssisfvfp.S - Implement floatunssisfvfp ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __floatunssisfvfp(unsigned int a);
//
// Converts a 32-bit unsigned int to a single precision float.
// Uses Darwin calling convention where a single precision result is
// returned in a GPR.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssisfvfp)
fmsr s15, r0 // move int to float register s15
fuitos s15, s15 // convert 32-bit int in s15 to float in s15
fmrs r0, s15 // move s15 to result register
bx lr

View File

@ -0,0 +1,27 @@
//===-- gedf2vfp.S - Implement gedf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __gedf2vfp(double a, double b);
//
// Returns one iff a >= b and neither is NaN.
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0
bx lr

View File

@ -0,0 +1,28 @@
//===-- gesf2vfp.S - Implement gesf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __gesf2vfp(float a, float b);
//
// Returns one iff a >= b and neither is NaN.
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0
bx lr

View File

@ -0,0 +1,27 @@
//===-- gtdf2vfp.S - Implement gtdf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __gtdf2vfp(double a, double b);
//
// Returns one iff a > b and neither is NaN.
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
    movgt r0, #1 // set result register to 1 if greater than
movle r0, #0
bx lr

View File

@ -0,0 +1,28 @@
//===-- gtsf2vfp.S - Implement gtsf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __gtsf2vfp(float a, float b);
//
// Returns one iff a > b and neither is NaN.
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
    movgt r0, #1 // set result register to 1 if greater than
movle r0, #0
bx lr

View File

@ -0,0 +1,27 @@
//===-- ledf2vfp.S - Implement ledf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __ledf2vfp(double a, double b);
//
// Returns one iff a <= b and neither is NaN.
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
    movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0
bx lr

View File

@ -0,0 +1,28 @@
//===-- lesf2vfp.S - Implement lesf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __lesf2vfp(float a, float b);
//
// Returns one iff a <= b and neither is NaN.
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
    movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0
bx lr

View File

@ -0,0 +1,27 @@
//===-- ltdf2vfp.S - Implement ltdf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __ltdf2vfp(double a, double b);
//
// Returns one iff a < b and neither is NaN.
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
    movmi r0, #1 // set result register to 1 if less than
movpl r0, #0
bx lr

View File

@ -0,0 +1,28 @@
//===-- ltsf2vfp.S - Implement ltsf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __ltsf2vfp(float a, float b);
//
// Returns one iff a < b and neither is NaN.
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
    movmi r0, #1 // set result register to 1 if less than
movpl r0, #0
bx lr

View File

@ -0,0 +1,36 @@
//===-------- modsi3.S - Implement modsi3 ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int32_t __modsi3(int32_t a, int32_t b);
//
// Returns the remainder when dividing two 32-bit signed integers.
// Conceptually, the function is: { return a - (a / b) * b; }
// But if you write that in C, llvm compiles it to a call to __modsi3...
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__modsi3)
push {r4, r5, r7, lr}
add r7, sp, #8 // set stack frame
mov r5, r0 // save a
mov r4, r1 // save b
bl ___divsi3 // compute a/b
#if __ARM_ARCH_7A__
    mls r0, r4, r0, r5 // multiply quotient by b and subtract from a
#else
    // architectures before armv7 do not have the "mls" instruction
    mul r3, r0, r4 // multiply quotient by b
    sub r0, r5, r3 // a - (a/b) * b
#endif
pop {r4, r5, r7, pc}

View File

@ -0,0 +1,24 @@
//===-- muldf3vfp.S - Implement muldf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __muldf3vfp(double a, double b);
//
// Multiplies two double precision floating point numbers using the Darwin
// calling convention where double arguments are passed in GPR pairs
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__muldf3vfp)
fmdrr d6, r0, r1 // move first param from r0/r1 pair into d6
fmdrr d7, r2, r3 // move second param from r2/r3 pair into d7
fmuld d6, d6, d7
fmrrd r0, r1, d6 // move result back to r0/r1 pair
bx lr

View File

@ -0,0 +1,24 @@
//===-- mulsf3vfp.S - Implement mulsf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __mulsf3vfp(float a, float b);
//
// Multiplies two single precision floating point numbers using the Darwin
// calling convention where single arguments are passed like 32-bit ints.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__mulsf3vfp)
fmsr s14, r0 // move first param from r0 into float register
fmsr s15, r1 // move second param from r1 into float register
fmuls s13, s14, s15
fmrs r0, s13 // move result back to r0
bx lr

View File

@ -0,0 +1,27 @@
//===-- nedf2vfp.S - Implement nedf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __nedf2vfp(double a, double b);
//
// Returns one iff a != b and neither is NaN.
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
    movne r0, #1 // set result register to 1 if unequal
moveq r0, #0
bx lr

View File

@ -0,0 +1,21 @@
//===-- negdf2vfp.S - Implement negdf2vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __negdf2vfp(double a);
//
// Returns the negation of a double precision floating point number using the
// Darwin calling convention where double arguments are passed in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__negdf2vfp)
eor r1, r1, #-2147483648 // flip sign bit on double in r0/r1 pair
bx lr

View File

@ -0,0 +1,21 @@
//===-- negsf2vfp.S - Implement negsf2vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __negsf2vfp(float a);
//
// Returns the negation of a single precision floating point number using the
// Darwin calling convention where single arguments are passed like 32-bit ints.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__negsf2vfp)
eor r0, r0, #-2147483648 // flip sign bit on float in r0
bx lr

View File

@ -0,0 +1,28 @@
//===-- nesf2vfp.S - Implement nesf2vfp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __nesf2vfp(float a, float b);
//
// Returns one iff a != b and neither is NaN.
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0
bx lr

View File

@ -0,0 +1,37 @@
//===-- save_restore_regs.S - Implement save/restore* ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling C++ functions that need to handle thrown exceptions the
// compiler is required to save all registers and call __Unwind_SjLj_Register
// in the function prolog. But when compiling for thumb1, there are
// no instructions to access the floating point registers, so the
// compiler needs to add a call to the helper function _save_vfp_d8_d15_regs
// written in ARM to save the float registers. In the epilog, the compiler
// must also add a call to __restore_vfp_d8_d15_regs to restore those registers.
//
.text
.syntax unified
//
// Restore registers d8-d15 from stack
//
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__restore_vfp_d8_d15_regs)
vldmia sp!, {d8-d15} // pop registers d8-d15 off stack
    bx lr // return to epilog
// tell linker it can break up file at label boundaries
.subsections_via_symbols

View File

@ -0,0 +1,35 @@
//===-- save_restore_regs.S - Implement save/restore* ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling C++ functions that need to handle thrown exceptions the
// compiler is required to save all registers and call __Unwind_SjLj_Register
// in the function prolog. But when compiling for thumb1, there are
// no instructions to access the floating point registers, so the
// compiler needs to add a call to the helper function _save_vfp_d8_d15_regs
// written in ARM to save the float registers. In the epilog, the compiler
// must also add a call to __restore_vfp_d8_d15_regs to restore those registers.
//
.text
.syntax unified
//
// Save registers d8-d15 onto stack
//
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__save_vfp_d8_d15_regs)
vstmdb sp!, {d8-d15} // push registers d8-d15 onto stack
bx lr // return to prolog
// tell linker it can break up file at label boundaries
.subsections_via_symbols

View File

@ -0,0 +1,24 @@
//===-- subdf3vfp.S - Implement subdf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern double __subdf3vfp(double a, double b);
//
// Returns difference between two double precision floating point numbers using
// the Darwin calling convention where double arguments are passed in GPR pairs
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__subdf3vfp)
fmdrr d6, r0, r1 // move first param from r0/r1 pair into d6
fmdrr d7, r2, r3 // move second param from r2/r3 pair into d7
fsubd d6, d6, d7
fmrrd r0, r1, d6 // move result back to r0/r1 pair
bx lr

View File

@ -0,0 +1,25 @@
//===-- subsf3vfp.S - Implement subsf3vfp ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __subsf3vfp(float a, float b);
//
// Returns the difference between two single precision floating point numbers
// using the Darwin calling convention where single arguments are passed
// like 32-bit ints.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
fmsr s14, r0 // move first param from r0 into float register
fmsr s15, r1 // move second param from r1 into float register
fsubs s14, s14, s15
fmrs r0, s14 // move result back to r0
bx lr

View File

@ -0,0 +1,44 @@
//===-- switch.S - Implement switch* --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling switch statements in thumb mode, the compiler
// can use these __switch* helper functions. The compiler emits a blx to
// the __switch* function followed by a table of displacements for each
// case statement. On entry, R0 is the index into the table. The __switch*
// function uses the return address in lr to find the start of the table.
// The first entry in the table is the count of the entries in the table.
// It then uses R0 to index into the table and get the displacement of the
// address to jump to. If R0 is greater than the size of the table, it jumps
// to the last entry in the table. Each displacement in the table is actually
// the distance from lr to the label, thus making the tables PIC.
.text
.syntax unified
//
// The table contains signed 2-byte sized elements which are 1/2 the distance
// from lr to the target label.
//
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch16)
ldrh ip, [lr, #-1] // get first 16-bit word in table
cmp r0, ip // compare with index
add r0, lr, r0, lsl #1 // compute address of element in table
ldrshcc r0, [r0, #1] // load 16-bit element if r0 is in range
add ip, lr, ip, lsl #1 // compute address of last element in table
ldrshhs r0, [ip, #1] // load 16-bit element if r0 out of range
add ip, lr, r0, lsl #1 // compute label = lr + element*2
bx ip // jump to computed label
// tell linker it can break up file at label boundaries
.subsections_via_symbols
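
Spelled out in C, the address arithmetic performed by __switch16 looks roughly as follows. This is a hypothetical model for illustration only: "lr" is assumed to hold the Thumb return address (low bit set), so the halfword table really starts at lr - 1, and the real routine never returns; it computes a target and branches to it.

#include <assert.h>
#include <stdint.h>

/* Model of the target address computed by __switch16 above; the table is
 * passed directly instead of being recovered from a real return address. */
static intptr_t switch16_target(const int16_t *table, uint32_t index)
{
    uint32_t count = (uint16_t)table[0];           /* ldrh ip, [lr, #-1]       */
    uint32_t i = (index < count) ? index : count;  /* clamp out-of-range index */
    intptr_t lr = (intptr_t)table + 1;             /* what lr would hold       */
    return lr + 2 * (intptr_t)table[i + 1];        /* lr + displacement * 2    */
}

int main(void)
{
    /* Three case displacements followed by the default entry at the end. */
    static const int16_t table[] = { 3, 10, 20, 30, 99 };
    assert(switch16_target(table, 1) == (intptr_t)table + 1 + 2 * 20);
    assert(switch16_target(table, 7) == (intptr_t)table + 1 + 2 * 99); /* default */
    return 0;
}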

View File

@ -0,0 +1,46 @@
//===-- switch.S - Implement switch* --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling switch statements in thumb mode, the compiler
// can use these __switch* helper functions. The compiler emits a blx to
// the __switch* function followed by a table of displacements for each
// case statement. On entry, R0 is the index into the table. The __switch*
// function uses the return address in lr to find the start of the table.
// The first entry in the table is the count of the entries in the table.
// It then uses R0 to index into the table and get the displacement of the
// address to jump to. If R0 is greater than the size of the table, it jumps
// to the last entry in the table. Each displacement in the table is actually
// the distance from lr to the label, thus making the tables PIC.
.text
.syntax unified
//
// The table contains signed 4-byte sized elements which are the distance
// from lr to the target label.
//
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch32)
ldr ip, [lr, #-1] // get first 32-bit word in table
cmp r0, ip // compare with index
add r0, lr, r0, lsl #2 // compute address of element in table
ldrcc r0, [r0, #3] // load 32-bit element if r0 is in range
add ip, lr, ip, lsl #2 // compute address of last element in table
ldrcs r0, [ip, #3] // load 32-bit element if r0 out of range
add ip, lr, r0 // compute label = lr + element
bx ip // jump to computed label
// tell linker it can break up file at label boundaries
.subsections_via_symbols

View File

@ -0,0 +1,43 @@
//===-- switch.S - Implement switch* --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling switch statements in thumb mode, the compiler
// can use these __switch* helper functions. The compiler emits a blx to
// the __switch* function followed by a table of displacements for each
// case statement. On entry, R0 is the index into the table. The __switch*
// function uses the return address in lr to find the start of the table.
// The first entry in the table is the count of the entries in the table.
// It then uses R0 to index into the table and get the displacement of the
// address to jump to. If R0 is greater than the size of the table, it jumps
// to the last entry in the table. Each displacement in the table is actually
// the distance from lr to the label, thus making the tables PIC.
.text
.syntax unified
//
// The table contains signed byte sized elements which are 1/2 the distance
// from lr to the target label.
//
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch8)
ldrb ip, [lr, #-1] // get first byte in table
cmp r0, ip // signed compare with index
ldrsbcc r0, [lr, r0] // get indexed byte out of table
ldrsbhs r0, [lr, ip] // if out of range, use last entry in table
add ip, lr, r0, lsl #1 // compute label = lr + element*2
bx ip // jump to computed label
// tell linker it can break up file at label boundaries
.subsections_via_symbols

View File

@ -0,0 +1,43 @@
//===-- switch.S - Implement switch* --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling switch statements in thumb mode, the compiler
// can use these __switch* helper functions. The compiler emits a blx to
// the __switch* function followed by a table of displacements for each
// case statement. On entry, R0 is the index into the table. The __switch*
// function uses the return address in lr to find the start of the table.
// The first entry in the table is the count of the entries in the table.
// It then uses R0 to index into the table and get the displacement of the
// address to jump to. If R0 is greater than the size of the table, it jumps
// to the last entry in the table. Each displacement in the table is actually
// the distance from lr to the label, thus making the tables PIC.
.text
.syntax unified
//
// The table contains unsigned byte sized elements which are 1/2 the distance
// from lr to the target label.
//
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switchu8)
ldrb ip, [lr, #-1] // get first byte in table
cmp r0, ip // compare with index
ldrbcc r0, [lr, r0] // get indexed byte out of table
ldrbhs r0, [lr, ip] // if out of range, use last entry in table
add ip, lr, r0, lsl #1 // compute label = lr + element*2
bx ip // jump to computed label
// tell linker it can break up file at label boundaries
.subsections_via_symbols

View File

@ -0,0 +1,34 @@
//===-- sync_synchronize - Implement memory barrier * ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// When compiling a use of the gcc built-in __sync_synchronize() in thumb1 mode,
// the compiler may emit a call to __sync_synchronize.
// On Darwin the implementation jumps to an OS supplied function named
// OSMemoryBarrier
//
.text
.syntax unified
#if __APPLE__
.align 2
DEFINE_COMPILERRT_PRIVATE_FUNCTION(__sync_synchronize)
stmfd sp!, {r7, lr}
add r7, sp, #0
bl _OSMemoryBarrier
ldmfd sp!, {r7, pc}
// tell linker it can break up file at label boundaries
.subsections_via_symbols
#endif

View File

@ -0,0 +1,24 @@
//===-- truncdfsf2vfp.S - Implement truncdfsf2vfp -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern float __truncdfsf2vfp(double a);
//
// Converts a double precision float to a single precision result.
// Uses Darwin calling convention where a double precision parameter is
// passed in an R0/R1 pair and a single precision result is returned in R0.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__truncdfsf2vfp)
fmdrr d7, r0, r1 // load double from r0/r1 pair
    fcvtsd s15, d7 // convert double to single (truncate precision)
fmrs r0, s15 // return result in r0
bx lr

View File

@ -0,0 +1,27 @@
//===-- unorddf2vfp.S - Implement unorddf2vfp ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __unorddf2vfp(double a, double b);
//
// Returns one iff a or b is NaN
// Uses Darwin calling convention where double precision arguments are passed
// in GPR pairs.
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__unorddf2vfp)
fmdrr d6, r0, r1 // load r0/r1 pair in double register
fmdrr d7, r2, r3 // load r2/r3 pair in double register
fcmpd d6, d7
fmstat
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0
bx lr

View File

@ -0,0 +1,28 @@
//===-- unordsf2vfp.S - Implement unordsf2vfp -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "../assembly.h"
//
// extern int __unordsf2vfp(float a, float b);
//
// Returns one iff a or b is NaN
// Uses Darwin calling convention where single precision arguments are passed
// like 32-bit ints
//
.align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2vfp)
fmsr s14, r0 // move from GPR 0 to float register
fmsr s15, r1 // move from GPR 1 to float register
fcmps s14, s15
fmstat
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0
bx lr

View File

@ -0,0 +1,41 @@
/* ====-- ashldi3.c - Implement __ashldi3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ashldi3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: a << b */
/* Precondition: 0 <= b < bits_in_dword */
di_int
__ashldi3(di_int a, si_int b)
{
const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
dwords input;
dwords result;
input.all = a;
if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */
{
result.s.low = 0;
result.s.high = input.s.low << (b - bits_in_word);
}
else /* 0 <= b < bits_in_word */
{
if (b == 0)
return a;
result.s.low = input.s.low << b;
result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_word - b));
}
return result.all;
}
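/*
 * Illustrative check (not part of the imported sources): __ashldi3 must agree
 * with a native 64-bit shift for every shift count in [0, 63]. The prototype
 * below assumes di_int is long long and si_int is int, and ashldi3_check() is
 * a hypothetical name. The reference shift is done on an unsigned value to
 * avoid signed-overflow undefined behavior.
 */
#include <assert.h>

long long __ashldi3(long long a, int b);

void ashldi3_check(void)
{
    const long long a = 0x0123456789ABCDEFLL;
    int b;
    for (b = 0; b < 64; ++b)
        assert(__ashldi3(a, b) == (long long)((unsigned long long)a << b));
}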

View File

@ -0,0 +1,45 @@
/* ===-- ashlti3.c - Implement __ashlti3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ashlti3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: a << b */
/* Precondition: 0 <= b < bits_in_tword */
ti_int
__ashlti3(ti_int a, si_int b)
{
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
input.all = a;
if (b & bits_in_dword) /* bits_in_dword <= b < bits_in_tword */
{
result.s.low = 0;
result.s.high = input.s.low << (b - bits_in_dword);
}
else /* 0 <= b < bits_in_dword */
{
if (b == 0)
return a;
result.s.low = input.s.low << b;
result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_dword - b));
}
return result.all;
}
#endif /* __x86_64 */

View File

@ -0,0 +1,42 @@
/*===-- ashrdi3.c - Implement __ashrdi3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ashrdi3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: arithmetic a >> b */
/* Precondition: 0 <= b < bits_in_dword */
di_int
__ashrdi3(di_int a, si_int b)
{
const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
dwords input;
dwords result;
input.all = a;
if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */
{
/* result.s.high = input.s.high < 0 ? -1 : 0 */
result.s.high = input.s.high >> (bits_in_word - 1);
result.s.low = input.s.high >> (b - bits_in_word);
}
else /* 0 <= b < bits_in_word */
{
if (b == 0)
return a;
result.s.high = input.s.high >> b;
result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b);
}
return result.all;
}

View File

@ -0,0 +1,46 @@
/* ===-- ashrti3.c - Implement __ashrti3 -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ashrti3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: arithmetic a >> b */
/* Precondition: 0 <= b < bits_in_tword */
ti_int
__ashrti3(ti_int a, si_int b)
{
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
input.all = a;
if (b & bits_in_dword) /* bits_in_dword <= b < bits_in_tword */
{
/* result.s.high = input.s.high < 0 ? -1 : 0 */
result.s.high = input.s.high >> (bits_in_dword - 1);
result.s.low = input.s.high >> (b - bits_in_dword);
}
else /* 0 <= b < bits_in_dword */
{
if (b == 0)
return a;
result.s.high = input.s.high >> b;
result.s.low = (input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
}
return result.all;
}
#endif /* __x86_64 */

View File

@ -0,0 +1,54 @@
/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file defines macros for use in compiler-rt assembler source.
* This file is not part of the interface of this library.
*
* ===----------------------------------------------------------------------===
*/
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
#define SEPARATOR @
#else
#define SEPARATOR ;
#endif
/* We can't use __USER_LABEL_PREFIX__ here, as it isn't possible to concatenate the
*values* of two macros. This is quite brittle, though. */
#if defined(__APPLE__)
#define SYMBOL_NAME(name) _##name
#else
#define SYMBOL_NAME(name) name
#endif
#ifdef VISIBILITY_HIDDEN
#define DEFINE_COMPILERRT_FUNCTION(name) \
.globl SYMBOL_NAME(name) SEPARATOR \
.private_extern SYMBOL_NAME(name) SEPARATOR \
SYMBOL_NAME(name):
#else
#define DEFINE_COMPILERRT_FUNCTION(name) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_NAME(name):
#endif
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
.globl SYMBOL_NAME(name) SEPARATOR \
.private_extern SYMBOL_NAME(name) SEPARATOR \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
.globl name SEPARATOR \
.private_extern name SEPARATOR \
name:
#endif /* COMPILERRT_ASSEMBLY_H */
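/*
 * Illustrative expansion (not part of the imported sources): on Darwin,
 * SYMBOL_NAME() prepends an underscore and SEPARATOR is ';', so with
 * VISIBILITY_HIDDEN undefined
 *
 *     DEFINE_COMPILERRT_FUNCTION(__ashldi3)
 *
 * expands to the assembler text
 *
 *     .globl ___ashldi3 ; ___ashldi3:
 *
 * while on ELF targets (no symbol prefix) it becomes
 *
 *     .globl __ashldi3 ; __ashldi3:
 */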

View File

@ -0,0 +1,41 @@
/* ===-- clear_cache.c - Implement __clear_cache ---------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdlib.h>
#if __APPLE__
#include <libkern/OSCacheControl.h>
#endif
/*
* The compiler generates calls to __clear_cache() when creating
* trampoline functions on the stack for use with nested functions.
* It is expected to invalidate the instruction cache for the
* specified range.
*/
void __clear_cache(void* start, void* end)
{
#if __i386__ || __x86_64__
/*
* Intel processors have a unified instruction and data cache
* so there is nothing to do
*/
#else
#if __APPLE__
/* On Darwin, sys_icache_invalidate() provides this functionality */
sys_icache_invalidate(start, end-start);
#else
compilerrt_abort();
#endif
#endif
}
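/*
 * Illustrative usage sketch (not part of the imported sources): JIT-style code
 * that writes instructions into a buffer must invalidate the instruction cache
 * for that range before jumping into it. The buffer and function names below
 * are hypothetical.
 */
void __clear_cache(void* start, void* end);

void publish_code(unsigned char* buf, unsigned len)
{
    /* ... emit machine code into buf[0 .. len) ... */
    __clear_cache(buf, buf + len);
}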

View File

@ -0,0 +1,29 @@
/* ===-- clzdi2.c - Implement __clzdi2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __clzdi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: the number of leading 0-bits */
/* Precondition: a != 0 */
si_int
__clzdi2(di_int a)
{
dwords x;
x.all = a;
const si_int f = -(x.s.high == 0);
return __builtin_clz((x.s.high & ~f) | (x.s.low & f)) +
(f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
}

View File

@ -0,0 +1,53 @@
/* ===-- clzsi2.c - Implement __clzsi2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __clzsi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: the number of leading 0-bits */
/* Precondition: a != 0 */
si_int
__clzsi2(si_int a)
{
su_int x = (su_int)a;
si_int t = ((x & 0xFFFF0000) == 0) << 4; /* if (x is small) t = 16 else 0 */
x >>= 16 - t; /* x = [0 - 0xFFFF] */
su_int r = t; /* r = [0, 16] */
/* return r + clz(x) */
t = ((x & 0xFF00) == 0) << 3;
x >>= 8 - t; /* x = [0 - 0xFF] */
r += t; /* r = [0, 8, 16, 24] */
/* return r + clz(x) */
t = ((x & 0xF0) == 0) << 2;
x >>= 4 - t; /* x = [0 - 0xF] */
r += t; /* r = [0, 4, 8, 12, 16, 20, 24, 28] */
/* return r + clz(x) */
t = ((x & 0xC) == 0) << 1;
x >>= 2 - t; /* x = [0 - 3] */
r += t; /* r = [0 - 30] and is even */
/* return r + clz(x) */
/* switch (x)
* {
* case 0:
* return r + 2;
* case 1:
* return r + 1;
* case 2:
* case 3:
* return r;
* }
*/
return r + ((2 - x) & -((x & 2) == 0));
}
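/*
 * Worked example (not part of the imported sources), tracing the binary
 * search above for a = 0x00001234, whose expected result is 19:
 *
 *   step 1: top 16 bits clear   -> t = 16, x stays 0x1234, r = 16
 *   step 2: 0x1234 & 0xFF00 set -> t = 0,  x >>= 8 -> 0x12, r = 16
 *   step 3: 0x12 & 0xF0 set     -> t = 0,  x >>= 4 -> 0x1,  r = 16
 *   step 4: 0x1 & 0xC clear     -> t = 2,  x stays 0x1,     r = 18
 *   final:  r + ((2 - 1) & -1)  -> 19
 */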

View File

@ -0,0 +1,33 @@
/* ===-- clzti2.c - Implement __clzti2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __clzti2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: the number of leading 0-bits */
/* Precondition: a != 0 */
si_int
__clzti2(ti_int a)
{
twords x;
x.all = a;
const di_int f = -(x.s.high == 0);
return __builtin_clzll((x.s.high & ~f) | (x.s.low & f)) +
((si_int)f & ((si_int)(sizeof(di_int) * CHAR_BIT)));
}
#endif /* __x86_64 */

View File

@ -0,0 +1,38 @@
/* ===-- cmpdi2.c - Implement __cmpdi2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __cmpdi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: if (a < b) returns 0
* if (a == b) returns 1
* if (a > b) returns 2
*/
si_int
__cmpdi2(di_int a, di_int b)
{
dwords x;
x.all = a;
dwords y;
y.all = b;
if (x.s.high < y.s.high)
return 0;
if (x.s.high > y.s.high)
return 2;
if (x.s.low < y.s.low)
return 0;
if (x.s.low > y.s.low)
return 2;
return 1;
}

View File

@ -0,0 +1,42 @@
/* ===-- cmpti2.c - Implement __cmpti2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __cmpti2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: if (a < b) returns 0
* if (a == b) returns 1
* if (a > b) returns 2
*/
si_int
__cmpti2(ti_int a, ti_int b)
{
twords x;
x.all = a;
twords y;
y.all = b;
if (x.s.high < y.s.high)
return 0;
if (x.s.high > y.s.high)
return 2;
if (x.s.low < y.s.low)
return 0;
if (x.s.low > y.s.low)
return 2;
return 1;
}
#endif

View File

@ -0,0 +1,132 @@
//===-- lib/comparedf2.c - Double-precision comparisons -----------*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the following soft-float comparison routines:
//
// __eqdf2 __gedf2 __unorddf2
// __ledf2 __gtdf2
// __ltdf2
// __nedf2
//
// The semantics of the routines grouped in each column are identical, so there
// is a single implementation for each, and wrappers to provide the other names.
//
// The main routines behave as follows:
//
// __ledf2(a,b) returns -1 if a < b
// 0 if a == b
// 1 if a > b
// 1 if either a or b is NaN
//
// __gedf2(a,b) returns -1 if a < b
// 0 if a == b
// 1 if a > b
// -1 if either a or b is NaN
//
// __unorddf2(a,b) returns 0 if both a and b are numbers
// 1 if either a or b is NaN
//
// Note that __ledf2( ) and __gedf2( ) are identical except in their handling of
// NaN values.
//
//===----------------------------------------------------------------------===//
#define DOUBLE_PRECISION
#include "fp_lib.h"
enum LE_RESULT {
LE_LESS = -1,
LE_EQUAL = 0,
LE_GREATER = 1,
LE_UNORDERED = 1
};
enum LE_RESULT __ledf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep || bAbs > infRep) return LE_UNORDERED;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return LE_EQUAL;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
}
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
else {
if (aInt > bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
}
}
enum GE_RESULT {
GE_LESS = -1,
GE_EQUAL = 0,
GE_GREATER = 1,
GE_UNORDERED = -1 // Note: different from LE_UNORDERED
};
enum GE_RESULT __gedf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
if (aAbs > infRep || bAbs > infRep) return GE_UNORDERED;
if ((aAbs | bAbs) == 0) return GE_EQUAL;
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
} else {
if (aInt > bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
}
}
int __unorddf2(fp_t a, fp_t b) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
return aAbs > infRep || bAbs > infRep;
}
// The following are alternative names for the preceding routines.
enum LE_RESULT __eqdf2(fp_t a, fp_t b) {
return __ledf2(a, b);
}
enum LE_RESULT __ltdf2(fp_t a, fp_t b) {
return __ledf2(a, b);
}
enum LE_RESULT __nedf2(fp_t a, fp_t b) {
return __ledf2(a, b);
}
enum GE_RESULT __gtdf2(fp_t a, fp_t b) {
return __gedf2(a, b);
}
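/*
 * Illustrative sketch (not part of the imported sources): when no hardware FP
 * compare is available, the compiler lowers double comparisons to these
 * routines using the libgcc convention, roughly as below; less_than() is a
 * hypothetical helper.
 */
int less_than(double a, double b)
{
    /* "a < b" is emitted as "__ltdf2(a, b) < 0"; NaN operands make __ltdf2
     * return 1 (LE_UNORDERED), so the comparison is correctly false. */
    return __ltdf2(a, b) < 0;
}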

View File

@ -0,0 +1,131 @@
//===-- lib/comparesf2.c - Single-precision comparisons -----------*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the following soft-float comparison routines:
//
// __eqsf2 __gesf2 __unordsf2
// __lesf2 __gtsf2
// __ltsf2
// __nesf2
//
// The semantics of the routines grouped in each column are identical, so there
// is a single implementation for each, and wrappers to provide the other names.
//
// The main routines behave as follows:
//
// __lesf2(a,b) returns -1 if a < b
// 0 if a == b
// 1 if a > b
// 1 if either a or b is NaN
//
// __gesf2(a,b) returns -1 if a < b
// 0 if a == b
// 1 if a > b
// -1 if either a or b is NaN
//
// __unordsf2(a,b) returns 0 if both a and b are numbers
// 1 if either a or b is NaN
//
// Note that __lesf2( ) and __gesf2( ) are identical except in their handling of
// NaN values.
//
//===----------------------------------------------------------------------===//
#define SINGLE_PRECISION
#include "fp_lib.h"
enum LE_RESULT {
LE_LESS = -1,
LE_EQUAL = 0,
LE_GREATER = 1,
LE_UNORDERED = 1
};
enum LE_RESULT __lesf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep || bAbs > infRep) return LE_UNORDERED;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return LE_EQUAL;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
}
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
else {
if (aInt > bInt) return LE_LESS;
else if (aInt == bInt) return LE_EQUAL;
else return LE_GREATER;
}
}
enum GE_RESULT {
GE_LESS = -1,
GE_EQUAL = 0,
GE_GREATER = 1,
GE_UNORDERED = -1 // Note: different from LE_UNORDERED
};
enum GE_RESULT __gesf2(fp_t a, fp_t b) {
const srep_t aInt = toRep(a);
const srep_t bInt = toRep(b);
const rep_t aAbs = aInt & absMask;
const rep_t bAbs = bInt & absMask;
if (aAbs > infRep || bAbs > infRep) return GE_UNORDERED;
if ((aAbs | bAbs) == 0) return GE_EQUAL;
if ((aInt & bInt) >= 0) {
if (aInt < bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
} else {
if (aInt > bInt) return GE_LESS;
else if (aInt == bInt) return GE_EQUAL;
else return GE_GREATER;
}
}
int __unordsf2(fp_t a, fp_t b) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
return aAbs > infRep || bAbs > infRep;
}
// The following are alternative names for the preceding routines.
enum LE_RESULT __eqsf2(fp_t a, fp_t b) {
return __lesf2(a, b);
}
enum LE_RESULT __ltsf2(fp_t a, fp_t b) {
return __lesf2(a, b);
}
enum LE_RESULT __nesf2(fp_t a, fp_t b) {
return __lesf2(a, b);
}
enum GE_RESULT __gtsf2(fp_t a, fp_t b) {
return __gesf2(a, b);
}

View File

@ -0,0 +1,29 @@
/* ===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ctzdi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: the number of trailing 0-bits */
/* Precondition: a != 0 */
si_int
__ctzdi2(di_int a)
{
dwords x;
x.all = a;
const si_int f = -(x.s.low == 0);
return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
(f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
}

View File

@ -0,0 +1,57 @@
/* ===-- ctzsi2.c - Implement __ctzsi2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ctzsi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: the number of trailing 0-bits */
/* Precondition: a != 0 */
si_int
__ctzsi2(si_int a)
{
su_int x = (su_int)a;
si_int t = ((x & 0x0000FFFF) == 0) << 4; /* if (x has no small bits) t = 16 else 0 */
x >>= t; /* x = [0 - 0xFFFF] + higher garbage bits */
su_int r = t; /* r = [0, 16] */
/* return r + ctz(x) */
t = ((x & 0x00FF) == 0) << 3;
x >>= t; /* x = [0 - 0xFF] + higher garbage bits */
r += t; /* r = [0, 8, 16, 24] */
/* return r + ctz(x) */
t = ((x & 0x0F) == 0) << 2;
x >>= t; /* x = [0 - 0xF] + higher garbage bits */
r += t; /* r = [0, 4, 8, 12, 16, 20, 24, 28] */
/* return r + ctz(x) */
t = ((x & 0x3) == 0) << 1;
x >>= t;
x &= 3; /* x = [0 - 3] */
r += t; /* r = [0 - 30] and is even */
/* return r + ctz(x) */
/* The branch-less return statement below is equivalent
* to the following switch statement:
* switch (x)
* {
* case 0:
* return r + 2;
* case 2:
* return r + 1;
* case 1:
* case 3:
* return r;
* }
*/
return r + ((2 - (x >> 1)) & -((x & 1) == 0));
}

View File

@ -0,0 +1,33 @@
/* ===-- ctzti2.c - Implement __ctzti2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ctzti2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: the number of trailing 0-bits */
/* Precondition: a != 0 */
si_int
__ctzti2(ti_int a)
{
twords x;
x.all = a;
const di_int f = -(x.s.low == 0);
return __builtin_ctzll((x.s.high & f) | (x.s.low & ~f)) +
((si_int)f & ((si_int)(sizeof(di_int) * CHAR_BIT)));
}
#endif

View File

@ -0,0 +1,59 @@
/* ===-- divdc3.c - Implement __divdc3 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __divdc3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <math.h>
#include <complex.h>
/* Returns: the quotient of (a + ib) / (c + id) */
double _Complex
__divdc3(double __a, double __b, double __c, double __d)
{
int __ilogbw = 0;
double __logbw = logb(fmax(fabs(__c), fabs(__d)));
if (isfinite(__logbw))
{
__ilogbw = (int)__logbw;
__c = scalbn(__c, -__ilogbw);
__d = scalbn(__d, -__ilogbw);
}
double __denom = __c * __c + __d * __d;
double _Complex z;
__real__ z = scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
__imag__ z = scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
if (isnan(__real__ z) && isnan(__imag__ z))
{
if ((__denom == 0.0) && (!isnan(__a) || !isnan(__b)))
{
__real__ z = copysign(INFINITY, __c) * __a;
__imag__ z = copysign(INFINITY, __c) * __b;
}
else if ((isinf(__a) || isinf(__b)) && isfinite(__c) && isfinite(__d))
{
__a = copysign(isinf(__a) ? 1.0 : 0.0, __a);
__b = copysign(isinf(__b) ? 1.0 : 0.0, __b);
__real__ z = INFINITY * (__a * __c + __b * __d);
__imag__ z = INFINITY * (__b * __c - __a * __d);
}
else if (isinf(__logbw) && __logbw > 0.0 && isfinite(__a) && isfinite(__b))
{
__c = copysign(isinf(__c) ? 1.0 : 0.0, __c);
__d = copysign(isinf(__d) ? 1.0 : 0.0, __d);
__real__ z = 0.0 * (__a * __c + __b * __d);
__imag__ z = 0.0 * (__b * __c - __a * __d);
}
}
return z;
}
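/*
 * Reference formula (not part of the imported sources): apart from the scaling
 * by 2^-ilogbw and the special-case fixups above, the routine computes
 *
 *   (a + ib) / (c + id) = ((a*c + b*d) + i*(b*c - a*d)) / (c*c + d*d)
 */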

View File

@ -0,0 +1,182 @@
//===-- lib/divdf3.c - Double-precision division ------------------*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements double-precision soft-float division
// with the IEEE-754 default rounding (to nearest, ties to even).
//
// For simplicity, this implementation currently flushes denormals to zero.
// It should be a fairly straightforward exercise to implement gradual
// underflow with correct rounding.
//
//===----------------------------------------------------------------------===//
#define DOUBLE_PRECISION
#include "fp_lib.h"
fp_t __divdf3(fp_t a, fp_t b) {
const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
rep_t aSignificand = toRep(a) & significandMask;
rep_t bSignificand = toRep(b) & significandMask;
int scale = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
// NaN / anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
// anything / NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// infinity / infinity = NaN
if (bAbs == infRep) return fromRep(qnanRep);
// infinity / anything else = +/- infinity
else return fromRep(aAbs | quotientSign);
}
// anything else / infinity = +/- 0
if (bAbs == infRep) return fromRep(quotientSign);
if (!aAbs) {
// zero / zero = NaN
if (!bAbs) return fromRep(qnanRep);
// zero / anything else = +/- zero
else return fromRep(quotientSign);
}
// anything else / zero = +/- infinity
if (!bAbs) return fromRep(infRep | quotientSign);
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale += normalize(&aSignificand);
if (bAbs < implicitBit) scale -= normalize(&bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
// denormal path it was already set by normalize( ), but setting it twice
// won't hurt anything.)
aSignificand |= implicitBit;
bSignificand |= implicitBit;
int quotientExponent = aExponent - bExponent + scale;
// Align the significand of b as a Q31 fixed-point number in the range
// [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
// polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
// is accurate to about 3.5 binary digits.
const uint32_t q31b = bSignificand >> 21;
uint32_t recip32 = UINT32_C(0x7504f333) - q31b;
// Now refine the reciprocal estimate using a Newton-Raphson iteration:
//
// x1 = x0 * (2 - x0 * b)
//
// This doubles the number of correct binary digits in the approximation
// with each iteration, so after three iterations, we have about 28 binary
// digits of accuracy.
uint32_t correction32;
correction32 = -((uint64_t)recip32 * q31b >> 32);
recip32 = (uint64_t)recip32 * correction32 >> 31;
correction32 = -((uint64_t)recip32 * q31b >> 32);
recip32 = (uint64_t)recip32 * correction32 >> 31;
correction32 = -((uint64_t)recip32 * q31b >> 32);
recip32 = (uint64_t)recip32 * correction32 >> 31;
// recip32 might have overflowed to exactly zero in the preceding
// computation if the high word of b is exactly 1.0. This would sabotage
// the full-width final stage of the computation that follows, so we adjust
// recip32 downward by one bit.
recip32--;
// We need to perform one more iteration to get us to 56 binary digits;
// The last iteration needs to happen with extra precision.
const uint32_t q63blo = bSignificand << 11;
uint64_t correction, reciprocal;
correction = -((uint64_t)recip32*q31b + ((uint64_t)recip32*q63blo >> 32));
uint32_t cHi = correction >> 32;
uint32_t cLo = correction;
reciprocal = (uint64_t)recip32*cHi + ((uint64_t)recip32*cLo >> 32);
// We already adjusted the 32-bit estimate, now we need to adjust the final
// 64-bit reciprocal estimate downward to ensure that it is strictly smaller
// than the infinitely precise exact reciprocal. Because the computation
// of the Newton-Raphson step is truncating at every step, this adjustment
// is small; most of the work is already done.
reciprocal -= 2;
// The numerical reciprocal is accurate to within 2^-56, lies in the
// interval [0.5, 1.0), and is strictly smaller than the true reciprocal
// of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
// in Q53 with the following properties:
//
// 1. q < a/b
// 2. q is in the interval [0.5, 2.0)
// 3. the error in q is bounded away from 2^-53 (actually, we have a
// couple of bits to spare, but this is all we need).
// We need a 64 x 64 multiply high to compute q, which isn't a basic
// operation in C, so we need to be a little bit fussy.
rep_t quotient, quotientLo;
wideMultiply(aSignificand << 2, reciprocal, &quotient, &quotientLo);
// Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
// In either case, we are going to compute a residual of the form
//
// r = a - q*b
//
// We know from the construction of q that r satisfies:
//
// 0 <= r < ulp(q)*b
//
// if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
// already have the correct result. The exact halfway case cannot occur.
// We also take this time to right shift quotient if it falls in the [1,2)
// range and adjust the exponent accordingly.
rep_t residual;
if (quotient < (implicitBit << 1)) {
residual = (aSignificand << 53) - quotient * bSignificand;
quotientExponent--;
} else {
quotient >>= 1;
residual = (aSignificand << 52) - quotient * bSignificand;
}
const int writtenExponent = quotientExponent + exponentBias;
if (writtenExponent >= maxExponent) {
// If we have overflowed the exponent, return infinity.
return fromRep(infRep | quotientSign);
}
else if (writtenExponent < 1) {
// Flush denormals to zero. In the future, it would be nice to add
// code to round them correctly.
return fromRep(quotientSign);
}
else {
const bool round = (residual << 1) > bSignificand;
// Clear the implicit bit
rep_t absResult = quotient & significandMask;
// Insert the exponent
absResult |= (rep_t)writtenExponent << significandBits;
// Round
absResult += round;
// Insert the sign and return
const double result = fromRep(absResult | quotientSign);
return result;
}
}
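/*
 * Illustrative sketch (not part of the imported sources) of the reciprocal
 * refinement used above, written in plain double arithmetic so the convergence
 * is easy to see; refine_reciprocal() is a hypothetical name.
 */
static double refine_reciprocal(double b)   /* assumes b in [1.0, 2.0) */
{
    double x = 0.75 + 0.7071067811865476 - b / 2;  /* minimax seed, ~3.5 bits */
    int i;
    for (i = 0; i < 3; ++i)
        x = x * (2 - x * b);                       /* each Newton step doubles the accuracy */
    return x;                                      /* ~28 correct bits, as in the Q32 code */
}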

View File

@ -0,0 +1,31 @@
/* ===-- divdi3.c - Implement __divdi3 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __divdi3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
du_int __udivmoddi4(du_int a, du_int b, du_int* rem);
/* Returns: a / b */
di_int
__divdi3(di_int a, di_int b)
{
const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
di_int s_a = a >> bits_in_dword_m1; /* s_a = a < 0 ? -1 : 0 */
di_int s_b = b >> bits_in_dword_m1; /* s_b = b < 0 ? -1 : 0 */
a = (a ^ s_a) - s_a; /* negate if s_a == -1 */
b = (b ^ s_b) - s_b; /* negate if s_b == -1 */
    s_a ^= s_b;  /* sign of quotient */
return (__udivmoddi4(a, b, (du_int*)0) ^ s_a) - s_a; /* negate if s_a == -1 */
}
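/*
 * Illustrative note (not part of the imported sources): the branch-free
 * negation above uses the two's-complement identity (a ^ s) - s, which with
 * s == -1 negates a and with s == 0 leaves it unchanged. For example, with
 * a = -7 and s = -1: (a ^ -1) == ~a == 6, and 6 - (-1) == 7.
 */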

View File

@ -0,0 +1,59 @@
/*===-- divsc3.c - Implement __divsc3 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __divsc3 for the compiler_rt library.
*
*===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <math.h>
#include <complex.h>
/* Returns: the quotient of (a + ib) / (c + id) */
float _Complex
__divsc3(float __a, float __b, float __c, float __d)
{
int __ilogbw = 0;
float __logbw = logbf(fmaxf(fabsf(__c), fabsf(__d)));
if (isfinite(__logbw))
{
__ilogbw = (int)__logbw;
__c = scalbnf(__c, -__ilogbw);
__d = scalbnf(__d, -__ilogbw);
}
float __denom = __c * __c + __d * __d;
float _Complex z;
__real__ z = scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw);
__imag__ z = scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
if (isnan(__real__ z) && isnan(__imag__ z))
{
if ((__denom == 0) && (!isnan(__a) || !isnan(__b)))
{
__real__ z = copysignf(INFINITY, __c) * __a;
__imag__ z = copysignf(INFINITY, __c) * __b;
}
else if ((isinf(__a) || isinf(__b)) && isfinite(__c) && isfinite(__d))
{
__a = copysignf(isinf(__a) ? 1 : 0, __a);
__b = copysignf(isinf(__b) ? 1 : 0, __b);
__real__ z = INFINITY * (__a * __c + __b * __d);
__imag__ z = INFINITY * (__b * __c - __a * __d);
}
else if (isinf(__logbw) && __logbw > 0 && isfinite(__a) && isfinite(__b))
{
__c = copysignf(isinf(__c) ? 1 : 0, __c);
__d = copysignf(isinf(__d) ? 1 : 0, __d);
__real__ z = 0 * (__a * __c + __b * __d);
__imag__ z = 0 * (__b * __c - __a * __d);
}
}
return z;
}

View File

@ -0,0 +1,166 @@
//===-- lib/divsf3.c - Single-precision division ------------------*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements single-precision soft-float division
// with the IEEE-754 default rounding (to nearest, ties to even).
//
// For simplicity, this implementation currently flushes denormals to zero.
// It should be a fairly straightforward exercise to implement gradual
// underflow with correct rounding.
//
//===----------------------------------------------------------------------===//
#define SINGLE_PRECISION
#include "fp_lib.h"
fp_t __divsf3(fp_t a, fp_t b) {
const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
rep_t aSignificand = toRep(a) & significandMask;
rep_t bSignificand = toRep(b) & significandMask;
int scale = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) {
const rep_t aAbs = toRep(a) & absMask;
const rep_t bAbs = toRep(b) & absMask;
// NaN / anything = qNaN
if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
// anything / NaN = qNaN
if (bAbs > infRep) return fromRep(toRep(b) | quietBit);
if (aAbs == infRep) {
// infinity / infinity = NaN
if (bAbs == infRep) return fromRep(qnanRep);
// infinity / anything else = +/- infinity
else return fromRep(aAbs | quotientSign);
}
// anything else / infinity = +/- 0
if (bAbs == infRep) return fromRep(quotientSign);
if (!aAbs) {
// zero / zero = NaN
if (!bAbs) return fromRep(qnanRep);
// zero / anything else = +/- zero
else return fromRep(quotientSign);
}
// anything else / zero = +/- infinity
if (!bAbs) return fromRep(infRep | quotientSign);
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale += normalize(&aSignificand);
if (bAbs < implicitBit) scale -= normalize(&bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
// denormal path it was already set by normalize( ), but setting it twice
// won't hurt anything.)
aSignificand |= implicitBit;
bSignificand |= implicitBit;
int quotientExponent = aExponent - bExponent + scale;
// Align the significand of b as a Q31 fixed-point number in the range
// [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
// polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
// is accurate to about 3.5 binary digits.
uint32_t q31b = bSignificand << 8;
uint32_t reciprocal = UINT32_C(0x7504f333) - q31b;
// Now refine the reciprocal estimate using a Newton-Raphson iteration:
//
// x1 = x0 * (2 - x0 * b)
//
// This doubles the number of correct binary digits in the approximation
// with each iteration, so after three iterations, we have about 28 binary
// digits of accuracy.
uint32_t correction;
correction = -((uint64_t)reciprocal * q31b >> 32);
reciprocal = (uint64_t)reciprocal * correction >> 31;
correction = -((uint64_t)reciprocal * q31b >> 32);
reciprocal = (uint64_t)reciprocal * correction >> 31;
correction = -((uint64_t)reciprocal * q31b >> 32);
reciprocal = (uint64_t)reciprocal * correction >> 31;
// Exhaustive testing shows that the error in reciprocal after three steps
// is in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our
// expectations. We bump the reciprocal by a tiny value to force the error
// to be strictly positive (in the range [0x1.4fdfp-37,0x1.287246p-29], to
// be specific). This also causes 1/1 to give a sensible approximation
// instead of zero (due to overflow).
reciprocal -= 2;
// The numerical reciprocal is accurate to within 2^-28, lies in the
// interval [0x1.000000eep-1, 0x1.fffffffcp-1], and is strictly smaller
// than the true reciprocal of b. Multiplying a by this reciprocal thus
// gives a numerical q = a/b in Q24 with the following properties:
//
// 1. q < a/b
// 2. q is in the interval [0x1.000000eep-1, 0x1.fffffffcp0)
//    3. the error in q is at most 2^-24 + 2^-27 -- the 2^-24 term comes
//       from the fact that we truncate the product, and the 2^-27 term
//       is the error in the reciprocal of b scaled by the maximum
//       possible value of a. As a consequence of this error bound,
//       either q or nextafter(q) is the correctly rounded result.
rep_t quotient = (uint64_t)reciprocal*(aSignificand << 1) >> 32;
// Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
// In either case, we are going to compute a residual of the form
//
// r = a - q*b
//
// We know from the construction of q that r satisfies:
//
// 0 <= r < ulp(q)*b
//
// if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
// already have the correct result. The exact halfway case cannot occur.
// We also take this time to right shift quotient if it falls in the [1,2)
// range and adjust the exponent accordingly.
rep_t residual;
if (quotient < (implicitBit << 1)) {
residual = (aSignificand << 24) - quotient * bSignificand;
quotientExponent--;
} else {
quotient >>= 1;
residual = (aSignificand << 23) - quotient * bSignificand;
}
const int writtenExponent = quotientExponent + exponentBias;
if (writtenExponent >= maxExponent) {
// If we have overflowed the exponent, return infinity.
return fromRep(infRep | quotientSign);
}
else if (writtenExponent < 1) {
// Flush denormals to zero. In the future, it would be nice to add
// code to round them correctly.
return fromRep(quotientSign);
}
else {
const bool round = (residual << 1) > bSignificand;
// Clear the implicit bit
rep_t absResult = quotient & significandMask;
// Insert the exponent
absResult |= (rep_t)writtenExponent << significandBits;
// Round
absResult += round;
// Insert the sign and return
return fromRep(absResult | quotientSign);
}
}

View File

@ -0,0 +1,31 @@
/* ===-- divsi3.c - Implement __divsi3 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __divsi3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
su_int __udivsi3(su_int n, su_int d);
/* Returns: a / b */
si_int
__divsi3(si_int a, si_int b)
{
const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
si_int s_a = a >> bits_in_word_m1; /* s_a = a < 0 ? -1 : 0 */
si_int s_b = b >> bits_in_word_m1; /* s_b = b < 0 ? -1 : 0 */
a = (a ^ s_a) - s_a; /* negate if s_a == -1 */
b = (b ^ s_b) - s_b; /* negate if s_b == -1 */
s_a ^= s_b; /* sign of quotient */
return (__udivsi3(a, b) ^ s_a) - s_a; /* negate if s_a == -1 */
}

View File

@ -0,0 +1,35 @@
/* ===-- divti3.c - Implement __divti3 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __divti3 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
/* Returns: a / b */
ti_int
__divti3(ti_int a, ti_int b)
{
const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
ti_int s_a = a >> bits_in_tword_m1; /* s_a = a < 0 ? -1 : 0 */
ti_int s_b = b >> bits_in_tword_m1; /* s_b = b < 0 ? -1 : 0 */
a = (a ^ s_a) - s_a; /* negate if s_a == -1 */
b = (b ^ s_b) - s_b; /* negate if s_b == -1 */
s_a ^= s_b; /* sign of quotient */
return (__udivmodti4(a, b, (tu_int*)0) ^ s_a) - s_a; /* negate if s_a == -1 */
}
#endif

View File

@ -0,0 +1,62 @@
/* ===-- divxc3.c - Implement __divxc3 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __divxc3 for the compiler_rt library.
*
*/
#if !_ARCH_PPC
#include "int_lib.h"
#include <math.h>
#include <complex.h>
/* Returns: the quotient of (a + ib) / (c + id) */
long double _Complex
__divxc3(long double __a, long double __b, long double __c, long double __d)
{
int __ilogbw = 0;
long double __logbw = logbl(fmaxl(fabsl(__c), fabsl(__d)));
if (isfinite(__logbw))
{
__ilogbw = (int)__logbw;
__c = scalbnl(__c, -__ilogbw);
__d = scalbnl(__d, -__ilogbw);
}
long double __denom = __c * __c + __d * __d;
long double _Complex z;
__real__ z = scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
__imag__ z = scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
if (isnan(__real__ z) && isnan(__imag__ z))
{
if ((__denom == 0) && (!isnan(__a) || !isnan(__b)))
{
__real__ z = copysignl(INFINITY, __c) * __a;
__imag__ z = copysignl(INFINITY, __c) * __b;
}
else if ((isinf(__a) || isinf(__b)) && isfinite(__c) && isfinite(__d))
{
__a = copysignl(isinf(__a) ? 1 : 0, __a);
__b = copysignl(isinf(__b) ? 1 : 0, __b);
__real__ z = INFINITY * (__a * __c + __b * __d);
__imag__ z = INFINITY * (__b * __c - __a * __d);
}
else if (isinf(__logbw) && __logbw > 0 && isfinite(__a) && isfinite(__b))
{
__c = copysignl(isinf(__c) ? 1 : 0, __c);
__d = copysignl(isinf(__d) ? 1 : 0, __d);
__real__ z = 0 * (__a * __c + __b * __d);
__imag__ z = 0 * (__b * __c - __a * __d);
}
}
return z;
}
#endif

View File

@ -0,0 +1,58 @@
/* ===-- enable_execute_stack.c - Implement __enable_execute_stack ---------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#include <stdint.h>
#include <sys/mman.h>
/* #include "config.h"
* FIXME: CMake - include when cmake system is ready.
* Remove #define HAVE_SYSCONF 1 line.
*/
#define HAVE_SYSCONF 1
#ifndef __APPLE__
#include <unistd.h>
#endif /* __APPLE__ */
#if __LP64__
#define TRAMPOLINE_SIZE 48
#else
#define TRAMPOLINE_SIZE 40
#endif
/*
* The compiler generates calls to __enable_execute_stack() when creating
* trampoline functions on the stack for use with nested functions.
* It is expected to mark the page(s) containing the address
* and the next 48 bytes as executable. Since the stack is normally rw-
* that means changing the protection on those page(s) to rwx.
*/
void __enable_execute_stack(void* addr)
{
#if __APPLE__
/* On Darwin, pagesize is always 4096 bytes */
const uintptr_t pageSize = 4096;
#elif !defined(HAVE_SYSCONF)
#error "HAVE_SYSCONF not defined! See enable_execute_stack.c"
#else
const uintptr_t pageSize = sysconf(_SC_PAGESIZE);
#endif /* __APPLE__ */
const uintptr_t pageAlignMask = ~(pageSize-1);
uintptr_t p = (uintptr_t)addr;
unsigned char* startPage = (unsigned char*)(p & pageAlignMask);
unsigned char* endPage = (unsigned char*)((p+TRAMPOLINE_SIZE+pageSize) & pageAlignMask);
size_t length = endPage - startPage;
(void) mprotect((void *)startPage, length, PROT_READ | PROT_WRITE | PROT_EXEC);
}

View File

@ -0,0 +1,94 @@
/* ===-- endianness.h - configuration header for compiler-rt ---------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file is a configuration header for compiler-rt.
* This file is not part of the interface of this library.
*
* ===----------------------------------------------------------------------===
*/
#ifndef ENDIANNESS_H
#define ENDIANNESS_H
/*
* Known limitations:
* Middle endian systems are not handled currently.
*/
#if defined(__SVR4) && defined(__sun)
#include <sys/byteorder.h>
#if _BYTE_ORDER == _BIG_ENDIAN
#define _YUGA_LITTLE_ENDIAN 0
#define _YUGA_BIG_ENDIAN 1
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define _YUGA_LITTLE_ENDIAN 1
#define _YUGA_BIG_ENDIAN 0
#endif /* _BYTE_ORDER */
#endif /* Solaris and AuroraUX. */
/* .. */
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/endian.h>
#if _BYTE_ORDER == _BIG_ENDIAN
#define _YUGA_LITTLE_ENDIAN 0
#define _YUGA_BIG_ENDIAN 1
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define _YUGA_LITTLE_ENDIAN 1
#define _YUGA_BIG_ENDIAN 0
#endif /* _BYTE_ORDER */
#endif /* *BSD */
/* .. */
/* Mac OSX has __BIG_ENDIAN__ or __LITTLE_ENDIAN__ automatically set by the compiler (at least with GCC) */
#if defined(__APPLE__) && defined(__MACH__) || defined(__ellcc__ )
#ifdef __BIG_ENDIAN__
#if __BIG_ENDIAN__
#define _YUGA_LITTLE_ENDIAN 0
#define _YUGA_BIG_ENDIAN 1
#endif
#endif /* __BIG_ENDIAN__ */
#ifdef __LITTLE_ENDIAN__
#if __LITTLE_ENDIAN__
#define _YUGA_LITTLE_ENDIAN 1
#define _YUGA_BIG_ENDIAN 0
#endif
#endif /* __LITTLE_ENDIAN__ */
#endif /* Mac OSX */
/* .. */
#if defined(__linux__)
#include <endian.h>
#if __BYTE_ORDER == __BIG_ENDIAN
#define _YUGA_LITTLE_ENDIAN 0
#define _YUGA_BIG_ENDIAN 1
#elif __BYTE_ORDER == __LITTLE_ENDIAN
#define _YUGA_LITTLE_ENDIAN 1
#define _YUGA_BIG_ENDIAN 0
#endif /* __BYTE_ORDER */
#endif /* GNU/Linux */
/* . */
#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
#error Unable to determine endian
#endif /* Check we found an endianness correctly. */
#endif /* ENDIANNESS_H */
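/*
 * Illustrative usage sketch (not part of the imported sources): consumers of
 * this header pick a word layout from the two macros, along the lines of the
 * word-pair unions used elsewhere in the library; example_dwords is a
 * hypothetical name.
 */
#include "endianness.h"

typedef union
{
    long long all;
    struct
    {
#if _YUGA_LITTLE_ENDIAN
        unsigned int low;
        int high;
#else
        int high;
        unsigned int low;
#endif
    } s;
} example_dwords;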

View File

@ -0,0 +1,33 @@
/* ===---------- eprintf.c - Implements __eprintf --------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
#include <stdio.h>
#include <stdlib.h>
/*
* __eprintf() was used in an old version of <assert.h>.
* It can eventually go away, but it is needed when linking
* .o files built with the old <assert.h>.
*
* It should never be exported from a dylib, so it is marked
* visibility hidden.
*/
__attribute__((visibility("hidden")))
void __eprintf(const char* format, const char* assertion_expression,
const char* line, const char* file)
{
fprintf(stderr, format, assertion_expression, line, file);
fflush(stderr);
compilerrt_abort();
}

View File

@ -0,0 +1,136 @@
//===-- lib/extendsfdf2.c - single -> double conversion -----------*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a fairly generic conversion from a narrower to a wider
// IEEE-754 floating-point type. The constants and types defined following the
// includes below parameterize the conversion.
//
// This routine can be trivially adapted to support conversions from
// half-precision or to quad-precision. It does not support types that don't
// use the usual IEEE-754 interchange formats; specifically, some work would be
// needed to adapt it to (for example) the Intel 80-bit format or PowerPC
// double-double format.
//
// Please note, however, that this implementation is only intended to support
// *widening* operations; if you need to convert to a *narrower* floating-point
// type (e.g. double -> float), then this routine will not do what you want it
// to.
//
// It also requires that integer types at least as large as both formats
// are available on the target platform; this may pose a problem when trying
// to add support for quad on some 32-bit systems, for example. You also may
// run into trouble finding an appropriate CLZ function for wide source types;
// you will likely need to roll your own on some platforms.
//
// Finally, the following assumptions are made:
//
// 1. floating-point types and integer types have the same endianness on the
// target platform
//
// 2. quiet NaNs, if supported, are indicated by the leading bit of the
// significand field being set
//
//===----------------------------------------------------------------------===//
#include <stdint.h>
#include <limits.h>
typedef float src_t;
typedef uint32_t src_rep_t;
#define SRC_REP_C UINT32_C
static const int srcSigBits = 23;
#define src_rep_t_clz __builtin_clz
typedef double dst_t;
typedef uint64_t dst_rep_t;
#define DST_REP_C UINT64_C
static const int dstSigBits = 52;
// End of specialization parameters. Two helper routines for conversion to and
// from the representation of floating-point data as integer values follow.
static inline src_rep_t srcToRep(src_t x) {
const union { src_t f; src_rep_t i; } rep = {.f = x};
return rep.i;
}
static inline dst_t dstFromRep(dst_rep_t x) {
const union { dst_t f; dst_rep_t i; } rep = {.i = x};
return rep.f;
}
// End helper routines. Conversion implementation follows.
dst_t __extendsfdf2(src_t a) {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const int srcBits = sizeof(src_t)*CHAR_BIT;
const int srcExpBits = srcBits - srcSigBits - 1;
const int srcInfExp = (1 << srcExpBits) - 1;
const int srcExpBias = srcInfExp >> 1;
const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
const src_rep_t srcAbsMask = srcSignMask - 1;
const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
const src_rep_t srcNaNCode = srcQNaN - 1;
const int dstBits = sizeof(dst_t)*CHAR_BIT;
const int dstExpBits = dstBits - dstSigBits - 1;
const int dstInfExp = (1 << dstExpBits) - 1;
const int dstExpBias = dstInfExp >> 1;
const dst_rep_t dstMinNormal = DST_REP_C(1) << dstSigBits;
// Break a into a sign and representation of the absolute value
const src_rep_t aRep = srcToRep(a);
const src_rep_t aAbs = aRep & srcAbsMask;
const src_rep_t sign = aRep & srcSignMask;
dst_rep_t absResult;
if (aAbs - srcMinNormal < srcInfinity - srcMinNormal) {
// a is a normal number.
// Extend to the destination type by shifting the significand and
// exponent into the proper position and rebiasing the exponent.
absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits);
absResult += (dst_rep_t)(dstExpBias - srcExpBias) << dstSigBits;
}
else if (aAbs >= srcInfinity) {
// a is NaN or infinity.
// Conjure the result by beginning with infinity, then setting the qNaN
// bit (if needed) and right-aligning the rest of the trailing NaN
// payload field.
absResult = (dst_rep_t)dstInfExp << dstSigBits;
absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
absResult |= aAbs & srcNaNCode;
}
else if (aAbs) {
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const int scale = src_rep_t_clz(aAbs) - src_rep_t_clz(srcMinNormal);
absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits + scale);
absResult ^= dstMinNormal;
const int resultExponent = dstExpBias - srcExpBias - scale + 1;
absResult |= (dst_rep_t)resultExponent << dstSigBits;
}
else {
// a is zero.
absResult = 0;
}
// Apply the signbit to (dst_t)abs(a).
const dst_rep_t result = absResult | (dst_rep_t)sign << (dstBits - srcBits);
return dstFromRep(result);
}
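/*
 * Worked example (not part of the imported sources): widening 1.5f through the
 * normal-number path above.
 *
 *   float rep   0x3FC00000  (biased exponent 127, stored significand 0x400000)
 *   shift       0x3FC00000 << 29               = 0x07F8000000000000
 *   rebias      + (1023 - 127) << 52           = 0x3800000000000000
 *   result                                       0x3FF8000000000000, i.e. 1.5
 */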

View File

@ -0,0 +1,33 @@
/* ===-- ffsdi2.c - Implement __ffsdi2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ffsdi2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: the index of the least significant 1-bit in a, or
* the value zero if a is zero. The least significant bit is index one.
*/
si_int
__ffsdi2(di_int a)
{
dwords x;
x.all = a;
if (x.s.low == 0)
{
if (x.s.high == 0)
return 0;
return __builtin_ctz(x.s.high) + (1 + sizeof(si_int) * CHAR_BIT);
}
return __builtin_ctz(x.s.low) + 1;
}
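/*
 * Illustrative values (not part of the imported sources):
 *   __ffsdi2(0)              == 0   (no bits set)
 *   __ffsdi2(0x10)           == 5   (low word path: ctz(0x10) + 1)
 *   __ffsdi2(0x100000000LL)  == 33  (high word path: ctz(1) + 1 + 32)
 */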

View File

@ -0,0 +1,37 @@
/* ===-- ffsti2.c - Implement __ffsti2 -------------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __ffsti2 for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: the index of the least significant 1-bit in a, or
* the value zero if a is zero. The least significant bit is index one.
*/
si_int
__ffsti2(ti_int a)
{
twords x;
x.all = a;
if (x.s.low == 0)
{
if (x.s.high == 0)
return 0;
return __builtin_ctzll(x.s.high) + (1 + sizeof(di_int) * CHAR_BIT);
}
return __builtin_ctzll(x.s.low) + 1;
}
#endif /* __x86_64 */

View File

@ -0,0 +1,43 @@
/* ===-- fixdfdi.c - Implement __fixdfdi -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixdfdi for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: double is an IEEE 64 bit floating point type
* su_int is a 32 bit integral type
* value in double is representable in di_int (no range checking performed)
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
di_int
__fixdfdi(double a)
{
double_bits fb;
fb.f = a;
int e = ((fb.u.s.high & 0x7FF00000) >> 20) - 1023;
if (e < 0)
return 0;
di_int s = (si_int)(fb.u.s.high & 0x80000000) >> 31;
dwords r;
r.s.high = (fb.u.s.high & 0x000FFFFF) | 0x00100000;
r.s.low = fb.u.s.low;
if (e > 52)
r.all <<= (e - 52);
else
r.all >>= (52 - e);
return (r.all ^ s) - s;
}

View File

@ -0,0 +1,45 @@
//===-- lib/fixdfsi.c - Double-precision -> integer conversion ----*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements double-precision to integer conversion for the
// compiler-rt library. No range checking is performed; the behavior of this
// conversion is undefined for out of range values in the C standard.
//
//===----------------------------------------------------------------------===//
#define DOUBLE_PRECISION
#include "fp_lib.h"
int __fixdfsi(fp_t a) {
// Break a into sign, exponent, significand
const rep_t aRep = toRep(a);
const rep_t aAbs = aRep & absMask;
const int sign = aRep & signBit ? -1 : 1;
const int exponent = (aAbs >> significandBits) - exponentBias;
const rep_t significand = (aAbs & significandMask) | implicitBit;
// If 0 < exponent < significandBits, right shift to get the result.
if ((unsigned int)exponent < significandBits) {
return sign * (significand >> (significandBits - exponent));
}
// If exponent is negative, the result is zero.
else if (exponent < 0) {
return 0;
}
// If significandBits < exponent, left shift to get the result. This shift
// may end up being larger than the type width, which incurs undefined
// behavior, but the conversion itself is undefined in that case, so
// whatever the compiler decides to do is fine.
else {
return sign * (significand << (exponent - significandBits));
}
}

View File

@ -0,0 +1,45 @@
/* ===-- fixdfti.c - Implement __fixdfti -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixdfti for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: convert a to a signed 128-bit integer (ti_int), rounding toward zero. */
/* Assumption: double is an IEEE 64 bit floating point type
* su_int is a 32 bit integral type
* value in double is representable in ti_int (no range checking performed)
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
ti_int
__fixdfti(double a)
{
double_bits fb;
fb.f = a;
int e = ((fb.u.s.high & 0x7FF00000) >> 20) - 1023;
if (e < 0)
return 0;
ti_int s = (si_int)(fb.u.s.high & 0x80000000) >> 31;
ti_int r = 0x0010000000000000uLL | (0x000FFFFFFFFFFFFFuLL & fb.u.all);
if (e > 52)
r <<= (e - 52);
else
r >>= (52 - e);
return (r ^ s) - s;
}
#endif
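
Because ti_int is 128 bits wide, the result can exceed the range of long long. The sketch below (x86-64 only, illustrative; it assumes ti_int corresponds to the compiler's __int128 extension and that the runtime provides __fixdfti) exercises a value above 2^63.

/* Sketch: __fixdfti on a value that needs more than 64 bits. */
#if defined(__x86_64__)
#include <assert.h>

extern __int128 __fixdfti(double a);    /* assumes ti_int == __int128 */

int main(void)
{
    assert(__fixdfti(0x1p80) == (__int128)1 << 80);   /* 2^80 is exact in a double   */
    assert(__fixdfti(-3.5) == -3);                    /* still truncates toward zero */
    return 0;
}
#endif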

View File

@ -0,0 +1,41 @@
/* ===-- fixsfdi.c - Implement __fixsfdi -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixsfdi for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: convert a to a signed long long, rounding toward zero. */
/* Assumption: float is an IEEE 32 bit floating point type
* su_int is a 32 bit integral type
* value in float is representable in di_int (no range checking performed)
*/
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
di_int
__fixsfdi(float a)
{
float_bits fb;
fb.f = a;
int e = ((fb.u & 0x7F800000) >> 23) - 127;
if (e < 0)
return 0;
di_int s = (si_int)(fb.u & 0x80000000) >> 31;
di_int r = (fb.u & 0x007FFFFF) | 0x00800000;
if (e > 23)
r <<= (e - 23);
else
r >>= (23 - e);
return (r ^ s) - s;
}
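
Tracing one value through the code above makes the bit layout comment concrete. For 1.5f the encoding is 0x3FC00000 (sign 0, biased exponent 0x7F, fraction 0x400000), so e is 0, r becomes 0x00C00000, and the right shift by 23 truncates to 1. The standalone sketch below (illustrative only) reproduces that decomposition without depending on the runtime library.

/* Worked example: decomposing 1.5f the way __fixsfdi does. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    float a = 1.5f;
    uint32_t u;
    memcpy(&u, &a, sizeof u);                        /* same bits float_bits sees */
    assert(u == 0x3FC00000u);
    int e = (int)((u & 0x7F800000u) >> 23) - 127;    /* e == 0                    */
    long long r = (u & 0x007FFFFF) | 0x00800000;     /* r == 0x00C00000           */
    assert(e == 0 && (r >> (23 - e)) == 1);          /* truncates to 1            */
    return 0;
}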

View File

@ -0,0 +1,45 @@
//===-- lib/fixsfsi.c - Single-precision -> integer conversion ----*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements single-precision to integer conversion for the
// compiler-rt library. No range checking is performed; the behavior of this
// conversion is undefined for out of range values in the C standard.
//
//===----------------------------------------------------------------------===//
#define SINGLE_PRECISION
#include "fp_lib.h"
int __fixsfsi(fp_t a) {
// Break a into sign, exponent, significand
const rep_t aRep = toRep(a);
const rep_t aAbs = aRep & absMask;
const int sign = aRep & signBit ? -1 : 1;
const int exponent = (aAbs >> significandBits) - exponentBias;
const rep_t significand = (aAbs & significandMask) | implicitBit;
// If 0 < exponent < significandBits, right shift to get the result.
if ((unsigned int)exponent < significandBits) {
return sign * (significand >> (significandBits - exponent));
}
// If exponent is negative, the result is zero.
else if (exponent < 0) {
return 0;
}
// If significandBits < exponent, left shift to get the result. This shift
// may end up being larger than the type width, which incurs undefined
// behavior, but the conversion itself is undefined in that case, so
// whatever the compiler decides to do is fine.
else {
return sign * (significand << (exponent - significandBits));
}
}

View File

@ -0,0 +1,45 @@
/* ===-- fixsfti.c - Implement __fixsfti -----------------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixsfti for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: convert a to a signed 128-bit integer (ti_int), rounding toward zero. */
/* Assumption: float is an IEEE 32 bit floating point type
* su_int is a 32 bit integral type
* value in float is representable in ti_int (no range checking performed)
*/
/* seee eeee emmm mmmm mmmm mmmm mmmm mmmm */
ti_int
__fixsfti(float a)
{
float_bits fb;
fb.f = a;
int e = ((fb.u & 0x7F800000) >> 23) - 127;
if (e < 0)
return 0;
ti_int s = (si_int)(fb.u & 0x80000000) >> 31;
ti_int r = (fb.u & 0x007FFFFF) | 0x00800000;
if (e > 23)
r <<= (e - 23);
else
r >>= (23 - e);
return (r ^ s) - s;
}
#endif

View File

@ -0,0 +1,45 @@
/* ===-- fixunsdfdi.c - Implement __fixunsdfdi -----------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixunsdfdi for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: convert a to an unsigned long long, rounding toward zero.
* Negative values all become zero.
*/
/* Assumption: double is an IEEE 64 bit floating point type
* du_int is a 64 bit integral type
* value in double is representable in du_int or is negative
* (no range checking performed)
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
du_int
__fixunsdfdi(double a)
{
double_bits fb;
fb.f = a;
int e = ((fb.u.s.high & 0x7FF00000) >> 20) - 1023;
if (e < 0 || (fb.u.s.high & 0x80000000))
return 0;
udwords r;
r.s.high = (fb.u.s.high & 0x000FFFFF) | 0x00100000;
r.s.low = fb.u.s.low;
if (e > 52)
r.all <<= (e - 52);
else
r.all >>= (52 - e);
return r.all;
}
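
Unlike the signed conversions, any negative input (sign bit set) is clamped to zero here, as is anything with magnitude below one. A minimal sketch (illustrative only; it assumes du_int corresponds to unsigned long long and that the runtime provides __fixunsdfdi) makes that behaviour explicit.

/* Sketch: negative and sub-one inputs both map to zero in __fixunsdfdi. */
#include <assert.h>

extern unsigned long long __fixunsdfdi(double a);   /* assumes du_int == unsigned long long */

int main(void)
{
    assert(__fixunsdfdi(-7.5) == 0);    /* sign bit set: zero       */
    assert(__fixunsdfdi(0.75) == 0);    /* exponent < 0: zero       */
    assert(__fixunsdfdi(7.5) == 7);     /* positive values truncate */
    return 0;
}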

View File

@ -0,0 +1,42 @@
/* ===-- fixunsdfsi.c - Implement __fixunsdfsi -----------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixunsdfsi for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
/* Returns: convert a to an unsigned int, rounding toward zero.
* Negative values all become zero.
*/
/* Assumption: double is an IEEE 64 bit floating point type
* su_int is a 32 bit integral type
* value in double is representable in su_int or is negative
* (no range checking performed)
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
su_int
__fixunsdfsi(double a)
{
double_bits fb;
fb.f = a;
int e = ((fb.u.s.high & 0x7FF00000) >> 20) - 1023;
if (e < 0 || (fb.u.s.high & 0x80000000))
return 0;
return (
0x80000000u |
((fb.u.s.high & 0x000FFFFF) << 11) |
(fb.u.s.low >> 21)
) >> (31 - e);
}
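
This variant skips the dwords union entirely: the expression packs the implicit bit at bit 31, the 20 explicit high-fraction bits below it, and the top 11 bits of the low word, then shifts right by (31 - e). For a == 1.0 the high word is 0x3FF00000 and the low word 0, so the packed value is 0x80000000 and the shift by 31 yields 1. The sketch below (illustrative only; it assumes su_int is a 32-bit unsigned int and that the runtime provides __fixunsdfsi) checks that case and the e == 31 endpoint.

/* Sketch: endpoints of the single-expression __fixunsdfsi. */
#include <assert.h>

extern unsigned int __fixunsdfsi(double a);   /* assumes su_int == unsigned int */

int main(void)
{
    assert(__fixunsdfsi(1.0) == 1u);                   /* packed 0x80000000 >> 31   */
    assert(__fixunsdfsi(2147483648.0) == 0x80000000u); /* 2^31: e == 31, shift by 0 */
    assert(__fixunsdfsi(-5.0) == 0u);                  /* negatives become zero     */
    return 0;
}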

View File

@ -0,0 +1,47 @@
/* ===-- fixunsdfti.c - Implement __fixunsdfti -----------------------------===
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*
* This file implements __fixunsdfti for the compiler_rt library.
*
* ===----------------------------------------------------------------------===
*/
#if __x86_64
#include "int_lib.h"
/* Returns: convert a to an unsigned 128-bit integer (tu_int), rounding toward zero.
* Negative values all become zero.
*/
/* Assumption: double is an IEEE 64 bit floating point type
 * tu_int is a 128 bit integral type
* value in double is representable in tu_int or is negative
* (no range checking performed)
*/
/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */
tu_int
__fixunsdfti(double a)
{
double_bits fb;
fb.f = a;
int e = ((fb.u.s.high & 0x7FF00000) >> 20) - 1023;
if (e < 0 || (fb.u.s.high & 0x80000000))
return 0;
tu_int r = 0x0010000000000000uLL | (fb.u.all & 0x000FFFFFFFFFFFFFuLL);
if (e > 52)
r <<= (e - 52);
else
r >>= (52 - e);
return r;
}
#endif

Some files were not shown because too many files have changed in this diff.