Teach bits of libc about Thumb. This adds the if-then (IT) instructions
needed to handle ARM conditional execution when assembling for Thumb.

While here, fix a bug found by this in the hard-float code: cc is the
opposite of cs. The former is used for 'less than' in floating-point code
and is executed when the C (carry) bit is clear; the latter is used for
'greater than, equal, or unordered' and is executed when the C bit is set.
Andrew Turner 2015-05-12 10:03:14 +00:00
parent e4bc6b4c01
commit 2b6a6357f2
5 changed files with 48 additions and 4 deletions

lib/libc/arm/SYS.h

@@ -62,6 +62,7 @@
#define _SYSCALL(x) \
_SYSCALL_NOERROR(x); \
it cs; \
bcs PIC_SYM(CERROR, PLT)
#define SYSCALL(x) \
@@ -72,6 +73,7 @@
.weak _C_LABEL(__CONCAT(_,x)); \
.set _C_LABEL(__CONCAT(_,x)),_C_LABEL(__CONCAT(__sys_,x)); \
SYSTRAP(x); \
it cs; \
bcs PIC_SYM(CERROR, PLT); \
RET
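
For readers new to Thumb-2, a minimal sketch (not from the tree; the labels and the flag-setting cmp are illustrative) of the if-then construct the macro now emits. ARM mode encodes a condition in every instruction, so a bare "bcs" suffices there; in Thumb-2 unified syntax the condition is announced by an IT (If-Then) instruction, and the covered instruction's condition must match it:

	.syntax	unified
	.thumb
	cmp	r0, #1		@ set the flags somehow
	it	cs		@ the next instruction executes only if C is set
	bcs	1f		@ condition matches the one named by the IT
	movs	r0, #0		@ fall-through path when C is clear
	bx	lr
1:
	movs	r0, #1		@ illustrative "error" path
	bx	lr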

lib/libc/arm/aeabi/aeabi_vfp_double.S

@@ -66,6 +66,7 @@ AEABI_ENTRY(dcmpeq)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
ite ne
movne r0, #0
moveq r0, #1
RET
@@ -77,8 +78,9 @@ AEABI_ENTRY(dcmplt)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
ite cs
movcs r0, #0
movlt r0, #1
movcc r0, #1
RET
AEABI_END(dcmplt)
@@ -88,6 +90,7 @@ AEABI_ENTRY(dcmple)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
ite hi
movhi r0, #0
movls r0, #1
RET
@@ -99,6 +102,7 @@ AEABI_ENTRY(dcmpge)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
ite lt
movlt r0, #0
movge r0, #1
RET
@@ -110,6 +114,7 @@ AEABI_ENTRY(dcmpgt)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
ite le
movle r0, #0
movgt r0, #1
RET
@@ -121,6 +126,7 @@ AEABI_ENTRY(dcmpun)
LOAD_DREG(d1, r2, r3)
vcmp.f64 d0, d1
vmrs APSR_nzcv, fpscr
ite vc
movvc r0, #0
movvs r0, #1
RET

lib/libc/arm/aeabi/aeabi_vfp_float.S

@@ -62,6 +62,7 @@ AEABI_ENTRY(fcmpeq)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
ite ne
movne r0, #0
moveq r0, #1
RET
@@ -72,8 +73,9 @@ AEABI_ENTRY(fcmplt)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
ite cs
movcs r0, #0
movlt r0, #1
movcc r0, #1
RET
AEABI_END(fcmplt)
@@ -82,6 +84,7 @@ AEABI_ENTRY(fcmple)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
ite hi
movhi r0, #0
movls r0, #1
RET
@@ -92,6 +95,7 @@ AEABI_ENTRY(fcmpge)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
ite lt
movlt r0, #0
movge r0, #1
RET
@@ -102,6 +106,7 @@ AEABI_ENTRY(fcmpgt)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
ite le
movle r0, #0
movgt r0, #1
RET
@@ -112,6 +117,7 @@ AEABI_ENTRY(fcmpun)
LOAD_SREGS(s0, s1, r0, r1)
vcmp.f32 s0, s1
vmrs APSR_nzcv, fpscr
ite vc
movvc r0, #0
movvs r0, #1
RET
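
A short summary (my annotation, not part of the diff) of why each condition pair above gives the IEEE-correct result, given the vcmp flag outcomes noted earlier ('<' sets N; '==' sets Z and C; '>' sets C; unordered sets C and V):

	@ __aeabi_{d,f}cmpeq   ne/eq   Z is set only when the operands compare equal
	@ __aeabi_{d,f}cmplt   cs/cc   C is clear only for a strict "less than"
	@ __aeabi_{d,f}cmple   hi/ls   ls = C clear or Z set; unordered (C=1, Z=0) yields 0
	@ __aeabi_{d,f}cmpge   lt/ge   ge = N equals V; unordered (N=0, V=1) yields 0
	@ __aeabi_{d,f}cmpgt   le/gt   gt = Z clear and N equals V; unordered yields 0
	@ __aeabi_{d,f}cmpun   vc/vs   V is set exactly when the compare was unordered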

lib/libc/arm/gen/_setjmp.S

@@ -85,7 +85,13 @@ ENTRY(_setjmp)
add r0, r0, #(_JB_REG_R4 * 4)
/* Store integer registers */
#ifndef __thumb__
stmia r0, {r4-r14}
#else
stmia r0, {r4-r12}
str r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
str r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif
mov r0, #0x00000000
RET
@@ -120,15 +126,24 @@ ENTRY(_longjmp)
add r0, r0, #(_JB_REG_R4 * 4)
/* Restore integer registers */
#ifndef __thumb__
ldmia r0, {r4-r14}
#else
ldmia r0, {r4-r12}
ldr r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
ldr r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif
/* Validate sp and r14 */
teq sp, #0
it ne
teqne r14, #0
it eq
beq botch
/* Set return value */
movs r0, r1
it eq
moveq r0, #0x00000001
RET
@@ -137,7 +152,7 @@ botch:
#if !defined(_STANDALONE)
bl PIC_SYM(_C_LABEL(longjmperror), PLT)
bl PIC_SYM(_C_LABEL(abort), PLT)
b . - 8 /* Cannot get here */
1: b 1b /* Cannot get here */
#else
b .
#endif
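
A note on the Thumb branch above, with a minimal sketch (the literal slot offsets are only illustrative of the _JB_REG_* layout, which is assumed to use consecutive words). Thumb-2 STM/LDM register lists may not contain sp, so the ARM-mode "stmia r0, {r4-r14}" is not encodable; and because stmia packs the listed registers into consecutive words, lr must also be stored with an explicit str so it still lands in its own jmp_buf slot rather than sliding into the one reserved for sp:

	.syntax	unified
	.thumb
	stmia	r0, {r4-r12}		@ r4..r12 fill the first nine slots
	str	r13, [r0, #(9 * 4)]	@ sp slot, i.e. (_JB_REG_R13 - _JB_REG_R4) * 4
	str	r14, [r0, #(10 * 4)]	@ lr slot, i.e. (_JB_REG_R14 - _JB_REG_R4) * 4

The switch from "b . - 8" to "1: b 1b" in the botch path is also a Thumb accommodation: Thumb instructions are 2 or 4 bytes, so a fixed byte offset back from "." is fragile, while a numbered local label assembles correctly in either instruction set.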

lib/libc/arm/gen/setjmp.S

@@ -90,7 +90,13 @@ ENTRY(setjmp)
/* Store integer registers */
add r0, r0, #(_JB_REG_R4 * 4)
#ifndef __thumb__
stmia r0, {r4-r14}
#else
stmia r0, {r4-r12}
str r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
str r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif
mov r0, #0x00000000
RET
@@ -133,15 +139,24 @@ ENTRY(__longjmp)
add r0, r0, #(_JB_REG_R4 * 4)
/* Restore integer registers */
#ifndef __thumb__
ldmia r0, {r4-r14}
#else
ldmia r0, {r4-r12}
ldr r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
ldr r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif
/* Validate sp and r14 */
teq sp, #0
it ne
teqne r14, #0
it eq
beq .Lbotch
/* Set return value */
movs r0, r1
it eq
moveq r0, #0x00000001
RET
@@ -149,5 +164,5 @@ ENTRY(__longjmp)
.Lbotch:
bl PIC_SYM(_C_LABEL(longjmperror), PLT)
bl PIC_SYM(_C_LABEL(abort), PLT)
b . - 8 /* Cannot get here */
1: b 1b /* Cannot get here */
END(__longjmp)