Fix unwind-info errors in our hand-written arm assembler code.

We have functions nested within functions, and places where we start a
function but never end it; we just jump to the middle of something else.
We tried to express this with nested ENTRY()/END() macros (which result
in .fnstart and .fnend directives), but it turns out there's no way to
express that nesting in ARM EHABI unwind info, and newer tools treat
multiple .fnstart directives without an intervening .fnend as an error.
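
The arm9 cache routines are a typical case (abridged from the diff below);
the second ENTRY() emits a second .fnstart while the first is still open,
which is exactly what newer tools now reject:

    ENTRY_NP(arm9_idcache_wbinv_all)      /* emits .fnstart */
            mcr     p15, 0, r0, c7, c5, 0 /* Flush I cache */
            /* Fall through */
    ENTRY(arm9_dcache_wbinv_all)          /* second .fnstart, first one still open */
            ...                           /* cache-walk loop elided */
            mov     pc, lr
    END(arm9_idcache_wbinv_all)           /* .fnend */
    END(arm9_dcache_wbinv_all)            /* .fnend for the nested "function" */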

These changes introduce two new macros, EENTRY() and EEND().  EENTRY()
creates a global label you can call/jump to just like ENTRY(), but it
doesn't emit a .fnstart.  EEND() is a no-op that just documents the
conceptual endpoint that matches up with the same-named EENTRY().
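
With the new macros the same sequence has exactly one .fnstart/.fnend pair
(again abridged from the arm9 diff below):

    ENTRY_NP(arm9_idcache_wbinv_all)      /* emits the single .fnstart */
            mcr     p15, 0, r0, c7, c5, 0 /* Flush I cache */
            /* Fall through */
    EENTRY(arm9_dcache_wbinv_all)         /* callable label, no .fnstart */
            ...                           /* cache-walk loop elided */
            mov     pc, lr
    EEND(arm9_dcache_wbinv_all)           /* expands to nothing; documents the exit */
    END(arm9_idcache_wbinv_all)           /* the single .fnend closes the region */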

This is based on patches submitted by Stepan Dyatkovskiy, but I made some
changes and added the EEND() stuff, so blame any problems on me.

Submitted by:	Stepan Dyatkovskiy <stpworld@narod.ru>
Ian Lepore 2014-08-01 18:24:44 +00:00
parent 081b8e203b
commit 25166187e3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=269390
14 changed files with 82 additions and 60 deletions

View File

@@ -209,7 +209,7 @@ ENTRY_NP(arm10_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through to purge Dcache. */
-ENTRY(arm10_dcache_wbinv_all)
+EENTRY(arm10_dcache_wbinv_all)
 .Larm10_dcache_wbinv_all:
 	ldr	ip, .Larm10_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -223,8 +223,8 @@ ENTRY(arm10_dcache_wbinv_all)
 	bhs	.Lnext_set_inv		/* Next set */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	bx	lr
+EEND(arm10_dcache_wbinv_all)
 END(arm10_idcache_wbinv_all)
-END(arm10_dcache_wbinv_all)
 .Larm10_cache_data:
 	.word	_C_LABEL(arm10_dcache_sets_max)

View File

@@ -197,7 +197,7 @@ ENTRY_NP(arm9_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through */
-ENTRY(arm9_dcache_wbinv_all)
+EENTRY(arm9_dcache_wbinv_all)
 .Larm9_dcache_wbinv_all:
 	ldr	ip, .Larm9_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -210,8 +210,8 @@ ENTRY(arm9_dcache_wbinv_all)
 	subs	s_max, s_max, s_inc
 	bhs	.Lnext_set_inv		/* Next set */
 	mov	pc, lr
+EEND(arm9_dcache_wbinv_all)
 END(arm9_idcache_wbinv_all)
-END(arm9_dcache_wbinv_all)
 .Larm9_cache_data:
 	.word	_C_LABEL(arm9_dcache_sets_max)

View File

@@ -194,6 +194,7 @@ ENTRY(armv5_idcache_wbinv_range)
 END(armv5_idcache_wbinv_range)
 ENTRY_NP(armv5_idcache_wbinv_all)
+armv5_idcache_wbinv_all:
 .Larmv5_idcache_wbinv_all:
 	/*
 	 * We assume that the code here can never be out of sync with the
@@ -203,7 +204,7 @@ ENTRY_NP(armv5_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through to purge Dcache. */
-ENTRY(armv5_dcache_wbinv_all)
+EENTRY(armv5_dcache_wbinv_all)
 .Larmv5_dcache_wbinv_all:
 	ldr	ip, .Larmv5_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -219,8 +220,8 @@ ENTRY(armv5_dcache_wbinv_all)
 	bpl	1b			/* Next set */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+EEND(armv5_dcache_wbinv_all)
 END(armv5_idcache_wbinv_all)
-END(armv5_dcache_wbinv_all)
 .Larmv5_cache_data:
 	.word	_C_LABEL(armv5_dcache_sets_max)

View File

@@ -137,12 +137,12 @@ ENTRY_NP(armv6_idcache_wbinv_all)
 	/* Fall through to purge Dcache. */
 /* LINTSTUB: void armv6_dcache_wbinv_all(void); */
-ENTRY(armv6_dcache_wbinv_all)
+EENTRY(armv6_dcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+EEND(armv6_dcache_wbinv_all)
 END(armv6_idcache_wbinv_all)
-END(armv6_dcache_wbinv_all)
 ENTRY(armv6_idcache_inv_all)
 	mov	r0, #0

View File

@@ -358,7 +358,7 @@ ENTRY(armv7_idcache_inv_all)
 	mcr	p15, 0, r0, c7, c5, 0	@ invalidate instruction+branch cache
 	isb				@ instruction sync barrier
 	bx	lr			@ return
-END(armv7_l1cache_inv_all)
+END(armv7_idcache_inv_all)
 ENTRY_NP(armv7_sleep)
 	dsb

View File

@@ -306,11 +306,12 @@ _C_LABEL(xscale_minidata_clean_size):
 	XSCALE_CACHE_CLEAN_UNBLOCK
 ENTRY_NP(xscale_cache_syncI)
-ENTRY_NP(xscale_cache_purgeID)
+EENTRY_NP(xscale_cache_purgeID)
 	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
-ENTRY_NP(xscale_cache_cleanID)
-ENTRY_NP(xscale_cache_purgeD)
-ENTRY(xscale_cache_cleanD)
+EENTRY_NP(xscale_cache_cleanID)
+EENTRY_NP(xscale_cache_purgeD)
+EENTRY(xscale_cache_cleanD)
 	XSCALE_CACHE_CLEAN_PROLOGUE
 1:	subs	r0, r0, #32
@@ -326,11 +327,11 @@ ENTRY(xscale_cache_cleanD)
 	XSCALE_CACHE_CLEAN_EPILOGUE
 	RET
+EEND(xscale_cache_cleanD)
+EEND(xscale_cache_purgeD)
+EEND(xscale_cache_cleanID)
+EEND(xscale_cache_purgeID)
 END(xscale_cache_syncI)
-END(xscale_cache_purgeID)
-END(xscale_cache_cleanID)
-END(xscale_cache_purgeD)
-END(xscale_cache_cleanD)
 /*
  * Clean the mini-data cache.
@@ -374,7 +375,7 @@ END(xscale_cache_purgeD_E)
  */
 /* xscale_cache_syncI is identical to xscale_cache_purgeID */
-ENTRY(xscale_cache_cleanID_rng)
+EENTRY(xscale_cache_cleanID_rng)
 ENTRY(xscale_cache_cleanD_rng)
 	cmp	r1, #0x4000
 	bcs	_C_LABEL(xscale_cache_cleanID)
@@ -393,7 +394,7 @@ ENTRY(xscale_cache_cleanD_rng)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	CPWAIT_AND_RETURN(r0)
-END(xscale_cache_cleanID_rng)
+/*END(xscale_cache_cleanID_rng)*/
 END(xscale_cache_cleanD_rng)
 ENTRY(xscale_cache_purgeID_rng)

View File

@@ -143,11 +143,12 @@ __FBSDID("$FreeBSD$");
 ENTRY_NP(xscalec3_cache_syncI)
-ENTRY_NP(xscalec3_cache_purgeID)
+xscalec3_cache_purgeID:
+EENTRY_NP(xscalec3_cache_purgeID)
 	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
-ENTRY_NP(xscalec3_cache_cleanID)
-ENTRY_NP(xscalec3_cache_purgeD)
-ENTRY(xscalec3_cache_cleanD)
+EENTRY_NP(xscalec3_cache_cleanID)
+EENTRY_NP(xscalec3_cache_purgeD)
+EENTRY(xscalec3_cache_cleanD)
 	XSCALE_CACHE_CLEAN_BLOCK
 	mov	r0, #0
@@ -168,11 +169,11 @@ ENTRY(xscalec3_cache_cleanD)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	RET
+EEND(xscalec3_cache_purgeID)
+EEND(xscalec3_cache_cleanID)
+EEND(xscalec3_cache_purgeD)
+EEND(xscalec3_cache_cleanD)
 END(xscalec3_cache_syncI)
-END(xscalec3_cache_purgeID)
-END(xscalec3_cache_cleanID)
-END(xscalec3_cache_purgeD)
-END(xscalec3_cache_cleanD)
 ENTRY(xscalec3_cache_purgeID_rng)
@@ -238,7 +239,7 @@ ENTRY(xscalec3_cache_purgeD_rng)
 END(xscalec3_cache_purgeD_rng)
 ENTRY(xscalec3_cache_cleanID_rng)
-ENTRY(xscalec3_cache_cleanD_rng)
+EENTRY(xscalec3_cache_cleanD_rng)
 	cmp	r1, #0x4000
 	bcs	_C_LABEL(xscalec3_cache_cleanID)
@@ -257,8 +258,8 @@ ENTRY(xscalec3_cache_cleanD_rng)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	CPWAIT_AND_RETURN(r0)
+EEND(xscalec3_cache_cleanD_rng)
 END(xscalec3_cache_cleanID_rng)
-END(xscalec3_cache_cleanD_rng)
 ENTRY(xscalec3_l2cache_purge)
 	/* Clean-up the L2 cache */

View File

@@ -280,12 +280,12 @@ ASENTRY_NP(swi_entry)
  * that a newly created thread appears to return from a SWI just like
  * the parent thread that created it.
  */
-ASENTRY_NP(swi_exit)
+ASEENTRY_NP(swi_exit)
 	DO_AST				/* Handle pending signals. */
 	PULLFRAME			/* Deallocate trapframe. */
 	movs	pc, lr			/* Return to userland. */
 	STOP_UNWINDING			/* Don't unwind into user mode. */
-END(swi_exit)
+EEND(swi_exit)
 END(swi_entry)
 /*

View File

@@ -54,8 +54,8 @@ __FBSDID("$FreeBSD$");
  * Fetch an int from the user's address space.
  */
-ENTRY_NP(casuword32)
 ENTRY(casuword)
+EENTRY_NP(casuword32)
 	GET_PCB(r3)
 	ldr	r3, [r3]
@@ -91,7 +91,7 @@ ENTRY(casuword)
 	mov	r1, #0x00000000
 	str	r1, [r3, #PCB_ONFAULT]
 	RET
-END(casuword32)
+EEND(casuword32)
 END(casuword)
 /*
@@ -110,8 +110,8 @@ END(casuword)
  * Fetch an int from the user's address space.
  */
-ENTRY_NP(fuword32)
 ENTRY(fuword)
+EENTRY_NP(fuword32)
 	GET_PCB(r2)
 	ldr	r2, [r2]
@@ -277,8 +277,8 @@ fusupcbfaulttext:
  * Store an int in the user's address space.
  */
-ENTRY_NP(suword32)
 ENTRY(suword)
+EENTRY_NP(suword32)
 	GET_PCB(r2)
 	ldr	r2, [r2]
@@ -390,4 +390,3 @@ ENTRY(subyte)
 	str	r0, [r2, #PCB_ONFAULT]
 	RET
 END(subyte)

View File

@@ -75,7 +75,8 @@ __FBSDID("$FreeBSD$");
  * For both types of boot we gather up the args, put them in a struct arm_boot_params
  * structure and pass that to initarm.
  */
-ENTRY_NP(btext)
+.globl btext
+btext:
 ASENTRY_NP(_start)
 	STOP_UNWINDING		/* Can't unwind into the bootloader! */
@@ -285,7 +286,6 @@ virt_done:
 	adr	r0, .Lmainreturned
 	b	_C_LABEL(panic)
 	/* NOTREACHED */
-END(btext)
 END(_start)
 /*
@@ -548,7 +548,7 @@ ENTRY_NP(sigcode)
 	/* Branch back to retry SYS_sigreturn */
 	b	. - 16
-END(sigcode)
 	.word	SYS_sigreturn
 	.word	SYS_exit
@@ -560,5 +560,5 @@ ENTRY_NP(sigcode)
 	.global	szsigcode
 szsigcode:
 	.long	esigcode-sigcode
+END(sigcode)
 /* End of locore.S */

View File

@@ -71,7 +71,7 @@ ENTRY(set_stackptr)
 	msr	cpsr_fsxc, r3		/* Restore the old mode */
 	mov	pc, lr			/* Exit */
+END(set_stackptr)
 /* To get the stack pointer for a particular mode we must switch
  * to that mode copy the banked r13 and then switch back.
  * This routine provides an easy way of doing this for any mode
@@ -90,5 +90,5 @@ ENTRY(get_stackptr)
 	msr	cpsr_fsxc, r3		/* Restore the old mode */
 	mov	pc, lr			/* Exit */
+END(get_stackptr)
 /* End of setstack.S */

View File

@@ -130,7 +130,7 @@ ENTRY(bzero)
 .Lnormal0:
 	mov	r3, #0x00
 	b	do_memset
+EEND(bzero)
 /* LINTSTUB: Func: void *memset(void *, int, size_t) */
 ENTRY(memset)
 	and	r3, r1, #0xff		/* We deal with bytes */
@@ -276,7 +276,6 @@ do_memset:
 	strgeb	r3, [ip], #0x01		/* Set another byte */
 	strgtb	r3, [ip]		/* and a third */
 	RET			/* Exit */
-END(bzero)
 END(memset)
 ENTRY(bcmp)
@@ -394,7 +393,7 @@ ENTRY(bcopy)
 	eor	r0, r1, r0
 	eor	r1, r0, r1
 	eor	r0, r1, r0
-ENTRY(memmove)
+EENTRY(memmove)
 	/* Do the buffers overlap? */
 	cmp	r0, r1
 	RETeq		/* Bail now if src/dst are the same */
@@ -931,8 +930,8 @@ ENTRY(memmove)
 .Lmemmove_bsrcul1l4:
 	add	r1, r1, #1
 	b	.Lmemmove_bl4
+EEND(memmove)
 END(bcopy)
-END(memmove)
 #if !defined(_ARM_ARCH_5E)
 ENTRY(memcpy)
@@ -2945,13 +2944,17 @@ END(memcpy)
 ENTRY(user)
 	nop
+END(user)
 ENTRY(btrap)
 	nop
+END(btrap)
 ENTRY(etrap)
 	nop
+END(etrap)
 ENTRY(bintr)
 	nop
+END(bintr)
 ENTRY(eintr)
 	nop
+END(eintr)
 #endif

View File

@@ -74,9 +74,20 @@
 #define	GLOBAL(X)	.globl x
 #define	_ENTRY(x) \
 	.text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: _FNSTART
 #define	_END(x)	.size x, . - x; _FNEND
+/*
+ * EENTRY()/EEND() mark "extra" entry/exit points from a function.
+ * The unwind info cannot handle the concept of a nested function, or a function
+ * with multiple .fnstart directives, but some of our assembler code is written
+ * with multiple labels to allow entry at several points. The EENTRY() macro
+ * defines such an extra entry point without a new .fnstart, so that it's
+ * basically just a label that you can jump to. The EEND() macro does nothing
+ * at all, except document the exit point associated with the same-named entry.
+ */
+#define	_EENTRY(x)	.globl x; .type x,_ASM_TYPE_FUNCTION; x:
+#define	_EEND(x)	/* nothing */
 #ifdef GPROF
 # define _PROF_PROLOGUE	\
 	mov ip, lr; bl __mcount
@@ -85,11 +96,17 @@
 #endif
 #define	ENTRY(y)	_ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
+#define	EENTRY(y)	_EENTRY(_C_LABEL(y)); _PROF_PROLOGUE
 #define	ENTRY_NP(y)	_ENTRY(_C_LABEL(y))
+#define	EENTRY_NP(y)	_EENTRY(_C_LABEL(y))
 #define	END(y)		_END(_C_LABEL(y))
+#define	EEND(y)
 #define	ASENTRY(y)	_ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
+#define	ASEENTRY(y)	_EENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
 #define	ASENTRY_NP(y)	_ENTRY(_ASM_LABEL(y))
+#define	ASEENTRY_NP(y)	_EENTRY(_ASM_LABEL(y))
 #define	ASEND(y)	_END(_ASM_LABEL(y))
+#define	ASEEND(y)
 #define	ASMSTR		.asciz

View File

@@ -51,11 +51,11 @@ ENTRY_NP(__modsi3)
 	RET
 END(__modsi3)
-#ifdef __ARM_EABI__
-ENTRY_NP(__aeabi_uidiv)
-ENTRY_NP(__aeabi_uidivmod)
-#endif
 ENTRY_NP(__udivsi3)
+#ifdef __ARM_EABI__
+EENTRY_NP(__aeabi_uidiv)
+EENTRY_NP(__aeabi_uidivmod)
+#endif
 .L_udivide:	/* r0 = r0 / r1; r1 = r0 % r1 */
 	eor	r0, r1, r0
 	eor	r1, r0, r1
@@ -77,16 +77,16 @@ ENTRY_NP(__udivsi3)
 	mov	r1, #0
 	RET
 #ifdef __ARM_EABI__
-END(__aeabi_uidiv)
-END(__aeabi_uidivmod)
+EEND(__aeabi_uidiv)
+EEND(__aeabi_uidivmod)
 #endif
 END(__udivsi3)
-#ifdef __ARM_EABI__
-ENTRY_NP(__aeabi_idiv)
-ENTRY_NP(__aeabi_idivmod)
-#endif
 ENTRY_NP(__divsi3)
+#ifdef __ARM_EABI__
+EENTRY_NP(__aeabi_idiv)
+EENTRY_NP(__aeabi_idivmod)
+#endif
 .L_divide:	/* r0 = r0 / r1; r1 = r0 % r1 */
 	eor	r0, r1, r0
 	eor	r1, r0, r1
@@ -401,8 +401,8 @@ ENTRY_NP(__divsi3)
 	mov	r0, r3
 	RET
 #ifdef __ARM_EABI__
-END(__aeabi_idiv)
-END(__aeabi_idivmod)
+EEND(__aeabi_idiv)
+EEND(__aeabi_idivmod)
 #endif
 END(__divsi3)