Use the RET macro.

cognet 2004-11-09 16:47:47 +00:00
parent 601293a498
commit 7e3e230230
27 changed files with 301 additions and 302 deletions
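Context note: RET and its conditional variants (RETeq, RETne) used throughout this diff are return macros provided by the ARM <machine/asm.h> header. Putting the return sequence behind a macro means it can later be changed in one place (for example, to a bx-based return for Thumb interworking on ARMv4T and newer cores) instead of editing every hand-written "mov pc, lr". The lines below are a minimal illustrative sketch of how such macros are typically defined; the guard macro name and the exact FreeBSD definitions of the time may differ.

/* Illustrative sketch only -- see the real <machine/asm.h> for the
 * authoritative definitions used by this commit. */
#if defined(_ARM_ARCH_4T)               /* assumed guard; name is illustrative */
#define	RET	bx	lr		/* interworking-safe return */
#define	RETeq	bxeq	lr
#define	RETne	bxne	lr
#else
#define	RET	mov	pc, lr		/* classic ARM return */
#define	RETeq	moveq	pc, lr
#define	RETne	movne	pc, lr
#endif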

View File

@ -242,7 +242,7 @@ ENTRY(bcopy_page)
strd r4, [r1], #0x08
bgt 1b
ldmfd sp!, {r4, r5}
mov pc, lr
RET
/*
* XSCALE version of bzero_page
@ -269,5 +269,5 @@ ENTRY(bzero_page)
strd r2, [r0], #8
subs r1, r1, #128
bne 1b
mov pc, lr
RET
#endif /* __XSCALE__ */

View File

@ -80,7 +80,7 @@ ENTRY(copyin)
/* Quick exit if length is zero */
teq r2, #0
moveq r0, #0
moveq pc, lr
RETeq
SAVE_REGS
#ifdef MULTIPROCESSOR
@ -280,14 +280,14 @@ ENTRY(copyin)
str r5, [r4, #PCB_ONFAULT]
RESTORE_REGS
mov pc, lr
RET
.Lcopyfault:
mov r0, #14 /* EFAULT */
str r5, [r4, #PCB_ONFAULT]
RESTORE_REGS
mov pc, lr
RET
/*
* r0 = kernel space address
@ -304,7 +304,7 @@ ENTRY(copyout)
/* Quick exit if length is zero */
teq r2, #0
moveq r0, #0
moveq pc, lr
RETeq
SAVE_REGS
#ifdef MULTIPROCESSOR
@ -501,7 +501,7 @@ ENTRY(copyout)
str r5, [r4, #PCB_ONFAULT]
RESTORE_REGS
mov pc, lr
RET
/*
* r0 = kernel space source address
@ -517,7 +517,7 @@ ENTRY(kcopy)
/* Quick exit if length is zero */
teq r2, #0
moveq r0, #0
moveq pc, lr
RETeq
SAVE_REGS
#ifdef MULTIPROCESSOR
@ -703,7 +703,7 @@ ENTRY(kcopy)
str r5, [r4, #PCB_ONFAULT]
RESTORE_REGS
mov pc, lr
RET
#endif /* !__XSCALE__ */
/*
@ -738,7 +738,7 @@ ENTRY(badaddr_read_1)
strb r3, [r1]
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
mov pc, lr
RET
/*
* int badaddr_read_2(const uint16_t *src, uint16_t *dest)
@ -772,7 +772,7 @@ ENTRY(badaddr_read_2)
strh r3, [r1]
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
mov pc, lr
RET
/*
* int badaddr_read_4(const uint32_t *src, uint32_t *dest)
@ -806,5 +806,5 @@ ENTRY(badaddr_read_4)
str r3, [r1]
mov r0, #0 /* No fault */
1: str ip, [r2, #PCB_ONFAULT]
mov pc, lr
RET

View File

@ -111,7 +111,7 @@ ENTRY(copyin)
ldrbt ip, [r0], #0x01
strb ip, [r1], #0x01
cmp r2, #0x00 /* All done? */
moveq pc, lr
RETeq
/* Destination buffer is now word aligned */
.Lcopyin_wordaligned:
@ -193,7 +193,7 @@ ENTRY(copyin)
.Lcopyin_w_lessthan128:
adds r2, r2, #0x80 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq
subs r2, r2, #0x20
blt .Lcopyin_w_lessthan32
@ -218,7 +218,7 @@ ENTRY(copyin)
.Lcopyin_w_lessthan32:
adds r2, r2, #0x20 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
and r4, r2, #0x18
rsb r5, r4, #0x18
@ -246,14 +246,14 @@ ENTRY(copyin)
/* Less than 8 bytes remaining */
ldmfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
mov r3, #0x00
.Lcopyin_w_less_than8:
subs r2, r2, #0x04
ldrget ip, [r0], #0x04
strge ip, [r1], #0x04
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
addlt r2, r2, #0x04
ldrbt ip, [r0], #0x01
cmp r2, #0x02
@ -262,7 +262,7 @@ ENTRY(copyin)
ldrgtbt ip, [r0]
strgeb r2, [r1], #0x01
strgtb ip, [r1]
mov pc, lr
RET
/*
* At this point, it has not been possible to word align both buffers.
@ -317,7 +317,7 @@ ENTRY(copyin)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x03
blt .Lcopyin_l4
@ -378,7 +378,7 @@ ENTRY(copyin)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x02
blt .Lcopyin_l4
@ -439,7 +439,7 @@ ENTRY(copyin)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x01
blt .Lcopyin_l4
@ -465,7 +465,7 @@ ENTRY(copyin)
ldmfd sp!, {r4-r7}
mov r3, #0x00
adds r2, r2, #0x04
moveq pc, lr
RETeq
.Lcopyin_l4_2:
rsbs r2, r2, #0x03
addne pc, pc, r2, lsl #3
@ -476,7 +476,7 @@ ENTRY(copyin)
strb ip, [r1], #0x01
ldrbt ip, [r0]
strb ip, [r1]
mov pc, lr
RET
/*
@ -541,7 +541,7 @@ ENTRY(copyout)
ldrb ip, [r0], #0x01
strbt ip, [r1], #0x01
cmp r2, #0x00 /* All done? */
moveq pc, lr
RETeq
/* Destination buffer is now word aligned */
.Lcopyout_wordaligned:
@ -639,7 +639,7 @@ ENTRY(copyout)
.Lcopyout_w_lessthan128:
adds r2, r2, #0x80 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x20
blt .Lcopyout_w_lessthan32
@ -668,7 +668,7 @@ ENTRY(copyout)
.Lcopyout_w_lessthan32:
adds r2, r2, #0x20 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
and r4, r2, #0x18
rsb r5, r4, #0x18
@ -696,14 +696,14 @@ ENTRY(copyout)
/* Less than 8 bytes remaining */
ldmfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
mov r3, #0x00
.Lcopyout_w_less_than8:
subs r2, r2, #0x04
ldrge ip, [r0], #0x04
strget ip, [r1], #0x04
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
addlt r2, r2, #0x04
ldrb ip, [r0], #0x01
cmp r2, #0x02
@ -712,7 +712,7 @@ ENTRY(copyout)
ldrgtb ip, [r0]
strgebt r2, [r1], #0x01
strgtbt ip, [r1]
mov pc, lr
RET
/*
* At this point, it has not been possible to word align both buffers.
@ -767,7 +767,7 @@ ENTRY(copyout)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x03
blt .Lcopyout_l4
@ -828,7 +828,7 @@ ENTRY(copyout)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x02
blt .Lcopyout_l4
@ -889,7 +889,7 @@ ENTRY(copyout)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x01
blt .Lcopyout_l4
@ -915,7 +915,7 @@ ENTRY(copyout)
ldmfd sp!, {r4-r7}
mov r3, #0x00
adds r2, r2, #0x04
moveq pc, lr
RETeq
.Lcopyout_l4_2:
rsbs r2, r2, #0x03
addne pc, pc, r2, lsl #3
@ -926,7 +926,7 @@ ENTRY(copyout)
strbt ip, [r1], #0x01
ldrb ip, [r0]
strbt ip, [r1]
mov pc, lr
RET
/*
@ -990,7 +990,7 @@ ENTRY(kcopy)
ldrb ip, [r0], #0x01
strb ip, [r1], #0x01
cmp r2, #0x00 /* All done? */
moveq pc, lr
RETeq
/* Destination buffer is now word aligned */
.Lkcopy_wordaligned:
@ -1071,7 +1071,7 @@ ENTRY(kcopy)
.Lkcopy_w_lessthan128:
adds r2, r2, #0x80 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x20
blt .Lkcopy_w_lessthan32
@ -1096,7 +1096,7 @@ ENTRY(kcopy)
.Lkcopy_w_lessthan32:
adds r2, r2, #0x20 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
and r4, r2, #0x18
rsb r5, r4, #0x18
@ -1124,14 +1124,14 @@ ENTRY(kcopy)
/* Less than 8 bytes remaining */
ldmfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
mov r3, #0x00
.Lkcopy_w_less_than8:
subs r2, r2, #0x04
ldrge ip, [r0], #0x04
strge ip, [r1], #0x04
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
addlt r2, r2, #0x04
ldrb ip, [r0], #0x01
cmp r2, #0x02
@ -1140,7 +1140,7 @@ ENTRY(kcopy)
ldrgtb ip, [r0]
strgeb r2, [r1], #0x01
strgtb ip, [r1]
mov pc, lr
RET
/*
* At this point, it has not been possible to word align both buffers.
@ -1195,7 +1195,7 @@ ENTRY(kcopy)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x03
blt .Lkcopy_bad_endgame
@ -1256,7 +1256,7 @@ ENTRY(kcopy)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x02
blt .Lkcopy_bad_endgame
@ -1317,7 +1317,7 @@ ENTRY(kcopy)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r0, r0, #0x01
blt .Lkcopy_bad_endgame
@ -1343,7 +1343,7 @@ ENTRY(kcopy)
ldmfd sp!, {r4-r7}
mov r3, #0x00
adds r2, r2, #0x04
moveq pc, lr
RETeq
.Lkcopy_bad_endgame2:
rsbs r2, r2, #0x03
addne pc, pc, r2, lsl #3
@ -1354,4 +1354,4 @@ ENTRY(kcopy)
strb ip, [r1], #0x01
ldrb ip, [r0]
strb ip, [r1]
mov pc, lr
RET

View File

@ -92,7 +92,7 @@ ENTRY(read_multi_1)
.Lrm1_l4:
adds r2, r2, #4 /* r2 = length again */
ldmeqdb fp, {fp, sp, pc}
moveq pc, r14
RETeq
cmp r2, #2
ldrb r3, [r0]
strb r3, [r1], #1
@ -182,7 +182,7 @@ ENTRY(insw)
strb r3, [r1], #0x0001
bgt .Linswloop
mov pc, lr
RET
/* Word aligned insw */
@ -198,7 +198,7 @@ ENTRY(insw)
subs r2, r2, #0x00000002 /* Next */
bgt .Lfastinswloop
mov pc, lr
RET
/*
@ -231,7 +231,7 @@ ENTRY(outsw)
str r3, [r0]
bgt .Loutswloop
mov pc, lr
RET
/* Word aligned outsw */
@ -259,7 +259,7 @@ ENTRY(outsw)
bgt .Lfastoutswloop
mov pc, lr
RET
/*
* reads short ints (16 bits) from an I/O address into a block of memory

View File

@ -49,17 +49,17 @@ __FBSDID("$FreeBSD$");
ENTRY(generic_bs_r_1)
ldrb r0, [r1, r2]
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_r_2)
ldrh r0, [r1, r2]
mov pc, lr
RET
#endif
ENTRY(generic_bs_r_4)
ldr r0, [r1, r2]
mov pc, lr
RET
/*
* write single
@ -67,17 +67,17 @@ ENTRY(generic_bs_r_4)
ENTRY(generic_bs_w_1)
strb r3, [r1, r2]
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_w_2)
strh r3, [r1, r2]
mov pc, lr
RET
#endif
ENTRY(generic_bs_w_4)
str r3, [r1, r2]
mov pc, lr
RET
/*
* read multiple
@ -88,14 +88,14 @@ ENTRY(generic_bs_rm_1)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrb r3, [r0]
strb r3, [r1], #1
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_rm_2)
@ -103,14 +103,14 @@ ENTRY(generic_armv4_bs_rm_2)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrh r3, [r0]
strh r3, [r1], #2
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#endif
ENTRY(generic_bs_rm_4)
@ -118,14 +118,14 @@ ENTRY(generic_bs_rm_4)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldr r3, [r0]
str r3, [r1], #4
subs r2, r2, #1
bne 1b
mov pc, lr
RET
/*
* write multiple
@ -136,14 +136,14 @@ ENTRY(generic_bs_wm_1)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrb r3, [r1], #1
strb r3, [r0]
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_wm_2)
@ -151,14 +151,14 @@ ENTRY(generic_armv4_bs_wm_2)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrh r3, [r1], #2
strh r3, [r0]
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#endif
ENTRY(generic_bs_wm_4)
@ -166,14 +166,14 @@ ENTRY(generic_bs_wm_4)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldr r3, [r1], #4
str r3, [r0]
subs r2, r2, #1
bne 1b
mov pc, lr
RET
/*
* read region
@ -184,14 +184,14 @@ ENTRY(generic_bs_rr_1)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrb r3, [r0], #1
strb r3, [r1], #1
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_rr_2)
@ -199,14 +199,14 @@ ENTRY(generic_armv4_bs_rr_2)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrh r3, [r0], #2
strh r3, [r1], #2
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#endif
ENTRY(generic_bs_rr_4)
@ -214,14 +214,14 @@ ENTRY(generic_bs_rr_4)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldr r3, [r0], #4
str r3, [r1], #4
subs r2, r2, #1
bne 1b
mov pc, lr
RET
/*
* write region.
@ -232,14 +232,14 @@ ENTRY(generic_bs_wr_1)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrb r3, [r1], #1
strb r3, [r0], #1
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_wr_2)
@ -247,14 +247,14 @@ ENTRY(generic_armv4_bs_wr_2)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldrh r3, [r1], #2
strh r3, [r0], #2
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#endif
ENTRY(generic_bs_wr_4)
@ -262,14 +262,14 @@ ENTRY(generic_bs_wr_4)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #1
bne 1b
mov pc, lr
RET
/*
* set region
@ -280,13 +280,13 @@ ENTRY(generic_bs_sr_1)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: strb r1, [r0], #1
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#if (ARM_ARCH_4 + ARM_ARCH_5) > 0
ENTRY(generic_armv4_bs_sr_2)
@ -294,13 +294,13 @@ ENTRY(generic_armv4_bs_sr_2)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: strh r1, [r0], #2
subs r2, r2, #1
bne 1b
mov pc, lr
RET
#endif
ENTRY(generic_bs_sr_4)
@ -308,13 +308,13 @@ ENTRY(generic_bs_sr_4)
mov r1, r3
ldr r2, [sp, #0]
teq r2, #0
moveq pc, lr
RETeq
1: str r1, [r0], #4
subs r2, r2, #1
bne 1b
mov pc, lr
RET
/*
* copy region
@ -327,7 +327,7 @@ ENTRY(generic_armv4_bs_c_2)
add r1, r2, r3
ldr r2, [sp, #4]
teq r2, #0
moveq pc, lr
RETeq
cmp r0, r1
blt 2f
@ -337,7 +337,7 @@ ENTRY(generic_armv4_bs_c_2)
subs r2, r2, #1
bne 1b
mov pc, lr
RET
2: add r0, r0, r2, lsl #1
add r1, r1, r2, lsl #1
@ -349,5 +349,5 @@ ENTRY(generic_armv4_bs_c_2)
subs r2, r2, #1
bne 3b
mov pc, lr
RET
#endif

View File

@ -87,7 +87,7 @@ ENTRY(copystr)
strne r5, [r3]
ldmfd sp!, {r4-r5} /* stack is 8 byte aligned */
mov pc, lr
RET
#define SAVE_REGS stmfd sp!, {r4-r6}
#define RESTORE_REGS ldmfd sp!, {r4-r6}
@ -147,7 +147,7 @@ ENTRY(copyinstr)
strne r6, [r3]
RESTORE_REGS
mov pc, lr
RET
/*
* r0 - kernel space address
@ -204,14 +204,14 @@ ENTRY(copyoutstr)
strne r6, [r3]
RESTORE_REGS
mov pc, lr
RET
/* A fault occurred during the copy */
.Lcopystrfault:
mov r1, #0x00000000
str r1, [r4, #PCB_ONFAULT]
RESTORE_REGS
mov pc, lr
RET
#ifdef DIAGNOSTIC
.Lcopystrpcbfault:

View File

@ -49,7 +49,7 @@ __FBSDID("$FreeBSD$");
.align 0
ENTRY(cpufunc_nullop)
mov pc, lr
RET
/*
* Generic functions to read the internal coprocessor registers
@ -63,23 +63,23 @@ ENTRY(cpufunc_nullop)
ENTRY(cpufunc_id)
mrc p15, 0, r0, c0, c0, 0
mov pc, lr
RET
ENTRY(cpu_get_control)
mrc p15, 0, r0, c1, c0, 0
mov pc, lr
RET
ENTRY(cpu_read_cache_config)
mrc p15, 0, r0, c0, c0, 1
mov pc, lr
RET
ENTRY(cpufunc_faultstatus)
mrc p15, 0, r0, c5, c0, 0
mov pc, lr
RET
ENTRY(cpufunc_faultaddress)
mrc p15, 0, r0, c6, c0, 0
mov pc, lr
RET
/*
@ -96,12 +96,12 @@ ENTRY(cpufunc_faultaddress)
#if 0 /* See below. */
ENTRY(cpufunc_control)
mcr p15, 0, r0, c1, c0, 0
mov pc, lr
RET
#endif
ENTRY(cpufunc_domains)
mcr p15, 0, r0, c3, c0, 0
mov pc, lr
RET
/*
* Generic functions to read/modify/write the internal coprocessor registers
@ -125,7 +125,7 @@ ENTRY(cpufunc_control)
mov r0, r3 /* Return old value */
#endif
mov pc, lr
RET
.Lglou:
.asciz "plop %p\n"
.align 0

View File

@ -50,7 +50,7 @@ ENTRY(arm3_control)
teq r2, r3 /* Only write if there is a change */
mcrne p15, 0, r2, c2, c0, 0 /* Write new control register */
mov r0, r3 /* Return old value */
mov pc, lr
RET
/*
* Cache functions.
@ -58,4 +58,4 @@ ENTRY(arm3_control)
ENTRY(arm3_cache_flush)
mcr p15, 0, r0, c1, c0, 0
mov pc, lr
RET

View File

@ -61,25 +61,25 @@ ENTRY(arm67_setttb)
mov r0, r0
mov r0, r0
mov pc, lr
RET
/*
* TLB functions
*/
ENTRY(arm67_tlb_flush)
mcr p15, 0, r0, c5, c0, 0
mov pc, lr
RET
ENTRY(arm67_tlb_purge)
mcr p15, 0, r0, c6, c0, 0
mov pc, lr
RET
/*
* Cache functions
*/
ENTRY(arm67_cache_flush)
mcr p15, 0, r0, c7, c0, 0
mov pc, lr
RET
/*
* Context switch.
@ -108,4 +108,4 @@ ENTRY(arm67_context_switch)
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
RET

View File

@ -67,11 +67,11 @@ ENTRY(arm7tdmi_setttb)
ENTRY(arm7tdmi_tlb_flushID)
mov r0, #0
mcr p15, 0, r0, c8, c7, 0
mov pc, lr
RET
ENTRY(arm7tdmi_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1
mov pc, lr
RET
/*
* Cache functions
@ -85,7 +85,7 @@ ENTRY(arm7tdmi_cache_flushID)
mov r0, r0
mov r0, r0
mov pc, lr
RET
/*
* Context switch.

View File

@ -57,7 +57,7 @@ ENTRY(arm8_clock_config)
mov r0, r0 /* NOP */
mcr p15, 0, r2, c15, c0, 0 /* Write clock register */
mov r0, r3 /* Return old value */
mov pc, lr
RET
/*
* Functions to set the MMU Translation Table Base register
@ -89,29 +89,29 @@ ENTRY(arm8_setttb)
mov r0, r0
msr cpsr_all, r3
mov pc, lr
RET
/*
* TLB functions
*/
ENTRY(arm8_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mov pc, lr
RET
ENTRY(arm8_tlb_flushID_SE)
mcr p15, 0, r0, c8, c7, 1 /* flush I+D tlb single entry */
mov pc, lr
RET
/*
* Cache functions
*/
ENTRY(arm8_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
mov pc, lr
RET
ENTRY(arm8_cache_flushID_E)
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
mov pc, lr
RET
ENTRY(arm8_cache_cleanID)
mov r0, #0x00000000
@ -152,11 +152,11 @@ ENTRY(arm8_cache_cleanID)
adds r0, r0, #0x04000000
bne 1b
mov pc, lr
RET
ENTRY(arm8_cache_cleanID_E)
mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
mov pc, lr
RET
ENTRY(arm8_cache_purgeID)
/*
@ -231,7 +231,7 @@ ENTRY(arm8_cache_purgeID)
bne 1b
msr cpsr_all, r3
mov pc, lr
RET
ENTRY(arm8_cache_purgeID_E)
/*
@ -252,7 +252,7 @@ ENTRY(arm8_cache_purgeID_E)
mcr p15, 0, r0, c7, c11, 1 /* clean I+D single entry */
mcr p15, 0, r0, c7, c7, 1 /* flush I+D single entry */
msr cpsr_all, r3
mov pc, lr
RET
/*
* Context switch.
@ -281,4 +281,4 @@ ENTRY(arm8_context_switch)
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
RET

View File

@ -52,7 +52,7 @@ ENTRY(arm9_setttb)
mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
mov pc, lr
RET
/*
* TLB functions
@ -60,46 +60,46 @@ ENTRY(arm9_setttb)
ENTRY(arm9_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mov pc, lr
RET
/*
* Cache functions
*/
ENTRY(arm9_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
mov pc, lr
RET
ENTRY(arm9_cache_flushID_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
mov pc, lr
RET
ENTRY(arm9_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
RET
ENTRY(arm9_cache_flushI_SE)
mcr p15, 0, r0, c7, c5, 1 /* flush one entry from I cache */
mov pc, lr
RET
ENTRY(arm9_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
mov pc, lr
RET
ENTRY(arm9_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush one entry from D cache */
mov pc, lr
RET
ENTRY(arm9_cache_cleanID)
mcr p15, 0, r0, c7, c10, 4
mov pc, lr
RET
/*
* Soft functions
*/
ENTRY(arm9_cache_syncI)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D caches */
mov pc, lr
RET
ENTRY_NP(arm9_cache_flushID_rng)
b _C_LABEL(arm9_cache_flushID)
@ -134,4 +134,4 @@ ENTRY(arm9_context_switch)
nop
nop
nop
mov pc, lr
RET

View File

@ -45,23 +45,23 @@ __FBSDID("$FreeBSD$");
*/
ENTRY(armv4_tlb_flushID)
mcr p15, 0, r0, c8, c7, 0 /* flush I+D tlb */
mov pc, lr
RET
ENTRY(armv4_tlb_flushI)
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mov pc, lr
RET
ENTRY(armv4_tlb_flushD)
mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */
mov pc, lr
RET
ENTRY(armv4_tlb_flushD_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mov pc, lr
RET
/*
* Other functions
*/
ENTRY(armv4_drain_writebuf)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
RET

View File

@ -60,11 +60,11 @@ ENTRY(ixp12x0_context_switch)
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
RET
ENTRY(ixp12x0_drain_readbuf)
mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
mov pc, lr
RET
/*
* Information for the IXP12X0 cache clean/purge functions:

View File

@ -84,7 +84,7 @@ ENTRY(sa1_setttb)
#else
str r2, [r3]
#endif
mov pc, lr
RET
/*
* TLB functions
@ -92,30 +92,30 @@ ENTRY(sa1_setttb)
ENTRY(sa1_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */
mov pc, lr
RET
/*
* Cache functions
*/
ENTRY(sa1_cache_flushID)
mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */
mov pc, lr
RET
ENTRY(sa1_cache_flushI)
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
RET
ENTRY(sa1_cache_flushD)
mcr p15, 0, r0, c7, c6, 0 /* flush D cache */
mov pc, lr
RET
ENTRY(sa1_cache_flushD_SE)
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mov pc, lr
RET
ENTRY(sa1_cache_cleanD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */
mov pc, lr
RET
/*
* Information for the SA-1 cache clean/purge functions:
@ -195,20 +195,20 @@ ENTRY(sa1_cache_cleanD)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
SA1_CACHE_CLEAN_EPILOGUE
mov pc, lr
RET
ENTRY(sa1_cache_purgeID_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mov pc, lr
RET
ENTRY(sa1_cache_purgeD_E)
mcr p15, 0, r0, c7, c10, 1 /* clean dcache entry */
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */
mov pc, lr
RET
/*
* Soft functions
@ -230,7 +230,7 @@ ENTRY(sa1_cache_cleanD_rng)
bhi 1b
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
RET
ENTRY(sa1_cache_purgeID_rng)
cmp r1, #0x4000
@ -248,7 +248,7 @@ ENTRY(sa1_cache_purgeID_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
RET
ENTRY(sa1_cache_purgeD_rng)
cmp r1, #0x4000
@ -265,7 +265,7 @@ ENTRY(sa1_cache_purgeD_rng)
bhi 1b
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mov pc, lr
RET
ENTRY(sa1_cache_syncI_rng)
cmp r1, #0x4000
@ -283,7 +283,7 @@ ENTRY(sa1_cache_syncI_rng)
mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */
mcr p15, 0, r0, c7, c5, 0 /* flush I cache */
mov pc, lr
RET
/*
* Context switch.
@ -312,5 +312,5 @@ ENTRY(sa110_context_switch)
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
RET
#endif

View File

@ -94,7 +94,7 @@ ENTRY(sa11x0_cpu_sleep)
/* Restore interrupts (which will cause them to be serviced). */
msr cpsr_all, r3
mov pc, lr
RET
/*
@ -118,8 +118,8 @@ ENTRY(sa11x0_context_switch)
/* Make sure that pipeline is emptied */
mov r0, r0
mov r0, r0
mov pc, lr
RET
ENTRY(sa11x0_drain_readbuf)
mcr p15, 0, r0, c9, c0, 0 /* drain read buffer */
mov pc, lr
RET

View File

@ -166,7 +166,7 @@ ENTRY(xscale_setttb)
#else
str r2, [r3]
#endif
mov pc, lr
RET
/*
* TLB functions
@ -177,7 +177,7 @@ ENTRY(xscale_setttb)
ENTRY(xscale_tlb_flushID_SE)
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
mov pc, lr
RET
/*
* Cache functions
@ -317,7 +317,7 @@ ENTRY(xscale_cache_cleanD)
CPWAIT(r0)
XSCALE_CACHE_CLEAN_EPILOGUE
mov pc, lr
RET
/*
* Clean the mini-data cache.
@ -494,4 +494,4 @@ ENTRY(xscale_cpu_sleep)
mcr p14, 0, r0, c7, c0, 0
1:
mov pc, lr
RET

View File

@ -73,7 +73,7 @@ ENTRY(fiq_getregs)
stmia r0, {r8-r13}
BACK_TO_SVC_MODE
mov pc, lr
RET
/*
* fiq_setregs:
@ -87,7 +87,7 @@ ENTRY(fiq_setregs)
ldmia r0, {r8-r13}
BACK_TO_SVC_MODE
mov pc, lr
RET
/*
* fiq_nullhandler:

View File

@ -77,7 +77,7 @@ ENTRY(casuptr)
ldrt r3, [r0]
cmp r3, r1
movne r0, r3
movne pc, lr
RETne
strt r2, [r0]
mov r0, r1
#ifdef MULTIPROCESSOR
@ -94,7 +94,7 @@ ENTRY(casuptr)
#endif
mov r1, #0x00000000
str r1, [r3, #PCB_ONFAULT]
mov pc, lr
RET
/*
@ -130,7 +130,7 @@ ENTRY(fuword)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
mov r0, r3
mov pc, lr
RET
/*
* fusword(caddr_t uaddr);
@ -168,7 +168,7 @@ ENTRY(fusword)
#endif
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
mov pc, lr
RET
/*
* fuswintr(caddr_t uaddr);
@ -181,7 +181,7 @@ ENTRY(fuswintr)
ldr r2, [r2]
teq r2, #0
mvnne r0, #0x00000000
movne pc, lr
RETne
#ifdef MULTIPROCESSOR
/* XXX Probably not appropriate for non-Hydra SMPs */
@ -214,7 +214,7 @@ ENTRY(fuswintr)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
mov pc, lr
RET
Lblock_userspace_access:
.word _C_LABEL(block_userspace_access)
@ -258,7 +258,7 @@ ENTRY(fubyte)
mov r1, #0x00000000
str r1, [r2, #PCB_ONFAULT]
mov r0, r3
mov pc, lr
RET
/*
* Handle faults from [fs]u*(). Clean up and return -1.
@ -268,7 +268,7 @@ ENTRY(fubyte)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
mvn r0, #0x00000000
mov pc, lr
RET
/*
* Handle faults from [fs]u*(). Clean up and return -1. This differs from
@ -282,7 +282,7 @@ _C_LABEL(fusubailout):
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
mvn r0, #0x00000000
mov pc, lr
RET
#ifdef DIAGNOSTIC
/*
@ -331,7 +331,7 @@ ENTRY(suword)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
mov pc, lr
RET
/*
* suswintr(caddr_t uaddr, short x);
@ -344,7 +344,7 @@ ENTRY(suswintr)
ldr r2, [r2]
teq r2, #0
mvnne r0, #0x00000000
movne pc, lr
RETne
#ifdef MULTIPROCESSOR
stmfd sp!, {r0, r1, r14}
@ -377,7 +377,7 @@ ENTRY(suswintr)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
mov pc, lr
RET
/*
* susword(caddr_t uaddr, short x);
@ -416,7 +416,7 @@ ENTRY(susword)
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
mov pc, lr
RET
/*
* subyte(caddr_t uaddr, char x);
@ -448,4 +448,4 @@ ENTRY(subyte)
strbt r1, [r0]
mov r0, #0x00000000
str r0, [r2, #PCB_ONFAULT]
mov pc, lr
RET

View File

@ -274,7 +274,7 @@ ASENTRY_NP(L_cksumdata)
orrne r2, r2, r6, lsl #24
#endif
subs r1, r1, r7 /* Update length */
moveq pc, lr /* All done? */
RETeq /* All done? */
/* Buffer is now word aligned */
.Lcksumdata_wordaligned:
@ -366,7 +366,7 @@ ASENTRY_NP(L_cksumdata)
#endif
adds r1, r1, #0x40
moveq pc, lr
RETeq
cmp r1, #0x20
#ifdef __XSCALE__
@ -399,7 +399,7 @@ ASENTRY_NP(L_cksumdata)
#endif
adc r2, r2, #0x00
subs r1, r1, #0x20
moveq pc, lr
RETeq
.Lcksumdata_less_than_32:
/* There are less than 32 bytes left */
@ -442,7 +442,7 @@ ASENTRY_NP(L_cksumdata)
/* Deal with < 4 bytes remaining */
.Lcksumdata_lessthan4:
adds r1, r1, #0x04
moveq pc, lr
RETeq
/* Deal with 1 to 3 remaining bytes, possibly misaligned */
.Lcksumdata_endgame:
@ -467,4 +467,4 @@ ASENTRY_NP(L_cksumdata)
#endif
adds r2, r2, r3
adc r2, r2, #0x00
mov pc, lr
RET

View File

@ -317,12 +317,12 @@ ENTRY_NP(Execute)
ENTRY(setjmp)
stmia r0, {r4-r14}
mov r0, #0x00000000
mov pc, lr
RET
ENTRY(longjmp)
ldmia r0, {r4-r14}
mov r0, #0x00000001
mov pc, lr
RET
.data
.global _C_LABEL(esym)

View File

@ -47,17 +47,17 @@ __FBSDID("$FreeBSD$");
ENTRY(nexus_bs_r_1)
ldrb r0, [r1, r2, lsl #2]
mov pc, lr
RET
ENTRY(nexus_bs_r_2)
ldr r0, [r1, r2, lsl #2]
bic r0, r0, #0xff000000
bic r0, r0, #0x00ff0000
mov pc, lr
RET
ENTRY(nexus_bs_r_4)
ldr r0, [r1, r2, lsl #2]
mov pc, lr
RET
/*
* write single
@ -65,17 +65,17 @@ ENTRY(nexus_bs_r_4)
ENTRY(nexus_bs_w_1)
strb r3, [r1, r2, lsl #2]
mov pc, lr
RET
ENTRY(nexus_bs_w_2)
mov r3, r3, lsl #16
orr r3, r3, r3, lsr #16
str r3, [r1, r2, lsl #2]
mov pc, lr
RET
ENTRY(nexus_bs_w_4)
str r3, [r1, r2, lsl #2]
mov pc, lr
RET
/*
* read multiple
@ -105,7 +105,7 @@ nexus_wm_1_loop:
subs r2, r2, #0x00000001
bgt nexus_wm_1_loop
mov pc, lr
RET
ENTRY(nexus_bs_wm_2)
add r0, r1, r2, lsl #2

View File

@ -65,7 +65,7 @@ ENTRY_NP(SetCPSR)
mov r0, r3 /* Return the old CPSR */
mov pc, lr
RET
/* Gets the CPSR register
@ -76,5 +76,5 @@ ENTRY_NP(SetCPSR)
ENTRY_NP(GetCPSR)
mrs r0, cpsr /* Get the CPSR */
mov pc, lr
RET

View File

@ -117,7 +117,7 @@ do_memset:
stmgeia ip!, {r2-r3}
#endif
bgt .Lmemset_loop128
moveq pc, lr /* Zero length so just exit */
RETeq /* Zero length so just exit */
add r1, r1, #0x80 /* Adjust for extra sub */
@ -136,7 +136,7 @@ do_memset:
stmgeia ip!, {r2-r3}
#endif
bgt .Lmemset_loop32
moveq pc, lr /* Zero length so just exit */
RETeq /* Zero length so just exit */
adds r1, r1, #0x10 /* Partially adjust for extra sub */
@ -148,7 +148,7 @@ do_memset:
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
#endif
moveq pc, lr /* Zero length so just exit */
RETeq /* Zero length so just exit */
addlt r1, r1, #0x10 /* Possibly adjust for extra sub */
@ -157,12 +157,12 @@ do_memset:
subs r1, r1, #0x04
strge r3, [ip], #0x04
bgt .Lmemset_loop4
moveq pc, lr /* Zero length so just exit */
RETeq /* Zero length so just exit */
#ifdef __XSCALE__
/* Compensate for 64-bit alignment check */
adds r1, r1, #0x04
moveq pc, lr
RETeq
cmp r1, #2
#else
cmp r1, #-2
@ -171,7 +171,7 @@ do_memset:
strb r3, [ip], #0x01 /* Set 1 byte */
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
mov pc, lr /* Exit */
RET /* Exit */
.Lmemset_wordunaligned:
rsb r2, r2, #0x004
@ -185,12 +185,12 @@ do_memset:
.Lmemset_lessthanfour:
cmp r1, #0x00
moveq pc, lr /* Zero length so exit */
RETeq /* Zero length so exit */
strb r3, [ip], #0x01 /* Set 1 byte */
cmp r1, #0x02
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
mov pc, lr /* Exit */
RET /* Exit */
ENTRY(memcmp)
mov ip, r0
@ -201,7 +201,7 @@ ENTRY(memcmp)
/* Are both addresses aligned the same way? */
cmp r2, #0x00
eornes r3, ip, r1
moveq pc, lr /* len == 0, or same addresses! */
RETeq /* len == 0, or same addresses! */
tst r3, #0x03
subne r2, r2, #0x01
bne .Lmemcmp_bytewise2 /* Badly aligned. Do it the slow way */
@ -217,25 +217,25 @@ ENTRY(memcmp)
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r0, r0, r3
movne pc, lr
RETne
subs r2, r2, #0x01
moveq pc, lr
RETeq
/* Compare up to 2 bytes */
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r0, r0, r3
movne pc, lr
RETne
subs r2, r2, #0x01
moveq pc, lr
RETeq
/* Compare 1 byte */
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r0, r0, r3
movne pc, lr
RETne
subs r2, r2, #0x01
moveq pc, lr
RETeq
/* Compare 4 bytes at a time, if possible */
subs r2, r2, #0x04
@ -251,7 +251,7 @@ ENTRY(memcmp)
/* Correct for extra subtraction, and check if done */
adds r2, r2, #0x04
cmpeq r0, #0x00 /* If done, did all bytes match? */
moveq pc, lr /* Yup. Just return */
RETeq /* Yup. Just return */
/* Re-do the final word byte-wise */
sub ip, ip, #0x04
@ -266,7 +266,7 @@ ENTRY(memcmp)
cmpcs r0, r3
beq .Lmemcmp_bytewise2
sub r0, r0, r3
mov pc, lr
RET
/*
* 6 byte compares are very common, thanks to the network stack.
@ -281,25 +281,25 @@ ENTRY(memcmp)
ldrb r2, [r1, #0x01] /* r2 = b2#1 */
subs r0, r0, r3 /* r0 = b1#0 - b2#0 */
ldreqb r3, [ip, #0x01] /* r3 = b1#1 */
movne pc, lr /* Return if mismatch on #0 */
RETne /* Return if mismatch on #0 */
subs r0, r3, r2 /* r0 = b1#1 - b2#1 */
ldreqb r3, [r1, #0x02] /* r3 = b2#2 */
ldreqb r0, [ip, #0x02] /* r0 = b1#2 */
movne pc, lr /* Return if mismatch on #1 */
RETne /* Return if mismatch on #1 */
ldrb r2, [r1, #0x03] /* r2 = b2#3 */
subs r0, r0, r3 /* r0 = b1#2 - b2#2 */
ldreqb r3, [ip, #0x03] /* r3 = b1#3 */
movne pc, lr /* Return if mismatch on #2 */
RETne /* Return if mismatch on #2 */
subs r0, r3, r2 /* r0 = b1#3 - b2#3 */
ldreqb r3, [r1, #0x04] /* r3 = b2#4 */
ldreqb r0, [ip, #0x04] /* r0 = b1#4 */
movne pc, lr /* Return if mismatch on #3 */
RETne /* Return if mismatch on #3 */
ldrb r2, [r1, #0x05] /* r2 = b2#5 */
subs r0, r0, r3 /* r0 = b1#4 - b2#4 */
ldreqb r3, [ip, #0x05] /* r3 = b1#5 */
movne pc, lr /* Return if mismatch on #4 */
RETne /* Return if mismatch on #4 */
sub r0, r3, r2 /* r0 = b1#5 - b2#5 */
mov pc, lr
RET
ENTRY(bcopy)
mov r3, r0
@ -607,7 +607,7 @@ ENTRY(memcpy)
.Lmemcpy_w_lessthan128:
adds r2, r2, #0x80 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x20
blt .Lmemcpy_w_lessthan32
@ -632,7 +632,7 @@ ENTRY(memcpy)
.Lmemcpy_w_lessthan32:
adds r2, r2, #0x20 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
and r4, r2, #0x18
rsbs r4, r4, #0x18
@ -659,11 +659,11 @@ ENTRY(memcpy)
/* Less than 8 bytes remaining */
ldmfd sp!, {r4-r9}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
ldrge ip, [r1], #0x04
strge ip, [r3], #0x04
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
addlt r2, r2, #0x04
ldrb ip, [r1], #0x01
cmp r2, #0x02
@ -672,7 +672,7 @@ ENTRY(memcpy)
ldrgtb ip, [r1]
strgeb r2, [r3], #0x01
strgtb ip, [r3]
mov pc, lr
RET
/*
@ -726,7 +726,7 @@ ENTRY(memcpy)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r1, r1, #0x03
blt .Lmemcpy_bad_done
@ -787,7 +787,7 @@ ENTRY(memcpy)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r1, r1, #0x02
blt .Lmemcpy_bad_done
@ -848,7 +848,7 @@ ENTRY(memcpy)
adds r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
moveq pc, lr /* Return now if done */
RETeq /* Return now if done */
subs r2, r2, #0x04
sublt r1, r1, #0x01
blt .Lmemcpy_bad_done
@ -873,7 +873,7 @@ ENTRY(memcpy)
.Lmemcpy_bad_done:
ldmfd sp!, {r4-r7}
adds r2, r2, #0x04
moveq pc, lr
RETeq
ldrb ip, [r1], #0x01
cmp r2, #0x02
ldrgeb r2, [r1], #0x01
@ -881,7 +881,7 @@ ENTRY(memcpy)
ldrgtb ip, [r1]
strgeb r2, [r3], #0x01
strgtb ip, [r3]
mov pc, lr
RET
/*
@ -892,7 +892,7 @@ ENTRY(memcpy)
.Lmemcpy_short:
add pc, pc, r2, lsl #2
nop
mov pc, lr /* 0x00 */
RET /* 0x00 */
b .Lmemcpy_bytewise /* 0x01 */
b .Lmemcpy_bytewise /* 0x02 */
b .Lmemcpy_bytewise /* 0x03 */
@ -912,7 +912,7 @@ ENTRY(memcpy)
strb ip, [r3], #0x01
ldrneb ip, [r1], #0x01
bne 1b
mov pc, lr
RET
/******************************************************************************
* Special case for 4 byte copies
@ -932,7 +932,7 @@ ENTRY(memcpy)
*/
ldr r2, [r1]
str r2, [r0]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -948,7 +948,7 @@ ENTRY(memcpy)
orr r3, r3, r2, lsl #24 /* r3 = 3210 */
#endif
str r3, [r0]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -963,7 +963,7 @@ ENTRY(memcpy)
#endif
orr r3, r2, r3, lsl #16
str r3, [r0]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -979,7 +979,7 @@ ENTRY(memcpy)
orr r3, r3, r2, lsl #8 /* r3 = 3210 */
#endif
str r3, [r0]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -998,7 +998,7 @@ ENTRY(memcpy)
strb r1, [r0, #0x03]
#endif
strh r3, [r0, #0x01]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1010,7 +1010,7 @@ ENTRY(memcpy)
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1031,7 +1031,7 @@ ENTRY(memcpy)
#endif
strh r2, [r0, #0x01]
strb r3, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1043,7 +1043,7 @@ ENTRY(memcpy)
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1059,7 +1059,7 @@ ENTRY(memcpy)
mov r3, r2, lsr #16
strh r3, [r0, #0x02]
#endif
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1077,7 +1077,7 @@ ENTRY(memcpy)
orr r2, r2, r3, lsl #8 /* r2 = xx32 */
#endif
strh r2, [r0, #0x02]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1087,7 +1087,7 @@ ENTRY(memcpy)
ldrh r3, [r1, #0x02]
strh r2, [r0]
strh r3, [r0, #0x02]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1105,7 +1105,7 @@ ENTRY(memcpy)
orr r3, r3, r2, lsr #24 /* r3 = 3210 */
#endif
strh r3, [r0]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1125,7 +1125,7 @@ ENTRY(memcpy)
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
#endif
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1137,7 +1137,7 @@ ENTRY(memcpy)
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1162,7 +1162,7 @@ ENTRY(memcpy)
mov r3, r3, lsr #8 /* r3 = ...3 */
strb r3, [r0, #0x03]
#endif
mov pc, lr
RET
LMEMCPY_4_PAD
/*
@ -1174,7 +1174,7 @@ ENTRY(memcpy)
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_4_PAD
@ -1198,7 +1198,7 @@ ENTRY(memcpy)
ldrh r3, [r1, #0x04]
str r2, [r0]
strh r3, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1216,7 +1216,7 @@ ENTRY(memcpy)
mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
str r2, [r0]
strh r3, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1235,7 +1235,7 @@ ENTRY(memcpy)
str r2, [r0]
strh r1, [r0, #0x04]
#endif
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1257,7 +1257,7 @@ ENTRY(memcpy)
#endif
str r2, [r0]
strh r1, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1280,7 +1280,7 @@ ENTRY(memcpy)
#endif
strh r3, [r0, #0x03]
strb r2, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1294,7 +1294,7 @@ ENTRY(memcpy)
strh r3, [r0, #0x01]
strh ip, [r0, #0x03]
strb r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1321,7 +1321,7 @@ ENTRY(memcpy)
orr r3, r3, r1, lsl #8 /* r3 = 4321 */
strh r3, [r0, #0x01]
#endif
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1335,7 +1335,7 @@ ENTRY(memcpy)
strh r3, [r0, #0x01]
strh ip, [r0, #0x03]
strb r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1356,7 +1356,7 @@ ENTRY(memcpy)
strh r3, [r0]
str r2, [r0, #0x02]
#endif
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1374,7 +1374,7 @@ ENTRY(memcpy)
#endif
strh r1, [r0]
str r2, [r0, #0x02]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1384,7 +1384,7 @@ ENTRY(memcpy)
ldr r3, [r1, #0x02]
strh r2, [r0]
str r3, [r0, #0x02]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1404,7 +1404,7 @@ ENTRY(memcpy)
#endif
strh r3, [r0]
str r1, [r0, #0x02]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1425,7 +1425,7 @@ ENTRY(memcpy)
#endif
str r2, [r0, #0x01]
strb r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1439,7 +1439,7 @@ ENTRY(memcpy)
strh r3, [r0, #0x01]
strh ip, [r0, #0x03]
strb r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1460,7 +1460,7 @@ ENTRY(memcpy)
#endif
str r2, [r0, #0x01]
strb r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
/*
@ -1472,7 +1472,7 @@ ENTRY(memcpy)
strb r2, [r0]
str r3, [r0, #0x01]
strb r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_6_PAD
@ -1496,7 +1496,7 @@ ENTRY(memcpy)
ldr r3, [r1, #0x04]
str r2, [r0]
str r3, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1517,7 +1517,7 @@ ENTRY(memcpy)
#endif
str r3, [r0]
str r2, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1537,7 +1537,7 @@ ENTRY(memcpy)
#endif
str r2, [r0]
str r3, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1558,7 +1558,7 @@ ENTRY(memcpy)
#endif
str r3, [r0]
str r2, [r0, #0x04]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1583,7 +1583,7 @@ ENTRY(memcpy)
#endif
strh r1, [r0, #0x01]
str r3, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1597,7 +1597,7 @@ ENTRY(memcpy)
strh r3, [r0, #0x01]
str ip, [r0, #0x03]
strb r1, [r0, #0x07]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1625,7 +1625,7 @@ ENTRY(memcpy)
#endif
strh ip, [r0, #0x01]
str r3, [r0, #0x03]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1646,7 +1646,7 @@ ENTRY(memcpy)
#endif
str r2, [r0, #0x03]
strb r1, [r0, #0x07]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1666,7 +1666,7 @@ ENTRY(memcpy)
#endif
str r2, [r0, #0x02]
strh r3, [r0, #0x06]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1689,7 +1689,7 @@ ENTRY(memcpy)
#endif
str r1, [r0, #0x02]
strh r3, [r0, #0x06]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1701,7 +1701,7 @@ ENTRY(memcpy)
strh r2, [r0]
str ip, [r0, #0x02]
strh r3, [r0, #0x06]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1724,7 +1724,7 @@ ENTRY(memcpy)
#endif
str r3, [r0, #0x02]
strh r2, [r0]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1749,7 +1749,7 @@ ENTRY(memcpy)
orr r2, r2, r3, lsl #24 /* r2 = 4321 */
str r2, [r0, #0x01]
#endif
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1770,7 +1770,7 @@ ENTRY(memcpy)
#endif
str r2, [r0, #0x01]
strb r1, [r0, #0x07]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1798,7 +1798,7 @@ ENTRY(memcpy)
#endif
str ip, [r0, #0x01]
strh r1, [r0, #0x05]
mov pc, lr
RET
LMEMCPY_8_PAD
/*
@ -1812,7 +1812,7 @@ ENTRY(memcpy)
str ip, [r0, #0x01]
strh r3, [r0, #0x05]
strb r1, [r0, #0x07]
mov pc, lr
RET
LMEMCPY_8_PAD
/******************************************************************************
@ -1837,7 +1837,7 @@ ENTRY(memcpy)
str r2, [r0]
str r3, [r0, #0x04]
str r1, [r0, #0x08]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -1865,7 +1865,7 @@ ENTRY(memcpy)
#endif
str r2, [r0, #0x04]
str r1, [r0]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -1892,7 +1892,7 @@ ENTRY(memcpy)
#endif
str r3, [r0, #0x04]
str r1, [r0, #0x08]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -1920,7 +1920,7 @@ ENTRY(memcpy)
#endif
str r3, [r0, #0x04]
str r1, [r0, #0x08]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -1949,7 +1949,7 @@ ENTRY(memcpy)
str r2, [r0, #0x03]
str r1, [r0, #0x07]
strb ip, [r0, #0x0b]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -1965,7 +1965,7 @@ ENTRY(memcpy)
str ip, [r0, #0x03]
str r2, [r0, #0x07]
strb r1, [r0, #0x0b]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -1999,7 +1999,7 @@ ENTRY(memcpy)
str r3, [r0, #0x03]
str r2, [r0, #0x07]
strb r1, [r0, #0x0b]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2029,7 +2029,7 @@ ENTRY(memcpy)
str r3, [r0, #0x03]
str ip, [r0, #0x07]
strb r1, [r0, #0x0b]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2055,7 +2055,7 @@ ENTRY(memcpy)
str r1, [r0, #0x02]
str r3, [r0, #0x06]
strh r2, [r0, #0x0a]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2084,7 +2084,7 @@ ENTRY(memcpy)
str r2, [r0, #0x02]
str r3, [r0, #0x06]
strh r1, [r0, #0x0a]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2098,7 +2098,7 @@ ENTRY(memcpy)
str r3, [r0, #0x02]
str ip, [r0, #0x06]
strh r1, [r0, #0x0a]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2127,7 +2127,7 @@ ENTRY(memcpy)
str r2, [r0, #0x06]
str r3, [r0, #0x02]
strh r1, [r0]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2161,7 +2161,7 @@ ENTRY(memcpy)
mov r1, r1, lsr #16 /* r1 = ...B */
strb r1, [r0, #0x0b]
#endif
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2191,7 +2191,7 @@ ENTRY(memcpy)
str r3, [r0, #0x05]
str ip, [r0, #0x01]
strb r1, [r0]
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2232,7 +2232,7 @@ ENTRY(memcpy)
strh ip, [r0, #0x09]
strb r1, [r0, #0x0b]
#endif
mov pc, lr
RET
LMEMCPY_C_PAD
/*
@ -2248,7 +2248,7 @@ ENTRY(memcpy)
str ip, [r0, #0x05]
strh r2, [r0, #0x09]
strb r1, [r0, #0x0b]
mov pc, lr
RET
#endif /* __XSCALE__ */
#ifdef GPROF

View File

@ -317,7 +317,6 @@ ENTRY(cpu_switch)
mov r2, #DOMAIN_CLIENT
cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
beq .Lcs_cache_purge_skipped /* Yup. Don't flush cache */
/*
@ -464,7 +463,7 @@ ENTRY(cpu_switch)
.asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
#endif
ENTRY(savectx)
mov pc, lr
RET
ENTRY(fork_trampoline)
mov r1, r5
mov r2, sp

View File

@ -47,7 +47,7 @@ ENTRY_NP(__modsi3)
/* XXX should cause a fatal error */
mvn r0, #0
#endif
mov pc, lr
RET
ENTRY_NP(__udivsi3)
.L_udivide: /* r0 = r0 / r1; r1 = r0 % r1 */
@ -69,7 +69,7 @@ ENTRY_NP(__udivsi3)
.L_divide_l0: /* r0 == 1 */
mov r0, r1
mov r1, #0
mov pc, lr
RET
ENTRY_NP(__divsi3)
.L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
@ -373,7 +373,7 @@ ENTRY_NP(__divsi3)
movs ip, ip, lsl #1
bicmi r0, r0, #0x80000000 /* Fix incase we divided 0x80000000 */
rsbmi r0, r0, #0
mov pc, lr
RET
.L_udivide_l1:
tst ip, #0x10000000
@ -384,4 +384,4 @@ ENTRY_NP(__divsi3)
subhs r1, r1, r0
addhs r3, r3, r2
mov r0, r3
mov pc, lr
RET

View File

@ -64,7 +64,7 @@ ENTRY(ffs)
/* now lookup in table indexed on top 6 bits of r0 */
ldrneb r0, [ r2, r0, lsr #26 ]
mov pc, lr
RET
.text;
.type .L_ffs_table, _ASM_TYPE_OBJECT;
.L_ffs_table:
@ -80,5 +80,5 @@ ENTRY(ffs)
#else
clzne r0, r0
rsbne r0, r0, #32
mov pc, lr
RET
#endif