[PowerPC64] Enforce natural alignment in bcopy

POWER architecture CPUs (Book-S) require natural alignment for
cache-inhibited storage accesses. Since we can't know the caching model
for a page ahead of time, always enforce natural alignment in bcopy.
This fixes a SIGBUS when calling the function with misaligned pointers
on POWER7.
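
For reference, here is a rough C sketch of the dispatch the patched assembly performs; the helper names (copy_bytes_forward, copy_bytes_backward, bcopy_sketch) are illustrative and not part of the FreeBSD sources:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define ALIGN_MASK             0x7     /* minimum 8-byte alignment */
    #define MULTI_PHASE_THRESHOLD  512

    /*
     * Byte-at-a-time copies; they never issue a wide (doubleword) access,
     * so no alignment fault can occur on cache-inhibited mappings.
     */
    static void
    copy_bytes_forward(char *dst, const char *src, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    dst[i] = src[i];
    }

    static void
    copy_bytes_backward(char *dst, const char *src, size_t len)
    {
            while (len-- > 0)
                    dst[len] = src[len];
    }

    /*
     * Sketch of the new dispatch: if src and dst do not share the same
     * offset within an 8-byte boundary, fall back to single-byte accesses,
     * choosing the copy direction that is safe for overlapping buffers;
     * otherwise the wide block-copy paths can be used.
     */
    void
    bcopy_sketch(const void *src, void *dst, size_t len)
    {
            uintptr_t s = (uintptr_t)src;
            uintptr_t d = (uintptr_t)dst;

            if ((s & ALIGN_MASK) != (d & ALIGN_MASK)) {
                    if (d > s)
                            copy_bytes_backward(dst, src, len);
                    else
                            copy_bytes_forward(dst, src, len);
                    return;
            }
            /*
             * Mutually aligned: stands in for the fast/multi-phase block
             * copy (len >= MULTI_PHASE_THRESHOLD selects the multi-phase
             * path in the assembly).
             */
            memmove(dst, src, len);
    }

The memmove() call only approximates the aligned case: the real routine still aligns the pointers itself and then copies in large blocks, as the .Lfast_copy and .Lmulti_phase paths in the diff below show.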

Submitted by:		Bruno Larsen <bruno.larsen@eldorado.org.br>
Reviewed by:		luporl, bdragon (IRC)
MFC after:		1 week
Sponsored by:		Eldorado Research Institute (eldorado.org.br)
Differential Revision:	https://reviews.freebsd.org/D28776

@@ -34,6 +34,11 @@ __FBSDID("$FreeBSD$");
#define BLOCK_SIZE (1 << BLOCK_SIZE_BITS)
#define BLOCK_SIZE_MASK (BLOCK_SIZE - 1)
/* Minimum 8-byte alignment, to avoid cache-inhibited alignment faults. */
#ifndef ALIGN_MASK
#define ALIGN_MASK 0x7
#endif
#define MULTI_PHASE_THRESHOLD 512
#ifndef FN_NAME
@@ -66,9 +71,38 @@ ENTRY(FN_NAME)
mr %r4, %r0
#endif
/* First check for relative alignment; if src and dst are not mutually aligned, copy one byte at a time */
andi. %r8, %r3, ALIGN_MASK
andi. %r7, %r4, ALIGN_MASK
cmpd %r7, %r8
bne .Lunaligned
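/* Same relative alignment: use the multi-phase copy for len >= MULTI_PHASE_THRESHOLD */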
cmpldi %r5, MULTI_PHASE_THRESHOLD
bge .Lmulti_phase
b .Lfast_copy
.Lunaligned:
/* forward or backward copy? */
cmpd %r4, %r3
blt .Lbackward_unaligned
/* Just need to set up the increment and jump to the copy loop */
li %r0, 1
mtctr %r5
b .Lsingle_1_loop
.Lbackward_unaligned:
/* advance src and dst to last byte, set decrement and jump to copy */
add %r3, %r3, %r5
addi %r3, %r3, -1
add %r4, %r4, %r5
addi %r4, %r4, -1
li %r0, -1
mtctr %r5
b .Lsingle_1_loop
.Lfast_copy:
/* align src */
cmpd %r4, %r3 /* forward or backward copy? */
blt .Lbackward_align