powerpc/powerpc64: Enforce natural alignment in memcpy

Summary:
POWER architecture CPUs (Book-S) require natural alignment for
cache-inhibited storage accesses.  Since we can't know the caching model
for a page ahead of time, always enforce natural alignment in memcpy.
This fixes a SIGBUS in X with acceleration enabled on POWER9.

As part of this, revert r358672; it is no longer necessary with this fix.

Regression tested by alfredo.

Reviewed by: alfredo
Differential Revision: https://reviews.freebsd.org/D23969
Justin Hibbits 2020-03-06 01:45:03 +00:00
parent 924e10b809
commit 00797360b5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=358688
3 changed files with 13 additions and 1 deletion
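
The idea behind the fix, restated as a minimal C sketch (sketch_memcpy and its structure are illustrative, not taken from the FreeBSD sources; the real change is in the PowerPC assembly diffs below). An access is naturally aligned when its address is a multiple of its size; on cache-inhibited mappings only naturally aligned accesses are safe, so wide copies are allowed only when src and dst share the same low address bits and can therefore be brought to an aligned boundary together:

#include <stddef.h>
#include <stdint.h>

#define	ALIGN_MASK	0x7	/* natural alignment for 8-byte accesses */

static void *
sketch_memcpy(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/*
	 * If dst and src differ in their low three bits they can never be
	 * naturally aligned at the same time; 1-byte accesses are always
	 * naturally aligned, so they are safe on cache-inhibited memory.
	 */
	if ((((uintptr_t)d ^ (uintptr_t)s) & ALIGN_MASK) != 0) {
		while (len--)
			*d++ = *s++;
		return (dst);
	}

	/* Byte-copy until dst (and therefore src) is 8-byte aligned. */
	while (len > 0 && ((uintptr_t)d & ALIGN_MASK) != 0) {
		*d++ = *s++;
		len--;
	}

	/* Copy naturally aligned 8-byte words. */
	while (len >= sizeof(uint64_t)) {
		*(uint64_t *)(void *)d = *(const uint64_t *)(const void *)s;
		d += sizeof(uint64_t);
		s += sizeof(uint64_t);
		len -= sizeof(uint64_t);
	}

	/* Copy any remaining tail bytes. */
	while (len--)
		*d++ = *s++;

	return (dst);
}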

@@ -66,7 +66,7 @@ DEFINE_UIFUNC(, FN_RET, FN_NAME, FN_PARAMS)
 	 * Since POWER ISA 2.07B this is solved transparently
 	 * by the hardware
 	 */
-	if (cpu_features2 & PPC_FEATURE2_ARCH_2_07)
+	if (cpu_features & PPC_FEATURE_HAS_VSX)
 		return (FN_NAME_VSX);
 	else
 		return (FN_NAME_NOVSX);
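
The hunk above is the ifunc resolver that selects the VSX or non-VSX copy once per process, based on the HWCAP bits the kernel exposes. A hedged sketch of the same selection using the plain GCC/Clang ifunc attribute rather than FreeBSD's DEFINE_UIFUNC macro (function names are illustrative, and the feature-bit value should be taken from the system headers, not from this example):

#include <stddef.h>

/* Illustrative stand-ins for the kernel-provided AT_HWCAP word and the
 * VSX feature bit; the authoritative values live in the system headers. */
static unsigned long cpu_features;
#define	PPC_FEATURE_HAS_VSX	0x00000080UL

typedef void *memcpy_fn(void *, const void *, size_t);

static void *
my_memcpy_scalar(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (len--)
		*d++ = *s++;
	return (dst);
}

static void *
my_memcpy_vsx(void *dst, const void *src, size_t len)
{
	/* The real VSX variant uses 16-byte vector loads and stores. */
	return (my_memcpy_scalar(dst, src, len));
}

/* Resolver: runs once when the symbol is first bound and returns the
 * implementation every later call will use. */
static memcpy_fn *
resolve_memcpy(void)
{
	if (cpu_features & PPC_FEATURE_HAS_VSX)
		return (my_memcpy_vsx);
	return (my_memcpy_scalar);
}

void *my_memcpy(void *, const void *, size_t)
    __attribute__((ifunc("resolve_memcpy")));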

@@ -39,6 +39,11 @@ WEAK_REFERENCE(__memcpy, memcpy);
 #define BLOCK_BYTES (1 << BLOCK_BITS)
 #define BLOCK_MASK (BLOCK_BYTES - 1)
+/* Minimum 8 byte alignment, to avoid cache-inhibited alignment faults. */
+#ifndef ALIGN_MASK
+#define ALIGN_MASK 0x7
+#endif
 /*
  * r3: dst
  * r4: src
@@ -48,6 +53,12 @@ ENTRY(FN_NAME)
 	cmpdi	%r5, 0		/* len == 0? nothing to do */
 	beqlr-
+	/* If src and dst are relatively misaligned, do byte copies. */
+	andi.	%r8, %r3, ALIGN_MASK
+	andi.	%r7, %r4, ALIGN_MASK
+	cmpd	%r8, %r7
+	mr	%r7, %r5
+	bne+	.Lcopy_remaining_fix_index_byte
 	mr	%r8, %r3	/* save dst */
 	/* align src */
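
Restating the new assembly check in C terms (copy_strategy and the example addresses are made up for illustration): the two andi. instructions extract the low three bits of dst (%r3) and src (%r4), cmpd compares them, and only when they match can a short run of byte copies bring both pointers to an 8-byte boundary at the same time; otherwise the copy stays on byte accesses for its whole length.

#include <stdint.h>
#include <stdio.h>

#define	ALIGN_MASK	0x7	/* 8-byte natural alignment, as in memcpy.S */

static const char *
copy_strategy(uintptr_t dst, uintptr_t src)
{
	/* Equivalent of "andi. %r8, %r3, ALIGN_MASK; andi. %r7, %r4,
	 * ALIGN_MASK; cmpd %r8, %r7": compare the low bits of dst and src. */
	if ((dst & ALIGN_MASK) != (src & ALIGN_MASK))
		return ("byte copies for the whole length");
	return ("byte copies up to the next 8-byte boundary, then blocks");
}

int
main(void)
{
	/* Low bits differ (3 vs 7): the pointers can never co-align. */
	printf("%s\n", copy_strategy(0x1003, 0x2007));
	/* Low bits match (3 and 3): five byte copies align both pointers. */
	printf("%s\n", copy_strategy(0x1003, 0x200b));
	return (0);
}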

@@ -30,6 +30,7 @@
 #define FN_NAME __memcpy_vsx
 #define BLOCK_BITS 6
+#define ALIGN_MASK 0xf
 /*
  * r5: bytes to copy (multiple of BLOCK_BYTES)