lib/libc/amd64/string/strchrnul.S: fix edge case in scalar code

When the buffer is immediately preceded by the character we
are looking for and begins with one higher than that character,
and the buffer is misaligned, a match was erroneously detected
in the first character.  Fix this by changing the way we prevent
matches before the buffer from being detected: instead of
removing the corresponding bit from the 0x80..80 mask, set the
LSB of bytes before the buffer after xoring with the character we
look for.

The bug only affects amd64 with ARCHLEVEL=scalar (cf. simd(7)).
The change comes at a 2% performance impact for short strings
if ARCHLEVEL is set to scalar.  The default configuration is not
affected.

os: FreeBSD
arch: amd64
cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz
        │ strchrnul.scalar.0.out │       strchrnul.scalar.2.out       │
        │         sec/op         │   sec/op     vs base               │
Short                57.89µ ± 2%   59.08µ ± 1%  +2.07% (p=0.030 n=20)
Mid                  19.24µ ± 0%   19.73µ ± 0%  +2.53% (p=0.000 n=20)
Long                 11.03µ ± 0%   11.03µ ± 0%       ~ (p=0.547 n=20)
geomean              23.07µ        23.43µ       +1.53%

        │ strchrnul.scalar.0.out │       strchrnul.scalar.2.out        │
        │          B/s           │     B/s       vs base               │
Short               2.011Gi ± 2%   1.970Gi ± 1%  -2.02% (p=0.030 n=20)
Mid                 6.049Gi ± 0%   5.900Gi ± 0%  -2.47% (p=0.000 n=20)
Long                10.56Gi ± 0%   10.56Gi ± 0%       ~ (p=0.547 n=20)
geomean             5.045Gi        4.969Gi       -1.50%

MFC to:		stable/14
MFC after:	3 days
Approved by:	mjg (blanket, via IRC)
Sponsored by:	The FreeBSD Foundation
This commit is contained in:
Robert Clausecker 2023-08-25 16:22:22 +00:00
parent 247e8662d2
commit 3d8ef251aa
1 changed file with 10 additions and 7 deletions

View File

@ -52,22 +52,25 @@ ARCHENTRY(__strchrnul, scalar)
movabs $0x0101010101010101, %r8
mov (%rdi), %rax # load first word
imul %r8, %rsi # replicate char 8 times
movabs $0x8080808080808080, %r9
/*
* Unaligned input: align to 8 bytes. Then proceed the same
* way as with aligned input, but ignore matches before the
* beginning of the string. This is achieved by shifting r9
* into r10 to have 0x00 bytes before the string begins.
* way as with aligned input, but prevent matches before the
* beginning of the string. This is achieved by oring 0x01
* into each byte of the buffer before the string
*/
shl $3, %ecx
mov %r9, %r10
mov %r8, %r10
add $8, %rdi
shl %cl, %r10 # 0x80 where the string is
shl %cl, %r10 # 0x01 where the string is
xor %r8, %r10 # 0x01 where it is not
neg %r8 # negate 01..01 so we can use lea
movabs $0x8080808080808080, %r9
mov %rsi, %rcx
xor %rax, %rcx # str ^ c
or %r10, %rax # str without NUL bytes before it
or %r10, %rcx # (str ^ c) without matches before it
lea (%rax, %r8, 1), %rdx # str - 0x01..01
lea (%rcx, %r8, 1), %r11 # (str ^ c) - 0x01..01
not %rax # ~str
@ -75,7 +78,7 @@ ARCHENTRY(__strchrnul, scalar)
and %rdx, %rax # (str - 0x01..01) & ~str
and %r11, %rcx # ((str ^ c - 0x01..01) & ~(str ^ c)
or %rcx, %rax # matches for both
and %r10, %rax # not including junk bytes or bytes before the string
and %r9, %rax # not including junk bytes
jnz 1f
/* main loop unrolled twice */