amd64 atomic.h: minor codegen optimization in flag access

Previously, the pattern for extracting status flags from inline assembly
blocks was to use a setcc instruction inside the block to write the flag
to a register.  This was suboptimal in a few ways:
 - It would lead to code like sete %cl; test %cl; jne, i.e. a flag
   would be copied into a register only to be reloaded into a flag.
 - The setcc would force the block to use an additional register.
 - If the client code didn't care about the flag value, the setcc was
   entirely pointless but could not be eliminated by the optimizer.
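
For illustration, a minimal standalone sketch of the old pattern (the
function name is hypothetical, and it spells out lock and the plain
__asm__ syntax rather than using the kernel's MPLOCKED and cdefs
macros):

    static inline int
    old_cmpset_int(volatile unsigned int *dst, unsigned int expect,
        unsigned int src)
    {
        unsigned char res;

        /* cmpxchg sets ZF; sete then copies ZF into a byte register. */
        __asm__ __volatile__(
            "lock; cmpxchgl %3,%1; "
            "sete %0"
            : "=q" (res),       /* 0: flag value, via setcc */
              "+m" (*dst),      /* 1: memory operand */
              "+a" (expect)     /* 2: expected value, in %eax */
            : "r" (src)         /* 3: replacement value */
            : "memory", "cc");
        return (res);
    }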

A more modern inline asm construct (since gcc 6 and clang 9) allows for
"flag output operands", where a C variable can be written directly from
a flag.  The optimizer can then use this to produce direct code where
the flag does not take a trip through a register.
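
A minimal sketch of the same operation in the new form (again a
hypothetical standalone function, not the kernel macro; it assumes
gcc 6+ or clang 9+):

    static inline int
    new_cmpset_int(volatile unsigned int *dst, unsigned int expect,
        unsigned int src)
    {
        int res;

        /*
         * "=@cce" makes res the ZF ("e" for "equal") condition at the
         * end of the asm; the compiler only materializes it with setcc
         * if the value actually needs to live in a register.
         */
        __asm__ __volatile__(
            "lock; cmpxchgl %3,%1"
            : "=@cce" (res),    /* 0: 1 if *dst matched expect */
              "+m" (*dst),      /* 1: memory operand */
              "+a" (expect)     /* 2: expected value, in %eax */
            : "r" (src)         /* 3: replacement value */
            : "memory");
        return (res);
    }

With this form a caller like if (new_cmpset_int(p, e, s)) can compile
to the lock cmpxchg followed directly by a conditional jump.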

In practice this makes each affected operation sequence shorter by five
bytes of instructions (roughly the three-byte sete plus the two-byte
test that re-derived the flag).  It's unlikely this has a measurable
performance impact.

Reviewed by:	kib, markj, mjg
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D23869
commit 6d1a70dd0a (parent e8049590f0)
Ryan Libby, 2020-02-28 18:32:36 +00:00

@@ -201,9 +201,8 @@ atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
 	__asm __volatile( \
 	" " MPLOCKED " " \
 	" cmpxchg %3,%1 ; " \
-	" sete %0 ; " \
 	"# atomic_cmpset_" #TYPE " " \
-	: "=q" (res), /* 0 */ \
+	: "=@cce" (res), /* 0 */ \
 	  "+m" (*dst), /* 1 */ \
 	  "+a" (expect) /* 2 */ \
 	: "r" (src) /* 3 */ \
@@ -219,9 +218,8 @@ atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
 	__asm __volatile( \
 	" " MPLOCKED " " \
 	" cmpxchg %3,%1 ; " \
-	" sete %0 ; " \
 	"# atomic_fcmpset_" #TYPE " " \
-	: "=q" (res), /* 0 */ \
+	: "=@cce" (res), /* 0 */ \
 	  "+m" (*dst), /* 1 */ \
 	  "+a" (*expect) /* 2 */ \
 	: "r" (src) /* 3 */ \
@@ -278,9 +276,8 @@ atomic_testandset_int(volatile u_int *p, u_int v)
 	__asm __volatile(
 	" " MPLOCKED " "
 	" btsl %2,%1 ; "
-	" setc %0 ; "
 	"# atomic_testandset_int"
-	: "=q" (res), /* 0 */
+	: "=@ccc" (res), /* 0 */
 	  "+m" (*p) /* 1 */
 	: "Ir" (v & 0x1f) /* 2 */
 	: "cc");
@@ -295,9 +292,8 @@ atomic_testandset_long(volatile u_long *p, u_int v)
 	__asm __volatile(
 	" " MPLOCKED " "
 	" btsq %2,%1 ; "
-	" setc %0 ; "
 	"# atomic_testandset_long"
-	: "=q" (res), /* 0 */
+	: "=@ccc" (res), /* 0 */
 	  "+m" (*p) /* 1 */
 	: "Jr" ((u_long)(v & 0x3f)) /* 2 */
 	: "cc");
@@ -312,9 +308,8 @@ atomic_testandclear_int(volatile u_int *p, u_int v)
 	__asm __volatile(
 	" " MPLOCKED " "
 	" btrl %2,%1 ; "
-	" setc %0 ; "
 	"# atomic_testandclear_int"
-	: "=q" (res), /* 0 */
+	: "=@ccc" (res), /* 0 */
 	  "+m" (*p) /* 1 */
 	: "Ir" (v & 0x1f) /* 2 */
 	: "cc");
@@ -329,9 +324,8 @@ atomic_testandclear_long(volatile u_long *p, u_int v)
 	__asm __volatile(
 	" " MPLOCKED " "
 	" btrq %2,%1 ; "
-	" setc %0 ; "
 	"# atomic_testandclear_long"
-	: "=q" (res), /* 0 */
+	: "=@ccc" (res), /* 0 */
 	  "+m" (*p) /* 1 */
 	: "Jr" ((u_long)(v & 0x3f)) /* 2 */
 	: "cc");