eal/ppc: use compiler builtins for atomics

Replace the existing PPC assembly code for the rte_atomicXX ops with compiler
atomic builtins, as previously adopted by DPDK (see [1] and [2]). This also
resolves a POWER10 build failure caused by an outstanding gcc bug that
rejects the existing PPC assembly code [3].

[1] https://www.dpdk.org/blog/2021/03/26/dpdk-adopts-the-c11-memory-model/
[2] https://doc.dpdk.org/guides/rel_notes/deprecation.html
[3] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98519

Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
Authored by David Christensen on 2021-09-09 12:42:43 -07:00; committed by David Marchand.
commit b698651b91 (parent 567473433b)

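Note (not part of the patch): each hand-coded lwarx/stwcx. loop below is replaced with a GCC __atomic builtin. A minimal standalone sketch of that mapping, using a made-up wrapper name demo_cmpset32, could look like this:

	#include <stdint.h>

	/* Illustrative only: a legacy 0/1 compare-and-set wrapper expressed
	 * with the GCC builtin used throughout the diff below. */
	static inline int
	demo_cmpset32(volatile uint32_t *dst, uint32_t exp, uint32_t src)
	{
		/* On failure the builtin writes the observed value back into
		 * exp; the wrapper discards it and keeps the old 0/1 return
		 * convention. */
		return __atomic_compare_exchange(dst, &exp, &src, 0,
				__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) ? 1 : 0;
	}

__ATOMIC_ACQUIRE is passed for both the success and failure orderings, matching what the patch itself uses.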

@@ -1,6 +1,7 @@
 /*
  * SPDX-License-Identifier: BSD-3-Clause
  * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
+ * Copyright (c) 2021 IBM Corporation
  * Copyright (c) 2008 Marcel Moolenaar
  * Copyright (c) 2001 Benno Rice
  * Copyright (c) 2001 David E. O'Brien
@@ -16,6 +17,7 @@ extern "C" {
 #endif

 #include <stdint.h>
+#include <rte_compat.h>
 #include "generic/rte_atomic.h"

 #define rte_mb() asm volatile("sync" : : : "memory")
@@ -43,9 +45,6 @@ rte_atomic_thread_fence(int memorder)
 }

 /*------------------------- 16 bit atomic operations -------------------------*/
-/* To be compatible with Power7, use GCC built-in functions for 16 bit
- * operations */
-
 #ifndef RTE_FORCE_INTRINSICS
 static inline int
 rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
@@ -92,30 +91,8 @@ rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
 static inline int
 rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 {
-	unsigned int ret = 0;
-
-	asm volatile(
-			"\tlwsync\n"
-			"1:\tlwarx %[ret], 0, %[dst]\n"
-			"cmplw %[exp], %[ret]\n"
-			"bne 2f\n"
-			"stwcx. %[src], 0, %[dst]\n"
-			"bne- 1b\n"
-			"li %[ret], 1\n"
-			"b 3f\n"
-			"2:\n"
-			"stwcx. %[ret], 0, %[dst]\n"
-			"li %[ret], 0\n"
-			"3:\n"
-			"isync\n"
-			: [ret] "=&r" (ret), "=m" (*dst)
-			: [dst] "r" (dst),
-			  [exp] "r" (exp),
-			  [src] "r" (src),
-			  "m" (*dst)
-			: "cc", "memory");
-
-	return ret;
+	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+		__ATOMIC_ACQUIRE) ? 1 : 0;
 }

 static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
@@ -126,67 +103,23 @@ static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
 static inline void
 rte_atomic32_inc(rte_atomic32_t *v)
 {
-	int t;
-
-	asm volatile(
-			"1: lwarx %[t],0,%[cnt]\n"
-			"addic %[t],%[t],1\n"
-			"stwcx. %[t],0,%[cnt]\n"
-			"bne- 1b\n"
-			: [t] "=&r" (t), "=m" (v->cnt)
-			: [cnt] "r" (&v->cnt), "m" (v->cnt)
-			: "cc", "xer", "memory");
+	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }

 static inline void
 rte_atomic32_dec(rte_atomic32_t *v)
 {
-	int t;
-
-	asm volatile(
-			"1: lwarx %[t],0,%[cnt]\n"
-			"addic %[t],%[t],-1\n"
-			"stwcx. %[t],0,%[cnt]\n"
-			"bne- 1b\n"
-			: [t] "=&r" (t), "=m" (v->cnt)
-			: [cnt] "r" (&v->cnt), "m" (v->cnt)
-			: "cc", "xer", "memory");
+	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }

 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-	int ret;
-
-	asm volatile(
-			"\n\tlwsync\n"
-			"1: lwarx %[ret],0,%[cnt]\n"
-			"addic %[ret],%[ret],1\n"
-			"stwcx. %[ret],0,%[cnt]\n"
-			"bne- 1b\n"
-			"isync\n"
-			: [ret] "=&r" (ret)
-			: [cnt] "r" (&v->cnt)
-			: "cc", "xer", "memory");
-
-	return ret == 0;
+	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }

 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-	int ret;
-
-	asm volatile(
-			"\n\tlwsync\n"
-			"1: lwarx %[ret],0,%[cnt]\n"
-			"addic %[ret],%[ret],-1\n"
-			"stwcx. %[ret],0,%[cnt]\n"
-			"bne- 1b\n"
-			"isync\n"
-			: [ret] "=&r" (ret)
-			: [cnt] "r" (&v->cnt)
-			: "cc", "xer", "memory");
-
-	return ret == 0;
+	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }

 static inline uint32_t
@@ -200,29 +133,8 @@ rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
 static inline int
 rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
 {
-	unsigned int ret = 0;
-
-	asm volatile (
-			"\tlwsync\n"
-			"1: ldarx %[ret], 0, %[dst]\n"
-			"cmpld %[exp], %[ret]\n"
-			"bne 2f\n"
-			"stdcx. %[src], 0, %[dst]\n"
-			"bne- 1b\n"
-			"li %[ret], 1\n"
-			"b 3f\n"
-			"2:\n"
-			"stdcx. %[ret], 0, %[dst]\n"
-			"li %[ret], 0\n"
-			"3:\n"
-			"isync\n"
-			: [ret] "=&r" (ret), "=m" (*dst)
-			: [dst] "r" (dst),
-			  [exp] "r" (exp),
-			  [src] "r" (src),
-			  "m" (*dst)
-			: "cc", "memory");
-	return ret;
+	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+		__ATOMIC_ACQUIRE) ? 1 : 0;
 }

 static inline void
@@ -234,167 +146,66 @@ rte_atomic64_init(rte_atomic64_t *v)
 static inline int64_t
 rte_atomic64_read(rte_atomic64_t *v)
 {
-	long ret;
-
-	asm volatile("ld%U1%X1 %[ret],%[cnt]"
-		: [ret] "=r"(ret)
-		: [cnt] "m"(v->cnt));
-
-	return ret;
+	return v->cnt;
 }

 static inline void
 rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
 {
-	asm volatile("std%U0%X0 %[new_value],%[cnt]"
-		: [cnt] "=m"(v->cnt)
-		: [new_value] "r"(new_value));
+	v->cnt = new_value;
 }

 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
-	long t;
-
-	asm volatile(
-			"1: ldarx %[t],0,%[cnt]\n"
-			"add %[t],%[inc],%[t]\n"
-			"stdcx. %[t],0,%[cnt]\n"
-			"bne- 1b\n"
-			: [t] "=&r" (t), "=m" (v->cnt)
-			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
-			: "cc", "memory");
+	__atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
 }

 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
-	long t;
-
-	asm volatile(
-			"1: ldarx %[t],0,%[cnt]\n"
-			"subf %[t],%[dec],%[t]\n"
-			"stdcx. %[t],0,%[cnt]\n"
-			"bne- 1b\n"
-			: [t] "=&r" (t), "+m" (v->cnt)
-			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
-			: "cc", "memory");
+	__atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
 }

 static inline void
 rte_atomic64_inc(rte_atomic64_t *v)
 {
-	long t;
-
-	asm volatile(
-			"1: ldarx %[t],0,%[cnt]\n"
-			"addic %[t],%[t],1\n"
-			"stdcx. %[t],0,%[cnt]\n"
-			"bne- 1b\n"
-			: [t] "=&r" (t), "+m" (v->cnt)
-			: [cnt] "r" (&v->cnt), "m" (v->cnt)
-			: "cc", "xer", "memory");
+	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }

 static inline void
 rte_atomic64_dec(rte_atomic64_t *v)
 {
-	long t;
-
-	asm volatile(
-			"1: ldarx %[t],0,%[cnt]\n"
-			"addic %[t],%[t],-1\n"
-			"stdcx. %[t],0,%[cnt]\n"
-			"bne- 1b\n"
-			: [t] "=&r" (t), "+m" (v->cnt)
-			: [cnt] "r" (&v->cnt), "m" (v->cnt)
-			: "cc", "xer", "memory");
+	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }

 static inline int64_t
 rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
 {
-	long ret;
-
-	asm volatile(
-			"\n\tlwsync\n"
-			"1: ldarx %[ret],0,%[cnt]\n"
-			"add %[ret],%[inc],%[ret]\n"
-			"stdcx. %[ret],0,%[cnt]\n"
-			"bne- 1b\n"
-			"isync\n"
-			: [ret] "=&r" (ret)
-			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
-			: "cc", "memory");
-
-	return ret;
+	return __atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
 }

 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
 {
-	long ret;
-
-	asm volatile(
-			"\n\tlwsync\n"
-			"1: ldarx %[ret],0,%[cnt]\n"
-			"subf %[ret],%[dec],%[ret]\n"
-			"stdcx. %[ret],0,%[cnt]\n"
-			"bne- 1b\n"
-			"isync\n"
-			: [ret] "=&r" (ret)
-			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
-			: "cc", "memory");
-
-	return ret;
+	return __atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
 }

 static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
 {
-	long ret;
-
-	asm volatile(
-			"\n\tlwsync\n"
-			"1: ldarx %[ret],0,%[cnt]\n"
-			"addic %[ret],%[ret],1\n"
-			"stdcx. %[ret],0,%[cnt]\n"
-			"bne- 1b\n"
-			"isync\n"
-			: [ret] "=&r" (ret)
-			: [cnt] "r" (&v->cnt)
-			: "cc", "xer", "memory");
-
-	return ret == 0;
+	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }

 static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
 {
-	long ret;
-
-	asm volatile(
-			"\n\tlwsync\n"
-			"1: ldarx %[ret],0,%[cnt]\n"
-			"addic %[ret],%[ret],-1\n"
-			"stdcx. %[ret],0,%[cnt]\n"
-			"bne- 1b\n"
-			"isync\n"
-			: [ret] "=&r" (ret)
-			: [cnt] "r" (&v->cnt)
-			: "cc", "xer", "memory");
-
-	return ret == 0;
+	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }

 static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
 {
 	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
 }

-/**
- * Atomically set a 64-bit counter to 0.
- *
- * @param v
- *   A pointer to the atomic counter.
- */
 static inline void rte_atomic64_clear(rte_atomic64_t *v)
 {
 	v->cnt = 0;