eal: introduce atomic exchange operation

To handle atomic updates of the 64-bit link status, every driver
was rolling its own version using cmpset.
Atomic exchange is a useful primitive in its own right;
therefore make it an EAL routine.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Author: Stephen Hemminger, 2018-01-25 18:01:37 -08:00
Committed by: Ferruh Yigit
Parent: adeb2a2d57
Commit: ff2863570f
5 changed files with 146 additions and 1 deletion
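
As background for the motivation above, a minimal sketch (not part of this commit) of how a driver-side 64-bit link-status update could use the new primitive instead of an open-coded cmpset retry loop; the function and variable names are illustrative assumptions, only rte_atomic64_exchange() comes from this patch:

#include <stdint.h>
#include <rte_atomic.h>

/* Hypothetical helper, for illustration only: atomically publish the new
 * 64-bit link status and report whether it changed. */
static int
example_link_update(volatile uint64_t *dev_link, uint64_t new_link)
{
	uint64_t old_link;

	/* One atomic swap replaces the per-driver cmpset() retry loop. */
	old_link = rte_atomic64_exchange(dev_link, new_link);

	return old_link != new_link;
}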

@@ -136,6 +136,12 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 32 bit atomic operations -------------------------*/
static inline int
@@ -237,6 +243,13 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
	return ret == 0;
}

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 64 bit atomic operations -------------------------*/
static inline int
@@ -431,7 +444,6 @@ static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}
/**
* Atomically set a 64-bit counter to 0.
*
@@ -442,6 +454,13 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}

#endif
#ifdef __cplusplus

@@ -104,6 +104,18 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
	return res;
}

static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	asm volatile(
			MPLOCKED
			"xchgw %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val),  "m" (*dst)
			: "memory");		/* no-clobber list */
	return val;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
@@ -178,6 +190,18 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
	return res;
}

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	asm volatile(
			MPLOCKED
			"xchgl %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val),  "m" (*dst)
			: "memory");		/* no-clobber list */
	return val;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);

@@ -98,6 +98,18 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
	return res;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dest, uint64_t val)
{
	uint64_t old;

	/* Retry until the compare-and-set succeeds (cmpset returns non-zero
	 * on success), then return the value that was replaced. */
	do {
		old = *dest;
	} while (rte_atomic64_cmpset(dest, old, val) == 0);

	return old;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{

@@ -71,6 +71,18 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
	return res;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	asm volatile(
			MPLOCKED
			"xchgq %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val),  "m" (*dst)
			: "memory");		/* no-clobber list */
	return val;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{

@@ -190,6 +190,32 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
}
#endif

/**
 * Atomic exchange.
 *
 * (atomic) equivalent to:
 *   ret = *dst;
 *   *dst = val;
 *   return ret;
 *
 * @param dst
 *   The destination location into which the value will be written.
 * @param val
 *   The new value.
 * @return
 *   The original value at that location.
 */
static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);

#ifdef RTE_FORCE_INTRINSICS
static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}
#endif

/**
* The atomic counter structure.
*/
@@ -443,6 +469,32 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
}
#endif

/**
 * Atomic exchange.
 *
 * (atomic) equivalent to:
 *   ret = *dst;
 *   *dst = val;
 *   return ret;
 *
 * @param dst
 *   The destination location into which the value will be written.
 * @param val
 *   The new value.
 * @return
 *   The original value at that location.
 */
static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);

#ifdef RTE_FORCE_INTRINSICS
static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}
#endif

/**
* The atomic counter structure.
*/
@@ -695,6 +747,32 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
}
#endif

/**
 * Atomic exchange.
 *
 * (atomic) equivalent to:
 *   ret = *dst;
 *   *dst = val;
 *   return ret;
 *
 * @param dst
 *   The destination location into which the value will be written.
 * @param val
 *   The new value.
 * @return
 *   The original value at that location.
 */
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);

#ifdef RTE_FORCE_INTRINSICS
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}
#endif

/**
* The atomic counter structure.
*/