diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h
index e87d10b8cc..fe4d42b1ea 100644
--- a/lib/eal/arm/include/rte_pause_64.h
+++ b/lib/eal/arm/include/rte_pause_64.h
@@ -26,10 +26,102 @@ static inline void rte_pause(void)
 #ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
 
 /* Send an event to quit WFE. */
-#define __SEVL() { asm volatile("sevl" : : : "memory"); }
+#define __RTE_ARM_SEVL() { asm volatile("sevl" : : : "memory"); }
 
 /* Put processor into low power WFE(Wait For Event) state. */
-#define __WFE() { asm volatile("wfe" : : : "memory"); }
+#define __RTE_ARM_WFE() { asm volatile("wfe" : : : "memory"); }
+
+/*
+ * Atomic exclusive load from addr, it returns the 16-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_16(src, dst, memorder) { \
+        if (memorder == __ATOMIC_RELAXED) { \
+                asm volatile("ldxrh %w[tmp], [%x[addr]]" \
+                        : [tmp] "=&r" (dst) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } else { \
+                asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
+                        : [tmp] "=&r" (dst) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } }
+
+/*
+ * Atomic exclusive load from addr, it returns the 32-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_32(src, dst, memorder) { \
+        if (memorder == __ATOMIC_RELAXED) { \
+                asm volatile("ldxr %w[tmp], [%x[addr]]" \
+                        : [tmp] "=&r" (dst) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } else { \
+                asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+                        : [tmp] "=&r" (dst) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } }
+
+/*
+ * Atomic exclusive load from addr, it returns the 64-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_64(src, dst, memorder) { \
+        if (memorder == __ATOMIC_RELAXED) { \
+                asm volatile("ldxr %x[tmp], [%x[addr]]" \
+                        : [tmp] "=&r" (dst) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } else { \
+                asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+                        : [tmp] "=&r" (dst) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } }
+
+/*
+ * Atomic exclusive load from addr, it returns the 128-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_128(src, dst, memorder) { \
+        volatile rte_int128_t *dst_128 = (volatile rte_int128_t *)&dst; \
+        if (memorder == __ATOMIC_RELAXED) { \
+                asm volatile("ldxp %x[tmp0], %x[tmp1], [%x[addr]]" \
+                        : [tmp0] "=&r" (dst_128->val[0]), \
+                          [tmp1] "=&r" (dst_128->val[1]) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } else { \
+                asm volatile("ldaxp %x[tmp0], %x[tmp1], [%x[addr]]" \
+                        : [tmp0] "=&r" (dst_128->val[0]), \
+                          [tmp1] "=&r" (dst_128->val[1]) \
+                        : [addr] "r" (src) \
+                        : "memory"); \
+        } } \
+
+#define __RTE_ARM_LOAD_EXC(src, dst, memorder, size) { \
+        RTE_BUILD_BUG_ON(size != 16 && size != 32 && \
+                size != 64 && size != 128); \
+        if (size == 16) \
+                __RTE_ARM_LOAD_EXC_16(src, dst, memorder) \
+        else if (size == 32) \
+                __RTE_ARM_LOAD_EXC_32(src, dst, memorder) \
+        else if (size == 64) \
+                __RTE_ARM_LOAD_EXC_64(src, dst, memorder) \
+        else if (size == 128) \
+                __RTE_ARM_LOAD_EXC_128(src, dst, memorder) \
+}
 
 static __rte_always_inline void
 rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
@@ -37,36 +129,17 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
 {
         uint16_t value;
 
-        assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+        RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&
+                memorder != __ATOMIC_RELAXED);
 
-        /*
-         * Atomic exclusive load from addr, it returns the 16-bit content of
-         * *addr while making it 'monitored',when it is written by someone
-         * else, the 'monitored' state is cleared and a event is generated
-         * implicitly to exit WFE.
-         */
-#define __LOAD_EXC_16(src, dst, memorder) { \
-        if (memorder == __ATOMIC_RELAXED) { \
-                asm volatile("ldxrh %w[tmp], [%x[addr]]" \
-                        : [tmp] "=&r" (dst) \
-                        : [addr] "r"(src) \
-                        : "memory"); \
-        } else { \
-                asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
-                        : [tmp] "=&r" (dst) \
-                        : [addr] "r"(src) \
-                        : "memory"); \
-        } }
-
-        __LOAD_EXC_16(addr, value, memorder)
+        __RTE_ARM_LOAD_EXC_16(addr, value, memorder)
         if (value != expected) {
-                __SEVL()
+                __RTE_ARM_SEVL()
                 do {
-                        __WFE()
-                        __LOAD_EXC_16(addr, value, memorder)
+                        __RTE_ARM_WFE()
+                        __RTE_ARM_LOAD_EXC_16(addr, value, memorder)
                 } while (value != expected);
         }
-#undef __LOAD_EXC_16
 }
 
 static __rte_always_inline void
@@ -75,36 +148,17 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
 {
         uint32_t value;
 
-        assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+        RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&
+                memorder != __ATOMIC_RELAXED);
 
-        /*
-         * Atomic exclusive load from addr, it returns the 32-bit content of
-         * *addr while making it 'monitored',when it is written by someone
-         * else, the 'monitored' state is cleared and a event is generated
-         * implicitly to exit WFE.
-         */
-#define __LOAD_EXC_32(src, dst, memorder) { \
-        if (memorder == __ATOMIC_RELAXED) { \
-                asm volatile("ldxr %w[tmp], [%x[addr]]" \
-                        : [tmp] "=&r" (dst) \
-                        : [addr] "r"(src) \
-                        : "memory"); \
-        } else { \
-                asm volatile("ldaxr %w[tmp], [%x[addr]]" \
-                        : [tmp] "=&r" (dst) \
-                        : [addr] "r"(src) \
-                        : "memory"); \
-        } }
-
-        __LOAD_EXC_32(addr, value, memorder)
+        __RTE_ARM_LOAD_EXC_32(addr, value, memorder)
         if (value != expected) {
-                __SEVL()
+                __RTE_ARM_SEVL()
                 do {
-                        __WFE()
-                        __LOAD_EXC_32(addr, value, memorder)
+                        __RTE_ARM_WFE()
+                        __RTE_ARM_LOAD_EXC_32(addr, value, memorder)
                 } while (value != expected);
         }
-#undef __LOAD_EXC_32
 }
 
 static __rte_always_inline void
@@ -113,42 +167,37 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 {
         uint64_t value;
 
-        assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+        RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&
+                memorder != __ATOMIC_RELAXED);
 
-        /*
-         * Atomic exclusive load from addr, it returns the 64-bit content of
-         * *addr while making it 'monitored',when it is written by someone
-         * else, the 'monitored' state is cleared and a event is generated
-         * implicitly to exit WFE.
-         */
-#define __LOAD_EXC_64(src, dst, memorder) { \
-        if (memorder == __ATOMIC_RELAXED) { \
-                asm volatile("ldxr %x[tmp], [%x[addr]]" \
-                        : [tmp] "=&r" (dst) \
-                        : [addr] "r"(src) \
-                        : "memory"); \
-        } else { \
-                asm volatile("ldaxr %x[tmp], [%x[addr]]" \
-                        : [tmp] "=&r" (dst) \
-                        : [addr] "r"(src) \
-                        : "memory"); \
-        } }
-
-        __LOAD_EXC_64(addr, value, memorder)
+        __RTE_ARM_LOAD_EXC_64(addr, value, memorder)
         if (value != expected) {
-                __SEVL()
+                __RTE_ARM_SEVL()
                 do {
-                        __WFE()
-                        __LOAD_EXC_64(addr, value, memorder)
+                        __RTE_ARM_WFE()
+                        __RTE_ARM_LOAD_EXC_64(addr, value, memorder)
                 } while (value != expected);
         }
 }
 
-#undef __LOAD_EXC_64
-#undef __SEVL
-#undef __WFE
+#define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do { \
+        RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \
+        RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \
+                memorder != __ATOMIC_RELAXED); \
+        const uint32_t size = sizeof(*(addr)) << 3; \
+        typeof(*(addr)) expected_value = (expected); \
+        typeof(*(addr)) value; \
+        __RTE_ARM_LOAD_EXC((addr), value, memorder, size) \
+        if (!((value & (mask)) cond expected_value)) { \
+                __RTE_ARM_SEVL() \
+                do { \
+                        __RTE_ARM_WFE() \
+                        __RTE_ARM_LOAD_EXC((addr), value, memorder, size) \
+                } while (!((value & (mask)) cond expected_value)); \
+        } \
+} while (0)
 
-#endif
+#endif /* RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED */
 
 #ifdef __cplusplus
 }
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index 668ee4a184..032f9ac05a 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -111,6 +111,34 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
         while (__atomic_load_n(addr, memorder) != expected)
                 rte_pause();
 }
-#endif
+
+/*
+ * Wait until *addr & mask makes the condition true. With a relaxed memory
+ * ordering model, the loads around this helper can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of value bits in interest.
+ * @param cond
+ *  A symbol representing the condition.
+ * @param expected
+ *  An expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+#define RTE_WAIT_UNTIL_MASKED(addr, mask, cond, expected, memorder) do { \
+        RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \
+        RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \
+                memorder != __ATOMIC_RELAXED); \
+        typeof(*(addr)) expected_value = (expected); \
+        while (!((__atomic_load_n((addr), (memorder)) & (mask)) cond \
+                expected_value)) \
+                rte_pause(); \
+} while (0)
+#endif /* ! RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED */
 
 #endif /* _RTE_PAUSE_H_ */
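
Usage note, not part of the patch: the sketch below shows how a caller is expected to invoke the new RTE_WAIT_UNTIL_MASKED() macro. The status word, bit mask and function names are hypothetical; only the macro itself and its constraints (memorder must be a compile-time constant, either __ATOMIC_ACQUIRE or __ATOMIC_RELAXED) come from the code above.

#include <stdint.h>

#include <rte_pause.h>

/* Hypothetical layout: bit 0 of the status word is a busy flag set by another lcore. */
#define WORKER_BUSY_BIT UINT32_C(0x1)

static uint32_t worker_status;

static void
wait_until_worker_idle(void)
{
        /*
         * Wait until (worker_status & WORKER_BUSY_BIT) == 0.
         * On aarch64 builds with RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED this
         * waits in WFE via the exclusive-load monitor; otherwise it
         * falls back to the generic rte_pause() loop.
         */
        RTE_WAIT_UNTIL_MASKED(&worker_status, WORKER_BUSY_BIT, ==, 0,
                        __ATOMIC_ACQUIRE);
}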
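
A second sketch, also not part of the patch, relating the new macro to the existing helpers: with an all-ones mask and the '==' condition, RTE_WAIT_UNTIL_MASKED() performs the same wait-until-equal as rte_wait_until_equal_32(). The sync_word variable and the magic value are made up for illustration.

#include <stdint.h>

#include <rte_pause.h>

static uint32_t sync_word;

/* Existing helper: wait until *addr == expected. */
static void
wait_equal_old_style(void)
{
        rte_wait_until_equal_32(&sync_word, 0xdeadbeef, __ATOMIC_RELAXED);
}

/* The same wait expressed with the new macro: full mask, '==' condition. */
static void
wait_equal_masked(void)
{
        RTE_WAIT_UNTIL_MASKED(&sync_word, UINT32_MAX, ==, 0xdeadbeef,
                        __ATOMIC_RELAXED);
}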