Add more 8 and 16 bit variants of the atomic(9) functions on arm64.

These are direct copies of the 32 bit functions, adjusted as needed.
While here, fix atomic_fcmpset_16 to use valid load and store exclusive
instructions: the halfword exclusives are ld*xrh/st*xrh, not ld*xh/st*xh.
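
For illustration only (not part of the commit): with these variants in
place, kernel code can call the short atomic(9) operations directly.
The names below follow the standard atomic(9) instantiations; the
caller and variables are hypothetical.

	#include <sys/types.h>
	#include <machine/atomic.h>

	static volatile uint8_t  flags;
	static volatile uint16_t count;

	static void
	example(void)
	{
		atomic_set_rel_8(&flags, 0x01);	/* set a flag with release */
		atomic_add_16(&count, 1);	/* unconditional add */

		/* cmpset returns non-zero when the swap succeeded. */
		if (atomic_cmpset_acq_16(&count, 1, 2))
			atomic_add_16(&count, 1);
	}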

Sponsored by:	DARPA, AFRL
Author:	Andrew Turner
Date:	2019-11-07 17:34:44 +00:00
Parent:	542c56ea9a
Commit:	4ffa494e4f
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=354452


@@ -57,6 +57,40 @@
 #define	ATOMIC_OP(op, asm_op, bar, a, l)			\
 static __inline void						\
+atomic_##op##_##bar##8(volatile uint8_t *p, uint8_t val)	\
+{								\
+	uint8_t tmp;						\
+	int res;						\
+								\
+	__asm __volatile(					\
+	    "1: ld"#a"xrb %w0, [%2]      \n"			\
+	    "   "#asm_op" %w0, %w0, %w3  \n"			\
+	    "   st"#l"xrb %w1, %w0, [%2] \n"			\
+	    "   cbnz      %w1, 1b        \n"			\
+	    : "=&r"(tmp), "=&r"(res)				\
+	    : "r" (p), "r" (val)				\
+	    : "memory"						\
+	);							\
+}								\
+								\
+static __inline void						\
+atomic_##op##_##bar##16(volatile uint16_t *p, uint16_t val)	\
+{								\
+	uint16_t tmp;						\
+	int res;						\
+								\
+	__asm __volatile(					\
+	    "1: ld"#a"xrh %w0, [%2]      \n"			\
+	    "   "#asm_op" %w0, %w0, %w3  \n"			\
+	    "   st"#l"xrh %w1, %w0, [%2] \n"			\
+	    "   cbnz      %w1, 1b        \n"			\
+	    : "=&r"(tmp), "=&r"(res)				\
+	    : "r" (p), "r" (val)				\
+	    : "memory"						\
+	);							\
+}								\
+								\
+static __inline void						\
 atomic_##op##_##bar##32(volatile uint32_t *p, uint32_t val)	\
 {								\
 	uint32_t tmp;						\
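
As a sketch of what the token pasting produces (assuming an
instantiation such as ATOMIC_OP(add, add, , , ), matching how the
existing 32 bit functions are generated), the new 8 bit case expands
to roughly:

	static __inline void
	atomic_add_8(volatile uint8_t *p, uint8_t val)
	{
		uint8_t tmp;
		int res;

		__asm __volatile(
		    "1: ldxrb %w0, [%2]      \n" /* load-exclusive byte */
		    "   add   %w0, %w0, %w3  \n" /* apply the operation */
		    "   stxrb %w1, %w0, [%2] \n" /* store-exclusive; %w1 = 0 on success */
		    "   cbnz  %w1, 1b        \n" /* lost the reservation: retry */
		    : "=&r"(tmp), "=&r"(res)
		    : "r" (p), "r" (val)
		    : "memory"
		);
	}
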
@@ -135,10 +169,10 @@ atomic_fcmpset_##bar##16(volatile uint16_t *p, uint16_t *cmpval,	\
 								\
 	__asm __volatile(					\
 	    "1: mov       %w1, #1        \n"			\
-	    "   ld"#a"xh  %w0, [%2]      \n"			\
+	    "   ld"#a"xrh %w0, [%2]      \n"			\
 	    "   cmp       %w0, %w3       \n"			\
 	    "   b.ne      2f             \n"			\
-	    "   st"#l"xh  %w1, %w4, [%2] \n"			\
+	    "   st"#l"xrh %w1, %w4, [%2] \n"			\
 	    "2:"						\
 	    : "=&r"(tmp), "=&r"(res)				\
 	    : "r" (p), "r" (_cmpval), "r" (newval)		\
@@ -205,6 +239,52 @@ ATOMIC_FCMPSET(rel_, ,l)
 #define	ATOMIC_CMPSET(bar, a, l)				\
 static __inline int						\
+atomic_cmpset_##bar##8(volatile uint8_t *p, uint8_t cmpval,	\
+    uint8_t newval)						\
+{								\
+	uint8_t tmp;						\
+	int res;						\
+								\
+	__asm __volatile(					\
+	    "1: mov       %w1, #1        \n"			\
+	    "   ld"#a"xrb %w0, [%2]      \n"			\
+	    "   cmp       %w0, %w3       \n"			\
+	    "   b.ne      2f             \n"			\
+	    "   st"#l"xrb %w1, %w4, [%2] \n"			\
+	    "   cbnz      %w1, 1b        \n"			\
+	    "2:"						\
+	    : "=&r"(tmp), "=&r"(res)				\
+	    : "r" (p), "r" (cmpval), "r" (newval)		\
+	    : "cc", "memory"					\
+	);							\
+								\
+	return (!res);						\
+}								\
+								\
+static __inline int						\
+atomic_cmpset_##bar##16(volatile uint16_t *p, uint16_t cmpval,	\
+    uint16_t newval)						\
+{								\
+	uint16_t tmp;						\
+	int res;						\
+								\
+	__asm __volatile(					\
+	    "1: mov       %w1, #1        \n"			\
+	    "   ld"#a"xrh %w0, [%2]      \n"			\
+	    "   cmp       %w0, %w3       \n"			\
+	    "   b.ne      2f             \n"			\
+	    "   st"#l"xrh %w1, %w4, [%2] \n"			\
+	    "   cbnz      %w1, 1b        \n"			\
+	    "2:"						\
+	    : "=&r"(tmp), "=&r"(res)				\
+	    : "r" (p), "r" (cmpval), "r" (newval)		\
+	    : "cc", "memory"					\
+	);							\
+								\
+	return (!res);						\
+}								\
+								\
+static __inline int						\
 atomic_cmpset_##bar##32(volatile uint32_t *p, uint32_t cmpval,	\
     uint32_t newval)						\
 {								\
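
By contrast, the cmpset loops above do include the cbnz retry, so a
spurious store-exclusive failure is retried internally and the call
fails only on a genuine value mismatch. A hypothetical caller using
the new 8 bit variant as a lock bit:

	static int
	try_lock(volatile uint8_t *lk)
	{
		/* Non-zero return iff we transitioned 0 -> 1. */
		return (atomic_cmpset_acq_8(lk, 0, 1));
	}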