Add the 8- and 16-bit atomic load/store functions with acquire/release barrier semantics on arm64.

Reviewed by:	cem
MFC after:	2 weeks
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D22966
This commit is contained in:
Andrew Turner 2020-01-03 10:03:36 +00:00
parent 5a93d93e8b
commit 6d26116baa

View File

@ -448,6 +448,34 @@ atomic_swap_64(volatile uint64_t *p, uint64_t val)
return (ret);
}
/*
 * Atomically load an 8-bit value with acquire semantics.
 *
 * LDARB is the AArch64 load-acquire byte instruction: no later memory
 * access by this CPU may be reordered before it.  The "memory" clobber
 * stops the compiler from moving accesses across the asm statement too.
 */
static __inline uint8_t
atomic_load_acq_8(volatile uint8_t *p)
{
uint8_t ret;	/* value read from *p */
__asm __volatile(
"ldarb %w0, [%1] \n"	/* %w0 = 32-bit view of output reg; byte load */
: "=&r" (ret)		/* early-clobber: written before %1 is dead */
: "r" (p)
: "memory");
return (ret);
}
/*
 * Atomically load a 16-bit value with acquire semantics.
 *
 * LDARH is the AArch64 load-acquire halfword instruction: no later
 * memory access by this CPU may be reordered before it.  The "memory"
 * clobber prevents compiler reordering across the asm statement.
 */
static __inline uint16_t
atomic_load_acq_16(volatile uint16_t *p)
{
uint16_t ret;	/* value read from *p */
__asm __volatile(
"ldarh %w0, [%1] \n"	/* %w0 = 32-bit view of output reg; halfword load */
: "=&r" (ret)		/* early-clobber output */
: "r" (p)
: "memory");
return (ret);
}
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
@ -476,6 +504,28 @@ atomic_load_acq_64(volatile uint64_t *p)
return (ret);
}
/*
 * Atomically store an 8-bit value with release semantics.
 *
 * STLRB is the AArch64 store-release byte instruction: no earlier
 * memory access by this CPU may be reordered after it.  The "memory"
 * clobber forces the compiler to flush pending accesses before the
 * asm and not cache memory across it.
 */
static __inline void
atomic_store_rel_8(volatile uint8_t *p, uint8_t val)
{
__asm __volatile(
"stlrb %w0, [%1] \n"	/* %w0 = 32-bit view of val's reg; byte store */
:
: "r" (val), "r" (p)
: "memory");
}
/*
 * Atomically store a 16-bit value with release semantics.
 *
 * STLRH is the AArch64 store-release halfword instruction: no earlier
 * memory access by this CPU may be reordered after it.  The "memory"
 * clobber prevents compiler reordering across the asm statement.
 */
static __inline void
atomic_store_rel_16(volatile uint16_t *p, uint16_t val)
{
__asm __volatile(
"stlrh %w0, [%1] \n"	/* %w0 = 32-bit view of val's reg; halfword store */
:
: "r" (val), "r" (p)
: "memory");
}
static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t val)
{