atomic: Add some type checking to plain atomic_load/store helpers

Reviewed by:	rpokala, mjg, imp, kib
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D35828
Author:	Mark Johnston
Date:	2022-07-25 17:48:46 -04:00
Commit:	30253da1a9 (parent 2057985649)

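Before this change the plain helpers were bare volatile casts: they accepted a pointer to any object type, and a size mismatch compiled silently and read or wrote the wrong number of bytes. The sketch below illustrates the class of bug the new _Generic wrappers reject; it is hypothetical (the variable and function names are not from the commit) and assumes an LP64 kernel build environment:

#include <sys/types.h>
#include <machine/atomic.h>

static u_long counter;		/* hypothetical: 8 bytes wide on LP64 */

u_int
read_counter(void)
{
	/*
	 * With the old atomic_load_int(), this expanded to
	 * (*(volatile u_int *)(&counter)): it compiled, but read only
	 * 4 of counter's 8 bytes.  With the new definition, _Generic
	 * sees that *(&counter) has type u_long, which matches neither
	 * int nor u_int, so the line below now fails to compile.
	 */
	return (atomic_load_int(&counter));
}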

@@ -36,38 +36,82 @@
 #error do not include this header, use machine/atomic.h
 #endif
 
-#define	atomic_load_char(p)	(*(volatile u_char *)(p))
-#define	atomic_load_short(p)	(*(volatile u_short *)(p))
-#define	atomic_load_int(p)	(*(volatile u_int *)(p))
-#define	atomic_load_long(p)	(*(volatile u_long *)(p))
-#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
-#define	atomic_load_8(p)	(*(volatile uint8_t *)(p))
-#define	atomic_load_16(p)	(*(volatile uint16_t *)(p))
-#define	atomic_load_32(p)	(*(volatile uint32_t *)(p))
-#ifdef _LP64
-#define	atomic_load_64(p)	(*(volatile uint64_t *)(p))
-#endif
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#define	__atomic_load_char_relaxed(p)	(*(volatile u_char *)(p))
+#define	__atomic_load_short_relaxed(p)	(*(volatile u_short *)(p))
+#define	__atomic_load_int_relaxed(p)	(*(volatile u_int *)(p))
+#define	__atomic_load_long_relaxed(p)	(*(volatile u_long *)(p))
+#define	__atomic_load_8_relaxed(p)	(*(volatile uint8_t *)(p))
+#define	__atomic_load_16_relaxed(p)	(*(volatile uint16_t *)(p))
+#define	__atomic_load_32_relaxed(p)	(*(volatile uint32_t *)(p))
+#define	__atomic_load_64_relaxed(p)	(*(volatile uint64_t *)(p))
+
+#define	__atomic_store_char_relaxed(p, v)	\
+	(*(volatile u_char *)(p) = (u_char)(v))
+#define	__atomic_store_short_relaxed(p, v)	\
+	(*(volatile u_short *)(p) = (u_short)(v))
+#define	__atomic_store_int_relaxed(p, v)	\
+	(*(volatile u_int *)(p) = (u_int)(v))
+#define	__atomic_store_long_relaxed(p, v)	\
+	(*(volatile u_long *)(p) = (u_long)(v))
+#define	__atomic_store_8_relaxed(p, v)	\
+	(*(volatile uint8_t *)(p) = (uint8_t)(v))
+#define	__atomic_store_16_relaxed(p, v)	\
+	(*(volatile uint16_t *)(p) = (uint16_t)(v))
+#define	__atomic_store_32_relaxed(p, v)	\
+	(*(volatile uint32_t *)(p) = (uint32_t)(v))
+#define	__atomic_store_64_relaxed(p, v)	\
+	(*(volatile uint64_t *)(p) = (uint64_t)(v))
+
+/*
+ * When _Generic is available, try to provide some type checking.
+ */
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+    __has_extension(c_generic_selections)
+#define	__atomic_load_generic(p, t, ut, n)	\
+	_Generic(*(p),				\
+	    t: __atomic_load_ ## n ## _relaxed(p),	\
+	    ut: __atomic_load_ ## n ## _relaxed(p))
+#define	__atomic_store_generic(p, v, t, ut, n)	\
+	_Generic(*(p),				\
+	    t: __atomic_store_ ## n ## _relaxed(p, v),	\
+	    ut: __atomic_store_ ## n ## _relaxed(p, v))
+#else
+#define	__atomic_load_generic(p, t, ut, n)	__atomic_load_ ## n ## _relaxed(p)
+#define	__atomic_store_generic(p, v, t, ut, n)	__atomic_store_ ## n ## _relaxed(p, v)
+#endif
 
-#define	atomic_store_char(p, v)		\
-	(*(volatile u_char *)(p) = (u_char)(v))
-#define	atomic_store_short(p, v)	\
-	(*(volatile u_short *)(p) = (u_short)(v))
-#define	atomic_store_int(p, v)		\
-	(*(volatile u_int *)(p) = (u_int)(v))
-#define	atomic_store_long(p, v)		\
-	(*(volatile u_long *)(p) = (u_long)(v))
-#define	atomic_store_ptr(p, v)		\
-	(*(volatile __typeof(*p) *)(p) = (v))
-#define	atomic_store_8(p, v)		\
-	(*(volatile uint8_t *)(p) = (uint8_t)(v))
-#define	atomic_store_16(p, v)		\
-	(*(volatile uint16_t *)(p) = (uint16_t)(v))
-#define	atomic_store_32(p, v)		\
-	(*(volatile uint32_t *)(p) = (uint32_t)(v))
-#ifdef _LP64
-#define	atomic_store_64(p, v)		\
-	(*(volatile uint64_t *)(p) = (uint64_t)(v))
-#endif
+#define	atomic_load_char(p)	__atomic_load_generic(p, char, u_char, char)
+#define	atomic_load_short(p)	__atomic_load_generic(p, short, u_short, short)
+#define	atomic_load_int(p)	__atomic_load_generic(p, int, u_int, int)
+#define	atomic_load_long(p)	__atomic_load_generic(p, long, u_long, long)
+#define	atomic_load_8(p)	__atomic_load_generic(p, int8_t, uint8_t, 8)
+#define	atomic_load_16(p)	__atomic_load_generic(p, int16_t, uint16_t, 16)
+#define	atomic_load_32(p)	__atomic_load_generic(p, int32_t, uint32_t, 32)
+#ifdef __LP64__
+#define	atomic_load_64(p)	__atomic_load_generic(p, int64_t, uint64_t, 64)
+#endif
+
+#define	atomic_store_char(p, v)		\
+	__atomic_store_generic(p, v, char, u_char, char)
+#define	atomic_store_short(p, v)	\
+	__atomic_store_generic(p, v, short, u_short, short)
+#define	atomic_store_int(p, v)		\
+	__atomic_store_generic(p, v, int, u_int, int)
+#define	atomic_store_long(p, v)		\
+	__atomic_store_generic(p, v, long, u_long, long)
+#define	atomic_store_8(p, v)		\
+	__atomic_store_generic(p, v, int8_t, uint8_t, 8)
+#define	atomic_store_16(p, v)		\
+	__atomic_store_generic(p, v, int16_t, uint16_t, 16)
+#define	atomic_store_32(p, v)		\
+	__atomic_store_generic(p, v, int32_t, uint32_t, 32)
+#ifdef __LP64__
+#define	atomic_store_64(p, v)		\
+	__atomic_store_generic(p, v, int64_t, uint64_t, 64)
+#endif
+
+#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
+#define	atomic_store_ptr(p, v)	(*(volatile __typeof(*p) *)(p) = (v))
 
 /*
  * Currently all architectures provide acquire and release fences on their own,
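The type-checking trick in the hunk above is plain C11 and can be tried outside the kernel. The following is a minimal stand-alone sketch of the same pattern, not FreeBSD code; load_32 and load_32_relaxed are made-up names mirroring the macros in the diff:

#include <stdint.h>

#define	load_32_relaxed(p)	(*(volatile uint32_t *)(p))

/* Accept only int32_t or uint32_t objects; anything else fails to compile. */
#define	load_32(p)				\
	_Generic(*(p),				\
	    int32_t: load_32_relaxed(p),	\
	    uint32_t: load_32_relaxed(p))

int
main(void)
{
	uint32_t ok = 7;
	uint64_t wide = 7;

	(void)load_32(&ok);	/* *(&ok) has type uint32_t: selected */
#if 0
	(void)load_32(&wide);	/* uint64_t matches no association: error */
#endif
	(void)wide;
	return (0);
}

Listing both the signed and unsigned variants mirrors the header passing both t and ut, since callers legitimately hold either flavor; the #else branch in the diff falls back to the unchecked expansion so that pre-C11 compilers without the c_generic_selections extension keep building.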