Optimise xchg() to use atomic_swap_32() and atomic_swap_64().

Suggested by:	kib@
MFC after:	1 week
Sponsored by:	Mellanox Technologies
Hans Petter Selasky 2018-02-18 18:46:56 +00:00
parent 644680491e
commit 8f294983e9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=329524
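
For context, a minimal sketch of what the change buys for the 32-bit case (not the committed LinuxKPI code; it relies only on FreeBSD's atomic(9) primitives atomic_fcmpset_32() and atomic_swap_32(), and the helper names old_xchg_32()/new_xchg_32() are hypothetical):

    #include <sys/types.h>
    #include <machine/atomic.h>

    /* Old approach: loop on a compare-and-set until it succeeds. */
    static inline uint32_t
    old_xchg_32(volatile uint32_t *p, uint32_t new)
    {
            uint32_t old = *p;      /* initial guess; fcmpset refreshes it on failure */

            while (!atomic_fcmpset_32(p, &old, new))
                    ;               /* retry until the CAS wins */
            return (old);
    }

    /* New approach: one unconditional swap, no retry loop. */
    static inline uint32_t
    new_xchg_32(volatile uint32_t *p, uint32_t new)
    {
            return (atomic_swap_32(p, new));        /* returns the previous value */
    }

The swap variant performs the exchange in a single atomic operation, so it cannot be forced to retry by concurrent writers the way the fcmpset loop can.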


@@ -205,31 +205,31 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
 		u16 u16[0];						\
 		u32 u32[0];						\
 		u64 u64[0];						\
-	} __ret = { .val = READ_ONCE(*ptr) }, __new = { .val = (new) };	\
+	} __ret, __new = { .val = (new) };				\
 									\
 	CTASSERT(sizeof(__ret.val) == 1 || sizeof(__ret.val) == 2 ||	\
 	    sizeof(__ret.val) == 4 || sizeof(__ret.val) == 8);		\
 									\
 	switch (sizeof(__ret.val)) {					\
 	case 1:								\
+		__ret.val = READ_ONCE(*ptr);				\
 		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
 		    __ret.u8, __new.u8[0]))				\
 			;						\
 		break;							\
 	case 2:								\
+		__ret.val = READ_ONCE(*ptr);				\
 		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
 		    __ret.u16, __new.u16[0]))				\
 			;						\
 		break;							\
 	case 4:								\
-		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
-		    __ret.u32, __new.u32[0]))				\
-			;						\
+		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
+		    __new.u32[0]);					\
 		break;							\
 	case 8:								\
-		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
-		    __ret.u64, __new.u64[0]))				\
-			;						\
+		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
+		    __new.u64[0]);					\
 		break;							\
 	}								\
 	__ret.val;							\
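
For reference, a usage sketch of the macro after this change (hypothetical caller; as in Linux, xchg() stores the new value and hands back whatever was there before):

    /* Hypothetical example: atomically take and clear a 32-bit pending mask. */
    static inline uint32_t
    take_pending(uint32_t *pending)
    {
            return (xchg(pending, 0));      /* old mask returned; zero left behind */
    }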