cleanup of illumos compatibility atomics

atomic_cas_32 is now implemented using atomic_fcmpset_32 on all platforms,
and likewise atomic_cas_64 is implemented using atomic_fcmpset_64 on
platforms that have it.  The only exception is sparc64, which provides MD
atomic_cas_32 and atomic_cas_64.
This is slightly inefficient, as fcmpset also reports whether the operation
updated the target, and that information is not needed for cas.
Nevertheless, there is less code to maintain and less to add for new
platforms.
Also, the operations are now done inline, as opposed to the function calls
used before.
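
For reference, the fcmpset-based implementation works because
atomic_fcmpset_32() writes the value it observed back through its cmp
pointer when the comparison fails, so returning cmp yields exactly the
illumos cas semantics.  A condensed sketch of the 32-bit case (the full
version appears in the atomic.h diff below):

	static inline uint32_t
	atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
	{
		/*
		 * On success *target held cmp and now holds newval, and cmp
		 * is left unchanged.  On failure fcmpset stores the observed
		 * value into cmp.  Either way cmp ends up holding the old
		 * value, which is what cas must return.  The success flag is
		 * discarded, hence the slight inefficiency noted above.
		 */
		(void)atomic_fcmpset_32(target, &cmp, newval);
		return (cmp);
	}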

atomic_add_64_nv is implemented using atomic_fetchadd_64 on platforms
that provide it.
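
This mapping is direct, since fetchadd returns the value before the
addition; the inline (shown in the atomic.h diff below) simply adds delta
back to obtain the new value:

	static inline uint64_t
	atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
	{
		/* fetchadd returns the old value; new value = old + delta. */
		return (atomic_fetchadd_64(target, delta) + delta);
	}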

casptr, cas32, atomic_or_8, and atomic_or_8_nv are removed entirely as they
have no users.

atomic_mtx, which is used to emulate 64-bit atomics on platforms that lack
them, is now defined only on those platforms.
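
The emulation itself keeps its existing shape: a single global mutex
serializes every emulated 64-bit operation.  A condensed sketch of the
pattern (the real opensolaris_atomic.c initializes atomic_mtx from
atomic_init(), as the first hunk below shows):

	static struct mtx atomic_mtx;	/* serializes all emulated 64-bit ops */

	void
	atomic_add_64(volatile uint64_t *target, int64_t delta)
	{
		mtx_lock(&atomic_mtx);
		*target += delta;
		mtx_unlock(&atomic_mtx);
	}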

As a result, the platform-specific opensolaris_atomic.S files have lost
most of their code.  The only exception is i386, where the compat+contrib
code provides 64-bit atomics for userland use.  That code assumes the
availability of the cmpxchg8b instruction.  FreeBSD does not make that
assumption for i386 userland and, thus, does not provide 64-bit atomics
there.  Hopefully, this can and will be fixed.

MFC after:	3 weeks
Author:	avg
Date:	2019-10-09 11:26:36 +00:00
Commit:	e9642c209b (parent 6bf933c434)
7 changed files with 51 additions and 303 deletions


@@ -32,6 +32,9 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/atomic.h>
#if !defined(__LP64__) && !defined(__mips_n32) && \
!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
#ifdef _KERNEL
#include <sys/kernel.h>
@@ -52,8 +55,6 @@ atomic_init(void)
}
#endif
#if !defined(__LP64__) && !defined(__mips_n32) && \
!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{
@@ -94,7 +95,6 @@ atomic_load_64(volatile uint64_t *a)
mtx_unlock(&atomic_mtx);
return (ret);
}
#endif
uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
@@ -107,27 +107,6 @@ atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
return (newval);
}
#if defined(__powerpc__) || defined(__arm__) || defined(__mips__)
void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
mtx_lock(&atomic_mtx);
*target |= value;
mtx_unlock(&atomic_mtx);
}
#endif
uint8_t
atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
{
uint8_t newval;
mtx_lock(&atomic_mtx);
newval = (*target |= value);
mtx_unlock(&atomic_mtx);
return (newval);
}
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
@@ -140,19 +119,7 @@ atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
mtx_unlock(&atomic_mtx);
return (oldval);
}
uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
uint32_t oldval;
mtx_lock(&atomic_mtx);
oldval = *target;
if (oldval == cmp)
*target = newval;
mtx_unlock(&atomic_mtx);
return (oldval);
}
#endif
void
membar_producer(void)


@@ -32,10 +32,6 @@
#include <sys/types.h>
#include <machine/atomic.h>
#define casptr(_a, _b, _c) \
atomic_cmpset_ptr((volatile uintptr_t *)(_a), (uintptr_t)(_b), (uintptr_t) (_c))
#define cas32 atomic_cmpset_32
#if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
#define I386_HAVE_ATOMIC64
#endif
@@ -46,27 +42,12 @@ extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
extern void atomic_dec_64(volatile uint64_t *target);
extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
extern uint64_t atomic_load_64(volatile uint64_t *a);
#endif
#ifndef __sparc64__
extern uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
uint32_t newval);
extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
uint64_t newval);
#endif
extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
extern void membar_producer(void);
#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \
defined(__mips__) || defined(__aarch64__) || defined(__riscv)
extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
#else
static __inline void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
atomic_set_8(target, value);
}
#endif
extern void membar_producer(void);
static __inline uint32_t
atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
@@ -80,27 +61,6 @@ atomic_add_int_nv(volatile u_int *target, int delta)
return (atomic_add_32_nv(target, delta));
}
static __inline void
atomic_dec_32(volatile uint32_t *target)
{
atomic_subtract_32(target, 1);
}
static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
return (atomic_fetchadd_32(target, -1) - 1);
}
#if defined(__LP64__) || defined(__mips_n32) || \
defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
static __inline void
atomic_dec_64(volatile uint64_t *target)
{
atomic_subtract_64(target, 1);
}
#endif
static __inline void
atomic_inc_32(volatile uint32_t *target)
{
@@ -113,6 +73,51 @@ atomic_inc_32_nv(volatile uint32_t *target)
return (atomic_add_32_nv(target, 1));
}
static __inline void
atomic_dec_32(volatile uint32_t *target)
{
atomic_subtract_32(target, 1);
}
static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
return (atomic_add_32_nv(target, -1));
}
#ifndef __sparc64__
static inline uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
(void)atomic_fcmpset_32(target, &cmp, newval);
return (cmp);
}
#endif
#if defined(__LP64__) || defined(__mips_n32) || \
defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
static __inline void
atomic_dec_64(volatile uint64_t *target)
{
atomic_subtract_64(target, 1);
}
static inline uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
return (atomic_fetchadd_64(target, delta) + delta);
}
#ifndef __sparc64__
static inline uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
(void)atomic_fcmpset_64(target, &cmp, newval);
return (cmp);
}
#endif
#endif
static __inline void
atomic_inc_64(volatile uint64_t *target)
{


@@ -28,58 +28,6 @@
#include <machine/asm.h>
/*
* uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
*/
ENTRY(atomic_add_64_nv)
1: ldxr x2, [x0] /* Load *target */
add x2, x2, x1 /* x2 = x2 + delta */
stxr w3, x2, [x0] /* Store *target */
cbnz w3, 1b /* Check if the store succeeded */
mov x0, x2 /* Return the new value */
ret
END(atomic_add_64_nv)
/*
* uint32_t
* atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
*/
ENTRY(atomic_cas_32)
1: ldxr w3, [x0] /* Load *target */
cmp w3, w1 /* Does *target == cmp? */
b.ne 2f /* If not exit */
stxr w4, w2, [x0] /* Store newval to *target */
cbnz w4, 1b /* Check if the store succeeded */
2: mov w0, w3 /* Return the old value */
ret
END(atomic_cas_32)
/*
* uint64_t
* atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
*/
ENTRY(atomic_cas_64)
1: ldxr x3, [x0] /* Load *target */
cmp x3, x1 /* Does *target == cmp? */
b.ne 2f /* If not exit */
stxr w4, x2, [x0] /* Store newval to *target */
cbnz w4, 1b /* Check if the store succeeded */
2: mov x0, x3 /* Return the old value */
ret
END(atomic_cas_64)
/*
* uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
*/
ENTRY(atomic_or_8_nv)
1: ldxrb w2, [x0] /* Load *target */
orr w2, w2, w1 /* w2 = w2 | value */
stxrb w3, w2, [x0] /* Store *target */
cbnz w3, 1b /* Check if the store succeeded */
mov w0, w2 /* Return the new value */
ret
END(atomic_or_8_nv)
ENTRY(membar_producer)
dmb ish
ret


@@ -28,40 +28,6 @@
#define _ASM
#include <sys/asm_linkage.h>
ENTRY(atomic_add_64_nv)
mov %rsi, %rax // %rax = delta addend
lock
xaddq %rsi, (%rdi) // %rsi = old value, (%rdi) = sum
addq %rsi, %rax // new value = original value + delta
ret
SET_SIZE(atomic_add_64_nv)
ENTRY(atomic_or_8_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
orb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_cas_32)
movl %esi, %eax
lock
cmpxchgl %edx, (%rdi)
ret
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
movq %rsi, %rax
lock
cmpxchgq %rdx, (%rdi)
ret
SET_SIZE(atomic_cas_64)
ENTRY(membar_producer)
sfence
ret


@@ -89,28 +89,6 @@
SET_SIZE(atomic_add_64_nv)
SET_SIZE(atomic_add_64)
ENTRY(atomic_or_8_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
movl 8(%esp), %ecx // %ecx = delta
orb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_cas_32)
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
lock
cmpxchgl %ecx, (%edx)
ret
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
pushl %ebx
pushl %esi


@@ -27,61 +27,6 @@
#include <machine/asm.h>
ENTRY(atomic_add_64_nv)
1: ldarx %r5,0,%r3
add %r5,%r4,%r5
stdcx. %r5,0,%r3
bne- 1b
mr %r3,%r5
blr
ENTRY(atomic_cas_32)
1: lwarx %r6,0,%r3
cmplw %r6,%r4
bne 2f
stwcx. %r5,0,%r3
bne- 1b
b 3f
2: stwcx. %r6,0,%r3 /* clear reservation */
3: mr %r3,%r6
blr
ENTRY(atomic_cas_64)
1: ldarx %r6,0,%r3
cmpld %r6,%r4
bne 2f
stdcx. %r5,0,%r3
bne- 1b
b 3f
2: stdcx. %r6,0,%r3 /* clear reservation */
3: mr %r3,%r6
blr
ENTRY(atomic_or_8_nv)
li %r6,3
andc. %r6,%r3,%r6 /* r6 = r3 & ~3 */
addi %r7,%r6,3
sub %r7,%r7,%r3 /* offset in r7 */
sldi %r7,%r7,3 /* bits to shift in r7 */
rlwinm %r4,%r4,0,24,31 /* mask and rotate the argument */
slw %r4,%r4,%r7
1: lwarx %r5,0,%r6
or %r5,%r4,%r5
stwcx. %r5,0,%r6
bne- 1b
srw %r3,%r5,%r7
rlwinm %r3,%r3,0,24,31 /* mask return value */
blr
ENTRY(membar_producer)
eieio
blr


@@ -39,67 +39,6 @@
#define __ASI_ATOMIC ASI_P
#endif
/*
* NOTE: If atomic_add_64 and atomic_add_64_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_64_nv.
*/
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_64_nv)
ALTENTRY(atomic_add_ptr)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long)
ALTENTRY(atomic_add_long_nv)
add_64:
ldx [%o0], %o2
1:
add %o2, %o1, %o3
casxa [%o0] __ASI_ATOMIC, %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
add %o2, %o1, %o0 ! return new value
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_long)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_ptr)
SET_SIZE(atomic_add_64_nv)
SET_SIZE(atomic_add_64)
/*
* NOTE: If atomic_or_8 and atomic_or_8_nv are ever
* separated, you need to also edit the libc sparcv9 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_8_nv.
*/
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar)
and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
set 0xff, %o3 ! %o3 = mask
sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
and %o1, %o3, %o1 ! %o1 = single byte value
andn %o0, 0x3, %o0 ! %o0 = word address
ld [%o0], %o2 ! read old value
1:
or %o2, %o1, %o5 ! or in the new value
casa [%o0] __ASI_ATOMIC, %o2, %o5
cmp %o2, %o5
bne,a,pn %icc, 1b
mov %o5, %o2 ! %o2 = old value
or %o2, %o1, %o5
and %o5, %o3, %o5
retl
srl %o5, %g1, %o0 ! %o0 = new value
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8_nv)
SET_SIZE(atomic_or_8)
/*
* Spitfires and Blackbirds have a problem with membars in the
* delay slot (SF_ERRATA_51). For safety's sake, we assume