[PowerPC] [MIPS] Implement 32-bit kernel emulation of atomic64 operations

This is a lock-based emulation of 64-bit atomics for kernel use, split off
from an earlier patch by jhibbits.

This is needed to unblock future improvements that reduce the need for
locking by using 64-bit atomic updates, which would otherwise be
unavailable on 32-bit platforms.

The implementation allows for future integration with userland atomic64,
but as that would imply a sysarch syscall for every use, the status quo
of userland doing its own locking may be for the best.
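
As an illustration of the API this unblocks, here is a minimal sketch of a
kernel-side caller, assuming a hypothetical 64-bit statistics counter (the
names below are illustrative, not part of this commit):

#include <machine/atomic.h>

/* Hypothetical 64-bit counter shared across CPUs. */
static volatile u_int64_t bytes_total;

static void
account(u_int64_t len)
{
	/*
	 * On 32-bit powerpc/mips this resolves to the lock-based
	 * emulation in subr_atomic64.c; on 64-bit platforms it remains
	 * a native atomic instruction sequence.
	 */
	atomic_add_64(&bytes_total, len);
}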

Submitted by:	jhibbits (original patch), kevans (mips bits)
Reviewed by:	jhibbits, jeff, kevans
Differential Revision:	https://reviews.freebsd.org/D22976
Brandon Bergren 2020-01-02 23:20:37 +00:00
parent 990beb037d
commit 9aafc7c052
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=356308
8 changed files with 236 additions and 3 deletions


@@ -33,7 +33,8 @@ __FBSDID("$FreeBSD$");
 #include <sys/atomic.h>
 
 #if !defined(__LP64__) && !defined(__mips_n32) && \
-    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+    !defined(HAS_EMULATED_ATOMIC64)
 
 #ifdef _KERNEL
 #include <sys/kernel.h>


@@ -42,7 +42,8 @@
 #endif
 
 #if !defined(__LP64__) && !defined(__mips_n32) && \
-    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+    !defined(HAS_EMULATED_ATOMIC64)
 extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
 extern void atomic_dec_64(volatile uint64_t *target);
 extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
@@ -109,7 +110,8 @@ atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
 #endif
 
 #if defined(__LP64__) || defined(__mips_n32) || \
-    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
+    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
+    defined(HAS_EMULATED_ATOMIC64)
 static __inline void
 atomic_dec_64(volatile uint64_t *target)
 {
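
The net effect of these two hunks: a platform that defines
HAS_EMULATED_ATOMIC64 (by including <sys/_atomic64e.h>, added below) is
treated the same as one with native 64-bit atomics, so this compat layer
stops declaring its own locked fallbacks and instead wraps machine/atomic.h.
A condensed sketch of the resulting selection logic; the atomic_dec_64()
body is assumed from context, as this hunk does not show it:

#if defined(__LP64__) || defined(__mips_n32) || \
    defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
    defined(HAS_EMULATED_ATOMIC64)
/* machine/atomic.h supplies atomic_*_64(); wrap it. */
static __inline void
atomic_dec_64(volatile uint64_t *target)
{
	atomic_subtract_64(target, 1);	/* assumed body */
}
#else
/* Otherwise the compat layer's separate implementation is used. */
extern void atomic_dec_64(volatile uint64_t *target);
#endif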


@@ -50,6 +50,7 @@ mips/mips/vm_machdep.c		standard
 # misc opt-in bits
 kern/kern_clocksource.c		standard
 kern/link_elf_obj.c		standard
+kern/subr_atomic64.c		optional	mips | mipsel | mipshf | mipselhf
 kern/subr_busdma_bufalloc.c	standard
 kern/subr_dummy_vdso_tc.c	standard
 kern/subr_sfbuf.c		optional	mips | mipsel | mipsn32


@@ -76,6 +76,7 @@ dev/uart/uart_cpu_powerpc.c	optional	uart
 dev/usb/controller/ehci_fsl.c	optional	ehci mpc85xx
 dev/vt/hw/ofwfb/ofwfb.c		optional	vt aim
 kern/kern_clocksource.c		standard
+kern/subr_atomic64.c		optional	powerpc | powerpcspe
 kern/subr_dummy_vdso_tc.c	standard
 kern/syscalls.c			optional	ktr
 kern/subr_sfbuf.c		standard

sys/kern/subr_atomic64.c (new file, 140 lines)

@@ -0,0 +1,140 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2019 Justin Hibbits
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>

enum {
	ATOMIC64_ADD,
	ATOMIC64_CLEAR,
	ATOMIC64_CMPSET,
	ATOMIC64_FCMPSET,
	ATOMIC64_FETCHADD,
	ATOMIC64_LOAD,
	ATOMIC64_SET,
	ATOMIC64_SUBTRACT,
	ATOMIC64_STORE,
	ATOMIC64_SWAP
};

#ifdef _KERNEL
#define	A64_POOL_SIZE	MAXCPU
/* Estimated size of a cacheline */
#define	CACHE_ALIGN	CACHE_LINE_SIZE

#define	GET_MUTEX(p) \
    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])

#define	LOCK_A64()			\
    struct mtx *_amtx = GET_MUTEX(p);	\
    if (smp_started) mtx_lock(_amtx)

#define	UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)

#define	ATOMIC64_EMU_UN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p) {			\
	u_int64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack

#define	ATOMIC64_EMU_BIN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) {	\
	u_int64_t tmp __unused;					\
	LOCK_A64();						\
	block;							\
	UNLOCK_A64();						\
	ret; } struct hack

static struct mtx a64_mtx_pool[A64_POOL_SIZE];

ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp));
ATOMIC64_EMU_BIN(set, void, *p |= v, return);
ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
ATOMIC64_EMU_BIN(store, void, *p = v, return);
ATOMIC64_EMU_BIN(swap, u_int64_t, tmp = *p; *p = v; v = tmp, return (v));

int
atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new)
{
	u_int64_t tmp;

	LOCK_A64();
	tmp = *p;
	if (tmp == old)
		*p = new;
	UNLOCK_A64();

	return (tmp == old);
}

int
atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new)
{
	u_int64_t tmp, tmp_old;

	LOCK_A64();
	tmp = *p;
	tmp_old = *old;
	if (tmp == tmp_old)
		*p = new;
	else
		*old = tmp;
	UNLOCK_A64();

	return (tmp == tmp_old);
}

static void
atomic64_mtxinit(void *x __unused)
{
	int i;

	for (i = 0; i < A64_POOL_SIZE; i++)
		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
}

SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);

#endif	/* _KERNEL */
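
The core idea in subr_atomic64.c is an address-hashed lock pool: GET_MUTEX()
maps the target's physical address (via pmap_kextract()) down to one of
A64_POOL_SIZE mutexes, so the same variable always hits the same lock while
unrelated variables usually spread across different locks, and locking is
skipped entirely until smp_started. A self-contained userland analogue of
the same technique, using POSIX threads (all names here are illustrative,
not part of the commit):

#include <pthread.h>
#include <stdint.h>

#define	POOL_SIZE	32	/* stands in for A64_POOL_SIZE (MAXCPU) */
#define	CACHE_ALIGN	64	/* stands in for CACHE_LINE_SIZE */

static pthread_mutex_t pool[POOL_SIZE];

/* Call once at startup; the kernel uses SYSINIT for this step. */
static void
pool_init(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++)
		pthread_mutex_init(&pool[i], NULL);
}

/* Hash the target's address to a lock, as GET_MUTEX() does. */
static pthread_mutex_t *
get_mutex(volatile uint64_t *p)
{
	return (&pool[((uintptr_t)p / CACHE_ALIGN) % POOL_SIZE]);
}

/* Lock-based equivalent of atomic_cmpset_64(). */
static int
cmpset64(volatile uint64_t *p, uint64_t cmp, uint64_t newval)
{
	pthread_mutex_t *m = get_mutex(p);
	int match;

	pthread_mutex_lock(m);
	match = (*p == cmp);
	if (match)
		*p = newval;
	pthread_mutex_unlock(m);
	return (match);
}

Every other emulated operation (add, swap, fetchadd, ...) follows the same
lock/modify/unlock shape, which is why the kernel file generates them from
two macros.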


@@ -38,6 +38,10 @@
 
 #include <sys/atomic_common.h>
 
+#if !defined(__mips_n64) && !defined(__mips_n32)
+#include <sys/_atomic64e.h>
+#endif
+
 /*
  * Note: All the 64-bit atomic operations are only atomic when running
  * in 64-bit mode.  It is assumed that code compiled for n32 and n64

@@ -40,6 +40,10 @@
 
 #include <sys/atomic_common.h>
 
+#ifndef __powerpc64__
+#include <sys/_atomic64e.h>
+#endif
+
 /*
  * The __ATOMIC_REL/ACQ() macros provide memory barriers only in conjunction
  * with the atomic lXarx/stXcx. sequences below. They are not exposed outside
sys/sys/_atomic64e.h (new file, 80 lines)

@@ -0,0 +1,80 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2019 Justin Hibbits
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_ATOMIC64E_H_
#define	_SYS_ATOMIC64E_H_

#ifndef _MACHINE_ATOMIC_H_
#error "This should not be included directly.  Include <machine/atomic.h>"
#endif

#ifdef _KERNEL
#define	HAS_EMULATED_ATOMIC64

/* Emulated versions of 64-bit atomic operations. */

void	atomic_add_64(volatile u_int64_t *, u_int64_t);
#define	atomic_add_acq_64	atomic_add_64
#define	atomic_add_rel_64	atomic_add_64

int	atomic_cmpset_64(volatile u_int64_t *, u_int64_t, u_int64_t);
#define	atomic_cmpset_acq_64	atomic_cmpset_64
#define	atomic_cmpset_rel_64	atomic_cmpset_64

void	atomic_clear_64(volatile u_int64_t *, u_int64_t);
#define	atomic_clear_acq_64	atomic_clear_64
#define	atomic_clear_rel_64	atomic_clear_64

int	atomic_fcmpset_64(volatile u_int64_t *, u_int64_t *, u_int64_t);
#define	atomic_fcmpset_acq_64	atomic_fcmpset_64
#define	atomic_fcmpset_rel_64	atomic_fcmpset_64

u_int64_t	atomic_fetchadd_64(volatile u_int64_t *, u_int64_t);

u_int64_t	atomic_load_64(volatile u_int64_t *);
#define	atomic_load_acq_64	atomic_load_64

void	atomic_readandclear_64(volatile u_int64_t *);

void	atomic_set_64(volatile u_int64_t *, u_int64_t);
#define	atomic_set_acq_64	atomic_set_64
#define	atomic_set_rel_64	atomic_set_64

void	atomic_subtract_64(volatile u_int64_t *, u_int64_t);
#define	atomic_subtract_acq_64	atomic_subtract_64
#define	atomic_subtract_rel_64	atomic_subtract_64

void	atomic_store_64(volatile u_int64_t *, u_int64_t);
#define	atomic_store_rel_64	atomic_store_64

u_int64_t	atomic_swap_64(volatile u_int64_t *, u_int64_t);

#endif /* _KERNEL */
#endif /* _SYS_ATOMIC64E_H_ */
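
Because every emulated operation acquires and releases a mutex, the plain
function already orders like an acquire and a release, which is why the
_acq_/_rel_ variants above are plain #defines rather than separate
implementations. Consumers use the same idioms as on 64-bit platforms; for
example, a minimal sketch of a bounded-increment loop built on
atomic_fcmpset_64 (the counter and limit are hypothetical):

static volatile u_int64_t stat64;

static void
stat64_add_clamped(u_int64_t delta, u_int64_t limit)
{
	u_int64_t old, new;

	old = atomic_load_64(&stat64);
	do {
		new = old + delta;
		if (new > limit)
			new = limit;
		/* On failure atomic_fcmpset_64() refreshes 'old'. */
	} while (!atomic_fcmpset_64(&stat64, &old, new));
}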