- Reduce the number of atomic operations that need to be implemented in asm
  by implementing some of them in terms of existing ones.
- Allow ZFS to be compiled on all archs by using atomic operations protected
  by a global mutex on archs that don't have, or can't provide, all of the
  atomic operations ZFS needs.
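The fallback described in the second item boils down to the sketch below. This is an editorial illustration, not part of the diff; the emul_* names are hypothetical, and the commit's actual implementation is the opensolaris_atomic.c file added further down.

#include <pthread.h>
#include <stdint.h>

/* A single process-wide lock serializes every emulated atomic operation. */
static pthread_mutex_t emul_atomic_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Emulated 64-bit atomic add that also returns the new value. */
static uint64_t
emul_atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	uint64_t newval;

	pthread_mutex_lock(&emul_atomic_mtx);
	newval = (*target += delta);
	pthread_mutex_unlock(&emul_atomic_mtx);
	return (newval);
}

/* Operations that are not primitives are built from existing ones. */
static uint64_t
emul_atomic_inc_64_nv(volatile uint64_t *target)
{
	return (emul_atomic_add_64_nv(target, 1));
}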
Pawel Jakub Dawidek 2007-06-08 12:35:47 +00:00
parent 083c4dd695
commit 3b7917d766
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170431
19 changed files with 567 additions and 3423 deletions

View File

@@ -11,9 +11,7 @@ SUBDIR= libavl \
.if ${MK_ZFS} != "no"
_libzfs= libzfs
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
_libzpool= libzpool
.endif
.endif
.include <bsd.subdir.mk>

View File

@@ -11,7 +11,13 @@
# LIST_SRCS
.PATH: ${.CURDIR}/../../../sys/contrib/opensolaris/uts/common/os
# ATOMIC_SRCS
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "ia64"
.PATH: ${.CURDIR}/../../../sys/contrib/opensolaris/common/atomic/${MACHINE_ARCH}
ATOMIC_SRCS= atomic.S
.else
.PATH: ${.CURDIR}/../../../sys/compat/opensolaris/kern
ATOMIC_SRCS= opensolaris_atomic.c
.endif
LIB= zpool
@@ -19,7 +25,6 @@ ZFS_COMMON_SRCS= ${ZFS_COMMON_OBJS:C/.o$/.c/} vdev_file.c
ZFS_SHARED_SRCS= ${ZFS_SHARED_OBJS:C/.o$/.c/}
KERNEL_SRCS= kernel.c taskq.c util.c
LIST_SRCS= list.c
ATOMIC_SRCS= atomic.S
SRCS= ${ZFS_COMMON_SRCS} ${ZFS_SHARED_SRCS} \
${KERNEL_SRCS} ${LIST_SRCS} ${ATOMIC_SRCS}

View File

@@ -5,9 +5,7 @@
SUBDIR= ${_ztest}
.if ${MK_ZFS} != "no"
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
_ztest= ztest
.endif
.endif
.include <bsd.subdir.mk>

View File

@@ -5,9 +5,7 @@
SUBDIR= ${_zdb}
.if ${MK_ZFS} != "no"
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
_zdb= zdb
.endif
.endif
.include <bsd.subdir.mk>

View File

@@ -0,0 +1,133 @@
/*-
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#ifdef _KERNEL
#include <sys/kernel.h>
struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
#else
#include <pthread.h>
#define mtx_lock(lock) pthread_mutex_lock(lock)
#define mtx_unlock(lock) pthread_mutex_unlock(lock)
static pthread_mutex_t atomic_mtx;
static __attribute__((constructor)) void
atomic_init(void)
{
pthread_mutex_init(&atomic_mtx, NULL);
}
#endif
#ifndef __LP64__
void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{
mtx_lock(&atomic_mtx);
*target += delta;
mtx_unlock(&atomic_mtx);
}
#endif
uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
uint64_t newval;
mtx_lock(&atomic_mtx);
newval = (*target += delta);
mtx_unlock(&atomic_mtx);
return (newval);
}
#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__)
void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
mtx_lock(&atomic_mtx);
*target |= value;
mtx_unlock(&atomic_mtx);
}
#endif
uint8_t
atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
{
uint8_t newval;
mtx_lock(&atomic_mtx);
newval = (*target |= value);
mtx_unlock(&atomic_mtx);
return (newval);
}
#ifndef __LP64__
void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
void *oldval, **trg;
mtx_lock(&atomic_mtx);
trg = __DEVOLATILE(void **, target);
oldval = *trg;
if (oldval == cmp)
*trg = newval;
mtx_unlock(&atomic_mtx);
return (oldval);
}
#endif
#ifndef __sparc64__
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
uint64_t oldval;
mtx_lock(&atomic_mtx);
oldval = *target;
if (oldval == cmp)
*target = newval;
mtx_unlock(&atomic_mtx);
return (oldval);
}
#endif
void
membar_producer(void)
{
/* nothing */
}

View File

@@ -0,0 +1,114 @@
/*-
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_ATOMIC_H_
#define _OPENSOLARIS_SYS_ATOMIC_H_
#include <sys/types.h>
#include <machine/atomic.h>
#ifndef __LP64__
extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
extern void *atomic_cas_ptr(volatile void *target, void *cmp, void *newval);
#endif
#ifndef __sparc64__
extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
uint64_t newval);
#endif
extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
extern void membar_producer(void);
#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__)
extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
#else
static __inline void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
atomic_set_8(target, value);
}
#endif
static __inline uint32_t
atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
{
return (atomic_fetchadd_32(target, delta) + delta);
}
static __inline u_int
atomic_add_int_nv(volatile u_int *target, int delta)
{
return (atomic_add_32_nv(target, delta));
}
static __inline void
atomic_dec_32(volatile uint32_t *target)
{
atomic_subtract_32(target, 1);
}
static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
return (atomic_fetchadd_32(target, -1) - 1);
}
static __inline void
atomic_inc_32(volatile uint32_t *target)
{
atomic_add_32(target, 1);
}
static __inline uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
return (atomic_add_32_nv(target, 1));
}
static __inline void
atomic_inc_64(volatile uint64_t *target)
{
atomic_add_64(target, 1);
}
static __inline uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
return (atomic_add_64_nv(target, 1));
}
#ifdef __LP64__
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
return ((void *)atomic_cas_64((uint64_t *)target, (uint64_t)cmp,
(uint64_t)newval));
}
#endif
#endif /* !_OPENSOLARIS_SYS_ATOMIC_H_ */
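For context, a minimal sketch of how ZFS-style consumers use the wrappers this compat header provides. It is not from the commit: the struct, the flag value, and the function names are made up for illustration, and the include assumes the header is reachable as <sys/atomic.h> through the build's include paths.

#include <sys/types.h>
#include <sys/atomic.h>		/* the compat header above */

struct obj {
	volatile uint32_t	ref;
	volatile uint8_t	flags;
};

#define	OBJ_DIRTY	0x01

/* The _nv variant is needed to detect the 1 -> 0 transition atomically. */
static int
obj_release(struct obj *o)
{

	return (atomic_dec_32_nv(&o->ref) == 0);	/* 1 = caller frees */
}

/* atomic_or_8() maps to atomic_set_8() except on sparc64/powerpc/arm. */
static void
obj_mark_dirty(struct obj *o)
{

	atomic_or_8(&o->flags, OBJ_DIRTY);
}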

View File

@@ -31,296 +31,7 @@
#define _ASM
#include <sys/asm_linkage.h>
#if defined(_KERNEL)
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
#endif
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
lock
incb (%rdi)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
lock
incw (%rdi)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
lock
incl (%rdi)
ret
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_ulong)
lock
incq (%rdi)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movb (%rdi), %al // %al = old value
1:
leaq 1(%rax), %rcx // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
leaq 1(%rax), %rcx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
movl (%rdi), %eax // %eax = old value
1:
leaq 1(%rax), %rcx // %ecx = new value
lock
cmpxchgl %ecx, (%rdi) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
ENTRY(atomic_inc_64_nv)
ALTENTRY(atomic_inc_ulong_nv)
movq (%rdi), %rax // %rax = old value
1:
leaq 1(%rax), %rcx // %rcx = new value
lock
cmpxchgq %rcx, (%rdi) // try to stick it in
jne 1b
movq %rcx, %rax // return new value
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_64_nv)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
lock
decb (%rdi)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
lock
decw (%rdi)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
lock
decl (%rdi)
ret
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_ulong)
lock
decq (%rdi)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movb (%rdi), %al // %al = old value
1:
leaq -1(%rax), %rcx // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
leaq -1(%rax), %rcx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
movl (%rdi), %eax // %eax = old value
1:
leaq -1(%rax), %rcx // %ecx = new value
lock
cmpxchgl %ecx, (%rdi) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
ENTRY(atomic_dec_64_nv)
ALTENTRY(atomic_dec_ulong_nv)
movq (%rdi), %rax // %rax = old value
1:
leaq -1(%rax), %rcx // %rcx = new value
lock
cmpxchgq %rcx, (%rdi) // try to stick it in
jne 1b
movq %rcx, %rax // return new value
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_64_nv)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
lock
orb %sil, (%rdi)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
lock
orw %si, (%rdi)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
lock
orl %esi, (%rdi)
ret
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_ulong)
lock
orq %rsi, (%rdi)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
lock
andb %sil, (%rdi)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
lock
andw %si, (%rdi)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
lock
andl %esi, (%rdi)
ret
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_ulong)
lock
andq %rsi, (%rdi)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_64)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
addb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movw (%rdi), %ax // %ax = old value
1:
movw %si, %cx
addw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
addl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_add_64_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
@@ -330,68 +41,9 @@
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_64_nv)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
andb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
movw %si, %cx
andw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
andl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
ENTRY(atomic_and_64_nv)
ALTENTRY(atomic_and_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
andq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_64_nv)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
@@ -401,160 +53,16 @@
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
movw %si, %cx
orw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
orl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
ENTRY(atomic_or_64_nv)
ALTENTRY(atomic_or_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
orq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_64_nv)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movzbl %sil, %eax
lock
cmpxchgb %dl, (%rdi)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movzwl %si, %eax
lock
cmpxchgw %dx, (%rdi)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
movl %esi, %eax
lock
cmpxchgl %edx, (%rdi)
ret
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_cas_64)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
movq %rsi, %rax
lock
cmpxchgq %rdx, (%rdi)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movzbl %sil, %eax
lock
xchgb %al, (%rdi)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movzwl %si, %eax
lock
xchgw %ax, (%rdi)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
movl %esi, %eax
lock
xchgl %eax, (%rdi)
ret
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
ALTENTRY(atomic_swap_ulong)
ALTENTRY(atomic_swap_ptr)
movq %rsi, %rax
lock
xchgq %rax, (%rdi)
ret
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
xorl %eax, %eax
lock
btsq %rsi, (%rdi)
jnc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
xorl %eax, %eax
lock
btrq %rsi, (%rdi)
jc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_clear_long_excl)
ENTRY(membar_enter)
ALTENTRY(membar_exit)
mfence
ret
SET_SIZE(membar_exit)
SET_SIZE(membar_enter)
ENTRY(membar_producer)
sfence
ret
SET_SIZE(membar_producer)
ENTRY(membar_consumer)
lfence
ret
SET_SIZE(membar_consumer)

View File

@@ -31,327 +31,6 @@
#define _ASM
#include <sys/asm_linkage.h>
#if defined(_KERNEL)
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
#endif
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
movl 4(%esp), %eax
lock
incb (%eax)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
movl 4(%esp), %eax
lock
incw (%eax)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
ALTENTRY(atomic_inc_ulong)
movl 4(%esp), %eax
lock
incl (%eax)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
leal 1(%eax), %ecx // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
leal 1(%eax), %ecx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
ALTENTRY(atomic_inc_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
leal 1(%eax), %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
incl %ebx // %ecx:%ebx = 1
addl %eax, %ebx
adcl %edx, %ecx // add in the carry from inc
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_inc_64_nv)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
movl 4(%esp), %eax
lock
decb (%eax)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
movl 4(%esp), %eax
lock
decw (%eax)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
ALTENTRY(atomic_dec_ulong)
movl 4(%esp), %eax
lock
decl (%eax)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
leal -1(%eax), %ecx // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
leal -1(%eax), %ecx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
ALTENTRY(atomic_dec_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
leal -1(%eax), %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
not %ecx
not %ebx // %ecx:%ebx = -1
addl %eax, %ebx
adcl %edx, %ecx // add in the carry from inc
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_dec_64_nv)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
orb %cl, (%eax)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
orw %cx, (%eax)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
ALTENTRY(atomic_or_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
orl %ecx, (%eax)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
andb %cl, (%eax)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
andw %cx, (%eax)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
ALTENTRY(atomic_and_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
andl %ecx, (%eax)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
movl 8(%esp), %ecx // %ecx = delta
addb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
addw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
addl %eax, %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_64_nv)
pushl %edi
@@ -376,7 +55,6 @@
SET_SIZE(atomic_add_64)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
@@ -387,160 +65,9 @@
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_or_uchar_nv)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
orw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
ALTENTRY(atomic_or_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
orl %eax, %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx // %ecx:%ebx = delta
orl %eax, %ebx
orl %edx, %ecx // %ecx:%ebx = new value
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_or_64_nv)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
movl 8(%esp), %ecx // %ecx = delta
andb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
andw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
ALTENTRY(atomic_and_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
andl %eax, %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx // %ecx:%ebx = delta
andl %eax, %ebx
andl %edx, %ecx // %ecx:%ebx = new value
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_and_64_nv)
SET_SIZE(atomic_and_64)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
movb 12(%esp), %cl
lock
cmpxchgb %cl, (%edx)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
movw 12(%esp), %cx
lock
cmpxchgw %cx, (%edx)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
ENTRY(atomic_cas_ptr)
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
@@ -548,9 +75,6 @@
cmpxchgl %ecx, (%edx)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
pushl %ebx
@@ -567,89 +91,8 @@
ret
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
lock
xchgb %al, (%edx)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
lock
xchgw %ax, (%edx)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
ALTENTRY(atomic_swap_ptr)
ALTENTRY(atomic_swap_ulong)
movl 4(%esp), %edx
movl 8(%esp), %eax
lock
xchgl %eax, (%edx)
ret
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
pushl %esi
pushl %ebx
movl 12(%esp), %esi
movl 16(%esp), %ebx
movl 20(%esp), %ecx
movl (%esi), %eax
movl 4(%esi), %edx // %edx:%eax = old value
1:
lock
cmpxchg8b (%esi)
jne 1b
popl %ebx
popl %esi
ret
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
movl 4(%esp), %edx // %edx = target address
movl 8(%esp), %ecx // %ecx = bit id
xorl %eax, %eax
lock
btsl %ecx, (%edx)
jnc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
movl 4(%esp), %edx // %edx = target address
movl 8(%esp), %ecx // %ecx = bit id
xorl %eax, %eax
lock
btrl %ecx, (%edx)
jc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_clear_long_excl)
ENTRY(membar_enter)
ALTENTRY(membar_exit)
ALTENTRY(membar_producer)
ALTENTRY(membar_consumer)
ENTRY(membar_producer)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_consumer)
SET_SIZE(membar_producer)
SET_SIZE(membar_exit)
SET_SIZE(membar_enter)

View File

@@ -54,9 +54,6 @@
#include <sys/sunddi.h>
#include <sys/dnlc.h>
struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
struct mtx zfs_debug_mtx;
MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");

View File

@ -27,8 +27,6 @@
#ifndef _IA32_SYS_ASM_LINKAGE_H
#define _IA32_SYS_ASM_LINKAGE_H
#ifdef __cplusplus
extern "C" {
#endif
@@ -39,157 +37,10 @@ extern "C" {
* make annoying differences in assembler syntax go away
*/
/*
* D16 and A16 are used to insert instructions prefixes; the
* macros help the assembler code be slightly more portable.
*/
#if !defined(__GNUC_AS__)
/*
* /usr/ccs/bin/as prefixes are parsed as separate instructions
*/
#define D16 data16;
#define A16 addr16;
/*
* (There are some weird constructs in constant expressions)
*/
#define _CONST(const) [const]
#define _BITNOT(const) -1!_CONST(const)
#define _MUL(a, b) _CONST(a \* b)
#else
/*
* Why not use the 'data16' and 'addr16' prefixes .. well, the
* assembler doesn't quite believe in real mode, and thus argues with
* us about what we're trying to do.
*/
#define D16 .byte 0x66;
#define A16 .byte 0x67;
#define _CONST(const) (const)
#define _BITNOT(const) ~_CONST(const)
#define _MUL(a, b) _CONST(a * b)
#endif
/*
* C pointers are different sizes between i386 and amd64.
* These constants can be used to compute offsets into pointer arrays.
*/
#if defined(__amd64)
#define CLONGSHIFT 3
#define CLONGSIZE 8
#define CLONGMASK 7
#elif defined(__i386)
#define CLONGSHIFT 2
#define CLONGSIZE 4
#define CLONGMASK 3
#endif
/*
* Since we know we're either ILP32 or LP64 ..
*/
#define CPTRSHIFT CLONGSHIFT
#define CPTRSIZE CLONGSIZE
#define CPTRMASK CLONGMASK
#if CPTRSIZE != (1 << CPTRSHIFT) || CLONGSIZE != (1 << CLONGSHIFT)
#error "inconsistent shift constants"
#endif
#if CPTRMASK != (CPTRSIZE - 1) || CLONGMASK != (CLONGSIZE - 1)
#error "inconsistent mask constants"
#endif
#if defined(__i386__) || defined(__amd64__)
#define ASM_ENTRY_ALIGN 16
/*
* SSE register alignment and save areas
*/
#define XMM_SIZE 16
#define XMM_ALIGN 16
#if defined(__amd64)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp; \
movq %rsp, sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp
#elif defined(__i386)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp; \
movl %esp, sreg; \
addl $XMM_ALIGN, sreg; \
andl $_BITNOT(XMM_ALIGN-1), sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp;
#endif /* __i386 */
/*
* profiling causes definitions of the MCOUNT and RTMCOUNT
* particular to the type
*/
#ifdef GPROF
#define MCOUNT(x) \
pushl %ebp; \
movl %esp, %ebp; \
call _mcount; \
popl %ebp
#endif /* GPROF */
#ifdef PROF
#define MCOUNT(x) \
/* CSTYLED */ \
.lcomm .L_/**/x/**/1, 4, 4; \
pushl %ebp; \
movl %esp, %ebp; \
/* CSTYLED */ \
movl $.L_/**/x/**/1, %edx; \
call _mcount; \
popl %ebp
#endif /* PROF */
/*
* if we are not profiling, MCOUNT should be defined to nothing
*/
#if !defined(PROF) && !defined(GPROF)
#define MCOUNT(x)
#endif /* !defined(PROF) && !defined(GPROF) */
#define RTMCOUNT(x) MCOUNT(x)
/*
* Macro to define weak symbol aliases. These are similar to the ANSI-C
* #pragma weak name = _name
* except a compiler can determine type. The assembler must be told. Hence,
* the second parameter must be the type of the symbol (i.e.: function,...)
*/
#define ANSI_PRAGMA_WEAK(sym, stype) \
.weak sym; \
.type sym, @stype; \
/* CSTYLED */ \
sym = _/**/sym
/*
* Like ANSI_PRAGMA_WEAK(), but for unrelated names, as in:
* #pragma weak sym1 = sym2
*/
#define ANSI_PRAGMA_WEAK2(sym1, sym2, stype) \
.weak sym1; \
.type sym1, @stype; \
sym1 = sym2
/*
* ENTRY provides the standard procedure entry code and an easy way to
* insert the calls to mcount for profiling. ENTRY_NP is identical, but
@@ -200,46 +51,8 @@ sym1 = sym2
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define RTENTRY(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: RTMCOUNT(x)
/*
* ENTRY2 is identical to ENTRY but provides two labels for the entry point.
*/
#define ENTRY2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y:
/*
* ALTENTRY provides for additional entry points.
*/
@@ -248,52 +61,45 @@ x: ; \
.type x, @function; \
x:
/*
* DGDEF and DGDEF2 provide global data declarations.
*
* DGDEF provides a word aligned word of storage.
*
* DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This
* implies this macro is best used for byte arrays.
*
* DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
*/
#define DGDEF2(name, sz) \
.data; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF3(name, sz, algn) \
.data; \
.align algn; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF(name) DGDEF3(name, 4, 4)
/*
* SET_SIZE trails a function and set the size for the ELF symbol table.
*/
#define SET_SIZE(x) \
.size x, [.-x]
#elif defined(__sparc64__)
/*
* NWORD provides native word value.
* ENTRY provides the standard procedure entry code and an easy way to
* insert the calls to mcount for profiling. ENTRY_NP is identical, but
* never calls mcount.
*/
#if defined(__amd64)
#define ENTRY(x) \
.section ".text"; \
.align 4; \
.global x; \
.type x, @function; \
x:
/*CSTYLED*/
#define NWORD quad
/*
* ALTENTRY provides for additional entry points.
*/
#define ALTENTRY(x) \
.global x; \
.type x, @function; \
x:
#elif defined(__i386)
/*
* SET_SIZE trails a function and set the size for the ELF symbol table.
*/
#define SET_SIZE(x) \
.size x, (.-x)
#define NWORD long
#else
#endif /* __i386 */
#error Unsupported architecture.
#endif
#endif /* _ASM */

View File

@@ -1,431 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_ATOMIC_H
#define _SYS_ATOMIC_H
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_KERNEL) && defined(__GNUC__) && defined(_ASM_INLINES) && \
(defined(__i386) || defined(__amd64))
#include <asm/atomic.h>
#endif
#if defined(_KERNEL) || defined(__STDC__)
/*
* Increment target.
*/
extern void atomic_inc_8(volatile uint8_t *);
extern void atomic_inc_uchar(volatile uchar_t *);
extern void atomic_inc_16(volatile uint16_t *);
extern void atomic_inc_ushort(volatile ushort_t *);
extern void atomic_inc_32(volatile uint32_t *);
extern void atomic_inc_uint(volatile uint_t *);
extern void atomic_inc_ulong(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_inc_64(volatile uint64_t *);
#endif
/*
* Decrement target
*/
extern void atomic_dec_8(volatile uint8_t *);
extern void atomic_dec_uchar(volatile uchar_t *);
extern void atomic_dec_16(volatile uint16_t *);
extern void atomic_dec_ushort(volatile ushort_t *);
extern void atomic_dec_32(volatile uint32_t *);
extern void atomic_dec_uint(volatile uint_t *);
extern void atomic_dec_ulong(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_dec_64(volatile uint64_t *);
#endif
/*
* Add delta to target
*/
#ifdef __i386__
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_add_64(volatile uint64_t *, int64_t);
#endif
#endif
/*
* logical OR bits with target
*/
extern void atomic_or_8(volatile uint8_t *, uint8_t);
extern void atomic_or_uchar(volatile uchar_t *, uchar_t);
extern void atomic_or_16(volatile uint16_t *, uint16_t);
extern void atomic_or_ushort(volatile ushort_t *, ushort_t);
extern void atomic_or_32(volatile uint32_t *, uint32_t);
extern void atomic_or_uint(volatile uint_t *, uint_t);
extern void atomic_or_ulong(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_or_64(volatile uint64_t *, uint64_t);
#endif
/*
* logical AND bits with target
*/
extern void atomic_and_8(volatile uint8_t *, uint8_t);
extern void atomic_and_uchar(volatile uchar_t *, uchar_t);
extern void atomic_and_16(volatile uint16_t *, uint16_t);
extern void atomic_and_ushort(volatile ushort_t *, ushort_t);
extern void atomic_and_32(volatile uint32_t *, uint32_t);
extern void atomic_and_uint(volatile uint_t *, uint_t);
extern void atomic_and_ulong(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_and_64(volatile uint64_t *, uint64_t);
#endif
/*
* As above, but return the new value. Note that these _nv() variants are
* substantially more expensive on some platforms than the no-return-value
* versions above, so don't use them unless you really need to know the
* new value *atomically* (e.g. when decrementing a reference count and
* checking whether it went to zero).
*/
/*
* Increment target and return new value.
*/
extern uint8_t atomic_inc_8_nv(volatile uint8_t *);
extern uchar_t atomic_inc_uchar_nv(volatile uchar_t *);
extern uint16_t atomic_inc_16_nv(volatile uint16_t *);
extern ushort_t atomic_inc_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_inc_32_nv(volatile uint32_t *);
extern uint_t atomic_inc_uint_nv(volatile uint_t *);
extern ulong_t atomic_inc_ulong_nv(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_inc_64_nv(volatile uint64_t *);
#endif
/*
* Decrement target and return new value.
*/
extern uint8_t atomic_dec_8_nv(volatile uint8_t *);
extern uchar_t atomic_dec_uchar_nv(volatile uchar_t *);
extern uint16_t atomic_dec_16_nv(volatile uint16_t *);
extern ushort_t atomic_dec_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_dec_32_nv(volatile uint32_t *);
extern uint_t atomic_dec_uint_nv(volatile uint_t *);
extern ulong_t atomic_dec_ulong_nv(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_dec_64_nv(volatile uint64_t *);
#endif
/*
* Add delta to target
*/
extern uint8_t atomic_add_8_nv(volatile uint8_t *, int8_t);
extern uchar_t atomic_add_char_nv(volatile uchar_t *, signed char);
extern uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
extern ushort_t atomic_add_short_nv(volatile ushort_t *, short);
extern uint32_t atomic_add_32_nv(volatile uint32_t *, int32_t);
extern uint_t atomic_add_int_nv(volatile uint_t *, int);
extern void *atomic_add_ptr_nv(volatile void *, ssize_t);
extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
#endif
/*
* logical OR bits with target and return new value.
*/
extern uint8_t atomic_or_8_nv(volatile uint8_t *, uint8_t);
extern uchar_t atomic_or_uchar_nv(volatile uchar_t *, uchar_t);
extern uint16_t atomic_or_16_nv(volatile uint16_t *, uint16_t);
extern ushort_t atomic_or_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_or_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_or_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_or_ulong_nv(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_or_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
* logical AND bits with target and return new value.
*/
extern uint8_t atomic_and_8_nv(volatile uint8_t *, uint8_t);
extern uchar_t atomic_and_uchar_nv(volatile uchar_t *, uchar_t);
extern uint16_t atomic_and_16_nv(volatile uint16_t *, uint16_t);
extern ushort_t atomic_and_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_and_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_and_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_and_ulong_nv(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_and_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
* If *arg1 == arg2, set *arg1 = arg3; return old value
*/
extern uint8_t atomic_cas_8(volatile uint8_t *, uint8_t, uint8_t);
extern uchar_t atomic_cas_uchar(volatile uchar_t *, uchar_t, uchar_t);
extern uint16_t atomic_cas_16(volatile uint16_t *, uint16_t, uint16_t);
extern ushort_t atomic_cas_ushort(volatile ushort_t *, ushort_t, ushort_t);
extern uint32_t atomic_cas_32(volatile uint32_t *, uint32_t, uint32_t);
extern uint_t atomic_cas_uint(volatile uint_t *, uint_t, uint_t);
extern void *atomic_cas_ptr(volatile void *, void *, void *);
extern ulong_t atomic_cas_ulong(volatile ulong_t *, ulong_t, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_cas_64(volatile uint64_t *, uint64_t, uint64_t);
#endif
/*
* Swap target and return old value
*/
extern uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);
extern uchar_t atomic_swap_uchar(volatile uchar_t *, uchar_t);
extern uint16_t atomic_swap_16(volatile uint16_t *, uint16_t);
extern ushort_t atomic_swap_ushort(volatile ushort_t *, ushort_t);
extern uint32_t atomic_swap_32(volatile uint32_t *, uint32_t);
extern uint_t atomic_swap_uint(volatile uint_t *, uint_t);
extern void *atomic_swap_ptr(volatile void *, void *);
extern ulong_t atomic_swap_ulong(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
#endif
/*
* Perform an exclusive atomic bit set/clear on a target.
* Returns 0 if bit was sucessfully set/cleared, or -1
* if the bit was already set/cleared.
*/
extern int atomic_set_long_excl(volatile ulong_t *, uint_t);
extern int atomic_clear_long_excl(volatile ulong_t *, uint_t);
/*
* Generic memory barrier used during lock entry, placed after the
* memory operation that acquires the lock to guarantee that the lock
* protects its data. No stores from after the memory barrier will
* reach visibility, and no loads from after the barrier will be
* resolved, before the lock acquisition reaches global visibility.
*/
extern void membar_enter(void);
/*
* Generic memory barrier used during lock exit, placed before the
* memory operation that releases the lock to guarantee that the lock
* protects its data. All loads and stores issued before the barrier
* will be resolved before the subsequent lock update reaches visibility.
*/
extern void membar_exit(void);
/*
* Arrange that all stores issued before this point in the code reach
* global visibility before any stores that follow; useful in producer
* modules that update a data item, then set a flag that it is available.
* The memory barrier guarantees that the available flag is not visible
* earlier than the updated data, i.e. it imposes store ordering.
*/
extern void membar_producer(void);
/*
* Arrange that all loads issued before this point in the code are
* completed before any subsequent loads; useful in consumer modules
* that check to see if data is available and read the data.
* The memory barrier guarantees that the data is not sampled until
* after the available flag has been seen, i.e. it imposes load ordering.
*/
extern void membar_consumer(void);
#endif
#if !defined(_KERNEL) && !defined(__STDC__)
extern void atomic_inc_8();
extern void atomic_inc_uchar();
extern void atomic_inc_16();
extern void atomic_inc_ushort();
extern void atomic_inc_32();
extern void atomic_inc_uint();
extern void atomic_inc_ulong();
#if defined(_INT64_TYPE)
extern void atomic_inc_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_dec_8();
extern void atomic_dec_uchar();
extern void atomic_dec_16();
extern void atomic_dec_ushort();
extern void atomic_dec_32();
extern void atomic_dec_uint();
extern void atomic_dec_ulong();
#if defined(_INT64_TYPE)
extern void atomic_dec_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_add_8();
extern void atomic_add_char();
extern void atomic_add_16();
extern void atomic_add_short();
extern void atomic_add_32();
extern void atomic_add_int();
extern void atomic_add_ptr();
extern void atomic_add_long();
#if defined(_INT64_TYPE)
extern void atomic_add_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_or_8();
extern void atomic_or_uchar();
extern void atomic_or_16();
extern void atomic_or_ushort();
extern void atomic_or_32();
extern void atomic_or_uint();
extern void atomic_or_ulong();
#if defined(_INT64_TYPE)
extern void atomic_or_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_and_8();
extern void atomic_and_uchar();
extern void atomic_and_16();
extern void atomic_and_ushort();
extern void atomic_and_32();
extern void atomic_and_uint();
extern void atomic_and_ulong();
#if defined(_INT64_TYPE)
extern void atomic_and_64();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_inc_8_nv();
extern uchar_t atomic_inc_uchar_nv();
extern uint16_t atomic_inc_16_nv();
extern ushort_t atomic_inc_ushort_nv();
extern uint32_t atomic_inc_32_nv();
extern uint_t atomic_inc_uint_nv();
extern ulong_t atomic_inc_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_inc_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_dec_8_nv();
extern uchar_t atomic_dec_uchar_nv();
extern uint16_t atomic_dec_16_nv();
extern ushort_t atomic_dec_ushort_nv();
extern uint32_t atomic_dec_32_nv();
extern uint_t atomic_dec_uint_nv();
extern ulong_t atomic_dec_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_dec_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_add_8_nv();
extern uchar_t atomic_add_char_nv();
extern uint16_t atomic_add_16_nv();
extern ushort_t atomic_add_short_nv();
extern uint32_t atomic_add_32_nv();
extern uint_t atomic_add_int_nv();
extern void *atomic_add_ptr_nv();
extern ulong_t atomic_add_long_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_add_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_or_8_nv();
extern uchar_t atomic_or_uchar_nv();
extern uint16_t atomic_or_16_nv();
extern ushort_t atomic_or_ushort_nv();
extern uint32_t atomic_or_32_nv();
extern uint_t atomic_or_uint_nv();
extern ulong_t atomic_or_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_or_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_and_8_nv();
extern uchar_t atomic_and_uchar_nv();
extern uint16_t atomic_and_16_nv();
extern ushort_t atomic_and_ushort_nv();
extern uint32_t atomic_and_32_nv();
extern uint_t atomic_and_uint_nv();
extern ulong_t atomic_and_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_and_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_cas_8();
extern uchar_t atomic_cas_uchar();
extern uint16_t atomic_cas_16();
extern ushort_t atomic_cas_ushort();
extern uint32_t atomic_cas_32();
extern uint_t atomic_cas_uint();
extern void *atomic_cas_ptr();
extern ulong_t atomic_cas_ulong();
#if defined(_INT64_TYPE)
extern uint64_t atomic_cas_64();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_swap_8();
extern uchar_t atomic_swap_uchar();
extern uint16_t atomic_swap_16();
extern ushort_t atomic_swap_ushort();
extern uint32_t atomic_swap_32();
extern uint_t atomic_swap_uint();
extern void *atomic_swap_ptr();
extern ulong_t atomic_swap_ulong();
#if defined(_INT64_TYPE)
extern uint64_t atomic_swap_64();
#endif /* defined(_INT64_TYPE) */
extern int atomic_set_long_excl();
extern int atomic_clear_long_excl();
extern void membar_enter();
extern void membar_exit();
extern void membar_producer();
extern void membar_consumer();
#endif
#if defined(_KERNEL)
#if defined(_LP64) || defined(_ILP32)
#define atomic_add_ip atomic_add_long
#define atomic_add_ip_nv atomic_add_long_nv
#define casip atomic_cas_ulong
#endif
#if defined(__sparc)
extern uint8_t ldstub(uint8_t *);
#endif
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
extern uint8_t cas8(uint8_t *, uint8_t, uint8_t);
extern uint32_t cas32(uint32_t *, uint32_t, uint32_t);
extern uint64_t cas64(uint64_t *, uint64_t, uint64_t);
extern ulong_t caslong(ulong_t *, ulong_t, ulong_t);
extern void *casptr(void *, void *, void *);
extern void atomic_and_long(ulong_t *, ulong_t);
extern void atomic_or_long(ulong_t *, ulong_t);
#if defined(__sparc)
extern uint32_t swapl(uint32_t *, uint32_t);
#endif
#endif /* _KERNEL */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ATOMIC_H */
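The membar_producer()/membar_consumer() contract documented in the removed header amounts to the store/load ordering sketched below. This is an illustration, not code from the commit; note that the compat header added by this change only supplies membar_producer() (a no-op under the mutex fallback), while membar_consumer() here is the Solaris interface being removed.

#include <sys/types.h>

extern void membar_producer(void);
extern void membar_consumer(void);

static volatile uint64_t shared_data;
static volatile int data_ready;

/* Producer: publish the data, then raise the flag. */
static void
publish(uint64_t value)
{

	shared_data = value;
	membar_producer();	/* the data must be visible before the flag */
	data_ready = 1;
}

/* Consumer: wait for the flag, then read the data. */
static uint64_t
consume(void)
{

	while (data_ready == 0)
		;		/* spin until the producer raises the flag */
	membar_consumer();	/* the flag must be read before the data */
	return (shared_data);
}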

View File

@@ -0,0 +1,133 @@
/*-
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#ifdef _KERNEL
#include <sys/kernel.h>
struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
#else
#include <pthread.h>
#define mtx_lock(lock) pthread_mutex_lock(lock)
#define mtx_unlock(lock) pthread_mutex_unlock(lock)
static pthread_mutex_t atomic_mtx;
static __attribute__((constructor)) void
atomic_init(void)
{
pthread_mutex_init(&atomic_mtx, NULL);
}
#endif
#ifndef __LP64__
void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{
mtx_lock(&atomic_mtx);
*target += delta;
mtx_unlock(&atomic_mtx);
}
#endif
uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
uint64_t newval;
mtx_lock(&atomic_mtx);
newval = (*target += delta);
mtx_unlock(&atomic_mtx);
return (newval);
}
#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__)
void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
mtx_lock(&atomic_mtx);
*target |= value;
mtx_unlock(&atomic_mtx);
}
#endif
uint8_t
atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
{
uint8_t newval;
mtx_lock(&atomic_mtx);
newval = (*target |= value);
mtx_unlock(&atomic_mtx);
return (newval);
}
#ifndef __LP64__
void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
void *oldval, **trg;
mtx_lock(&atomic_mtx);
trg = __DEVOLATILE(void **, target);
oldval = *trg;
if (oldval == cmp)
*trg = newval;
mtx_unlock(&atomic_mtx);
return (oldval);
}
#endif
#ifndef __sparc64__
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
uint64_t oldval;
mtx_lock(&atomic_mtx);
oldval = *target;
if (oldval == cmp)
*target = newval;
mtx_unlock(&atomic_mtx);
return (oldval);
}
#endif
void
membar_producer(void)
{
/* nothing */
}

View File

@@ -0,0 +1,114 @@
/*-
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_ATOMIC_H_
#define _OPENSOLARIS_SYS_ATOMIC_H_
#include <sys/types.h>
#include <machine/atomic.h>
#ifndef __LP64__
extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
extern void *atomic_cas_ptr(volatile void *target, void *cmp, void *newval);
#endif
#ifndef __sparc64__
extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
uint64_t newval);
#endif
extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
extern void membar_producer(void);
#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__)
extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
#else
static __inline void
atomic_or_8(volatile uint8_t *target, uint8_t value)
{
atomic_set_8(target, value);
}
#endif
static __inline uint32_t
atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
{
return (atomic_fetchadd_32(target, delta) + delta);
}
static __inline u_int
atomic_add_int_nv(volatile u_int *target, int delta)
{
return (atomic_add_32_nv(target, delta));
}
static __inline void
atomic_dec_32(volatile uint32_t *target)
{
atomic_subtract_32(target, 1);
}
static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
return (atomic_fetchadd_32(target, -1) - 1);
}
static __inline void
atomic_inc_32(volatile uint32_t *target)
{
atomic_add_32(target, 1);
}
static __inline uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
return (atomic_add_32_nv(target, 1));
}
static __inline void
atomic_inc_64(volatile uint64_t *target)
{
atomic_add_64(target, 1);
}
static __inline uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
return (atomic_add_64_nv(target, 1));
}
#ifdef __LP64__
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
return ((void *)atomic_cas_64((uint64_t *)target, (uint64_t)cmp,
(uint64_t)newval));
}
#endif
#endif /* !_OPENSOLARIS_SYS_ATOMIC_H_ */
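/*
 * Illustration only, not part of this change: a minimal consumer of the
 * inline wrappers above, assuming this compat <sys/atomic.h> is on the
 * include path and using a hypothetical refcounted object.  The _nv
 * variant makes the decrement and the zero test a single atomic step.
 */
#include <stdlib.h>
#include <sys/atomic.h>

struct obj {
	uint32_t	o_refcnt;
	/* hypothetical payload */
};

static void
obj_release(struct obj *op)
{

	if (atomic_dec_32_nv(&op->o_refcnt) == 0)
		free(op);
}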

View File

@ -31,296 +31,7 @@
#define _ASM
#include <sys/asm_linkage.h>
#if defined(_KERNEL)
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
#endif
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
lock
incb (%rdi)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
lock
incw (%rdi)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
lock
incl (%rdi)
ret
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_ulong)
lock
incq (%rdi)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movb (%rdi), %al // %al = old value
1:
leaq 1(%rax), %rcx // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
leaq 1(%rax), %rcx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
movl (%rdi), %eax // %eax = old value
1:
leaq 1(%rax), %rcx // %ecx = new value
lock
cmpxchgl %ecx, (%rdi) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
ENTRY(atomic_inc_64_nv)
ALTENTRY(atomic_inc_ulong_nv)
movq (%rdi), %rax // %rax = old value
1:
leaq 1(%rax), %rcx // %rcx = new value
lock
cmpxchgq %rcx, (%rdi) // try to stick it in
jne 1b
movq %rcx, %rax // return new value
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_64_nv)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
lock
decb (%rdi)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
lock
decw (%rdi)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
lock
decl (%rdi)
ret
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_ulong)
lock
decq (%rdi)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movb (%rdi), %al // %al = old value
1:
leaq -1(%rax), %rcx // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
leaq -1(%rax), %rcx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
movl (%rdi), %eax // %eax = old value
1:
leaq -1(%rax), %rcx // %ecx = new value
lock
cmpxchgl %ecx, (%rdi) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
ENTRY(atomic_dec_64_nv)
ALTENTRY(atomic_dec_ulong_nv)
movq (%rdi), %rax // %rax = old value
1:
leaq -1(%rax), %rcx // %rcx = new value
lock
cmpxchgq %rcx, (%rdi) // try to stick it in
jne 1b
movq %rcx, %rax // return new value
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_64_nv)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
lock
orb %sil, (%rdi)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
lock
orw %si, (%rdi)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
lock
orl %esi, (%rdi)
ret
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_ulong)
lock
orq %rsi, (%rdi)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
lock
andb %sil, (%rdi)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
lock
andw %si, (%rdi)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
lock
andl %esi, (%rdi)
ret
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_ulong)
lock
andq %rsi, (%rdi)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_64)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
addb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movw (%rdi), %ax // %ax = old value
1:
movw %si, %cx
addw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
addl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_add_64_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
@ -330,68 +41,9 @@
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_64_nv)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
andb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%rdi) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
movw %si, %cx
andw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
andl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
ENTRY(atomic_and_64_nv)
ALTENTRY(atomic_and_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
andq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_64_nv)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movb (%rdi), %al // %al = old value
1:
movb %sil, %cl
@ -401,160 +53,16 @@
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movw (%rdi), %ax // %ax = old value
1:
movw %si, %cx
orw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%rdi) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
orl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
ENTRY(atomic_or_64_nv)
ALTENTRY(atomic_or_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
orq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_64_nv)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movzbl %sil, %eax
lock
cmpxchgb %dl, (%rdi)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movzwl %si, %eax
lock
cmpxchgw %dx, (%rdi)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
movl %esi, %eax
lock
cmpxchgl %edx, (%rdi)
ret
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_cas_64)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
movq %rsi, %rax
lock
cmpxchgq %rdx, (%rdi)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movzbl %sil, %eax
lock
xchgb %al, (%rdi)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movzwl %si, %eax
lock
xchgw %ax, (%rdi)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
movl %esi, %eax
lock
xchgl %eax, (%rdi)
ret
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
ALTENTRY(atomic_swap_ulong)
ALTENTRY(atomic_swap_ptr)
movq %rsi, %rax
lock
xchgq %rax, (%rdi)
ret
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
xorl %eax, %eax
lock
btsq %rsi, (%rdi)
jnc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
xorl %eax, %eax
lock
btrq %rsi, (%rdi)
jc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_clear_long_excl)
ENTRY(membar_enter)
ALTENTRY(membar_exit)
mfence
ret
SET_SIZE(membar_exit)
SET_SIZE(membar_enter)
ENTRY(membar_producer)
sfence
ret
SET_SIZE(membar_producer)
ENTRY(membar_consumer)
lfence
ret
SET_SIZE(membar_consumer)

View File

@ -31,327 +31,6 @@
#define _ASM
#include <sys/asm_linkage.h>
#if defined(_KERNEL)
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
#endif
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
movl 4(%esp), %eax
lock
incb (%eax)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
movl 4(%esp), %eax
lock
incw (%eax)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
ALTENTRY(atomic_inc_ulong)
movl 4(%esp), %eax
lock
incl (%eax)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
leal 1(%eax), %ecx // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
leal 1(%eax), %ecx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
ALTENTRY(atomic_inc_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
leal 1(%eax), %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
incl %ebx // %ecx:%ebx = 1
addl %eax, %ebx
adcl %edx, %ecx // add in the carry from inc
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_inc_64_nv)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
movl 4(%esp), %eax
lock
decb (%eax)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
movl 4(%esp), %eax
lock
decw (%eax)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
ALTENTRY(atomic_dec_ulong)
movl 4(%esp), %eax
lock
decl (%eax)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
leal -1(%eax), %ecx // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
leal -1(%eax), %ecx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
ALTENTRY(atomic_dec_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
leal -1(%eax), %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
not %ecx
not %ebx // %ecx:%ebx = -1
addl %eax, %ebx
adcl %edx, %ecx // add in the carry from inc
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_dec_64_nv)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
orb %cl, (%eax)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
orw %cx, (%eax)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
ALTENTRY(atomic_or_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
orl %ecx, (%eax)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
andb %cl, (%eax)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
andw %cx, (%eax)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
ALTENTRY(atomic_and_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
andl %ecx, (%eax)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
movl 8(%esp), %ecx // %ecx = delta
addb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
addw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
addl %eax, %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_64_nv)
pushl %edi
@ -376,7 +55,6 @@
SET_SIZE(atomic_add_64)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
@ -387,160 +65,9 @@
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_or_uchar_nv)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
orw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
ALTENTRY(atomic_or_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
orl %eax, %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx // %ecx:%ebx = delta
orl %eax, %ebx
orl %edx, %ecx // %ecx:%ebx = new value
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_or_64_nv)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movl 4(%esp), %edx // %edx = target address
movb (%edx), %al // %al = old value
1:
movl 8(%esp), %ecx // %ecx = delta
andb %al, %cl // %cl = new value
lock
cmpxchgb %cl, (%edx) // try to stick it in
jne 1b
movzbl %cl, %eax // return new value
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movl 4(%esp), %edx // %edx = target address
movw (%edx), %ax // %ax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
andw %ax, %cx // %cx = new value
lock
cmpxchgw %cx, (%edx) // try to stick it in
jne 1b
movzwl %cx, %eax // return new value
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
ALTENTRY(atomic_and_ulong_nv)
movl 4(%esp), %edx // %edx = target address
movl (%edx), %eax // %eax = old value
1:
movl 8(%esp), %ecx // %ecx = delta
andl %eax, %ecx // %ecx = new value
lock
cmpxchgl %ecx, (%edx) // try to stick it in
jne 1b
movl %ecx, %eax // return new value
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi // %edi = target address
movl (%edi), %eax
movl 4(%edi), %edx // %edx:%eax = old value
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx // %ecx:%ebx = delta
andl %eax, %ebx
andl %edx, %ecx // %ecx:%ebx = new value
lock
cmpxchg8b (%edi) // try to stick it in
jne 1b
movl %ebx, %eax
movl %ecx, %edx // return new value
popl %ebx
popl %edi
ret
SET_SIZE(atomic_and_64_nv)
SET_SIZE(atomic_and_64)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
movb 12(%esp), %cl
lock
cmpxchgb %cl, (%edx)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
movw 12(%esp), %cx
lock
cmpxchgw %cx, (%edx)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
ENTRY(atomic_cas_ptr)
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
@ -548,9 +75,6 @@
cmpxchgl %ecx, (%edx)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
pushl %ebx
@ -567,89 +91,8 @@
ret
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
lock
xchgb %al, (%edx)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
lock
xchgw %ax, (%edx)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
ALTENTRY(atomic_swap_ptr)
ALTENTRY(atomic_swap_ulong)
movl 4(%esp), %edx
movl 8(%esp), %eax
lock
xchgl %eax, (%edx)
ret
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
pushl %esi
pushl %ebx
movl 12(%esp), %esi
movl 16(%esp), %ebx
movl 20(%esp), %ecx
movl (%esi), %eax
movl 4(%esi), %edx // %edx:%eax = old value
1:
lock
cmpxchg8b (%esi)
jne 1b
popl %ebx
popl %esi
ret
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
movl 4(%esp), %edx // %edx = target address
movl 8(%esp), %ecx // %ecx = bit id
xorl %eax, %eax
lock
btsl %ecx, (%edx)
jnc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
movl 4(%esp), %edx // %edx = target address
movl 8(%esp), %ecx // %ecx = bit id
xorl %eax, %eax
lock
btrl %ecx, (%edx)
jc 1f
decl %eax // return -1
1:
ret
SET_SIZE(atomic_clear_long_excl)
ENTRY(membar_enter)
ALTENTRY(membar_exit)
ALTENTRY(membar_producer)
ALTENTRY(membar_consumer)
ENTRY(membar_producer)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_consumer)
SET_SIZE(membar_producer)
SET_SIZE(membar_exit)
SET_SIZE(membar_enter)

View File

@ -54,9 +54,6 @@
#include <sys/sunddi.h>
#include <sys/dnlc.h>
struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
struct mtx zfs_debug_mtx;
MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");

View File

@ -27,8 +27,6 @@
#ifndef _IA32_SYS_ASM_LINKAGE_H
#define _IA32_SYS_ASM_LINKAGE_H
#ifdef __cplusplus
extern "C" {
#endif
@ -39,157 +37,10 @@ extern "C" {
* make annoying differences in assembler syntax go away
*/
/*
* D16 and A16 are used to insert instructions prefixes; the
* macros help the assembler code be slightly more portable.
*/
#if !defined(__GNUC_AS__)
/*
* /usr/ccs/bin/as prefixes are parsed as separate instructions
*/
#define D16 data16;
#define A16 addr16;
/*
* (There are some weird constructs in constant expressions)
*/
#define _CONST(const) [const]
#define _BITNOT(const) -1!_CONST(const)
#define _MUL(a, b) _CONST(a \* b)
#else
/*
* Why not use the 'data16' and 'addr16' prefixes .. well, the
* assembler doesn't quite believe in real mode, and thus argues with
* us about what we're trying to do.
*/
#define D16 .byte 0x66;
#define A16 .byte 0x67;
#define _CONST(const) (const)
#define _BITNOT(const) ~_CONST(const)
#define _MUL(a, b) _CONST(a * b)
#endif
/*
* C pointers are different sizes between i386 and amd64.
* These constants can be used to compute offsets into pointer arrays.
*/
#if defined(__amd64)
#define CLONGSHIFT 3
#define CLONGSIZE 8
#define CLONGMASK 7
#elif defined(__i386)
#define CLONGSHIFT 2
#define CLONGSIZE 4
#define CLONGMASK 3
#endif
/*
* Since we know we're either ILP32 or LP64 ..
*/
#define CPTRSHIFT CLONGSHIFT
#define CPTRSIZE CLONGSIZE
#define CPTRMASK CLONGMASK
#if CPTRSIZE != (1 << CPTRSHIFT) || CLONGSIZE != (1 << CLONGSHIFT)
#error "inconsistent shift constants"
#endif
#if CPTRMASK != (CPTRSIZE - 1) || CLONGMASK != (CLONGSIZE - 1)
#error "inconsistent mask constants"
#endif
#if defined(__i386__) || defined(__amd64__)
#define ASM_ENTRY_ALIGN 16
/*
* SSE register alignment and save areas
*/
#define XMM_SIZE 16
#define XMM_ALIGN 16
#if defined(__amd64)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp; \
movq %rsp, sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addq $_CONST(_MUL(XMM_SIZE, nreg)), %rsp
#elif defined(__i386)
#define SAVE_XMM_PROLOG(sreg, nreg) \
subl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp; \
movl %esp, sreg; \
addl $XMM_ALIGN, sreg; \
andl $_BITNOT(XMM_ALIGN-1), sreg
#define RSTOR_XMM_EPILOG(sreg, nreg) \
addl $_CONST(_MUL(XMM_SIZE, nreg) + XMM_ALIGN), %esp;
#endif /* __i386 */
/*
* profiling causes definitions of the MCOUNT and RTMCOUNT
* particular to the type
*/
#ifdef GPROF
#define MCOUNT(x) \
pushl %ebp; \
movl %esp, %ebp; \
call _mcount; \
popl %ebp
#endif /* GPROF */
#ifdef PROF
#define MCOUNT(x) \
/* CSTYLED */ \
.lcomm .L_/**/x/**/1, 4, 4; \
pushl %ebp; \
movl %esp, %ebp; \
/* CSTYLED */ \
movl $.L_/**/x/**/1, %edx; \
call _mcount; \
popl %ebp
#endif /* PROF */
/*
* if we are not profiling, MCOUNT should be defined to nothing
*/
#if !defined(PROF) && !defined(GPROF)
#define MCOUNT(x)
#endif /* !defined(PROF) && !defined(GPROF) */
#define RTMCOUNT(x) MCOUNT(x)
/*
* Macro to define weak symbol aliases. These are similar to the ANSI-C
* #pragma weak name = _name
* except a compiler can determine type. The assembler must be told. Hence,
* the second parameter must be the type of the symbol (i.e.: function,...)
*/
#define ANSI_PRAGMA_WEAK(sym, stype) \
.weak sym; \
.type sym, @stype; \
/* CSTYLED */ \
sym = _/**/sym
/*
* Like ANSI_PRAGMA_WEAK(), but for unrelated names, as in:
* #pragma weak sym1 = sym2
*/
#define ANSI_PRAGMA_WEAK2(sym1, sym2, stype) \
.weak sym1; \
.type sym1, @stype; \
sym1 = sym2
/*
* ENTRY provides the standard procedure entry code and an easy way to
* insert the calls to mcount for profiling. ENTRY_NP is identical, but
@ -200,46 +51,8 @@ sym1 = sym2
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define RTENTRY(x) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: RTMCOUNT(x)
/*
* ENTRY2 is identical to ENTRY but provides two labels for the entry point.
*/
#define ENTRY2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
.align ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
/* CSTYLED */ \
x: ; \
y:
/*
* ALTENTRY provides for additional entry points.
*/
@ -248,52 +61,45 @@ x: ; \
.type x, @function; \
x:
/*
* DGDEF and DGDEF2 provide global data declarations.
*
* DGDEF provides a word aligned word of storage.
*
* DGDEF2 allocates "sz" bytes of storage with **NO** alignment. This
* implies this macro is best used for byte arrays.
*
* DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
*/
#define DGDEF2(name, sz) \
.data; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF3(name, sz, algn) \
.data; \
.align algn; \
.globl name; \
.type name, @object; \
.size name, sz; \
name:
#define DGDEF(name) DGDEF3(name, 4, 4)
/*
* SET_SIZE trails a function and sets the size for the ELF symbol table.
*/
#define SET_SIZE(x) \
.size x, [.-x]
#elif defined(__sparc64__)
/*
* NWORD provides native word value.
* ENTRY provides the standard procedure entry code and an easy way to
* insert the calls to mcount for profiling. ENTRY_NP is identical, but
* never calls mcount.
*/
#if defined(__amd64)
#define ENTRY(x) \
.section ".text"; \
.align 4; \
.global x; \
.type x, @function; \
x:
/*CSTYLED*/
#define NWORD quad
/*
* ALTENTRY provides for additional entry points.
*/
#define ALTENTRY(x) \
.global x; \
.type x, @function; \
x:
#elif defined(__i386)
/*
* SET_SIZE trails a function and sets the size for the ELF symbol table.
*/
#define SET_SIZE(x) \
.size x, (.-x)
#define NWORD long
#else
#endif /* __i386 */
#error Unsupported architecture.
#endif
#endif /* _ASM */

View File

@ -1,431 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_ATOMIC_H
#define _SYS_ATOMIC_H
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_KERNEL) && defined(__GNUC__) && defined(_ASM_INLINES) && \
(defined(__i386) || defined(__amd64))
#include <asm/atomic.h>
#endif
#if defined(_KERNEL) || defined(__STDC__)
/*
* Increment target.
*/
extern void atomic_inc_8(volatile uint8_t *);
extern void atomic_inc_uchar(volatile uchar_t *);
extern void atomic_inc_16(volatile uint16_t *);
extern void atomic_inc_ushort(volatile ushort_t *);
extern void atomic_inc_32(volatile uint32_t *);
extern void atomic_inc_uint(volatile uint_t *);
extern void atomic_inc_ulong(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_inc_64(volatile uint64_t *);
#endif
/*
* Decrement target
*/
extern void atomic_dec_8(volatile uint8_t *);
extern void atomic_dec_uchar(volatile uchar_t *);
extern void atomic_dec_16(volatile uint16_t *);
extern void atomic_dec_ushort(volatile ushort_t *);
extern void atomic_dec_32(volatile uint32_t *);
extern void atomic_dec_uint(volatile uint_t *);
extern void atomic_dec_ulong(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_dec_64(volatile uint64_t *);
#endif
/*
* Add delta to target
*/
#ifdef __i386__
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_add_64(volatile uint64_t *, int64_t);
#endif
#endif
/*
* logical OR bits with target
*/
extern void atomic_or_8(volatile uint8_t *, uint8_t);
extern void atomic_or_uchar(volatile uchar_t *, uchar_t);
extern void atomic_or_16(volatile uint16_t *, uint16_t);
extern void atomic_or_ushort(volatile ushort_t *, ushort_t);
extern void atomic_or_32(volatile uint32_t *, uint32_t);
extern void atomic_or_uint(volatile uint_t *, uint_t);
extern void atomic_or_ulong(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_or_64(volatile uint64_t *, uint64_t);
#endif
/*
* logical AND bits with target
*/
extern void atomic_and_8(volatile uint8_t *, uint8_t);
extern void atomic_and_uchar(volatile uchar_t *, uchar_t);
extern void atomic_and_16(volatile uint16_t *, uint16_t);
extern void atomic_and_ushort(volatile ushort_t *, ushort_t);
extern void atomic_and_32(volatile uint32_t *, uint32_t);
extern void atomic_and_uint(volatile uint_t *, uint_t);
extern void atomic_and_ulong(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern void atomic_and_64(volatile uint64_t *, uint64_t);
#endif
/*
* As above, but return the new value. Note that these _nv() variants are
* substantially more expensive on some platforms than the no-return-value
* versions above, so don't use them unless you really need to know the
* new value *atomically* (e.g. when decrementing a reference count and
* checking whether it went to zero).
*/
/*
* Increment target and return new value.
*/
extern uint8_t atomic_inc_8_nv(volatile uint8_t *);
extern uchar_t atomic_inc_uchar_nv(volatile uchar_t *);
extern uint16_t atomic_inc_16_nv(volatile uint16_t *);
extern ushort_t atomic_inc_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_inc_32_nv(volatile uint32_t *);
extern uint_t atomic_inc_uint_nv(volatile uint_t *);
extern ulong_t atomic_inc_ulong_nv(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_inc_64_nv(volatile uint64_t *);
#endif
/*
* Decrement target and return new value.
*/
extern uint8_t atomic_dec_8_nv(volatile uint8_t *);
extern uchar_t atomic_dec_uchar_nv(volatile uchar_t *);
extern uint16_t atomic_dec_16_nv(volatile uint16_t *);
extern ushort_t atomic_dec_ushort_nv(volatile ushort_t *);
extern uint32_t atomic_dec_32_nv(volatile uint32_t *);
extern uint_t atomic_dec_uint_nv(volatile uint_t *);
extern ulong_t atomic_dec_ulong_nv(volatile ulong_t *);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_dec_64_nv(volatile uint64_t *);
#endif
/*
* Add delta to target
*/
extern uint8_t atomic_add_8_nv(volatile uint8_t *, int8_t);
extern uchar_t atomic_add_char_nv(volatile uchar_t *, signed char);
extern uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
extern ushort_t atomic_add_short_nv(volatile ushort_t *, short);
extern uint32_t atomic_add_32_nv(volatile uint32_t *, int32_t);
extern uint_t atomic_add_int_nv(volatile uint_t *, int);
extern void *atomic_add_ptr_nv(volatile void *, ssize_t);
extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
#endif
/*
* logical OR bits with target and return new value.
*/
extern uint8_t atomic_or_8_nv(volatile uint8_t *, uint8_t);
extern uchar_t atomic_or_uchar_nv(volatile uchar_t *, uchar_t);
extern uint16_t atomic_or_16_nv(volatile uint16_t *, uint16_t);
extern ushort_t atomic_or_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_or_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_or_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_or_ulong_nv(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_or_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
* logical AND bits with target and return new value.
*/
extern uint8_t atomic_and_8_nv(volatile uint8_t *, uint8_t);
extern uchar_t atomic_and_uchar_nv(volatile uchar_t *, uchar_t);
extern uint16_t atomic_and_16_nv(volatile uint16_t *, uint16_t);
extern ushort_t atomic_and_ushort_nv(volatile ushort_t *, ushort_t);
extern uint32_t atomic_and_32_nv(volatile uint32_t *, uint32_t);
extern uint_t atomic_and_uint_nv(volatile uint_t *, uint_t);
extern ulong_t atomic_and_ulong_nv(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_and_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
* If *arg1 == arg2, set *arg1 = arg3; return old value
*/
extern uint8_t atomic_cas_8(volatile uint8_t *, uint8_t, uint8_t);
extern uchar_t atomic_cas_uchar(volatile uchar_t *, uchar_t, uchar_t);
extern uint16_t atomic_cas_16(volatile uint16_t *, uint16_t, uint16_t);
extern ushort_t atomic_cas_ushort(volatile ushort_t *, ushort_t, ushort_t);
extern uint32_t atomic_cas_32(volatile uint32_t *, uint32_t, uint32_t);
extern uint_t atomic_cas_uint(volatile uint_t *, uint_t, uint_t);
extern void *atomic_cas_ptr(volatile void *, void *, void *);
extern ulong_t atomic_cas_ulong(volatile ulong_t *, ulong_t, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_cas_64(volatile uint64_t *, uint64_t, uint64_t);
#endif
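/*
 * Illustration only, not part of this header: the compare-and-swap
 * primitives above are normally used in a retry loop.  A hypothetical
 * sketch that raises a shared 32-bit high-water mark:
 */
static void
update_high_water(volatile uint32_t *mark, uint32_t value)
{
	uint32_t old;

	do {
		old = *mark;
		if (value <= old)
			return;		/* nothing to raise */
	} while (atomic_cas_32(mark, old, value) != old);
}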
/*
* Swap target and return old value
*/
extern uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);
extern uchar_t atomic_swap_uchar(volatile uchar_t *, uchar_t);
extern uint16_t atomic_swap_16(volatile uint16_t *, uint16_t);
extern ushort_t atomic_swap_ushort(volatile ushort_t *, ushort_t);
extern uint32_t atomic_swap_32(volatile uint32_t *, uint32_t);
extern uint_t atomic_swap_uint(volatile uint_t *, uint_t);
extern void *atomic_swap_ptr(volatile void *, void *);
extern ulong_t atomic_swap_ulong(volatile ulong_t *, ulong_t);
#if defined(_KERNEL) || defined(_INT64_TYPE)
extern uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
#endif
/*
* Perform an exclusive atomic bit set/clear on a target.
* Returns 0 if the bit was successfully set/cleared, or -1
* if the bit was already set/cleared.
*/
extern int atomic_set_long_excl(volatile ulong_t *, uint_t);
extern int atomic_clear_long_excl(volatile ulong_t *, uint_t);
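/*
 * Illustration only, not part of this header: a hypothetical slot
 * allocator built on the exclusive bit operations above, where winning
 * the bit (return value 0) grants ownership of that slot.
 */
#define	SLOT_NBITS	(sizeof (ulong_t) * 8)	/* bits in one ulong_t */

static volatile ulong_t slot_map;	/* bit i set => slot i in use */

int
slot_alloc(void)
{
	uint_t i;

	for (i = 0; i < SLOT_NBITS; i++) {
		if (atomic_set_long_excl(&slot_map, i) == 0)
			return ((int)i);	/* won the bit exclusively */
	}
	return (-1);				/* all slots busy */
}

void
slot_free(uint_t i)
{

	(void)atomic_clear_long_excl(&slot_map, i);
}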
/*
* Generic memory barrier used during lock entry, placed after the
* memory operation that acquires the lock to guarantee that the lock
* protects its data. No stores from after the memory barrier will
* reach visibility, and no loads from after the barrier will be
* resolved, before the lock acquisition reaches global visibility.
*/
extern void membar_enter(void);
/*
* Generic memory barrier used during lock exit, placed before the
* memory operation that releases the lock to guarantee that the lock
* protects its data. All loads and stores issued before the barrier
* will be resolved before the subsequent lock update reaches visibility.
*/
extern void membar_exit(void);
/*
* Arrange that all stores issued before this point in the code reach
* global visibility before any stores that follow; useful in producer
* modules that update a data item, then set a flag that it is available.
* The memory barrier guarantees that the available flag is not visible
* earlier than the updated data, i.e. it imposes store ordering.
*/
extern void membar_producer(void);
/*
* Arrange that all loads issued before this point in the code are
* completed before any subsequent loads; useful in consumer modules
* that check to see if data is available and read the data.
* The memory barrier guarantees that the data is not sampled until
* after the available flag has been seen, i.e. it imposes load ordering.
*/
extern void membar_consumer(void);
#endif
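/*
 * Illustration only, not part of this header: the producer/consumer
 * ordering described in the two comments above, publishing a hypothetical
 * data item through a ready flag.
 */
struct item {
	int	i_payload;		/* hypothetical data item */
};

static struct item shared_item;
static volatile uint_t item_ready;

void
publish_item(const struct item *src)
{

	shared_item = *src;		/* update the data ... */
	membar_producer();		/* ... make it visible first ... */
	item_ready = 1;			/* ... then raise the flag */
}

int
try_consume(struct item *dst)
{

	if (item_ready == 0)
		return (0);
	membar_consumer();		/* flag seen; order the data loads */
	*dst = shared_item;
	return (1);
}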
#if !defined(_KERNEL) && !defined(__STDC__)
extern void atomic_inc_8();
extern void atomic_inc_uchar();
extern void atomic_inc_16();
extern void atomic_inc_ushort();
extern void atomic_inc_32();
extern void atomic_inc_uint();
extern void atomic_inc_ulong();
#if defined(_INT64_TYPE)
extern void atomic_inc_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_dec_8();
extern void atomic_dec_uchar();
extern void atomic_dec_16();
extern void atomic_dec_ushort();
extern void atomic_dec_32();
extern void atomic_dec_uint();
extern void atomic_dec_ulong();
#if defined(_INT64_TYPE)
extern void atomic_dec_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_add_8();
extern void atomic_add_char();
extern void atomic_add_16();
extern void atomic_add_short();
extern void atomic_add_32();
extern void atomic_add_int();
extern void atomic_add_ptr();
extern void atomic_add_long();
#if defined(_INT64_TYPE)
extern void atomic_add_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_or_8();
extern void atomic_or_uchar();
extern void atomic_or_16();
extern void atomic_or_ushort();
extern void atomic_or_32();
extern void atomic_or_uint();
extern void atomic_or_ulong();
#if defined(_INT64_TYPE)
extern void atomic_or_64();
#endif /* defined(_INT64_TYPE) */
extern void atomic_and_8();
extern void atomic_and_uchar();
extern void atomic_and_16();
extern void atomic_and_ushort();
extern void atomic_and_32();
extern void atomic_and_uint();
extern void atomic_and_ulong();
#if defined(_INT64_TYPE)
extern void atomic_and_64();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_inc_8_nv();
extern uchar_t atomic_inc_uchar_nv();
extern uint16_t atomic_inc_16_nv();
extern ushort_t atomic_inc_ushort_nv();
extern uint32_t atomic_inc_32_nv();
extern uint_t atomic_inc_uint_nv();
extern ulong_t atomic_inc_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_inc_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_dec_8_nv();
extern uchar_t atomic_dec_uchar_nv();
extern uint16_t atomic_dec_16_nv();
extern ushort_t atomic_dec_ushort_nv();
extern uint32_t atomic_dec_32_nv();
extern uint_t atomic_dec_uint_nv();
extern ulong_t atomic_dec_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_dec_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_add_8_nv();
extern uchar_t atomic_add_char_nv();
extern uint16_t atomic_add_16_nv();
extern ushort_t atomic_add_short_nv();
extern uint32_t atomic_add_32_nv();
extern uint_t atomic_add_int_nv();
extern void *atomic_add_ptr_nv();
extern ulong_t atomic_add_long_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_add_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_or_8_nv();
extern uchar_t atomic_or_uchar_nv();
extern uint16_t atomic_or_16_nv();
extern ushort_t atomic_or_ushort_nv();
extern uint32_t atomic_or_32_nv();
extern uint_t atomic_or_uint_nv();
extern ulong_t atomic_or_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_or_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_and_8_nv();
extern uchar_t atomic_and_uchar_nv();
extern uint16_t atomic_and_16_nv();
extern ushort_t atomic_and_ushort_nv();
extern uint32_t atomic_and_32_nv();
extern uint_t atomic_and_uint_nv();
extern ulong_t atomic_and_ulong_nv();
#if defined(_INT64_TYPE)
extern uint64_t atomic_and_64_nv();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_cas_8();
extern uchar_t atomic_cas_uchar();
extern uint16_t atomic_cas_16();
extern ushort_t atomic_cas_ushort();
extern uint32_t atomic_cas_32();
extern uint_t atomic_cas_uint();
extern void *atomic_cas_ptr();
extern ulong_t atomic_cas_ulong();
#if defined(_INT64_TYPE)
extern uint64_t atomic_cas_64();
#endif /* defined(_INT64_TYPE) */
extern uint8_t atomic_swap_8();
extern uchar_t atomic_swap_uchar();
extern uint16_t atomic_swap_16();
extern ushort_t atomic_swap_ushort();
extern uint32_t atomic_swap_32();
extern uint_t atomic_swap_uint();
extern void *atomic_swap_ptr();
extern ulong_t atomic_swap_ulong();
#if defined(_INT64_TYPE)
extern uint64_t atomic_swap_64();
#endif /* defined(_INT64_TYPE) */
extern int atomic_set_long_excl();
extern int atomic_clear_long_excl();
extern void membar_enter();
extern void membar_exit();
extern void membar_producer();
extern void membar_consumer();
#endif
#if defined(_KERNEL)
#if defined(_LP64) || defined(_ILP32)
#define atomic_add_ip atomic_add_long
#define atomic_add_ip_nv atomic_add_long_nv
#define casip atomic_cas_ulong
#endif
#if defined(__sparc)
extern uint8_t ldstub(uint8_t *);
#endif
/*
* Legacy kernel interfaces; they will go away (eventually).
*/
extern uint8_t cas8(uint8_t *, uint8_t, uint8_t);
extern uint32_t cas32(uint32_t *, uint32_t, uint32_t);
extern uint64_t cas64(uint64_t *, uint64_t, uint64_t);
extern ulong_t caslong(ulong_t *, ulong_t, ulong_t);
extern void *casptr(void *, void *, void *);
extern void atomic_and_long(ulong_t *, ulong_t);
extern void atomic_or_long(ulong_t *, ulong_t);
#if defined(__sparc)
extern uint32_t swapl(uint32_t *, uint32_t);
#endif
#endif /* _KERNEL */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ATOMIC_H */

View File

@ -21,8 +21,13 @@ SRCS+= opensolaris_string.c
SRCS+= opensolaris_vfs.c
SRCS+= opensolaris_zone.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "ia64"
.PATH: ${.CURDIR}/../../contrib/opensolaris/common/atomic/${MACHINE_ARCH}
SRCS+= atomic.S
.else
.PATH: ${.CURDIR}/../../compat/opensolaris/kern
SRCS+= opensolaris_atomic.c
.endif
.PATH: ${.CURDIR}/../../contrib/opensolaris/uts/common/fs
SRCS+= gfs.c