MFp4: various style fixes, including

o  s/u_int/uint/g
o  s/#define<sp>/#define<tab>/g
o  indent macro definitions
o  Improve vertical spacing
o  Globally align line continuation character
Author: Marcel Moolenaar
Date:   2004-09-22 19:47:42 +00:00
Parent: f44831e61c
Commit: 5c48823c36


@@ -48,101 +48,88 @@
/*
* Some common forms of cmpxch.
*/
-static __inline u_int32_t
-ia64_cmpxchg_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+static __inline uint32_t
+ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
-u_int32_t ret;
+uint32_t ret;
IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
return (ret);
}
-static __inline u_int32_t
-ia64_cmpxchg_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+static __inline uint32_t
+ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
-u_int32_t ret;
+uint32_t ret;
IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
return (ret);
}
-static __inline u_int64_t
-ia64_cmpxchg_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+static __inline uint64_t
+ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
-u_int64_t ret;
+uint64_t ret;
IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
return (ret);
}
-static __inline u_int64_t
-ia64_cmpxchg_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+static __inline uint64_t
+ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
-u_int64_t ret;
+uint64_t ret;
IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
return (ret);
}
#define ATOMIC_STORE_LOAD(type, width, size) \
-static __inline u_int##width##_t \
-ia64_ld_acq_##width(volatile u_int##width##_t* p) \
-{ \
-u_int##width##_t v; \
-\
-__asm __volatile ("ld" size ".acq %0=%1" \
-: "=r" (v) \
-: "m" (*p) \
-: "memory"); \
+static __inline uint##width##_t \
+ia64_ld_acq_##width(volatile uint##width##_t* p) \
+{ \
+uint##width##_t v; \
+__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v) \
+: "m" (*p) : "memory"); \
return (v); \
-} \
+} \
\
-static __inline u_int##width##_t \
-atomic_load_acq_##width(volatile u_int##width##_t* p) \
-{ \
-u_int##width##_t v; \
-\
-__asm __volatile ("ld" size ".acq %0=%1" \
-: "=r" (v) \
-: "m" (*p) \
-: "memory"); \
+static __inline uint##width##_t \
+atomic_load_acq_##width(volatile uint##width##_t* p) \
+{ \
+uint##width##_t v; \
+__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v) \
+: "m" (*p) : "memory"); \
return (v); \
-} \
+} \
\
-static __inline u_int##width##_t \
-atomic_load_acq_##type(volatile u_int##width##_t* p) \
-{ \
-u_int##width##_t v; \
-\
-__asm __volatile ("ld" size ".acq %0=%1" \
-: "=r" (v) \
-: "m" (*p) \
-: "memory"); \
+static __inline uint##width##_t \
+atomic_load_acq_##type(volatile uint##width##_t* p) \
+{ \
+uint##width##_t v; \
+__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v) \
+: "m" (*p) : "memory"); \
return (v); \
-} \
+} \
\
-static __inline void \
-ia64_st_rel_##width(volatile u_int##width##_t* p, u_int##width##_t v)\
-{ \
-__asm __volatile ("st" size ".rel %0=%1" \
-: "=m" (*p) \
-: "r" (v) \
-: "memory"); \
-} \
+static __inline void \
+ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v) \
+{ \
+__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p) \
+: "r" (v) : "memory"); \
+} \
\
-static __inline void \
-atomic_store_rel_##width(volatile u_int##width##_t* p, u_int##width##_t v)\
-{ \
-__asm __volatile ("st" size ".rel %0=%1" \
-: "=m" (*p) \
-: "r" (v) \
-: "memory"); \
-} \
+static __inline void \
+atomic_store_rel_##width(volatile uint##width##_t* p, \
+uint##width##_t v) \
+{ \
+__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p) \
+: "r" (v) : "memory"); \
+} \
\
-static __inline void \
-atomic_store_rel_##type(volatile u_int##width##_t* p, u_int##width##_t v)\
-{ \
-__asm __volatile ("st" size ".rel %0=%1" \
-: "=m" (*p) \
-: "r" (v) \
-: "memory"); \
-}
+static __inline void \
+atomic_store_rel_##type(volatile uint##width##_t* p, \
+uint##width##_t v) \
+{ \
+__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p) \
+: "r" (v) : "memory"); \
+}
ATOMIC_STORE_LOAD(char, 8, "1")
ATOMIC_STORE_LOAD(short, 16, "2")
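For reference, each ATOMIC_STORE_LOAD() instantiation above generates six inline functions. Expanding the first one, ATOMIC_STORE_LOAD(char, 8, "1"), the two consumer-facing accessors come out roughly as follows (illustrative sketch only, whitespace approximate; not part of this commit):

static __inline uint8_t
atomic_load_acq_8(volatile uint8_t* p)
{
	uint8_t v;

	__asm __volatile ("ld1.acq %0=%1" : "=r" (v) : "m" (*p) : "memory");
	return (v);
}

static __inline void
atomic_store_rel_8(volatile uint8_t* p, uint8_t v)
{
	__asm __volatile ("st1.rel %0=%1" : "=m" (*p) : "r" (v) : "memory");
}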
@@ -152,49 +139,47 @@ ATOMIC_STORE_LOAD(long, 64, "8")
#undef ATOMIC_STORE_LOAD
#define IA64_ATOMIC(sz, type, name, width, op) \
\
-static __inline void \
-atomic_##name##_acq_##width(volatile type *p, type v) \
-{ \
+static __inline void \
+atomic_##name##_acq_##width(volatile type *p, type v) \
+{ \
type old, ret; \
do { \
old = *p; \
IA64_CMPXCHG(sz, acq, p, old, old op v, ret); \
} while (ret != old); \
-} \
+} \
\
-static __inline void \
-atomic_##name##_rel_##width(volatile type *p, type v) \
-{ \
+static __inline void \
+atomic_##name##_rel_##width(volatile type *p, type v) \
+{ \
type old, ret; \
do { \
old = *p; \
IA64_CMPXCHG(sz, rel, p, old, old op v, ret); \
} while (ret != old); \
-}
+}
-IA64_ATOMIC(1, u_int8_t, set, 8, |)
-IA64_ATOMIC(2, u_int16_t, set, 16, |)
-IA64_ATOMIC(4, u_int32_t, set, 32, |)
-IA64_ATOMIC(8, u_int64_t, set, 64, |)
+IA64_ATOMIC(1, uint8_t, set, 8, |)
+IA64_ATOMIC(2, uint16_t, set, 16, |)
+IA64_ATOMIC(4, uint32_t, set, 32, |)
+IA64_ATOMIC(8, uint64_t, set, 64, |)
-IA64_ATOMIC(1, u_int8_t, clear, 8, &~)
-IA64_ATOMIC(2, u_int16_t, clear, 16, &~)
-IA64_ATOMIC(4, u_int32_t, clear, 32, &~)
-IA64_ATOMIC(8, u_int64_t, clear, 64, &~)
+IA64_ATOMIC(1, uint8_t, clear, 8, &~)
+IA64_ATOMIC(2, uint16_t, clear, 16, &~)
+IA64_ATOMIC(4, uint32_t, clear, 32, &~)
+IA64_ATOMIC(8, uint64_t, clear, 64, &~)
-IA64_ATOMIC(1, u_int8_t, add, 8, +)
-IA64_ATOMIC(2, u_int16_t, add, 16, +)
-IA64_ATOMIC(4, u_int32_t, add, 32, +)
-IA64_ATOMIC(8, u_int64_t, add, 64, +)
+IA64_ATOMIC(1, uint8_t, add, 8, +)
+IA64_ATOMIC(2, uint16_t, add, 16, +)
+IA64_ATOMIC(4, uint32_t, add, 32, +)
+IA64_ATOMIC(8, uint64_t, add, 64, +)
-IA64_ATOMIC(1, u_int8_t, subtract, 8, -)
-IA64_ATOMIC(2, u_int16_t, subtract, 16, -)
-IA64_ATOMIC(4, u_int32_t, subtract, 32, -)
-IA64_ATOMIC(8, u_int64_t, subtract, 64, -)
+IA64_ATOMIC(1, uint8_t, subtract, 8, -)
+IA64_ATOMIC(2, uint16_t, subtract, 16, -)
+IA64_ATOMIC(4, uint32_t, subtract, 32, -)
+IA64_ATOMIC(8, uint64_t, subtract, 64, -)
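Each IA64_ATOMIC() instantiation above expands to an acquire and a release variant built around a compare-and-exchange retry loop. For instance, IA64_ATOMIC(4, uint32_t, set, 32, |) produces roughly the following acquire variant (illustrative sketch, whitespace approximate; not part of this commit):

static __inline void
atomic_set_acq_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t old, ret;

	do {
		old = *p;
		IA64_CMPXCHG(4, acq, p, old, old | v, ret);
	} while (ret != old);
}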
#undef IA64_ATOMIC
#undef IA64_CMPXCHG
#define atomic_set_8 atomic_set_acq_8
#define atomic_clear_8 atomic_clear_acq_8
@@ -268,21 +253,23 @@ IA64_ATOMIC(8, u_int64_t, subtract, 64, -)
#define atomic_add_rel_long atomic_add_rel_64
#define atomic_subtract_rel_long atomic_subtract_rel_64
#undef IA64_CMPXCHG
/*
* Atomically compare the value stored at *p with cmpval and if the
* two values are equal, update the value of *p with newval. Returns
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_acq_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
-return ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval;
+return (ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval);
}
static __inline int
-atomic_cmpset_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
+atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
-return ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval;
+return (ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval);
}
}
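A caller typically wraps these primitives in its own read-modify-write retry loop. As an illustration only (this helper is hypothetical and not part of the header), an atomic 32-bit fetch-and-add could be written on top of atomic_cmpset_acq_32():

static __inline uint32_t
example_fetchadd_32(volatile uint32_t* p, uint32_t delta)	/* hypothetical */
{
	uint32_t old;

	do {
		old = *p;
	} while (!atomic_cmpset_acq_32(p, old, old + delta));
	return (old);	/* value observed before the add */
}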
/*
@@ -291,15 +278,15 @@ atomic_cmpset_rel_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
* zero if the compare failed, nonzero otherwise.
*/
static __inline int
-atomic_cmpset_acq_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
-return ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval;
+return (ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval);
}
static __inline int
-atomic_cmpset_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
+atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
-return ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval;
+return (ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval);
}
}
#define atomic_cmpset_32 atomic_cmpset_acq_32
@@ -314,15 +301,19 @@ atomic_cmpset_rel_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
static __inline int
atomic_cmpset_acq_ptr(volatile void *dst, void *exp, void *src)
{
-return atomic_cmpset_acq_long((volatile u_long *)dst,
-(u_long)exp, (u_long)src);
+int ret;
+ret = atomic_cmpset_acq_long((volatile u_long *)dst, (u_long)exp,
+(u_long)src);
+return (ret);
}
static __inline int
atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
{
-return atomic_cmpset_rel_long((volatile u_long *)dst,
-(u_long)exp, (u_long)src);
+int ret;
+ret = atomic_cmpset_rel_long((volatile u_long *)dst, (u_long)exp,
+(u_long)src);
+return (ret);
}
#define atomic_cmpset_ptr atomic_cmpset_acq_ptr
@@ -330,7 +321,7 @@ atomic_cmpset_rel_ptr(volatile void *dst, void *exp, void *src)
static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
-return (void *)atomic_load_acq_long((volatile u_long *)p);
+return ((void *)atomic_load_acq_long((volatile u_long *)p));
}
static __inline void
@@ -340,23 +331,23 @@ atomic_store_rel_ptr(volatile void *p, void *v)
}
#define ATOMIC_PTR(NAME) \
-static __inline void \
-atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
-{ \
+static __inline void \
+atomic_##NAME##_ptr(volatile void *p, uintptr_t v) \
+{ \
atomic_##NAME##_long((volatile u_long *)p, v); \
-} \
+} \
\
-static __inline void \
-atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
-{ \
-atomic_##NAME##_acq_long((volatile u_long *)p, v);\
-} \
+static __inline void \
+atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v) \
+{ \
+atomic_##NAME##_acq_long((volatile u_long *)p, v); \
+} \
\
-static __inline void \
-atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
-{ \
-atomic_##NAME##_rel_long((volatile u_long *)p, v);\
-}
+static __inline void \
+atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v) \
+{ \
+atomic_##NAME##_rel_long((volatile u_long *)p, v); \
+}
ATOMIC_PTR(set)
ATOMIC_PTR(clear)
@@ -365,24 +356,24 @@ ATOMIC_PTR(subtract)
#undef ATOMIC_PTR
-static __inline u_int32_t
-atomic_readandclear_32(volatile u_int32_t* p)
+static __inline uint32_t
+atomic_readandclear_32(volatile uint32_t* p)
{
-u_int32_t val;
+uint32_t val;
do {
val = *p;
} while (!atomic_cmpset_32(p, val, 0));
-return val;
+return (val);
}
-static __inline u_int64_t
-atomic_readandclear_64(volatile u_int64_t* p)
+static __inline uint64_t
+atomic_readandclear_64(volatile uint64_t* p)
{
-u_int64_t val;
+uint64_t val;
do {
val = *p;
} while (!atomic_cmpset_64(p, val, 0));
-return val;
+return (val);
}
#define atomic_readandclear_int atomic_readandclear_32
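As a usage sketch (hypothetical names, not part of the header), atomic_readandclear_32() lets a consumer drain a word of pending flags in a single atomic step:

volatile uint32_t pending_flags;	/* hypothetical flag word */

static void
example_drain(void)
{
	uint32_t flags;

	/* Fetch whatever bits were set and zero the word atomically. */
	flags = atomic_readandclear_32(&pending_flags);
	if (flags != 0) {
		/* process the bits observed at the time of the swap */
	}
}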