Use newer constraints for inline assembly for an operand that is both an

input and an output by using the '+' modifier rather than listing the
operand in both the input and output sections.

Reviewed by:	bde
This commit is contained in:
John Baldwin 2001-11-12 16:57:33 +00:00
parent a95a0d36d4
commit 20a2016a81
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=86301
2 changed files with 36 additions and 36 deletions

View File

@ -99,8 +99,8 @@ static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
__asm __volatile(MPLOCKED OP \
: "=m" (*p) \
: "0" (*p), "ir" (V)); \
: "+m" (*p) \
: "ir" (V)); \
}
/*
@ -211,25 +211,25 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
#endif /* KLD_MODULE */
#if !defined(LOCORE)
ATOMIC_ASM(set, char, "orb %b2,%0", v)
ATOMIC_ASM(clear, char, "andb %b2,%0", ~v)
ATOMIC_ASM(add, char, "addb %b2,%0", v)
ATOMIC_ASM(subtract, char, "subb %b2,%0", v)
ATOMIC_ASM(set, char, "orb %b1,%0", v)
ATOMIC_ASM(clear, char, "andb %b1,%0", ~v)
ATOMIC_ASM(add, char, "addb %b1,%0", v)
ATOMIC_ASM(subtract, char, "subb %b1,%0", v)
ATOMIC_ASM(set, short, "orw %w2,%0", v)
ATOMIC_ASM(clear, short, "andw %w2,%0", ~v)
ATOMIC_ASM(add, short, "addw %w2,%0", v)
ATOMIC_ASM(subtract, short, "subw %w2,%0", v)
ATOMIC_ASM(set, short, "orw %w1,%0", v)
ATOMIC_ASM(clear, short, "andw %w1,%0", ~v)
ATOMIC_ASM(add, short, "addw %w1,%0", v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", v)
ATOMIC_ASM(set, int, "orl %2,%0", v)
ATOMIC_ASM(clear, int, "andl %2,%0", ~v)
ATOMIC_ASM(add, int, "addl %2,%0", v)
ATOMIC_ASM(subtract, int, "subl %2,%0", v)
ATOMIC_ASM(set, int, "orl %1,%0", v)
ATOMIC_ASM(clear, int, "andl %1,%0", ~v)
ATOMIC_ASM(add, int, "addl %1,%0", v)
ATOMIC_ASM(subtract, int, "subl %1,%0", v)
ATOMIC_ASM(set, long, "orl %2,%0", v)
ATOMIC_ASM(clear, long, "andl %2,%0", ~v)
ATOMIC_ASM(add, long, "addl %2,%0", v)
ATOMIC_ASM(subtract, long, "subl %2,%0", v)
ATOMIC_ASM(set, long, "orl %1,%0", v)
ATOMIC_ASM(clear, long, "andl %1,%0", ~v)
ATOMIC_ASM(add, long, "addl %1,%0", v)
ATOMIC_ASM(subtract, long, "subl %1,%0", v)
ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0")
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0")

View File

@ -99,8 +99,8 @@ static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
__asm __volatile(MPLOCKED OP \
: "=m" (*p) \
: "0" (*p), "ir" (V)); \
: "+m" (*p) \
: "ir" (V)); \
}
/*
@ -211,25 +211,25 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
#endif /* KLD_MODULE */
#if !defined(LOCORE)
ATOMIC_ASM(set, char, "orb %b2,%0", v)
ATOMIC_ASM(clear, char, "andb %b2,%0", ~v)
ATOMIC_ASM(add, char, "addb %b2,%0", v)
ATOMIC_ASM(subtract, char, "subb %b2,%0", v)
ATOMIC_ASM(set, char, "orb %b1,%0", v)
ATOMIC_ASM(clear, char, "andb %b1,%0", ~v)
ATOMIC_ASM(add, char, "addb %b1,%0", v)
ATOMIC_ASM(subtract, char, "subb %b1,%0", v)
ATOMIC_ASM(set, short, "orw %w2,%0", v)
ATOMIC_ASM(clear, short, "andw %w2,%0", ~v)
ATOMIC_ASM(add, short, "addw %w2,%0", v)
ATOMIC_ASM(subtract, short, "subw %w2,%0", v)
ATOMIC_ASM(set, short, "orw %w1,%0", v)
ATOMIC_ASM(clear, short, "andw %w1,%0", ~v)
ATOMIC_ASM(add, short, "addw %w1,%0", v)
ATOMIC_ASM(subtract, short, "subw %w1,%0", v)
ATOMIC_ASM(set, int, "orl %2,%0", v)
ATOMIC_ASM(clear, int, "andl %2,%0", ~v)
ATOMIC_ASM(add, int, "addl %2,%0", v)
ATOMIC_ASM(subtract, int, "subl %2,%0", v)
ATOMIC_ASM(set, int, "orl %1,%0", v)
ATOMIC_ASM(clear, int, "andl %1,%0", ~v)
ATOMIC_ASM(add, int, "addl %1,%0", v)
ATOMIC_ASM(subtract, int, "subl %1,%0", v)
ATOMIC_ASM(set, long, "orl %2,%0", v)
ATOMIC_ASM(clear, long, "andl %2,%0", ~v)
ATOMIC_ASM(add, long, "addl %2,%0", v)
ATOMIC_ASM(subtract, long, "subl %2,%0", v)
ATOMIC_ASM(set, long, "orl %1,%0", v)
ATOMIC_ASM(clear, long, "andl %1,%0", ~v)
ATOMIC_ASM(add, long, "addl %1,%0", v)
ATOMIC_ASM(subtract, long, "subl %1,%0", v)
ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0")
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0")