Use the newer "+" modifier on output constraints when a register or

memory datum is used for both input and output instead of using
matching constraints.
This commit is contained in:
John Baldwin 2002-10-25 20:22:12 +00:00
parent 3d5500fc51
commit 4c86c028ac
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=105958
2 changed files with 62 additions and 62 deletions

View File

@ -372,8 +372,8 @@ alpha_pal_wripir(u_int64_t ipir)
register u_int64_t a0 __asm__("$16") = ipir;
__asm__ __volatile__ (
"call_pal 0xd #PAL_ipir"
: "=r" (a0)
: "0" (a0)
: "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -395,8 +395,8 @@ alpha_pal_wrmces(u_int64_t mces)
register u_int64_t a0 __asm__("$16") = mces;
__asm__ __volatile__ (
"call_pal 0x11 #PAL_wrmces"
: "=r" (a0)
: "0" (a0)
: "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -406,8 +406,8 @@ alpha_pal_wrfen(u_int64_t fen)
register u_int64_t a0 __asm__("$16") = fen;
__asm__ __volatile__ (
"call_pal 0x2b #PAL_wrfen"
: "=r" (a0)
: "0" (a0)
: "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -417,8 +417,8 @@ alpha_pal_wrvptptr(u_int64_t vptptr)
register u_int64_t a0 __asm__("$16") = vptptr;
__asm__ __volatile__ (
"call_pal 0x2d #PAL_wrvptptr"
: "=r" (a0)
: "0" (a0)
: "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -429,8 +429,8 @@ alpha_pal_swpctx(u_int64_t pcb)
register u_int64_t v0 __asm__("$0");
__asm__ __volatile__ (
"call_pal 0x30 #PAL_OSF1_swpctx"
: "=r" (v0), "=r" (a0)
: "1" (a0)
: "=r" (v0), "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25", "memory");
return v0;
}
@ -441,8 +441,8 @@ alpha_pal_wrval(u_int64_t sysvalue)
register u_int64_t a0 __asm__("$16") = sysvalue;
__asm__ __volatile__ (
"call_pal 0x31 #PAL_wrval"
: "=r" (a0)
: "0" (a0)
: "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -465,8 +465,8 @@ alpha_pal_tbi(u_int64_t op, u_int64_t va)
register u_int64_t a1 __asm__("$17") = va;
__asm__ __volatile__ (
"call_pal 0x33 #PAL_OSF1_tbi"
: "=r" (a0), "=r" (a1)
: "0" (a0), "1" (a1)
: "+r" (a0), "+r" (a1)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -477,8 +477,8 @@ alpha_pal_wrent(void *ent, u_int64_t which)
register u_int64_t a1 __asm__("$17") = which;
__asm__ __volatile__ (
"call_pal 0x34 #PAL_OSF1_wrent"
: "=r" (a0), "=r" (a1)
: "0" (a0), "1" (a1)
: "+r" (a0), "+r" (a1)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -489,8 +489,8 @@ alpha_pal_swpipl(u_int64_t newipl)
register u_int64_t v0 __asm__("$0");
__asm__ __volatile__ (
"call_pal 0x35 #PAL_OSF1_swpipl"
: "=r" (v0), "=r" (a0)
: "1" (a0)
: "=r" (v0), "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
return v0;
}
@ -513,8 +513,8 @@ alpha_pal_wrusp(u_int64_t usp)
register u_int64_t a0 __asm__("$16") = usp;
__asm__ __volatile__ (
"call_pal 0x38 #PAL_wrusp"
: "=r" (a0)
: "0" (a0)
: "+r" (a0)
:
: "$1", "$22", "$23", "$24", "$25");
}
@ -526,8 +526,8 @@ alpha_pal_wrperfmon(u_int64_t arg0, u_int64_t arg1)
register u_int64_t a1 __asm__("$17") = arg1;
__asm__ __volatile__ (
"call_pal 0x39 #PAL_OSF1_wrperfmon"
: "=r" (a0), "=r" (a1), "=r" (v0)
: "0" (a0), "1" (a1)
: "+r" (a0), "+r" (a1), "=r" (v0)
:
: "$1", "$22", "$23", "$24", "$25");
return v0;
}

View File

@ -72,16 +72,16 @@ static __inline void atomic_clear_32(volatile u_int32_t *p, u_int32_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldl_l %0, %2\n\t" /* load old value */
"bic %0, %3, %0\n\t" /* calculate new value */
"1:\tldl_l %0, %1\n\t" /* load old value */
"bic %0, %2, %0\n\t" /* calculate new value */
"stl_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -92,16 +92,16 @@ static __inline void atomic_add_32(volatile u_int32_t *p, u_int32_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldl_l %0, %2\n\t" /* load old value */
"addl %0, %3, %0\n\t" /* calculate new value */
"1:\tldl_l %0, %1\n\t" /* load old value */
"addl %0, %2, %0\n\t" /* calculate new value */
"stl_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -112,16 +112,16 @@ static __inline void atomic_subtract_32(volatile u_int32_t *p, u_int32_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldl_l %0, %2\n\t" /* load old value */
"subl %0, %3, %0\n\t" /* calculate new value */
"1:\tldl_l %0, %1\n\t" /* load old value */
"subl %0, %2, %0\n\t" /* calculate new value */
"stl_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -133,15 +133,15 @@ static __inline u_int32_t atomic_readandclear_32(volatile u_int32_t *addr)
#ifdef __GNUC__
__asm __volatile (
"wmb\n" /* ensure pending writes have drained */
"1:\tldl_l %0,%3\n\t" /* load current value, asserting lock */
"1:\tldl_l %0,%2\n\t" /* load current value, asserting lock */
"ldiq %1,0\n\t" /* value to store */
"stl_c %1,%2\n\t" /* attempt to store */
"beq %1,2f\n\t" /* if the store failed, spin */
"br 3f\n" /* it worked, exit */
"2:\tbr 1b\n" /* *addr not updated, loop */
"3:\tmb\n" /* it worked */
: "=&r"(result), "=&r"(temp), "=m" (*addr)
: "m"(*addr)
: "=&r"(result), "=&r"(temp), "+m" (*addr)
:
: "memory");
#endif
@ -154,16 +154,16 @@ static __inline void atomic_set_64(volatile u_int64_t *p, u_int64_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldq_l %0, %2\n\t" /* load old value */
"bis %0, %3, %0\n\t" /* calculate new value */
"1:\tldq_l %0, %1\n\t" /* load old value */
"bis %0, %2, %0\n\t" /* calculate new value */
"stq_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -174,16 +174,16 @@ static __inline void atomic_clear_64(volatile u_int64_t *p, u_int64_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldq_l %0, %2\n\t" /* load old value */
"bic %0, %3, %0\n\t" /* calculate new value */
"1:\tldq_l %0, %1\n\t" /* load old value */
"bic %0, %2, %0\n\t" /* calculate new value */
"stq_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -194,16 +194,16 @@ static __inline void atomic_add_64(volatile u_int64_t *p, u_int64_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldq_l %0, %2\n\t" /* load old value */
"addq %0, %3, %0\n\t" /* calculate new value */
"1:\tldq_l %0, %1\n\t" /* load old value */
"addq %0, %2, %0\n\t" /* calculate new value */
"stq_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -214,16 +214,16 @@ static __inline void atomic_subtract_64(volatile u_int64_t *p, u_int64_t v)
#ifdef __GNUC__
__asm __volatile (
"1:\tldq_l %0, %2\n\t" /* load old value */
"subq %0, %3, %0\n\t" /* calculate new value */
"1:\tldq_l %0, %1\n\t" /* load old value */
"subq %0, %2, %0\n\t" /* calculate new value */
"stq_c %0, %1\n\t" /* attempt to store */
"beq %0, 2f\n\t" /* spin if failed */
"mb\n\t" /* drain to memory */
".section .text3,\"ax\"\n" /* improve branch prediction */
"2:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (temp), "=m" (*p)
: "m" (*p), "r" (v)
: "=&r" (temp), "+m" (*p)
: "r" (v)
: "memory");
#endif
}
@ -235,15 +235,15 @@ static __inline u_int64_t atomic_readandclear_64(volatile u_int64_t *addr)
#ifdef __GNUC__
__asm __volatile (
"wmb\n" /* ensure pending writes have drained */
"1:\tldq_l %0,%3\n\t" /* load current value, asserting lock */
"1:\tldq_l %0,%2\n\t" /* load current value, asserting lock */
"ldiq %1,0\n\t" /* value to store */
"stq_c %1,%2\n\t" /* attempt to store */
"beq %1,2f\n\t" /* if the store failed, spin */
"br 3f\n" /* it worked, exit */
"2:\tbr 1b\n" /* *addr not updated, loop */
"3:\tmb\n" /* it worked */
: "=&r"(result), "=&r"(temp), "=m" (*addr)
: "m"(*addr)
: "=&r"(result), "=&r"(temp), "+m" (*addr)
:
: "memory");
#endif
@ -376,7 +376,7 @@ atomic_cmpset_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
#ifdef __GNUC__
__asm __volatile (
"1:\tldl_l %0, %4\n\t" /* load old value */
"1:\tldl_l %0, %1\n\t" /* load old value */
"cmpeq %0, %2, %0\n\t" /* compare */
"beq %0, 2f\n\t" /* exit if not equal */
"mov %3, %0\n\t" /* value to store */
@ -387,8 +387,8 @@ atomic_cmpset_32(volatile u_int32_t* p, u_int32_t cmpval, u_int32_t newval)
".section .text3,\"ax\"\n" /* improve branch prediction */
"3:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (ret), "=m" (*p)
: "r" ((long)(int)cmpval), "r" (newval), "m" (*p)
: "=&r" (ret), "+m" (*p)
: "r" ((long)(int)cmpval), "r" (newval)
: "memory");
#endif
@ -407,7 +407,7 @@ atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
#ifdef __GNUC__
__asm __volatile (
"1:\tldq_l %0, %4\n\t" /* load old value */
"1:\tldq_l %0, %1\n\t" /* load old value */
"cmpeq %0, %2, %0\n\t" /* compare */
"beq %0, 2f\n\t" /* exit if not equal */
"mov %3, %0\n\t" /* value to store */
@ -418,8 +418,8 @@ atomic_cmpset_64(volatile u_int64_t* p, u_int64_t cmpval, u_int64_t newval)
".section .text3,\"ax\"\n" /* improve branch prediction */
"3:\tbr 1b\n" /* try again */
".previous\n"
: "=&r" (ret), "=m" (*p)
: "r" (cmpval), "r" (newval), "m" (*p)
: "=&r" (ret), "+m" (*p)
: "r" (cmpval), "r" (newval)
: "memory");
#endif