amd64 pmap.c: minor codegen optimization in flag access

Following previous revision, apply the same minor optimization to
hand-rolled atomic_fcmpset_128 in pmap.c.

Reviewed by:	kib, markj
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D23870
This commit is contained in:
Ryan Libby 2020-02-28 18:32:40 +00:00
parent 6d1a70dd0a
commit cd1da6ff8b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=358440

View File

@@ -714,8 +714,8 @@ pmap_di_load_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *out)
 	old_low = new_low = 0;
 	old_high = new_high = (uintptr_t)0;
-	__asm volatile("lock;cmpxchg16b\t%1;sete\t%0"
-	    : "=r" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
+	__asm volatile("lock;cmpxchg16b\t%1"
+	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
 	    : "b"(new_low), "c" (new_high)
 	    : "memory", "cc");
 	if (res == 0) {
@@ -742,8 +742,8 @@ pmap_di_store_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *old_val,
 	old_low = old_val->gen;
 	old_high = (uintptr_t)old_val->next;
-	__asm volatile("lock;cmpxchg16b\t%1;sete\t%0"
-	    : "=r" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
+	__asm volatile("lock;cmpxchg16b\t%1"
+	    : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
 	    : "b"(new_low), "c" (new_high)
 	    : "memory", "cc");
 	return (res);