Use atomic_fcmpset_XXX() instead of atomic_cmpset_XXX() when possible
in the LinuxKPI.

Suggested by:	mjg @
MFC after:	1 week
Sponsored by:	Mellanox Technologies
Hans Petter Selasky 2018-08-09 09:39:32 +00:00
parent 2a7ea45864
commit 6402bc3d1e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=337527
4 changed files with 23 additions and 33 deletions
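
For context: atomic_cmpset_XXX() only reports success or failure, so a
retry loop has to re-read the target itself before trying again, while
atomic_fcmpset_XXX() writes the value it actually found back through its
"expect" pointer on failure. That is what lets every loop below drop its
explicit re-read (the READ_ONCE() / atomic_*_read() step). A minimal
sketch of the two idioms against FreeBSD's <machine/atomic.h>; the
fetch-and-increment example is illustrative only and not part of this
commit:

	#include <sys/types.h>
	#include <machine/atomic.h>

	/*
	 * Old idiom: atomic_cmpset_int() only returns success or failure,
	 * so the loop must re-read the location after every failure.
	 */
	static u_int
	fetch_and_inc_cmpset(volatile u_int *p)
	{
		u_int old;

		do {
			old = *p;
		} while (atomic_cmpset_int(p, old, old + 1) == 0);
		return (old);
	}

	/*
	 * New idiom: on failure, atomic_fcmpset_int() stores the value it
	 * observed into 'old', so the separate re-read disappears and a
	 * contended retry saves one memory access.
	 */
	static u_int
	fetch_and_inc_fcmpset(volatile u_int *p)
	{
		u_int old = *p;

		while (atomic_fcmpset_int(p, &old, old + 1) == 0)
			;
		return (old);
	}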

sys/compat/linuxkpi/common/include/linux/atomic-long.h

@@ -87,9 +87,8 @@ atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
 	long ret = old;
 
 	for (;;) {
-		if (atomic_cmpset_long(&v->counter, old, new))
+		if (atomic_fcmpset_long(&v->counter, &ret, new))
 			break;
-		ret = READ_ONCE(v->counter);
 		if (ret != old)
 			break;
 	}
@@ -99,13 +98,12 @@ atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
 static inline int
 atomic_long_add_unless(atomic_long_t *v, long a, long u)
 {
-	long c;
+	long c = atomic_long_read(v);
 
 	for (;;) {
-		c = atomic_long_read(v);
 		if (unlikely(c == u))
 			break;
-		if (likely(atomic_cmpset_long(&v->counter, c, c + a)))
+		if (likely(atomic_fcmpset_long(&v->counter, &c, c + a)))
 			break;
 	}
 	return (c != u);

sys/compat/linuxkpi/common/include/linux/atomic.h

@@ -108,13 +108,12 @@ atomic_dec(atomic_t *v)
 static inline int
 atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c;
+	int c = atomic_read(v);
 
 	for (;;) {
-		c = atomic_read(v);
 		if (unlikely(c == u))
 			break;
-		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
+		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
 			break;
 	}
 	return (c != u);
@@ -132,12 +131,10 @@ atomic_xchg(atomic_t *v, int i)
 #if !defined(__mips__)
 	return (atomic_swap_int(&v->counter, i));
 #else
-	int ret;
-	for (;;) {
-		ret = READ_ONCE(v->counter);
-		if (atomic_cmpset_int(&v->counter, ret, i))
-			break;
-	}
+	int ret = atomic_read(v);
+
+	while (!atomic_fcmpset_int(&v->counter, &ret, i))
+		;
 	return (ret);
 #endif
 }
@@ -148,9 +145,8 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
 	int ret = old;
 
 	for (;;) {
-		if (atomic_cmpset_int(&v->counter, old, new))
+		if (atomic_fcmpset_int(&v->counter, &ret, new))
 			break;
-		ret = READ_ONCE(v->counter);
 		if (ret != old)
 			break;
 	}

sys/compat/linuxkpi/common/include/linux/atomic64.h

@@ -92,13 +92,12 @@ atomic64_dec(atomic64_t *v)
 static inline int64_t
 atomic64_add_unless(atomic64_t *v, int64_t a, int64_t u)
 {
-	int64_t c;
+	int64_t c = atomic64_read(v);
 
 	for (;;) {
-		c = atomic64_read(v);
 		if (unlikely(c == u))
 			break;
-		if (likely(atomic_cmpset_64(&v->counter, c, c + a)))
+		if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
 			break;
 	}
 	return (c != u);
@@ -111,12 +110,10 @@ atomic64_xchg(atomic64_t *v, int64_t i)
     (defined(__powerpc__) && !defined(__powerpc64__)))
 	return (atomic_swap_64(&v->counter, i));
 #else
-	int64_t ret;
-	for (;;) {
-		ret = READ_ONCE(v->counter);
-		if (atomic_cmpset_64(&v->counter, ret, i))
-			break;
-	}
+	int64_t ret = atomic64_read(v);
+
+	while (!atomic_fcmpset_64(&v->counter, &ret, i))
+		;
 	return (ret);
 #endif
 }
@@ -127,9 +124,8 @@ atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
 	int64_t ret = old;
 
 	for (;;) {
-		if (atomic_cmpset_64(&v->counter, old, new))
+		if (atomic_fcmpset_64(&v->counter, &ret, new))
 			break;
-		ret = READ_ONCE(v->counter);
 		if (ret != old)
 			break;
 	}

sys/compat/linuxkpi/common/include/linux/bitops.h

@@ -282,10 +282,10 @@ test_and_clear_bit(long bit, volatile unsigned long *var)
 
 	var += BIT_WORD(bit);
 	bit %= BITS_PER_LONG;
 	bit = (1UL << bit);
 
-	do {
-		val = *var;
-	} while (atomic_cmpset_long(var, val, val & ~bit) == 0);
+	val = *var;
+	while (!atomic_fcmpset_long(var, &val, val & ~bit))
+		;
 	return !!(val & bit);
 }
@@ -312,10 +312,10 @@ test_and_set_bit(long bit, volatile unsigned long *var)
 
	var += BIT_WORD(bit);
 	bit %= BITS_PER_LONG;
 	bit = (1UL << bit);
 
-	do {
-		val = *var;
-	} while (atomic_cmpset_long(var, val, val | bit) == 0);
+	val = *var;
+	while (!atomic_fcmpset_long(var, &val, val | bit))
+		;
 	return !!(val & bit);
 }
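
As an aside, the rewritten bitops loops still return the bit's value as
observed at the moment the update succeeded, because atomic_fcmpset_long()
refreshes "val" on every failed attempt. A user-space analogue of the new
test_and_set_bit() loop, using C11 atomics, which have the same
failure-updates-expected semantics; the names here are illustrative and
not part of the LinuxKPI:

	#include <stdatomic.h>
	#include <stdio.h>

	/*
	 * Like atomic_fcmpset_long(), C11's atomic_compare_exchange_weak()
	 * updates 'val' with the current value when the exchange fails, so
	 * the retry loop needs no explicit re-read.
	 */
	static int
	test_and_set_bit_c11(long bit, _Atomic unsigned long *var)
	{
		unsigned long mask = 1UL << (bit % (sizeof(unsigned long) * 8));
		unsigned long val = atomic_load(var);

		while (!atomic_compare_exchange_weak(var, &val, val | mask))
			;
		return !!(val & mask);
	}

	int
	main(void)
	{
		_Atomic unsigned long word = 0;

		printf("%d\n", test_and_set_bit_c11(3, &word));	/* 0: was clear */
		printf("%d\n", test_and_set_bit_c11(3, &word));	/* 1: was set */
		return (0);
	}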