- Merge r338254 from cxgbe(4):
  Use fcmpset instead of cmpset when appropriate.
- Revert r277226 of cxgbe(4), obsolete since r334320.
marius 2019-05-09 11:34:46 +00:00
parent c3e96d5122
commit b0f90dce2b

sys/net/mp_ring.c
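Background on the fcmpset change (an annotation, not part of the commit): atomic(9)'s cmpset primitives only report success or failure, so a compare-and-swap retry loop has to re-read the target word on every iteration. The fcmpset variants take a pointer to the expected value and, on failure, write back the value actually found, letting the initial read be hoisted out of the loop. A minimal sketch of both patterns on a hypothetical flag word (the helper names and FLAG are invented for illustration):

#include <sys/types.h>
#include <machine/atomic.h>

#define	FLAG	0x1ULL			/* hypothetical flag bit */

static volatile uint64_t word;

static void
set_flag_cmpset(void)
{
	uint64_t old, new;

	do {
		old = word;		/* must re-read on every retry */
		new = old | FLAG;
	} while (atomic_cmpset_64(&word, old, new) == 0);
}

static void
set_flag_fcmpset(void)
{
	uint64_t old, new;

	old = word;			/* one read before the loop */
	do {
		new = old | FLAG;
		/* a failed fcmpset stores the value it found in 'old' */
	} while (atomic_fcmpset_64(&word, &old, new) == 0);
}

Every retry loop below gets this transformation. The revert half of the commit follows from r334320 having given i386 native 64-bit cmpset/fcmpset operations, which made the old atomic_cmpset_acq_64/atomic_cmpset_rel_64 macro aliases unnecessary.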

@@ -36,12 +36,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/malloc.h>
 #include <machine/cpu.h>
-
-#if defined(__i386__)
-#define atomic_cmpset_acq_64 atomic_cmpset_64
-#define atomic_cmpset_rel_64 atomic_cmpset_64
-#endif
-
 #include <net/mp_ring.h>
 
 union ring_state {
@@ -195,11 +189,12 @@ drain_ring_lockless(struct ifmp_ring *r, union ring_state os, uint16_t prev, int
 		n = r->drain(r, cidx, pidx);
 		if (n == 0) {
 			critical_enter();
+			os.state = r->state;
 			do {
-				os.state = ns.state = r->state;
+				ns.state = os.state;
 				ns.cidx = cidx;
 				ns.flags = STALLED;
-			} while (atomic_cmpset_64(&r->state, os.state,
+			} while (atomic_fcmpset_64(&r->state, &os.state,
 			    ns.state) == 0);
 			critical_exit();
 			if (prev != STALLED)
@@ -222,11 +217,13 @@ drain_ring_lockless(struct ifmp_ring *r, union ring_state os, uint16_t prev, int
 		if (cidx != pidx && pending < 64 && total < budget)
 			continue;
 		critical_enter();
+		os.state = r->state;
 		do {
-			os.state = ns.state = r->state;
+			ns.state = os.state;
 			ns.cidx = cidx;
 			ns.flags = state_to_flags(ns, total >= budget);
-		} while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);
+		} while (atomic_fcmpset_acq_64(&r->state, &os.state,
+		    ns.state) == 0);
 		critical_exit();
 
 		if (ns.flags == ABDICATED)
@@ -379,10 +376,8 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdi
 	if (abdicate) {
 		if (os.flags == IDLE)
 			ns.flags = ABDICATED;
-	}
-	else {
+	} else
 		ns.flags = BUSY;
-	}
 	r->state = ns.state;
 
 	counter_u64_add(r->enqueues, n);
@@ -398,7 +393,6 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdi
 	mtx_unlock(&r->lock);
 	return (0);
 }
-
 #else
 int
 ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdicate)
@@ -414,8 +408,8 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdi
 	 * Reserve room for the new items. Our reservation, if successful, is
 	 * from 'pidx_start' to 'pidx_stop'.
 	 */
+	os.state = r->state;
 	for (;;) {
-		os.state = r->state;
 		if (n >= space_available(r, os)) {
 			counter_u64_add(r->drops, n);
 			MPASS(os.flags != IDLE);
@@ -426,7 +420,7 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdi
 		ns.state = os.state;
 		ns.pidx_head = increment_idx(r, os.pidx_head, n);
 		critical_enter();
-		if (atomic_cmpset_64(&r->state, os.state, ns.state))
+		if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
 			break;
 		critical_exit();
 		cpu_spinwait();
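The two hunks above are the producers' reservation step described by the comment: each thread claims its slots by advancing pidx_head with a CAS and only then copies items into them. A stripped-down sketch of just the index reservation, with hypothetical names and the space/flags bookkeeping omitted:

#include <sys/types.h>
#include <machine/atomic.h>

/*
 * Claim 'n' of 'size' slots by advancing 'head'; returns the start of
 * the claimed range [start, start + n).  The real loop above must also
 * check space_available() (dropping the items on overflow) and runs
 * inside critical_enter()/critical_exit().
 */
static uint64_t
reserve_slots(volatile uint64_t *head, uint64_t size, uint64_t n)
{
	uint64_t old, new;

	old = *head;
	do {
		new = (old + n) % size;	/* wrap, as increment_idx() does */
	} while (atomic_fcmpset_64(head, &old, new) == 0);
	return (old);			/* caller owns the claimed slots */
}

The critical section around the CAS matters because a producer that wins the race must go on to publish its pidx_tail update before later reservations can complete; being preempted in between would stall every producer queued behind it.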
@@ -456,17 +450,16 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdi
 	 * Update the ring's pidx_tail. The release style atomic guarantees
 	 * that the items are visible to any thread that sees the updated pidx.
 	 */
+	os.state = r->state;
 	do {
-		os.state = ns.state = r->state;
+		ns.state = os.state;
 		ns.pidx_tail = pidx_stop;
 		if (abdicate) {
 			if (os.flags == IDLE)
 				ns.flags = ABDICATED;
-		}
-		else {
+		} else
 			ns.flags = BUSY;
-		}
-	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
+	} while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
 	critical_exit();
 
 	counter_u64_add(r->enqueues, n);
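As the comment in the final hunk notes, the _rel CAS that publishes pidx_tail is what makes the copied items visible: release semantics order the item stores before the index update, and a reader that loads the index with acquire semantics sees them. A minimal single-slot sketch of that pairing, again with hypothetical names:

#include <sys/types.h>
#include <machine/atomic.h>

static volatile uint64_t published;	/* stands in for pidx_tail */
static int item;			/* stands in for a ring slot */

static void
producer(void)
{
	item = 42;				/* write the payload... */
	atomic_store_rel_64(&published, 1);	/* ...then release-store */
}

static int
consumer(void)
{
	/*
	 * The acquire load pairs with the release store above, so a
	 * consumer that observes 'published' also observes 'item'.
	 */
	if (atomic_load_acq_64(&published) == 0)
		return (-1);			/* nothing published yet */
	return (item);
}

atomic_fcmpset_rel_64() in the loop above provides the same release ordering on its successful store; the acquire side lives in the consumers' acq-flavored reads of r->state.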