Fix misplaced read memory barrier in seq.
Impact on capability races was small: a spurious ENOTCAPABLE (early return) was possible, but checks could not be bypassed.

Also tidy up some comments.
commit 00915f067f
parent f8ca61996e
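For illustration only (this is not part of the commit): the two reader-side orderings at issue, sketched below with C11 atomics rather than the kernel's atomic(9) primitives. Every name here is hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t seq_t;

static inline bool
seq_in_modify_sketch(seq_t s)
{
	/* Odd sequence numbers mean a writer is mid-update. */
	return ((s & 1) != 0);
}

/*
 * seq_read() side: load the sequence, THEN order against later reads,
 * so the data reads that follow cannot be speculated before the
 * sequence load.  An acquire load gives exactly this ordering.
 */
static inline seq_t
seq_read_sketch(_Atomic seq_t *seqp)
{
	seq_t ret;

	for (;;) {
		ret = atomic_load_explicit(seqp, memory_order_acquire);
		if (!seq_in_modify_sketch(ret))
			return (ret);
		/* Writer in progress; a real reader would spin-wait. */
	}
}

/*
 * seq_consistent() side: the read barrier must come FIRST, then the
 * reload, so the data reads performed since seq_read() complete before
 * the sequence is checked again.  Placing the barrier after the reload
 * (i.e., another acquire load) is the misplacement being fixed.
 */
static inline bool
seq_consistent_sketch(_Atomic seq_t *seqp, seq_t oldseq)
{
	atomic_thread_fence(memory_order_acquire);
	return (atomic_load_explicit(seqp, memory_order_relaxed) == oldseq);
}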
@@ -70,16 +70,16 @@ typedef uint32_t seq_t;
 #include <machine/cpu.h>
 
 /*
- * This is a temporary hack until memory barriers are cleaned up.
+ * Stuff below is going away when we gain suitable memory barriers.
  *
  * atomic_load_acq_int at least on amd64 provides a full memory barrier,
- * in a way which affects perforance.
+ * in a way which affects performance.
  *
  * Hack below covers all architectures and avoids most of the penalty at least
- * on amd64.
+ * on amd64 but still has unnecessary cost.
  */
 static __inline int
-atomic_load_acq_rmb_int(volatile u_int *p)
+atomic_load_rmb_int(volatile u_int *p)
 {
 	volatile u_int v;
 
@@ -88,6 +88,16 @@ atomic_load_acq_rmb_int(volatile u_int *p)
 	return (v);
 }
 
+static __inline int
+atomic_rmb_load_int(volatile u_int *p)
+{
+	volatile u_int v = 0;
+
+	atomic_load_acq_int(&v);
+	v = *p;
+	return (v);
+}
+
 static __inline bool
 seq_in_modify(seq_t seqp)
 {
@@ -117,7 +127,7 @@ seq_read(seq_t *seqp)
 	seq_t ret;
 
 	for (;;) {
-		ret = atomic_load_acq_rmb_int(seqp);
+		ret = atomic_load_rmb_int(seqp);
 		if (seq_in_modify(ret)) {
 			cpu_spinwait();
 			continue;
@@ -132,7 +142,7 @@ static __inline seq_t
 seq_consistent(seq_t *seqp, seq_t oldseq)
 {
 
-	return (atomic_load_acq_rmb_int(seqp) == oldseq);
+	return (atomic_rmb_load_int(seqp) == oldseq);
 }
 
 static __inline seq_t
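For completeness, a minimal C11 sketch of the writer side such readers pair with (again illustrative, not the kernel code): the sequence is bumped to odd before the protected data is written and back to even afterwards, with store ordering enforced at both edges.

#include <stdatomic.h>
#include <stdint.h>

typedef uint32_t seq_t;

static _Atomic seq_t g_seq;	/* even: idle, odd: write in progress */
static _Atomic int g_val;	/* the datum the sequence protects */

/* Assumes writers are serialized externally, as seqlock writers must be. */
static void
writer_update_sketch(int val)
{
	seq_t s = atomic_load_explicit(&g_seq, memory_order_relaxed);

	/* Mark the update in progress: sequence becomes odd... */
	atomic_store_explicit(&g_seq, s + 1, memory_order_relaxed);
	/* ...and keep the data store from moving above that mark. */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&g_val, val, memory_order_relaxed);

	/*
	 * Publish: sequence becomes even again.  The release store keeps
	 * the data store above from moving below it, so a reader that
	 * observes s + 2 also observes the new value.
	 */
	atomic_store_explicit(&g_seq, s + 2, memory_order_release);
}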