Add two missing fences, with comments describing them. These were found by
inspection and after a lengthy discussion with jhb and kib. They have not produced test failures.

Don't pointer chase through cpu0's smr. Use the CPU-correct smr even when not in a critical section, to reduce the likelihood of false sharing.
parent 82952c4065
commit 915c367e8e
sys/kern/subr_smr.c:

```diff
@@ -195,7 +195,7 @@ smr_advance(smr_t smr)
 	 * odd and an observed value of 0 in a particular CPU means
 	 * it is not currently in a read section.
 	 */
-	s = smr->c_shared;
+	s = zpcpu_get(smr)->c_shared;
 	goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;
 
 	/*
```
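For context on the false-sharing point: an smr is a per-CPU allocation, so `smr->c_shared` always dereferences cpu0's copy, and the cache line holding that copy also holds cpu0's frequently written `c_seq`. `zpcpu_get(smr)` resolves the calling CPU's own copy instead. A minimal userspace sketch of the layout idea, in C11 rather than the kernel's zpcpu API (all names here are illustrative):

```c
#include <stdalign.h>
#include <stdatomic.h>
#include <stdio.h>

#define	CACHE_LINE	64
#define	NCPU		4

/*
 * One slot per CPU, padded to a cache line: cpu1 bumping its c_seq
 * never invalidates the line that holds cpu0's copy of the pointer.
 */
struct percpu_smr {
	alignas(CACHE_LINE) atomic_uint c_seq;	/* hot, written often */
	void *c_shared;				/* same value in every slot */
};

static struct percpu_smr slots[NCPU];

/* Analogue of zpcpu_get(): use your own copy, not slot 0's. */
static struct percpu_smr *
percpu_get(int cpu)
{
	return (&slots[cpu]);
}

int
main(void)
{
	atomic_fetch_add(&percpu_get(1)->c_seq, 2);
	printf("cpu1 c_seq %u\n", atomic_load(&percpu_get(1)->c_seq));
	return (0);
}
```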
```diff
@@ -242,14 +242,19 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 	 */
 	success = true;
 	critical_enter();
-	s = smr->c_shared;
+	s = zpcpu_get(smr)->c_shared;
 
 	/*
 	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
 	 * observe an updated read sequence that is larger than write.
 	 */
 	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
-	s_wr_seq = smr_current(smr);
 
+	/*
+	 * wr_seq must be loaded prior to any c_seq value so that a stale
+	 * c_seq can only reference time after this wr_seq.
+	 */
+	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);
+
 	/*
 	 * Detect whether the goal is valid and has already been observed.
```
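This is the first of the two fences. `s_rd_seq` only ever advances toward `s_wr_seq`, so if the write-sequence load could be satisfied before the read-sequence load, the poller might pair a fresh `s_rd_seq` with a stale `s_wr_seq` and observe rd > wr; the second acquire keeps every later `c_seq` load behind the `s_wr_seq` snapshot. A C11 sketch of the same ordering (the kernel uses `atomic_load_acq_int`, not stdatomic; names are illustrative):

```c
#include <stdatomic.h>

static atomic_uint s_rd_seq, s_wr_seq;	/* invariant: rd never passes wr */

static void
poll_snapshot(unsigned *rd, unsigned *wr)
{
	/*
	 * Acquire: the s_wr_seq load below cannot be reordered before
	 * this one, so we can never compute rd > wr.
	 */
	*rd = atomic_load_explicit(&s_rd_seq, memory_order_acquire);

	/*
	 * Acquire again: any per-CPU c_seq loaded after this point can
	 * only be stale toward times after this wr snapshot.
	 */
	*wr = atomic_load_explicit(&s_wr_seq, memory_order_acquire);
}
```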
```diff
@@ -336,6 +341,12 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 out:
 	critical_exit();
 
+	/*
+	 * Serialize with smr_advance()/smr_exit(). The caller is now free
+	 * to modify memory as expected.
+	 */
+	atomic_thread_fence_acq();
+
 	return (success);
 }
 
```
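This is the second fence. A reader publishes its departure with a release store in `smr_exit()`; without a matching acquire on the poll side, the caller's subsequent writes could be ordered ahead of the `c_seq` observations that justified them. A C11 analogy of the pairing (illustrative names, not the kernel API):

```c
#include <stdatomic.h>

static atomic_uint c_seq;	/* reader's per-CPU sequence */
static int protected_data;	/* memory the poller will reclaim */

/* smr_exit() analogue: release-publish that the section is over. */
static void
reader_exit(void)
{
	atomic_store_explicit(&c_seq, 0, memory_order_release);
}

/* Poll-side analogue: returns 1 when it is safe to modify memory. */
static int
poller(void)
{
	if (atomic_load_explicit(&c_seq, memory_order_relaxed) != 0)
		return (0);		/* reader still in a section */
	/*
	 * The added fence: promotes the relaxed load above to acquire,
	 * ordering the write below after the reader's release store.
	 */
	atomic_thread_fence(memory_order_acquire);
	protected_data = -1;		/* now safe */
	return (1);
}
```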
sys/sys/smr.h:

```diff
@@ -69,11 +69,18 @@ struct smr {
 /*
  * Return the current write sequence number.
  */
+static inline smr_seq_t
+smr_shared_current(smr_shared_t s)
+{
+
+	return (atomic_load_int(&s->s_wr_seq));
+}
+
 static inline smr_seq_t
 smr_current(smr_t smr)
 {
 
-	return (atomic_load_int(&smr->c_shared->s_wr_seq));
+	return (smr_shared_current(zpcpu_get(smr)->c_shared));
 }
 
 /*
```
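The split lets a caller that has already resolved its per-CPU pointer skip a second `zpcpu_get()`, while `smr_current()` keeps doing the resolution for everyone else. A hypothetical pinned-caller pattern (sketch only; `example_pinned_read` is not in the tree):

```c
static inline void
example_pinned_read(smr_t smr, smr_seq_t *seqp)
{

	critical_enter();
	smr = zpcpu_get(smr);		/* pin, then resolve own copy */
	*seqp = smr_shared_current(smr->c_shared);	/* no re-resolve */
	critical_exit();
}
```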
```diff
@@ -106,7 +113,7 @@ smr_enter(smr_t smr)
 	 * is detected and handled there.
 	 */
 	/* This is an add because we do not have atomic_store_acq_int */
-	atomic_add_acq_int(&smr->c_seq, smr_current(smr));
+	atomic_add_acq_int(&smr->c_seq, smr_shared_current(smr->c_shared));
 }
 
 /*
```
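On the idiom the comment references: atomic(9) has acquire variants for loads and read-modify-write operations but no `atomic_store_acq_int`. Since `c_seq` is known to be 0 outside a read section, adding the sequence number is equivalent to storing it, and the add's acquire semantics keep the section's loads from being hoisted above the store. A C11 sketch of the same trick (illustrative, not the kernel API):

```c
#include <assert.h>
#include <stdatomic.h>

static atomic_uint c_seq;	/* 0 whenever we are outside a section */

static void
enter_section(unsigned wr_seq)
{
	unsigned old;

	/*
	 * fetch_add with acquire is effectively a store-acquire here,
	 * since c_seq is 0; later loads cannot move above this op.
	 */
	old = atomic_fetch_add_explicit(&c_seq, wr_seq,
	    memory_order_acquire);
	assert(old == 0);		/* we were not already inside */
}
```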