Implement a deferred write advancement feature that can be used to further
amortize shared cacheline writes.

Discussed with: rlibby
Differential Revision:	https://reviews.freebsd.org/D23462
Jeff Roberson 2020-02-04 02:44:52 +00:00
parent c8ea36e881
commit bc6509845d
2 changed files with 39 additions and 0 deletions
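
The amortization the commit message describes can be pictured with a minimal
userspace model (illustrative only: model_advance_deferred(), the plain
counters, and the limit of 10 are invented here, and the kernel's per-CPU
state is collapsed into a single thread). Only about one call in 'limit'
writes the shared sequence word; the rest hand back a goal one increment
ahead of it.

#include <stdatomic.h>
#include <stdio.h>

#define SEQ_INCR        2       /* stand-in for SMR_SEQ_INCR */

static _Atomic unsigned wr_seq = SEQ_INCR;      /* shared, contended word */
static unsigned deferred;                       /* per-CPU in the kernel */
static unsigned shared_writes;

static unsigned
model_advance_deferred(int limit)
{
        if (++deferred < limit)
                /* Cheap path: promise a goal one increment ahead. */
                return (atomic_load(&wr_seq) + SEQ_INCR);
        deferred = 0;
        shared_writes++;
        /* Real advance: the only write to the shared word. */
        return (atomic_fetch_add(&wr_seq, SEQ_INCR) + SEQ_INCR);
}

int
main(void)
{
        for (int i = 0; i < 1000; i++)
                (void)model_advance_deferred(10);
        /* Prints 100 with these parameters: one shared write per 10 calls. */
        printf("calls=1000 shared writes=%u\n", shared_writes);
        return (0);
}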


@@ -209,6 +209,26 @@ smr_advance(smr_t smr)
        return (goal);
}

smr_seq_t
smr_advance_deferred(smr_t smr, int limit)
{
        smr_seq_t goal;
        smr_t csmr;

        critical_enter();
        csmr = zpcpu_get(smr);
        if (++csmr->c_deferred >= limit) {
                goal = SMR_SEQ_INVALID;
                csmr->c_deferred = 0;
        } else
                goal = smr_shared_current(csmr->c_shared) + SMR_SEQ_INCR;
        critical_exit();
        if (goal != SMR_SEQ_INVALID)
                return (goal);

        return (smr_advance(smr));
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
@@ -256,6 +276,17 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
         */
        s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

        /*
         * This may have come from a deferred advance. Consider one
         * increment past the current wr_seq valid and make sure we
         * have advanced far enough to succeed. We simply add to avoid
         * an additional fence.
         */
        if (goal == s_wr_seq + SMR_SEQ_INCR) {
                atomic_add_int(&s->s_wr_seq, SMR_SEQ_INCR);
                s_wr_seq = goal;
        }

        /*
         * Detect whether the goal is valid and has already been observed.
         *
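
The catch-up added above can be walked through with a small standalone model
(the concrete numbers and plain C types are illustrative, and SMR_SEQ_INCR is
assumed here to be a step of 2): a goal exactly one increment past the
poller's snapshot means the writer deferred the bump, so the poller applies
it before checking readers.

#include <stdatomic.h>
#include <assert.h>

#define SEQ_INCR        2       /* illustrative stand-in for SMR_SEQ_INCR */

int
main(void)
{
        _Atomic unsigned s_wr_seq = 100;        /* shared write sequence */
        unsigned goal = 102;                    /* from a deferred advance */
        unsigned wr = atomic_load(&s_wr_seq);   /* poller's snapshot */

        /* Goal is one increment ahead: perform the deferred bump here. */
        if (goal == wr + SEQ_INCR) {
                atomic_fetch_add(&s_wr_seq, SEQ_INCR);
                wr = goal;
        }
        assert(wr == goal && atomic_load(&s_wr_seq) >= goal);
        return (0);
}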


@@ -64,6 +64,7 @@ typedef struct smr_shared *smr_shared_t;
struct smr {
        smr_seq_t       c_seq;          /* Current observed sequence. */
        smr_shared_t    c_shared;       /* Shared SMR state. */
        int             c_deferred;     /* Deferred advance counter. */
};
/*
@@ -145,6 +146,13 @@ smr_exit(smr_t smr)
 */
smr_seq_t smr_advance(smr_t smr);

/*
 * Advances the write sequence number only after N calls. Returns
 * the correct goal for a wr_seq that has not yet occurred. Used to
 * minimize shared cacheline invalidations for frequent writers.
 */
smr_seq_t smr_advance_deferred(smr_t smr, int limit);
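
A hypothetical writer-side sketch of the new interface (the item structure,
SMR_DEFER_LIMIT, and the helper functions are invented for illustration;
only smr_advance_deferred(), smr_advance() and smr_poll() are the real entry
points, assumed to come from <sys/smr.h>):

#include <sys/param.h>
#include <sys/smr.h>

#define SMR_DEFER_LIMIT 8               /* illustrative batching factor */

struct item {
        smr_seq_t       i_goal;         /* write sequence to wait for */
        /* ... payload ... */
};

static void
item_defer_free(smr_t smr, struct item *it)
{
        /*
         * Most calls only read the shared sequence and return a goal one
         * increment ahead of it; roughly every SMR_DEFER_LIMIT-th call
         * falls back to smr_advance() and pays the shared cacheline write.
         */
        it->i_goal = smr_advance_deferred(smr, SMR_DEFER_LIMIT);
        /* ... queue 'it' for deferred reclamation ... */
}

static bool
item_safe_to_reclaim(smr_t smr, struct item *it)
{
        /* Non-blocking check; pass wait == true to spin until safe. */
        return (smr_poll(smr, it->i_goal, false));
}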
/*
 * Returns true if a goal sequence has been reached. If
 * wait is true this will busy loop until success.