diff --git a/sys/kern/subr_smr.c b/sys/kern/subr_smr.c
index c344fdcb7f5f..f475b8516363 100644
--- a/sys/kern/subr_smr.c
+++ b/sys/kern/subr_smr.c
@@ -209,6 +209,26 @@ smr_advance(smr_t smr)
 	return (goal);
 }
 
+smr_seq_t
+smr_advance_deferred(smr_t smr, int limit)
+{
+	smr_seq_t goal;
+	smr_t csmr;
+
+	critical_enter();
+	csmr = zpcpu_get(smr);
+	if (++csmr->c_deferred >= limit) {
+		goal = SMR_SEQ_INVALID;
+		csmr->c_deferred = 0;
+	} else
+		goal = smr_shared_current(csmr->c_shared) + SMR_SEQ_INCR;
+	critical_exit();
+	if (goal != SMR_SEQ_INVALID)
+		return (goal);
+
+	return (smr_advance(smr));
+}
+
 /*
  * Poll to determine whether all readers have observed the 'goal' write
  * sequence number.
@@ -256,6 +276,17 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
 	 */
 	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);
 
+	/*
+	 * This may have come from a deferred advance.  Consider one
+	 * increment past the current wr_seq valid and make sure we
+	 * have advanced far enough to succeed.  We simply add to avoid
+	 * an additional fence.
+	 */
+	if (goal == s_wr_seq + SMR_SEQ_INCR) {
+		atomic_add_int(&s->s_wr_seq, SMR_SEQ_INCR);
+		s_wr_seq = goal;
+	}
+
 	/*
 	 * Detect whether the goal is valid and has already been observed.
 	 *
diff --git a/sys/sys/smr.h b/sys/sys/smr.h
index b2c2fad07fec..0bd3d5ac77b5 100644
--- a/sys/sys/smr.h
+++ b/sys/sys/smr.h
@@ -64,6 +64,7 @@ typedef struct smr_shared *smr_shared_t;
 struct smr {
 	smr_seq_t	c_seq;		/* Current observed sequence. */
 	smr_shared_t	c_shared;	/* Shared SMR state. */
+	int		c_deferred;	/* Deferred advance counter. */
 };
 
 /*
@@ -145,6 +146,13 @@ smr_exit(smr_t smr)
  */
 smr_seq_t smr_advance(smr_t smr);
 
+/*
+ * Advances the write sequence number only after N calls.  Returns
+ * the correct goal for a wr_seq that has not yet occurred.  Used to
+ * minimize shared cacheline invalidations for frequent writers.
+ */
+smr_seq_t smr_advance_deferred(smr_t smr, int limit);
+
 /*
  * Returns true if a goal sequence has been reached.  If
  * wait is true this will busy loop until success.