ck: add support for executing callbacks outside of main poll loop

Pull in change from upstream deca119d14bfffd440770eb67cbdbeaf7b57eb7b

|    ck_epoch: introduce ck_epoch_deferred
|
|    Allow for deferral to occur outside epoch poll critical loop (which may access per-CPU structures).
|

Approved by:	sbruno
Author:	Matt Macy
Date:	2018-05-17 18:14:10 +00:00
Parent:	75a67bf3d0
Commit:	4c7d0d925f
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=333745
2 changed files with 17 additions and 6 deletions

--- a/sys/contrib/ck/include/ck_epoch.h
+++ b/sys/contrib/ck/include/ck_epoch.h

@@ -266,6 +266,7 @@ void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);
 void ck_epoch_unregister(ck_epoch_record_t *);
 bool ck_epoch_poll(ck_epoch_record_t *);
+bool ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred);
 void ck_epoch_synchronize(ck_epoch_record_t *);
 void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
 void ck_epoch_barrier(ck_epoch_record_t *);

--- a/sys/contrib/ck/src/ck_epoch.c
+++ b/sys/contrib/ck/src/ck_epoch.c

@@ -349,7 +349,7 @@ ck_epoch_scan(struct ck_epoch *global,
 }
 
 static void
-ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
+ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e, ck_stack_t *deferred)
 {
 	unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
 	ck_stack_entry_t *head, *next, *cursor;
@@ -362,7 +362,10 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
 		    ck_epoch_entry_container(cursor);
 
 		next = CK_STACK_NEXT(cursor);
-		entry->function(entry);
+		if (deferred != NULL)
+			ck_stack_push_spnc(deferred, &entry->stack_entry);
+		else
+			entry->function(entry);
 		i++;
 	}
@@ -390,7 +393,7 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
 	unsigned int epoch;
 
 	for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-		ck_epoch_dispatch(record, epoch);
+		ck_epoch_dispatch(record, epoch, NULL);
 
 	return;
 }
@@ -551,7 +554,7 @@ ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
  * is far from ideal too.
  */
 bool
-ck_epoch_poll(struct ck_epoch_record *record)
+ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred)
 {
 	bool active;
 	unsigned int epoch;
@@ -572,7 +575,7 @@ ck_epoch_poll(struct ck_epoch_record *record)
 	if (active == false) {
 		record->epoch = epoch;
 		for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-			ck_epoch_dispatch(record, epoch);
+			ck_epoch_dispatch(record, epoch, deferred);
 
 		return true;
 	}
@@ -580,6 +583,13 @@ ck_epoch_poll(struct ck_epoch_record *record)
 	/* If an active thread exists, rely on epoch observation. */
 	(void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);
 
-	ck_epoch_dispatch(record, epoch + 1);
+	ck_epoch_dispatch(record, epoch + 1, deferred);
 	return true;
 }
+
+bool
+ck_epoch_poll(struct ck_epoch_record *record)
+{
+
+	return ck_epoch_poll_deferred(record, NULL);
+}
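
For illustration only, a minimal sketch of how a caller might use the new interface. It assumes a ck_epoch_record_t registered elsewhere with ck_epoch_register(); the helper name reclaim_deferred is hypothetical, while ck_stack_init(), ck_stack_pop_npsc() and ck_epoch_entry_container() are existing ck_stack/ck_epoch accessors. Ready callbacks are pushed onto the caller-supplied stack during the poll and only executed afterwards, outside the poll critical loop:

#include <ck_epoch.h>
#include <ck_stack.h>

/*
 * Assumed to have been initialized and registered elsewhere via
 * ck_epoch_init() and ck_epoch_register().
 */
static ck_epoch_record_t record;

/* Hypothetical reclamation routine demonstrating deferred dispatch. */
static void
reclaim_deferred(void)
{
	ck_stack_t deferred;
	ck_stack_entry_t *cursor;

	ck_stack_init(&deferred);

	/*
	 * Poll as usual, but ready callbacks are pushed onto the local
	 * stack rather than being invoked inside the poll path.
	 */
	(void)ck_epoch_poll_deferred(&record, &deferred);

	/* Outside the poll loop: run whatever was deferred. */
	while ((cursor = ck_stack_pop_npsc(&deferred)) != NULL) {
		ck_epoch_entry_t *entry = ck_epoch_entry_container(cursor);

		entry->function(entry);
	}
}

This mirrors the intent of the upstream message: the callbacks themselves no longer run inside the poll path that may be accessing per-CPU structures.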