Import CK as of commit 6b141c0bdd21ce8b3e14147af8f87f22b20ecf32
This brings in the ck_epoch changes we needed.
parent c1411a76e5
commit 7e8cd4e1af
@@ -105,7 +105,7 @@ linux_rcu_runtime_init(void *arg __unused)
 ck_epoch_record_t *record;

 record = malloc(sizeof(*record), M_LRCU, M_WAITOK | M_ZERO);
-ck_epoch_register(&linux_epoch, record);
+ck_epoch_register(&linux_epoch, record, NULL);

 DPCPU_ID_SET(i, linux_reader_epoch_record, record);
 }
@@ -116,7 +116,7 @@ linux_rcu_runtime_init(void *arg __unused)

 record = malloc(sizeof(*record), M_LRCU, M_WAITOK | M_ZERO);

-ck_epoch_register(&linux_epoch, &record->epoch_record);
+ck_epoch_register(&linux_epoch, &record->epoch_record, NULL);
 mtx_init(&record->head_lock, "LRCU-HEAD", NULL, MTX_DEF);
 mtx_init(&record->sync_lock, "LRCU-SYNC", NULL, MTX_DEF);
 TASK_INIT(&record->task, 0, linux_rcu_cleaner_func, record);
@@ -170,14 +170,14 @@ linux_srcu_get_record(void)
 * NOTE: The only records that are unregistered and can be
 * recycled are srcu_epoch_records.
 */
-record = (struct srcu_epoch_record *)ck_epoch_recycle(&linux_epoch);
+record = (struct srcu_epoch_record *)ck_epoch_recycle(&linux_epoch, NULL);
 if (__predict_true(record != NULL))
 return (record);

 record = malloc(sizeof(*record), M_LRCU, M_WAITOK | M_ZERO);
 mtx_init(&record->read_lock, "SRCU-READ", NULL, MTX_DEF | MTX_NOWITNESS);
 mtx_init(&record->sync_lock, "SRCU-SYNC", NULL, MTX_DEF | MTX_NOWITNESS);
-ck_epoch_register(&linux_epoch, &record->epoch_record);
+ck_epoch_register(&linux_epoch, &record->epoch_record, NULL);

 return (record);
 }
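The linuxkpi callers above only gain a NULL context argument. As a minimal sketch of the extended API (userland-style, with hypothetical names my_get_record, my_epoch and my_ctx; error handling trimmed), a consumer that does want a per-record context could do:

#include <stdlib.h>
#include <ck_epoch.h>

/* Hypothetical helper: reuse a free record if possible, else allocate one. */
static ck_epoch_record_t *
my_get_record(ck_epoch_t *my_epoch, void *my_ctx)
{
    ck_epoch_record_t *rec;

    /* ck_epoch_recycle() now installs the context on a successful reuse. */
    rec = ck_epoch_recycle(my_epoch, my_ctx);
    if (rec != NULL)
        return rec;

    rec = malloc(sizeof(*rec));
    if (rec == NULL)
        return NULL;

    /* ck_epoch_register() likewise takes the optional context pointer. */
    ck_epoch_register(my_epoch, rec, my_ctx);
    return rec;
}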
@@ -83,6 +83,7 @@ struct ck_epoch_ref {
 };

 struct ck_epoch_record {
+ck_stack_entry_t record_next;
 struct ck_epoch *global;
 unsigned int state;
 unsigned int epoch;
@@ -92,17 +93,16 @@ struct ck_epoch_record {
 } local CK_CC_CACHELINE;
 unsigned int n_pending;
 unsigned int n_peak;
-unsigned long n_dispatch;
+unsigned int n_dispatch;
+void *ct;
 ck_stack_t pending[CK_EPOCH_LENGTH];
-ck_stack_entry_t record_next;
 } CK_CC_CACHELINE;
 typedef struct ck_epoch_record ck_epoch_record_t;

 struct ck_epoch {
 unsigned int epoch;
-char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
-ck_stack_t records;
 unsigned int n_free;
+ck_stack_t records;
 };
 typedef struct ck_epoch ck_epoch_t;
@@ -110,7 +110,14 @@ typedef struct ck_epoch ck_epoch_t;
 * Internal functions.
 */
 void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
-void _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);
+bool _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);

+CK_CC_FORCE_INLINE static void *
+ck_epoch_record_ct(const ck_epoch_record_t *record)
+{
+
+return ck_pr_load_ptr(&record->ct);
+}
+
 /*
 * Marks the beginning of an epoch-protected section.
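The new ck_epoch_record_ct() inline simply loads the context pointer stored in the record. A hedged illustration, not part of the import, with a hypothetical struct reader_state as the attached context:

#include <ck_epoch.h>

struct reader_state {           /* hypothetical per-record context */
    unsigned long n_reads;
};

static struct reader_state *
reader_state_of(const ck_epoch_record_t *record)
{
    /* Returns whatever pointer was passed to ck_epoch_register()/ck_epoch_recycle(). */
    return (struct reader_state *)ck_epoch_record_ct(record);
}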
@@ -160,9 +167,10 @@ ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
 }

 /*
-* Marks the end of an epoch-protected section.
+* Marks the end of an epoch-protected section. Returns true if no more
+* sections exist for the caller.
 */
-CK_CC_FORCE_INLINE static void
+CK_CC_FORCE_INLINE static bool
 ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
 {

@@ -170,15 +178,19 @@ ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
 ck_pr_store_uint(&record->active, record->active - 1);

 if (section != NULL)
-_ck_epoch_delref(record, section);
+return _ck_epoch_delref(record, section);

-return;
+return record->active == 0;
 }

 /*
 * Defers the execution of the function pointed to by the "cb"
 * argument until an epoch counter loop. This allows for a
 * non-blocking deferral.
+*
+* We can get away without a fence here due to the monotonic nature
+* of the epoch counter. Worst case, this will result in some delays
+* before object destruction.
 */
 CK_CC_FORCE_INLINE static void
 ck_epoch_call(ck_epoch_record_t *record,
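ck_epoch_end() now reports whether the caller just left its outermost protected section. A small sketch of how that return value can gate a non-blocking reclamation pass (an assumption about usage, not code from the patch):

#include <ck_epoch.h>

static void
reader_exit(ck_epoch_record_t *record, ck_epoch_section_t *section)
{
    /* true means no protected sections remain for this record's owner. */
    if (ck_epoch_end(record, section) == true) {
        /* Safe point: attempt a non-blocking reclamation pass. */
        (void)ck_epoch_poll(record);
    }
}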
@@ -195,13 +207,74 @@ ck_epoch_call(ck_epoch_record_t *record,
 return;
 }

+/*
+* Same as ck_epoch_call, but allows for records to be shared and is reentrant.
+*/
+CK_CC_FORCE_INLINE static void
+ck_epoch_call_strict(ck_epoch_record_t *record,
+ck_epoch_entry_t *entry,
+ck_epoch_cb_t *function)
+{
+struct ck_epoch *epoch = record->global;
+unsigned int e = ck_pr_load_uint(&epoch->epoch);
+unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
+
+ck_pr_inc_uint(&record->n_pending);
+entry->function = function;
+
+/* Store fence is implied by push operation. */
+ck_stack_push_upmc(&record->pending[offset], &entry->stack_entry);
+return;
+}
+
+/*
+* This callback is used for synchronize_wait to allow for custom blocking
+* behavior.
+*/
+typedef void ck_epoch_wait_cb_t(ck_epoch_t *, ck_epoch_record_t *,
+void *);
+
+/*
+* Return latest epoch value. This operation provides load ordering.
+*/
+CK_CC_FORCE_INLINE static unsigned int
+ck_epoch_value(const ck_epoch_t *ep)
+{
+
+ck_pr_fence_load();
+return ck_pr_load_uint(&ep->epoch);
+}
+
 void ck_epoch_init(ck_epoch_t *);
-ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
-void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
+
+/*
+* Attempts to recycle an unused epoch record. If one is successfully
+* allocated, the record context pointer is also updated.
+*/
+ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *, void *);
+
+/*
+* Registers an epoch record. An optional context pointer may be passed that
+* is retrievable with ck_epoch_record_ct.
+*/
+void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);
+
+/*
+* Marks a record as available for re-use by a subsequent recycle operation.
+* Note that the record cannot be physically destroyed.
+*/
 void ck_epoch_unregister(ck_epoch_record_t *);

 bool ck_epoch_poll(ck_epoch_record_t *);
 void ck_epoch_synchronize(ck_epoch_record_t *);
+void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
 void ck_epoch_barrier(ck_epoch_record_t *);
+void ck_epoch_barrier_wait(ck_epoch_record_t *, ck_epoch_wait_cb_t *, void *);
+
+/*
+* Reclaim entries associated with a record. This is safe to call only on
+* the caller's record or records that are using call_strict.
+*/
 void ck_epoch_reclaim(ck_epoch_record_t *);

 #endif /* CK_EPOCH_H */
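For shared records, the new ck_epoch_call_strict() keeps n_pending consistent with atomic operations and pushes onto the pending stack with ck_stack_push_upmc(). A hedged sketch of deferring destruction through it; struct node, node_destroy and node_retire are hypothetical, and the cast below relies on the entry being the first member (real code would typically use the CK_EPOCH_CONTAINER helper instead):

#include <stdlib.h>
#include <ck_epoch.h>

struct node {                           /* hypothetical deferred object */
    ck_epoch_entry_t epoch_entry;       /* first member, so the cast below is valid */
    int value;
};

static void
node_destroy(ck_epoch_entry_t *entry)
{
    free((struct node *)entry);
}

/* Multiple threads may defer onto the same record; n_pending is updated atomically. */
static void
node_retire(ck_epoch_record_t *shared_record, struct node *n)
{
    ck_epoch_call_strict(shared_record, &n->epoch_entry, node_destroy);
}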
@@ -139,7 +139,7 @@ CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,

 #define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)

-void
+bool
 _ck_epoch_delref(struct ck_epoch_record *record,
 struct ck_epoch_section *section)
 {
@@ -150,7 +150,7 @@ _ck_epoch_delref(struct ck_epoch_record *record,
 current->count--;

 if (current->count > 0)
-return;
+return false;

 /*
 * If the current bucket no longer has any references, then
@@ -161,8 +161,7 @@ _ck_epoch_delref(struct ck_epoch_record *record,
 * If no other active bucket exists, then the record will go
 * inactive in order to allow for forward progress.
 */
-other = &record->local.bucket[(i + 1) &
-CK_EPOCH_SENSE_MASK];
+other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE_MASK];
 if (other->count > 0 &&
 ((int)(current->epoch - other->epoch) < 0)) {
 /*
@@ -172,7 +171,7 @@ _ck_epoch_delref(struct ck_epoch_record *record,
 ck_pr_store_uint(&record->epoch, other->epoch);
 }

-return;
+return true;
 }

 void
@@ -230,7 +229,7 @@ ck_epoch_init(struct ck_epoch *global)
 }

 struct ck_epoch_record *
-ck_epoch_recycle(struct ck_epoch *global)
+ck_epoch_recycle(struct ck_epoch *global, void *ct)
 {
 struct ck_epoch_record *record;
 ck_stack_entry_t *cursor;
@@ -249,6 +248,12 @@ ck_epoch_recycle(struct ck_epoch *global)
 CK_EPOCH_STATE_USED);
 if (state == CK_EPOCH_STATE_FREE) {
 ck_pr_dec_uint(&global->n_free);
+ck_pr_store_ptr(&record->ct, ct);
+
+/*
+* The context pointer is ordered by a
+* subsequent protected section.
+*/
 return record;
 }
 }
@@ -258,7 +263,8 @@ ck_epoch_recycle(struct ck_epoch *global)
 }

 void
-ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
+ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record,
+void *ct)
 {
 size_t i;

@@ -269,6 +275,7 @@ ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
 record->n_dispatch = 0;
 record->n_peak = 0;
 record->n_pending = 0;
+record->ct = ct;
 memset(&record->local, 0, sizeof record->local);

 for (i = 0; i < CK_EPOCH_LENGTH; i++)
@@ -295,6 +302,7 @@ ck_epoch_unregister(struct ck_epoch_record *record)
 for (i = 0; i < CK_EPOCH_LENGTH; i++)
 ck_stack_init(&record->pending[i]);

+ck_pr_store_ptr(&record->ct, NULL);
 ck_pr_fence_store();
 ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
 ck_pr_inc_uint(&global->n_free);
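These .c hunks mirror the header changes: register and recycle now store the context, and unregister clears it before marking the slot free. A brief sketch of the resulting lifecycle, with hypothetical names swap_record_context and new_ctx:

#include <ck_epoch.h>

/* Hypothetical: retire a record and try to pick up a free slot with a new context. */
static ck_epoch_record_t *
swap_record_context(ck_epoch_t *epoch, ck_epoch_record_t *old, void *new_ctx)
{
    /* Clears old->ct and marks the record CK_EPOCH_STATE_FREE. */
    ck_epoch_unregister(old);

    /* May hand back the same slot, now carrying new_ctx; NULL if none is free. */
    return ck_epoch_recycle(epoch, new_ctx);
}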
@@ -345,11 +353,10 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
 {
 unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
 ck_stack_entry_t *head, *next, *cursor;
+unsigned int n_pending, n_peak;
 unsigned int i = 0;

-head = CK_STACK_FIRST(&record->pending[epoch]);
-ck_stack_init(&record->pending[epoch]);
-
+head = ck_stack_batch_pop_upmc(&record->pending[epoch]);
 for (cursor = head; cursor != NULL; cursor = next) {
 struct ck_epoch_entry *entry =
 ck_epoch_entry_container(cursor);
@@ -359,11 +366,18 @@ ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
 i++;
 }

-if (record->n_pending > record->n_peak)
-record->n_peak = record->n_pending;
+n_peak = ck_pr_load_uint(&record->n_peak);
+n_pending = ck_pr_load_uint(&record->n_pending);
+
+/* We don't require accuracy around peak calculation. */
+if (n_pending > n_peak)
+ck_pr_store_uint(&record->n_peak, n_peak);
+
+if (i > 0) {
+ck_pr_add_uint(&record->n_dispatch, i);
+ck_pr_sub_uint(&record->n_pending, i);
+}

-record->n_dispatch += i;
-record->n_pending -= i;
 return;
 }
@@ -381,13 +395,24 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
 return;
 }

+CK_CC_FORCE_INLINE static void
+epoch_block(struct ck_epoch *global, struct ck_epoch_record *cr,
+ck_epoch_wait_cb_t *cb, void *ct)
+{
+
+if (cb != NULL)
+cb(global, cr, ct);
+
+return;
+}
+
 /*
 * This function must not be called with-in read section.
 */
 void
-ck_epoch_synchronize(struct ck_epoch_record *record)
+ck_epoch_synchronize_wait(struct ck_epoch *global,
+ck_epoch_wait_cb_t *cb, void *ct)
 {
-struct ck_epoch *global = record->global;
 struct ck_epoch_record *cr;
 unsigned int delta, epoch, goal, i;
 bool active;
@@ -424,10 +449,27 @@ ck_epoch_synchronize(struct ck_epoch_record *record)
 * period.
 */
 e_d = ck_pr_load_uint(&global->epoch);
-if (e_d != delta) {
-delta = e_d;
-goto reload;
+if (e_d == delta) {
+epoch_block(global, cr, cb, ct);
+continue;
 }
+
+/*
+* If the epoch has been updated, we may have already
+* met our goal.
+*/
+delta = e_d;
+if ((goal > epoch) & (delta >= goal))
+goto leave;
+
+epoch_block(global, cr, cb, ct);
+
+/*
+* If the epoch has been updated, then a grace period
+* requires that all threads are observed idle at the
+* same epoch.
+*/
+cr = NULL;
 }

 /*
@@ -459,20 +501,6 @@ ck_epoch_synchronize(struct ck_epoch_record *record)
 * Otherwise, we have just acquired latest snapshot.
 */
 delta = delta + r;
 continue;
-
-reload:
-if ((goal > epoch) & (delta >= goal)) {
-/*
-* Right now, epoch overflow is handled as an edge
-* case. If we have already observed an epoch
-* generation, then we can be sure no hazardous
-* references exist to objects from this generation. We
-* can actually avoid an addtional scan step at this
-* point.
-*/
-break;
-}
 }

 /*
@@ -480,8 +508,16 @@ ck_epoch_synchronize(struct ck_epoch_record *record)
 * However, if non-temporal instructions are used, full barrier
 * semantics are necessary.
 */
+leave:
 ck_pr_fence_memory();
-record->epoch = delta;
 return;
 }

+void
+ck_epoch_synchronize(struct ck_epoch_record *record)
+{
+
+ck_epoch_synchronize_wait(record->global, NULL, NULL);
+return;
+}
+
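ck_epoch_synchronize() is now a thin wrapper that passes a NULL callback to ck_epoch_synchronize_wait(). A sketch of supplying a custom blocking hook instead (a userland-style assumption; yield_cb and my_global_epoch are hypothetical names):

#include <sched.h>
#include <ck_epoch.h>

/* Invoked each time the grace-period scan has to wait on the given record. */
static void
yield_cb(ck_epoch_t *epoch, ck_epoch_record_t *waiting_on, void *arg)
{
    (void)epoch;
    (void)waiting_on;
    (void)arg;
    sched_yield();
}

/* Usage: ck_epoch_synchronize_wait(&my_global_epoch, yield_cb, NULL); */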
@@ -494,6 +530,16 @@ ck_epoch_barrier(struct ck_epoch_record *record)
 return;
 }

+void
+ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
+void *ct)
+{
+
+ck_epoch_synchronize_wait(record->global, cb, ct);
+ck_epoch_reclaim(record);
+return;
+}
+
 /*
 * It may be worth it to actually apply these deferral semantics to an epoch
 * that was observed at ck_epoch_call time. The problem is that the latter
@@ -509,7 +555,6 @@ ck_epoch_poll(struct ck_epoch_record *record)
 {
 bool active;
 unsigned int epoch;
-unsigned int snapshot;
 struct ck_epoch_record *cr = NULL;
 struct ck_epoch *global = record->global;

@@ -533,12 +578,7 @@ ck_epoch_poll(struct ck_epoch_record *record)
 }

 /* If an active thread exists, rely on epoch observation. */
-if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
-&snapshot) == false) {
-record->epoch = snapshot;
-} else {
-record->epoch = epoch + 1;
-}
+(void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);

 ck_epoch_dispatch(record, epoch + 1);
 return true;