epoch(9): revert r340097 - no longer a need for multiple sections per CPU
I spoke with Samy Bahra; recent changes to CK, which make ck_epoch_call() and ck_epoch_poll() no longer modify the record, have eliminated the need for separate read and write records.
parent 635c18840a
commit 91cf497515
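To make the reasoning concrete: once ck_epoch_call() and ck_epoch_poll() stop mutating the record in a way that conflicts with in-flight read-side sections, a single ck_epoch_record_t can serve both sides. Below is a minimal userspace sketch of that combined usage against CK's public epoch API, matching the call signatures seen in the diff; struct obj, obj_destroy(), and the surrounding scaffolding are invented for illustration and are not part of this change.

    #include <stddef.h>
    #include <stdlib.h>
    #include <ck_epoch.h>

    static ck_epoch_t epoch;

    struct obj {
            int value;
            ck_epoch_entry_t entry;         /* embedded for deferred free */
    };

    static void
    obj_destroy(ck_epoch_entry_t *e)
    {
            /* Step back from the embedded entry to the enclosing object. */
            free((char *)e - offsetof(struct obj, entry));
    }

    int
    main(void)
    {
            ck_epoch_record_t record;       /* one record for both sides */
            ck_epoch_section_t section;
            struct obj *o = malloc(sizeof(*o));

            ck_epoch_init(&epoch);
            ck_epoch_register(&epoch, &record, NULL);

            /* Read-side critical section on the record... */
            ck_epoch_begin(&record, &section);
            /* ...loads of epoch-protected data would happen here... */
            ck_epoch_end(&record, &section);

            /* ...and deferred reclamation through the very same record. */
            ck_epoch_call(&record, &o->entry, obj_destroy);
            ck_epoch_barrier(&record);      /* wait, then run callbacks */
            return (0);
    }

The kernel code below does the same thing per CPU: epoch_currecord() selects the CPU-local epoch_record, and er_record now backs enter/exit, epoch_call(), and the callback-draining task alike.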
@@ -63,8 +63,7 @@ static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
 TAILQ_HEAD (epoch_tdlist, epoch_tracker);
 typedef struct epoch_record {
-	ck_epoch_record_t er_read_record;
-	ck_epoch_record_t er_write_record;
+	ck_epoch_record_t er_record;
 	volatile struct epoch_tdlist er_tdlist;
 	volatile uint32_t er_gen;
 	uint32_t er_cpuid;
@@ -176,8 +175,7 @@ epoch_ctor(epoch_t epoch)
 	CPU_FOREACH(cpu) {
 		er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
 		bzero(er, sizeof(*er));
-		ck_epoch_register(&epoch->e_epoch, &er->er_read_record, NULL);
-		ck_epoch_register(&epoch->e_epoch, &er->er_write_record, NULL);
+		ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
 		TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
 		er->er_cpuid = cpu;
 	}
@@ -262,7 +260,7 @@ epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et)
 	td->td_pre_epoch_prio = td->td_priority;
 	er = epoch_currecord(epoch);
 	TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
-	ck_epoch_begin(&er->er_read_record, &et->et_section);
+	ck_epoch_begin(&er->er_record, &et->et_section);
 	critical_exit();
 }

@@ -279,7 +277,7 @@ epoch_enter(epoch_t epoch)
 	td->td_epochnest++;
 	critical_enter();
 	er = epoch_currecord(epoch);
-	ck_epoch_begin(&er->er_read_record, NULL);
+	ck_epoch_begin(&er->er_record, NULL);
 }

 void
@@ -307,7 +305,7 @@ epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et)
 #ifdef INVARIANTS
 	et->et_td = (void*)0xDEADBEEF;
 #endif
-	ck_epoch_end(&er->er_read_record, &et->et_section);
+	ck_epoch_end(&er->er_record, &et->et_section);
 	TAILQ_REMOVE(&er->er_tdlist, et, et_link);
 	er->er_gen++;
 	if (__predict_false(td->td_pre_epoch_prio != td->td_priority))
@@ -326,7 +324,7 @@ epoch_exit(epoch_t epoch)
 	MPASS(td->td_epochnest);
 	td->td_epochnest--;
 	er = epoch_currecord(epoch);
-	ck_epoch_end(&er->er_read_record, NULL);
+	ck_epoch_end(&er->er_record, NULL);
 	critical_exit();
 }

@@ -346,7 +344,7 @@ epoch_block_handler_preempt(struct ck_epoch *global __unused,
 	int spincount, gen;
 	int locksheld __unused;

-	record = __containerof(cr, struct epoch_record, er_read_record);
+	record = __containerof(cr, struct epoch_record, er_record);
 	td = curthread;
 	locksheld = td->td_locks;
 	spincount = 0;
@@ -574,7 +572,7 @@ epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t
 	critical_enter();
 	*DPCPU_PTR(epoch_cb_count) += 1;
 	er = epoch_currecord(epoch);
-	ck_epoch_call(&er->er_write_record, cb, (ck_epoch_cb_t *)callback);
+	ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
 	critical_exit();
 	return;
 boottime:
@@ -598,7 +596,7 @@ epoch_call_task(void *arg __unused)
 		if (__predict_false((epoch = allepochs[i]) == NULL))
 			continue;
 		er = epoch_currecord(epoch);
-		record = &er->er_write_record;
+		record = &er->er_record;
 		if ((npending = record->n_pending) == 0)
 			continue;
 		ck_epoch_poll_deferred(record, &cb_stack);
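One detail worth noting in the epoch_block_handler_preempt() hunk: CK hands the handler a pointer to the embedded ck_epoch_record_t, and __containerof() steps back to the enclosing per-CPU epoch_record; with the read and write records merged there is again exactly one embedded member to step back from. A standalone sketch of the idiom (struct and function names invented for illustration):

    #include <stddef.h>

    struct inner {
            int state;
    };

    struct outer {
            long other_field;
            struct inner embedded;  /* the member handed out to callers */
    };

    /*
     * Equivalent of the kernel's __containerof(): subtract the member's
     * offset to recover the enclosing structure from a member pointer.
     */
    #define containerof(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct outer *
    outer_from_inner(struct inner *ip)
    {
            return (containerof(ip, struct outer, embedded));
    }

This only works when the caller knows which member the pointer refers to, which is exactly why merging er_read_record and er_write_record into a single er_record keeps the block handler unambiguous.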