epoch: skip poll function call in hardclock unless there are callbacks pending

Reported by:	mjg
Approved by:	sbruno
Matt Macy 2018-05-17 21:39:15 +00:00
parent 04b1905584
commit a5f1042498
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=333769
3 changed files with 13 additions and 17 deletions
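The change replaces an unconditional epoch_pcpu_poll() function call on every hardclock tick with an inline check of the per-CPU epoch_cb_count, so the common no-callbacks case costs only a predicted-not-taken branch. Below is a minimal userspace sketch of that pattern, not the kernel code itself; the names pending_count, deferred_work, and tick are hypothetical stand-ins for the DPCPU counter, GROUPTASK_ENQUEUE(), and hardclock_cpu()/hardclock_cnt().

/*
 * Illustrative sketch only (hypothetical names): a periodic tick that
 * reads a cheap pending-work counter and dispatches deferred work only
 * when something is actually queued, mirroring the commit's
 * check-then-enqueue in hardclock.
 */
#include <stdio.h>

#define	__predict_false(exp)	__builtin_expect((exp), 0)

static int pending_count;	/* stands in for the DPCPU epoch_cb_count */

static void
deferred_work(void)		/* stands in for GROUPTASK_ENQUEUE() */
{

	printf("draining %d pending callbacks\n", pending_count);
	pending_count = 0;
}

static void
tick(void)			/* stands in for hardclock_cpu()/hardclock_cnt() */
{

	/* Common case: nothing pending, so the tick pays only for a branch. */
	if (__predict_false(pending_count))
		deferred_work();
}

int
main(void)
{

	tick();			/* no callbacks: deferred_work() is skipped */
	pending_count = 3;	/* as if epoch_call() had queued callbacks */
	tick();			/* now the deferred path runs */
	return (0);
}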

sys/kern/kern_clock.c

@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/epoch.h>
+#include <sys/gtaskqueue.h>
 #include <sys/kdb.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
@@ -468,7 +469,8 @@ hardclock_cpu(int usermode)
 		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
 #endif
 	callout_process(sbinuptime());
-	epoch_pcpu_poll();
+	if (__predict_false(DPCPU_GET(epoch_cb_count)))
+		GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task));
 }
 
 /*
@@ -574,7 +576,8 @@ hardclock_cnt(int cnt, int usermode)
 	}
 	if (curcpu == CPU_FIRST())
 		cpu_tick_calibration();
-	epoch_pcpu_poll();
+	if (__predict_false(DPCPU_GET(epoch_cb_count)))
+		GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task));
 }
 
 void

sys/kern/subr_epoch.c

@@ -111,8 +111,8 @@ struct epoch {
 epoch_t allepochs[MAX_EPOCHS];
 
-static DPCPU_DEFINE(struct grouptask, cb_task);
-static DPCPU_DEFINE(int, cb_count);
+DPCPU_DEFINE(struct grouptask, epoch_cb_task);
+DPCPU_DEFINE(int, epoch_cb_count);
 
 static __read_mostly int domcount[MAXMEMDOM];
 static __read_mostly int domoffsets[MAXMEMDOM];
@@ -157,8 +157,8 @@ epoch_init(void *arg __unused)
 	}
 done:
 	CPU_FOREACH(cpu) {
-		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, cb_task), 0, epoch_call_task, NULL);
-		taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, cb_task), NULL, cpu, -1, "epoch call task");
+		GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
+		taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
 	}
 	inited = 1;
 	global_epoch = epoch_alloc();
@@ -533,7 +533,7 @@ epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t
 	counter_u64_add(epoch->e_frees, 1);
 
 	critical_enter();
-	*DPCPU_PTR(cb_count) += 1;
+	*DPCPU_PTR(epoch_cb_count) += 1;
 	eps = epoch->e_pcpu[curcpu];
 	ck_epoch_call(&eps->eps_record.er_record, cb, (ck_epoch_cb_t*)callback);
 	critical_exit();
@@ -566,7 +566,7 @@ epoch_call_task(void *arg __unused)
 		total += npending - record->n_pending;
 	}
 	epoch_exit_private(&section);
-	*DPCPU_PTR(cb_count) -= total;
+	*DPCPU_PTR(epoch_cb_count) -= total;
 	critical_exit();
 
 	head = ck_stack_batch_pop_npsc(&cb_stack);
@@ -578,14 +578,6 @@
 	}
 }
 
-void
-epoch_pcpu_poll(void)
-{
-
-	if (DPCPU_GET(cb_count))
-		GROUPTASK_ENQUEUE(DPCPU_PTR(cb_task));
-}
-
 int
 in_epoch(void)
 {
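Renaming cb_count/cb_task to epoch_cb_count/epoch_cb_task (and dropping static) is what lets hardclock test the state directly instead of calling into this file. The short-circuit stays correct because the counter is nonzero exactly when callbacks are queued: epoch_call() increments it as each callback is deferred, and epoch_call_task() subtracts however many it dispatched. A sketch of that invariant, single-CPU and with the hypothetical names cb_pending, queue_callback, and drain_callbacks:

/*
 * Illustrative sketch only (hypothetical names): the producer bumps a
 * pending counter per deferred callback; the drain task subtracts what
 * it processed, so a zero counter reliably means "nothing to do".
 */
#include <assert.h>

static int cb_pending;		/* models the per-CPU epoch_cb_count */

static void
queue_callback(void)		/* models epoch_call() deferring one callback */
{

	cb_pending += 1;
}

static int
drain_callbacks(void)		/* models epoch_call_task() */
{
	int total = cb_pending;	/* the kernel totals per-record n_pending deltas */

	cb_pending -= total;
	return (total);
}

int
main(void)
{

	queue_callback();
	queue_callback();
	assert(drain_callbacks() == 2);
	assert(cb_pending == 0);	/* hardclock's check now skips the enqueue */
	return (0);
}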

sys/sys/epoch.h

@@ -36,6 +36,8 @@ struct epoch;
 typedef struct epoch *epoch_t;
 
 extern epoch_t global_epoch;
+DPCPU_DECLARE(int, epoch_cb_count);
+DPCPU_DECLARE(struct grouptask, epoch_cb_task);
 
 struct epoch_context {
 	void   *data[2];
@@ -49,7 +51,6 @@ void epoch_enter_internal(epoch_t epoch, struct thread *td);
 void epoch_exit_internal(epoch_t epoch, struct thread *td);
 void epoch_wait(epoch_t epoch);
 void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t));
-void epoch_pcpu_poll(void);
 int in_epoch(void);
 
 static __inline void