Make taskqgroup_attach{,_cpu}(9) work across architectures
So far, intr_{g,s}etaffinity(9) take a single int for identifying a device interrupt. This approach doesn't work on all supported architectures, as a single int isn't sufficient to globally specify a device interrupt. In particular, with multiple interrupt controllers in one system, as found on e.g. arm and arm64 machines, an interrupt number as returned by rman_get_start(9) may be unique only relative to the bus - and, thus, the interrupt controller - a certain device hangs off of.

In turn, this makes taskqgroup_attach{,_cpu}(9) and - internal to the gtaskqueue implementation - taskqgroup_attach_deferred{,_cpu}() not work across architectures. In further consequence, iflib(4) as a gtaskqueue consumer so far doesn't fit architectures where interrupt numbers aren't globally unique.

However, at least for intr_setaffinity(..., CPU_WHICH_IRQ, ...) as employed by the gtaskqueue implementation to bind an interrupt to a particular CPU, using bus_bind_intr(9) instead is equivalent from a functional point of view, with bus_bind_intr(9) taking the device and interrupt resource arguments required for uniquely specifying a device interrupt.

Thus, change the gtaskqueue implementation to employ bus_bind_intr(9) instead, and change taskqgroup_attach{,_cpu}(9) to take the device and interrupt resource arguments required for that.

This change also moves struct grouptask from <sys/_task.h> to <sys/gtaskqueue.h> and wraps struct gtask along with the gtask_fn_t typedef in #ifdef _KERNEL, as userland likes to include <sys/_task.h> or indirectly drags it in - for better or worse also with _KERNEL defined -, which otherwise would no longer be as easily possible now that struct grouptask depends on device_t and struct resource. The userland inclusion problem can probably be improved a bit by introducing a _WANT_TASK (as well as a _WANT_MOUNT) akin to the existing _WANT_PRISON etc.; that is orthogonal to this change, though, and likely needs an exp-run.

While at it:
- Change the gt_cpu member of struct grouptask to be of type int, as used elsewhere for specifying CPUs (an int16_t may be too narrow sooner or later),
- move the gtaskqueue_enqueue_fn typedef from <sys/gtaskqueue.h> into the gtaskqueue implementation, as it's only used and needed there,
- change the GTASK_INIT macro to use "gtask" rather than "task" as its argument name, given that it actually operates on a struct gtask rather than a struct task, and
- let subr_gtaskqueue.c consistently use __func__ when printing function names.

Reported by:	mmel
Reviewed by:	mmel
Differential Revision:	https://reviews.freebsd.org/D19139
commit 68256995cd
parent 8e2074c2e1
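To illustrate the interface change described in the commit message, here is a minimal consumer-side sketch; it is not part of the commit, the foo_* names are hypothetical, and error handling is trimmed. A driver that used to identify its interrupt by the bare number obtained via rman_get_start(9) now hands over its device_t and the interrupt's struct resource, and the gtaskqueue code binds the interrupt with bus_bind_intr(9) rather than by building a cpuset and calling intr_setaffinity(9):

/* Hypothetical consumer sketch -- not taken from the diff below. */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/gtaskqueue.h>

static int
foo_setup_queue_task(struct taskqgroup *tqg, struct grouptask *gt, void *uniq,
    int cpu, device_t dev, struct resource *irq_res)
{

    /*
     * Old interface: the interrupt was passed as a bare int, typically
     * rman_get_start(irq_res), which is only unique relative to the
     * parent bus on systems with several interrupt controllers:
     *
     *     taskqgroup_attach_cpu(tqg, gt, uniq, cpu,
     *         rman_get_start(irq_res), "foo");
     *
     * New interface: pass the device and the interrupt resource so the
     * gtaskqueue code can call bus_bind_intr(dev, irq_res, cpu) instead
     * of intr_setaffinity(irq_num, CPU_WHICH_IRQ, &mask).
     */
    return (taskqgroup_attach_cpu(tqg, gt, uniq, cpu, dev, irq_res,
        "foo"));
}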
@@ -109,7 +109,7 @@ tasklet_subsystem_init(void *arg __unused)
         GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
         snprintf(buf, sizeof(buf), "softirq%d", i);
         taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
-            "tasklet", i, -1, buf);
+            "tasklet", i, NULL, NULL, buf);
     }
 }
 SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);
@@ -147,7 +147,7 @@ epoch_init(void *arg __unused)
         GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
             epoch_call_task, NULL);
         taskqgroup_attach_cpu(qgroup_softirq,
-            DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1,
+            DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
             "epoch call task");
     }
     inited = 1;
@@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/systm.h>
 #include <sys/bus.h>
 #include <sys/cpuset.h>
-#include <sys/interrupt.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/libkern.h>
@@ -64,6 +63,8 @@ struct gtaskqueue_busy {
 
 static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;
 
+typedef void (*gtaskqueue_enqueue_fn)(void *context);
+
 struct gtaskqueue {
     STAILQ_HEAD(, gtask) tq_queue;
     gtaskqueue_enqueue_fn tq_enqueue;
@@ -681,7 +682,7 @@ taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
         }
     }
     if (idx == -1)
-        panic("taskqgroup_find: Failed to pick a qid.");
+        panic("%s: failed to pick a qid.", __func__);
 
     return (idx);
 }
@@ -713,13 +714,13 @@ SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
 
 void
 taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
-    void *uniq, int irq, const char *name)
+    void *uniq, device_t dev, struct resource *irq, const char *name)
 {
-    cpuset_t mask;
-    int qid, error;
+    int cpu, qid, error;
 
     gtask->gt_uniq = uniq;
     snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
+    gtask->gt_dev = dev;
     gtask->gt_irq = irq;
     gtask->gt_cpu = -1;
     mtx_lock(&qgroup->tqg_lock);
@@ -727,14 +728,14 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
     qgroup->tqg_queue[qid].tgc_cnt++;
     LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
     gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
-    if (irq != -1 && tqg_smp_started) {
-        gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
-        CPU_ZERO(&mask);
-        CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
+    if (dev != NULL && irq != NULL && tqg_smp_started) {
+        cpu = qgroup->tqg_queue[qid].tgc_cpu;
+        gtask->gt_cpu = cpu;
         mtx_unlock(&qgroup->tqg_lock);
-        error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+        error = bus_bind_intr(dev, irq, cpu);
         if (error)
-            printf("%s: setaffinity failed for %s: %d\n", __func__, gtask->gt_name, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     } else
         mtx_unlock(&qgroup->tqg_lock);
 }
@@ -742,27 +743,22 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
 static void
 taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
 {
-    cpuset_t mask;
     int qid, cpu, error;
 
     mtx_lock(&qgroup->tqg_lock);
     qid = taskqgroup_find(qgroup, gtask->gt_uniq);
     cpu = qgroup->tqg_queue[qid].tgc_cpu;
-    if (gtask->gt_irq != -1) {
+    if (gtask->gt_dev != NULL && gtask->gt_irq != NULL) {
         mtx_unlock(&qgroup->tqg_lock);
-
-        CPU_ZERO(&mask);
-        CPU_SET(cpu, &mask);
-        error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
+        error = bus_bind_intr(gtask->gt_dev, gtask->gt_irq, cpu);
         mtx_lock(&qgroup->tqg_lock);
         if (error)
-            printf("%s: %s setaffinity failed: %d\n", __func__, gtask->gt_name, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
 
     }
     qgroup->tqg_queue[qid].tgc_cnt++;
-
-    LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
-        gt_list);
+    LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
     MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
     gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
     mtx_unlock(&qgroup->tqg_lock);
@@ -770,14 +766,14 @@ taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
 
 int
 taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
-    void *uniq, int cpu, int irq, const char *name)
+    void *uniq, int cpu, device_t dev, struct resource *irq, const char *name)
 {
-    cpuset_t mask;
     int i, qid, error;
 
     qid = -1;
     gtask->gt_uniq = uniq;
     snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
+    gtask->gt_dev = dev;
     gtask->gt_irq = irq;
     gtask->gt_cpu = cpu;
     mtx_lock(&qgroup->tqg_lock);
@@ -800,12 +796,11 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
     cpu = qgroup->tqg_queue[qid].tgc_cpu;
     mtx_unlock(&qgroup->tqg_lock);
 
-    CPU_ZERO(&mask);
-    CPU_SET(cpu, &mask);
-    if (irq != -1 && tqg_smp_started) {
-        error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+    if (dev != NULL && irq != NULL && tqg_smp_started) {
+        error = bus_bind_intr(dev, irq, cpu);
         if (error)
-            printf("%s: setaffinity failed: %d\n", __func__, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     }
     return (0);
 }
@@ -813,10 +808,12 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
 static int
 taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
 {
-    cpuset_t mask;
-    int i, qid, irq, cpu, error;
+    device_t dev;
+    struct resource *irq;
+    int cpu, error, i, qid;
 
     qid = -1;
+    dev = gtask->gt_dev;
     irq = gtask->gt_irq;
     cpu = gtask->gt_cpu;
     MPASS(tqg_smp_started);
@@ -837,13 +834,11 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtas
     gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
     mtx_unlock(&qgroup->tqg_lock);
 
-    CPU_ZERO(&mask);
-    CPU_SET(cpu, &mask);
-
-    if (irq != -1) {
-        error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
+    if (dev != NULL && irq != NULL) {
+        error = bus_bind_intr(dev, irq, cpu);
         if (error)
-            printf("%s: setaffinity failed: %d\n", __func__, error);
+            printf("%s: binding interrupt failed for %s: %d\n",
+                __func__, gtask->gt_name, error);
     }
     return (0);
 }
@@ -859,7 +854,7 @@ taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
         if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
             break;
     if (i == qgroup->tqg_cnt)
-        panic("taskqgroup_detach: task %s not in group\n", gtask->gt_name);
+        panic("%s: task %s not in group", __func__, gtask->gt_name);
     qgroup->tqg_queue[i].tgc_cnt--;
     LIST_REMOVE(gtask, gt_list);
     mtx_unlock(&qgroup->tqg_lock);
@@ -882,8 +877,7 @@ taskqgroup_binder(void *ctx)
     thread_unlock(curthread);
 
     if (error)
-        printf("%s: setaffinity failed: %d\n", __func__,
-            error);
+        printf("%s: binding curthread failed: %d\n", __func__, error);
     free(gtask, M_DEVBUF);
 }
 
@@ -1051,15 +1045,16 @@ taskqgroup_destroy(struct taskqgroup *qgroup)
 
 void
 taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
-	const char *name)
+    const char *name)
 {
+
     GROUPTASK_INIT(gtask, 0, fn, ctx);
-    taskqgroup_attach(qgroup_config, gtask, gtask, -1, name);
+    taskqgroup_attach(qgroup_config, gtask, gtask, NULL, NULL, name);
 }
 
 void
 taskqgroup_config_gtask_deinit(struct grouptask *gtask)
 {
 
     taskqgroup_detach(qgroup_config, gtask);
 }
@@ -4481,7 +4481,8 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
 
     GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
     /* XXX format name */
-    taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
+    taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
+        NULL, NULL, "admin");
 
     /* Set up cpu set. If it fails, use the set of all CPUs. */
     if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
@@ -4742,7 +4743,8 @@ iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
 
     GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
     /* XXX format name */
-    taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
+    taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
+        NULL, NULL, "admin");
 
     /* XXX --- can support > 1 -- but keep it simple for now */
     scctx->isc_intr = IFLIB_INTR_LEGACY;
@@ -5634,19 +5636,22 @@ get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
 
 /* Just to avoid copy/paste */
 static inline int
-iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
-    struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
+iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
+    int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
+    const char *name)
 {
-    int cpuid;
-    int err, tid;
+    device_t dev;
+    int err, cpuid, tid;
 
+    dev = ctx->ifc_dev;
     cpuid = find_nth(ctx, qid);
     tid = get_core_offset(ctx, type, qid);
     MPASS(tid >= 0);
     cpuid = find_close_core(cpuid, tid);
-    err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
+    err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
+        name);
     if (err) {
-        device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
+        device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
         return (err);
     }
 #ifdef notyet
@@ -5661,6 +5666,7 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
     iflib_intr_type_t type, driver_filter_t *filter,
     void *filter_arg, int qid, const char *name)
 {
+    device_t dev;
     struct grouptask *gtask;
     struct taskqgroup *tqg;
     iflib_filter_info_t info;
@@ -5720,20 +5726,22 @@ iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
     info->ifi_task = gtask;
     info->ifi_ctx = q;
 
+    dev = ctx->ifc_dev;
     err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
     if (err != 0) {
-        device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
+        device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
         return (err);
     }
     if (type == IFLIB_INTR_ADMIN)
         return (0);
 
     if (tqrid != -1) {
-        err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
+        err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
+            q, name);
         if (err)
             return (err);
     } else {
-        taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
+        taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
     }
 
     return (0);
@@ -5746,7 +5754,6 @@ iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
     struct taskqgroup *tqg;
     gtask_fn_t *fn;
     void *q;
-    int irq_num = -1;
     int err;
 
     switch (type) {
@@ -5755,16 +5762,12 @@ iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
         gtask = &ctx->ifc_txqs[qid].ift_task;
         tqg = qgroup_if_io_tqg;
         fn = _task_fn_tx;
-        if (irq != NULL)
-            irq_num = rman_get_start(irq->ii_res);
         break;
     case IFLIB_INTR_RX:
         q = &ctx->ifc_rxqs[qid];
         gtask = &ctx->ifc_rxqs[qid].ifr_task;
         tqg = qgroup_if_io_tqg;
         fn = _task_fn_rx;
-        if (irq != NULL)
-            irq_num = rman_get_start(irq->ii_res);
         break;
     case IFLIB_INTR_IOV:
         q = ctx;
@@ -5776,13 +5779,14 @@ iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
         panic("unknown net intr type");
     }
     GROUPTASK_INIT(gtask, 0, fn, q);
-    if (irq_num != -1) {
-        err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name);
+    if (irq != NULL) {
+        err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
+            q, name);
         if (err)
-            taskqgroup_attach(tqg, gtask, q, irq_num, name);
-    }
-    else {
-        taskqgroup_attach(tqg, gtask, q, irq_num, name);
+            taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
+                irq->ii_res, name);
+    } else {
+        taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
     }
 }
 
@@ -5805,7 +5809,9 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
     iflib_rxq_t rxq = ctx->ifc_rxqs;
     if_irq_t irq = &ctx->ifc_legacy_irq;
     iflib_filter_info_t info;
+    device_t dev;
     struct grouptask *gtask;
+    struct resource *res;
     struct taskqgroup *tqg;
     gtask_fn_t *fn;
     int tqrid;
@@ -5825,14 +5831,17 @@ iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *
     info->ifi_task = gtask;
     info->ifi_ctx = ctx;
 
+    dev = ctx->ifc_dev;
     /* We allocate a single interrupt resource */
     if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0)
         return (err);
     GROUPTASK_INIT(gtask, 0, fn, q);
-    taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
+    res = irq->ii_res;
+    taskqgroup_attach(tqg, gtask, q, dev, res, name);
 
     GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
-    taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
+    taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
+        "tx");
     return (0);
 }
 
@@ -5882,7 +5891,8 @@ void
 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
 {
 
-    taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
+    taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
+        name);
 }
 
 void
@@ -5891,7 +5901,8 @@ iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
 {
 
     GROUPTASK_INIT(gtask, 0, fn, ctx);
-    taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
+    taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
+        name);
 }
 
 void
@@ -44,7 +44,6 @@
  * (q) taskqueue lock
  */
 typedef void task_fn_t(void *context, int pending);
-typedef void gtask_fn_t(void *context);
 
 struct task {
     STAILQ_ENTRY(task) ta_link;    /* (q) link for queue */
@@ -54,6 +53,10 @@ struct task {
     void *ta_context;              /* (c) argument for handler */
 };
 
+#ifdef _KERNEL
+
+typedef void gtask_fn_t(void *context);
+
 struct gtask {
     STAILQ_ENTRY(gtask) ta_link;   /* (q) link for queue */
     uint16_t ta_flags;             /* (q) state flags */
@@ -62,15 +65,6 @@ struct gtask {
     void *ta_context;              /* (c) argument for handler */
 };
 
-struct grouptask {
-    struct gtask gt_task;
-    void *gt_taskqueue;
-    LIST_ENTRY(grouptask) gt_list;
-    void *gt_uniq;
-#define GROUPTASK_NAMELEN 32
-    char gt_name[GROUPTASK_NAMELEN];
-    int16_t gt_irq;
-    int16_t gt_cpu;
-};
+#endif /* _KERNEL */
 
 #endif /* !_SYS__TASK_H_ */
@@ -31,20 +31,35 @@
 
 #ifndef _SYS_GTASKQUEUE_H_
 #define _SYS_GTASKQUEUE_H_
-#include <sys/taskqueue.h>
 
 #ifndef _KERNEL
 #error "no user-serviceable parts inside"
 #endif
 
+#include <sys/_task.h>
+#include <sys/bus.h>
+#include <sys/taskqueue.h>
+#include <sys/types.h>
+
 struct gtaskqueue;
-typedef void (*gtaskqueue_enqueue_fn)(void *context);
 
 /*
  * Taskqueue groups. Manages dynamic thread groups and irq binding for
  * device and other tasks.
  */
 
+struct grouptask {
+    struct gtask gt_task;
+    void *gt_taskqueue;
+    LIST_ENTRY(grouptask) gt_list;
+    void *gt_uniq;
+#define GROUPTASK_NAMELEN 32
+    char gt_name[GROUPTASK_NAMELEN];
+    device_t gt_dev;
+    struct resource *gt_irq;
+    int gt_cpu;
+};
+
 void gtaskqueue_block(struct gtaskqueue *queue);
 void gtaskqueue_unblock(struct gtaskqueue *queue);
 
@@ -55,28 +70,29 @@ void gtaskqueue_drain_all(struct gtaskqueue *queue);
 void grouptask_block(struct grouptask *grouptask);
 void grouptask_unblock(struct grouptask *grouptask);
 int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task);
 
 void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *grptask,
-    void *uniq, int irq, const char *name);
-int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask,
-    void *uniq, int cpu, int irq, const char *name);
+    void *uniq, device_t dev, struct resource *irq, const char *name);
+int taskqgroup_attach_cpu(struct taskqgroup *qgroup,
+    struct grouptask *grptask, void *uniq, int cpu, device_t dev,
+    struct resource *irq, const char *name);
 void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask);
 struct taskqgroup *taskqgroup_create(const char *name);
 void taskqgroup_destroy(struct taskqgroup *qgroup);
 int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
-void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
-    const char *name);
+void taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask,
+    gtask_fn_t *fn, const char *name);
 void taskqgroup_config_gtask_deinit(struct grouptask *gtask);
 
 #define TASK_ENQUEUED   0x1
 #define TASK_SKIP_WAKEUP 0x2
 #define TASK_NOENQUEUE  0x4
 
-#define GTASK_INIT(task, flags, priority, func, context) do { \
-    (task)->ta_flags = flags; \
-    (task)->ta_priority = (priority); \
-    (task)->ta_func = (func); \
-    (task)->ta_context = (context); \
+#define GTASK_INIT(gtask, flags, priority, func, context) do { \
+    (gtask)->ta_flags = flags; \
+    (gtask)->ta_priority = (priority); \
+    (gtask)->ta_func = (func); \
+    (gtask)->ta_context = (context); \
 } while (0)
 
 #define GROUPTASK_INIT(gtask, priority, func, context) \
@@ -60,7 +60,7 @@
  * in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1300011  /* Master, propagated to newvers */
+#define __FreeBSD_version 1300012  /* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,