"fast swi" taskqueue support. This is a taskqueue that uses spinlocks

making it useful for dispatching swi tasks from fast interrupt handlers.

Sponsored by:	FreeBSD Foundation
Sam Leffler 2003-09-05 23:09:22 +00:00
parent 7c00e355a2
commit f82c9e70f9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=119789
3 changed files with 131 additions and 3 deletions
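
A minimal usage sketch, not part of this diff: a hypothetical driver
initialises a task at attach time and dispatches it from its fast
interrupt handler through the new taskqueue_enqueue_fast(). The names
mydev_softc, mydev_task_func and mydev_intr are illustrative, not from
this commit.

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/taskqueue.h>

	struct mydev_softc {
		struct task	sc_task;
	};

	/* Deferred work; runs later from the fast swi.  `pending' is the
	 * number of enqueues coalesced since the task last ran. */
	static void
	mydev_task_func(void *context, int pending)
	{
		struct mydev_softc *sc = context;

		/* ... do the real work with sc ... */
		(void)sc;
	}

	/* At attach time: TASK_INIT(&sc->sc_task, 0, mydev_task_func, sc); */

	/* Fast interrupt handler: defer the work instead of doing it here. */
	static void
	mydev_intr(void *arg)
	{
		struct mydev_softc *sc = arg;

		/* Safe from a fast handler: taskqueue_fast is spin-locked. */
		taskqueue_enqueue_fast(taskqueue_fast, &sc->sc_task);
	}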

View File

@@ -272,3 +272,121 @@ TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
TASKQUEUE_DEFINE(thread, taskqueue_thread_enqueue, 0,
		 kthread_create(taskqueue_kthread, NULL,
		 &taskqueue_thread_proc, RFNOWAIT, 0, "taskqueue"));

int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock_spin(&queue->tq_mutex);

	/*
	 * Don't allow new tasks on a queue which is being freed.
	 */
	if (queue->tq_draining) {
		mtx_unlock_spin(&queue->tq_mutex);
		return EPIPE;
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock_spin(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock_spin(&queue->tq_mutex);

	return 0;
}

static void
taskqueue_run_fast(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock_spin(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		mtx_unlock_spin(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock_spin(&queue->tq_mutex);
	}
	mtx_unlock_spin(&queue->tq_mutex);
}

struct taskqueue *taskqueue_fast;
static void	*taskqueue_fast_ih;

static void
taskqueue_fast_schedule(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run_fast(taskqueue_fast);
}

static void
taskqueue_define_fast(void *arg)
{
	taskqueue_fast = malloc(sizeof(struct taskqueue),
	    M_TASKQUEUE, M_NOWAIT | M_ZERO);
	if (!taskqueue_fast) {
		printf("%s: Unable to allocate fast task queue!\n", __func__);
		return;
	}

	STAILQ_INIT(&taskqueue_fast->tq_queue);
	taskqueue_fast->tq_name = "fast";
	taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
	mtx_init(&taskqueue_fast->tq_mutex, "taskqueue", NULL, MTX_SPIN);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	swi_add(NULL, "Fast task queue", taskqueue_fast_run,
	    NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
}
SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
    taskqueue_define_fast, NULL);

View File

@@ -94,9 +94,10 @@ struct ithd {
 #define SWI_CAMNET	2
 #define SWI_CAMBIO	3
 #define SWI_VM		4
-#define SWI_TQ_GIANT	5
-#define SWI_TQ		6
-#define SWI_CLOCK	7
+#define SWI_TQ_FAST	5
+#define SWI_TQ_GIANT	6
+#define SWI_TQ		7
+#define SWI_CLOCK	8
 
 extern struct ithd *tty_ithd;
 extern struct ithd *clk_ithd;

View File

@@ -120,4 +120,13 @@ TASKQUEUE_DECLARE(swi);
 */
TASKQUEUE_DECLARE(thread);

/*
 * Queue for swi handlers dispatched from fast interrupt handlers.
 * These are necessarily different from the above because the queue
 * must be locked with spinlocks since sleep mutexes cannot be used
 * from a fast interrupt handler context.
 */
TASKQUEUE_DECLARE(fast);
int	taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task);

#endif /* !_SYS_TASKQUEUE_H_ */
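
To make the comment's distinction concrete, a small sketch reusing the
hypothetical mydev names from the sketch above: from a fast interrupt
handler, only the spin-locked fast queue may be used.

	/* Registered with INTR_FAST; runs in fast interrupt context. */
	static void
	mydev_intr(void *arg)
	{
		struct mydev_softc *sc = arg;

		/*
		 * taskqueue_enqueue(taskqueue_swi, &sc->sc_task) would be
		 * wrong here: taskqueue_swi is guarded by a sleep mutex,
		 * which must not be acquired from a fast interrupt handler.
		 */
		taskqueue_enqueue_fast(taskqueue_fast, &sc->sc_task);
	}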