Fix taskqueue_drain(9) to not have false negatives.  For threaded
taskqueues, more than one task can be running simultaneously.

Also make taskqueue_run(9) static to the file, since there are no
consumers in the base kernel and the function signature needs to change
with this fix.

Remove mention of taskqueue_run(9) and taskqueue_run_fast(9) from the
taskqueue(9) man page.
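
To picture the false negative (an illustration, not part of the change itself): queue->tq_running is a single pointer shared by all of a queue's threads, so it tracks at most one of the tasks that are currently running, and it is cleared whenever any one of them finishes. The old wait condition in taskqueue_drain(), visible in the subr_taskqueue.c hunks below, can therefore stop waiting while the drained task is still executing on another thread:

	/*
	 * Sketch of the pre-fix check for a queue with two threads.
	 * Thread A is still running "task", but thread B has since
	 * dequeued another task, so queue->tq_running no longer points
	 * at "task" and ta_pending has already dropped to 0 -- the loop
	 * below exits too early.
	 */
	while (task->ta_pending != 0 || task == queue->tq_running)
		msleep(task, &queue->tq_mutex, PWAIT, "-", 0);

With this change each consumer of taskqueue_run() supplies its own running slot and the task records which slot it was handed to (ta_running), so the drain check consults the slot of the thread that actually dequeued the task.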

Reviewed by:    jhb
Approved by:    zml (mentor)
mdf 2010-07-22 16:41:09 +00:00
parent f6fa9f4a32
commit fa23fa820a
4 changed files with 49 additions and 39 deletions

share/man/man9/taskqueue.9

@@ -64,10 +64,6 @@ struct task {
 .Ft int
 .Fn taskqueue_enqueue_fast "struct taskqueue *queue" "struct task *task"
-.Ft void
-.Fn taskqueue_run "struct taskqueue *queue"
-.Ft void
-.Fn taskqueue_run_fast "struct taskqueue *queue"
 .Ft void
 .Fn taskqueue_drain "struct taskqueue *queue" "struct task *task"
 .Ft int
 .Fn taskqueue_member "struct taskqueue *queue" "struct thread *td"
@@ -143,12 +139,6 @@ when the enqueuing must happen from a fast interrupt handler.
 This method uses spin locks to avoid the possibility of sleeping in the fast
 interrupt context.
 .Pp
-To execute all the tasks on a queue,
-call
-.Fn taskqueue_run
-or
-.Fn taskqueue_run_fast
-depending on the flavour of the queue.
 When a task is executed,
 first it is removed from the queue,
 the value of
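
For context, the interface the page continues to document can be exercised as below (an illustrative consumer with made-up names, not an excerpt from the manual): create a queue backed by kernel threads, initialize a task, enqueue it, and drain it before the queue and the task's context go away.

/*
 * Illustrative consumer; all "example_*" names are invented and this
 * is not taken from the manual page.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

static struct taskqueue *example_tq;
static struct task example_task;

static void
example_handler(void *arg, int pending)
{
	/* "pending" counts how many enqueues were coalesced into this run. */
}

static void
example_attach(void)
{
	TASK_INIT(&example_task, 0, example_handler, NULL);
	example_tq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 2, PWAIT, "example taskq");
	taskqueue_enqueue(example_tq, &example_task);
}

static void
example_detach(void)
{
	/* Wait for any queued or still-running instance before tearing down. */
	taskqueue_drain(example_tq, &example_task);
	taskqueue_free(example_tq);
}

With more than one thread serving the queue, the reliability of that final taskqueue_drain() call is exactly what this commit restores.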

sys/kern/subr_taskqueue.c

@@ -63,6 +63,8 @@ struct taskqueue {
 #define TQ_FLAGS_BLOCKED (1 << 1)
 #define TQ_FLAGS_PENDING (1 << 2)
 
+static void taskqueue_run(struct taskqueue *, struct task **);
+
 static __inline void
 TQ_LOCK(struct taskqueue *tq)
 {
@@ -139,7 +141,7 @@ taskqueue_free(struct taskqueue *queue)
 
 	TQ_LOCK(queue);
 	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
-	taskqueue_run(queue);
+	taskqueue_run(queue, &queue->tq_running);
 	taskqueue_terminate(queue->tq_threads, queue);
 	mtx_destroy(&queue->tq_mutex);
 	free(queue->tq_threads, M_TASKQUEUE);
@@ -215,15 +217,14 @@ taskqueue_unblock(struct taskqueue *queue)
 	TQ_UNLOCK(queue);
 }
 
-void
-taskqueue_run(struct taskqueue *queue)
+static void
+taskqueue_run(struct taskqueue *queue, struct task **tpp)
 {
-	struct task *task;
-	int owned, pending;
+	struct task *task, *running;
+	int pending;
 
-	owned = mtx_owned(&queue->tq_mutex);
-	if (!owned)
-		TQ_LOCK(queue);
+	mtx_assert(&queue->tq_mutex, MA_OWNED);
+	running = NULL;
 	while (STAILQ_FIRST(&queue->tq_queue)) {
 		/*
 		 * Carefully remove the first task from the queue and
@@ -233,22 +234,16 @@ taskqueue_run(struct taskqueue *queue)
 		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
 		pending = task->ta_pending;
 		task->ta_pending = 0;
-		queue->tq_running = task;
+		task->ta_running = tpp;
+		*tpp = task;
 		TQ_UNLOCK(queue);
 
 		task->ta_func(task->ta_context, pending);
 
 		TQ_LOCK(queue);
-		queue->tq_running = NULL;
+		*tpp = NULL;
 		wakeup(task);
 	}
-
-	/*
-	 * For compatibility, unlock on return if the queue was not locked
-	 * on entry, although this opens a race window.
-	 */
-	if (!owned)
-		TQ_UNLOCK(queue);
 }
 
 void
@@ -256,15 +251,19 @@ taskqueue_drain(struct taskqueue *queue, struct task *task)
 {
 	if (queue->tq_spin) { /* XXX */
 		mtx_lock_spin(&queue->tq_mutex);
-		while (task->ta_pending != 0 || task == queue->tq_running)
+		while (task->ta_pending != 0 ||
+		    (task->ta_running != NULL && task == *task->ta_running)) {
 			msleep_spin(task, &queue->tq_mutex, "-", 0);
+		}
 		mtx_unlock_spin(&queue->tq_mutex);
 	} else {
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
 		mtx_lock(&queue->tq_mutex);
-		while (task->ta_pending != 0 || task == queue->tq_running)
+		while (task->ta_pending != 0 ||
+		    (task->ta_running != NULL && task == *task->ta_running)) {
 			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
+		}
 		mtx_unlock(&queue->tq_mutex);
 	}
 }
@@ -278,7 +277,9 @@ taskqueue_swi_enqueue(void *context)
 static void
 taskqueue_swi_run(void *dummy)
 {
-	taskqueue_run(taskqueue_swi);
+	TQ_LOCK(taskqueue_swi);
+	taskqueue_run(taskqueue_swi, &taskqueue_swi->tq_running);
+	TQ_UNLOCK(taskqueue_swi);
 }
 
 static void
@@ -290,7 +291,9 @@ taskqueue_swi_giant_enqueue(void *context)
 static void
 taskqueue_swi_giant_run(void *dummy)
 {
-	taskqueue_run(taskqueue_swi_giant);
+	TQ_LOCK(taskqueue_swi_giant);
+	taskqueue_run(taskqueue_swi_giant, &taskqueue_swi_giant->tq_running);
+	TQ_UNLOCK(taskqueue_swi_giant);
 }
 
 int
@@ -352,12 +355,22 @@ void
 taskqueue_thread_loop(void *arg)
 {
 	struct taskqueue **tqp, *tq;
+	struct task *running;
 
+	/*
+	 * The kernel stack space is globally addressable, and it would
+	 * be an error to ask whether a task is running after the
+	 * taskqueue has been released. So it is safe to have the
+	 * task point back to an address in the taskqueue's stack to
+	 * determine if the task is running.
+	 */
+	running = NULL;
+
 	tqp = arg;
 	tq = *tqp;
 	TQ_LOCK(tq);
 	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
-		taskqueue_run(tq);
+		taskqueue_run(tq, &running);
 		/*
 		 * Because taskqueue_run() can drop tq_mutex, we need to
 		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
@@ -423,7 +436,9 @@ taskqueue_fast_enqueue(void *context)
 static void
 taskqueue_fast_run(void *dummy)
 {
-	taskqueue_run(taskqueue_fast);
+	TQ_LOCK(taskqueue_fast);
+	taskqueue_run(taskqueue_fast, &taskqueue_fast->tq_running);
+	TQ_UNLOCK(taskqueue_fast);
 }
 
 TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
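
The hand-off protocol introduced above is small enough to model outside the kernel. Below is a minimal userspace analogue (a sketch assuming POSIX threads; every name in it is invented for illustration and none of it is the kernel API): each worker owns a "running" slot, a dequeued task records which slot it was placed in, and drain() waits until the task is neither pending nor still sitting in that slot.

/*
 * Userspace analogue of the per-consumer running slot.  Illustrative
 * only; these names are not the FreeBSD taskqueue(9) API.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct task {
	void	(*t_func)(void *);
	void	*t_arg;
	int	t_pending;		/* queued, not yet dequeued */
	struct task **t_running;	/* slot of the worker running it */
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static struct task *q_head;		/* one-deep "queue", for brevity */

static void *
worker(void *slotp)
{
	struct task **slot = slotp;	/* this worker's running slot */
	struct task *t;

	pthread_mutex_lock(&q_lock);
	for (;;) {
		while ((t = q_head) == NULL)
			pthread_cond_wait(&q_cv, &q_lock);
		q_head = NULL;
		t->t_pending = 0;
		t->t_running = slot;	/* like task->ta_running = tpp */
		*slot = t;		/* like *tpp = task */
		pthread_mutex_unlock(&q_lock);
		t->t_func(t->t_arg);	/* run the handler unlocked */
		pthread_mutex_lock(&q_lock);
		*slot = NULL;		/* like *tpp = NULL plus wakeup() */
		pthread_cond_broadcast(&q_cv);
	}
}

static void
enqueue(struct task *t)
{
	pthread_mutex_lock(&q_lock);
	t->t_pending = 1;
	q_head = t;
	pthread_cond_broadcast(&q_cv);
	pthread_mutex_unlock(&q_lock);
}

static void
drain(struct task *t)
{
	pthread_mutex_lock(&q_lock);
	/* Same shape as the fixed taskqueue_drain() wait condition. */
	while (t->t_pending != 0 ||
	    (t->t_running != NULL && *t->t_running == t))
		pthread_cond_wait(&q_cv, &q_lock);
	pthread_mutex_unlock(&q_lock);
}

static void
slow_handler(void *arg)
{
	sleep(1);			/* pretend to do real work */
	puts("handler finished");
}

int
main(void)
{
	static struct task *slots[2];	/* one running slot per worker */
	pthread_t tid[2];
	struct task t = { slow_handler, NULL, 0, NULL };
	int i;

	for (i = 0; i < 2; i++)
		pthread_create(&tid[i], NULL, worker, &slots[i]);
	enqueue(&t);
	drain(&t);	/* returns only after slow_handler() has finished */
	puts("drained");
	return (0);
}

Because each worker publishes into its own slot, a drain on one task can no longer be satisfied by a different worker picking up or finishing some other task, which is the false negative this commit removes.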

sys/sys/_task.h

@@ -36,15 +36,20 @@
  * taskqueue_run(). The first argument is taken from the 'ta_context'
  * field of struct task and the second argument is a count of how many
  * times the task was enqueued before the call to taskqueue_run().
+ *
+ * List of locks
+ * (c) const after init
+ * (q) taskqueue lock
  */
 typedef void task_fn_t(void *context, int pending);
 
 struct task {
-	STAILQ_ENTRY(task) ta_link; /* link for queue */
-	u_short ta_pending; /* count times queued */
-	u_short ta_priority; /* Priority */
-	task_fn_t *ta_func; /* task handler */
-	void *ta_context; /* argument for handler */
+	struct task **ta_running; /* (q) queue's running task pointer */
+	STAILQ_ENTRY(task) ta_link; /* (q) link for queue */
+	u_short ta_pending; /* (q) count times queued */
+	u_short ta_priority; /* (c) Priority */
+	task_fn_t *ta_func; /* (c) task handler */
+	void *ta_context; /* (c) argument for handler */
 };
 
 #endif /* !_SYS__TASK_H_ */
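
One way to read the new locking annotations (a hypothetical helper, not something this commit adds): any code that looks at a (q) field must hold the queue mutex, which is how the updated taskqueue_drain() uses ta_running.

/*
 * Hypothetical helper, assuming it lives in sys/kern/subr_taskqueue.c
 * where struct taskqueue's tq_mutex is visible: (q) fields such as
 * ta_pending and ta_running may only be examined under that mutex.
 */
static int
task_is_queued_or_running(struct taskqueue *queue, struct task *task)
{

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	return (task->ta_pending != 0 ||
	    (task->ta_running != NULL && task == *task->ta_running));
}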

sys/sys/taskqueue.h

@@ -56,7 +56,6 @@ int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
 int taskqueue_enqueue(struct taskqueue *queue, struct task *task);
 void taskqueue_drain(struct taskqueue *queue, struct task *task);
 void taskqueue_free(struct taskqueue *queue);
-void taskqueue_run(struct taskqueue *queue);
 void taskqueue_block(struct taskqueue *queue);
 void taskqueue_unblock(struct taskqueue *queue);
 int taskqueue_member(struct taskqueue *queue, struct thread *td);
@@ -75,6 +74,7 @@ void taskqueue_thread_enqueue(void *context);
 	(task)->ta_priority = (priority); \
 	(task)->ta_func = (func); \
 	(task)->ta_context = (context); \
+	(task)->ta_running = NULL; \
 } while (0)
 
 /*
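
For reference, the hunk above is the tail of TASK_INIT(); reconstructed in full it would read roughly as follows (the ta_pending line is inferred from struct task above rather than visible in this hunk). Existing consumers therefore pick up the ta_running initialization without any source change.

/* Reconstructed for reference; the ta_pending line is inferred, not shown above. */
#define TASK_INIT(task, priority, func, context) do {	\
	(task)->ta_pending = 0;				\
	(task)->ta_priority = (priority);		\
	(task)->ta_func = (func);			\
	(task)->ta_context = (context);			\
	(task)->ta_running = NULL;			\
} while (0)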