From f5f2b87df0362242b13b8183a2a8d88be63b0e73 Mon Sep 17 00:00:00 2001 From: tuxoko Date: Fri, 6 Nov 2015 15:00:55 -0800 Subject: [PATCH] Fix taskq dynamic spawning Currently taskq_dispatch() will spawn a new thread only on the condition that the caller is also a member of the taskq. However, even under this condition, a deadlock can still occur when a task on tq1 is waiting for another thread, which is itself trying to dispatch a task on tq1. So this patch removes the check. For example, when you do: zfs send pp/fs0@001 | zfs recv pp/fs0_copy This will easily deadlock before this patch. Also, move the seq_tasks check from taskq_thread_spawn() to taskq_thread() because it's not used by the caller from taskq_dispatch(). Signed-off-by: Chunwei Chen Signed-off-by: Tim Chase Signed-off-by: Brian Behlendorf Closes #496 --- module/spl/spl-taskq.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c index f6ef56251c63..2c2e3ad465ab 100644 --- a/module/spl/spl-taskq.c +++ b/module/spl/spl-taskq.c @@ -538,7 +538,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id) } EXPORT_SYMBOL(taskq_cancel_id); -static int taskq_thread_spawn(taskq_t *tq, int seq_tasks); +static int taskq_thread_spawn(taskq_t *tq); taskqid_t taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) @@ -587,9 +587,8 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags) wake_up(&tq->tq_work_waitq); out: /* Spawn additional taskq threads if required. */ - if (tq->tq_nactive == tq->tq_nthreads && - taskq_member_impl(tq, current)) - (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1); + if (tq->tq_nactive == tq->tq_nthreads) + (void) taskq_thread_spawn(tq); spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); return (rc); @@ -635,9 +634,8 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg, spin_unlock(&t->tqent_lock); out: /* Spawn additional taskq threads if required. 
*/ - if (tq->tq_nactive == tq->tq_nthreads && - taskq_member_impl(tq, current)) - (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1); + if (tq->tq_nactive == tq->tq_nthreads) + (void) taskq_thread_spawn(tq); spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); return (rc); } @@ -683,9 +681,8 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags, wake_up(&tq->tq_work_waitq); out: /* Spawn additional taskq threads if required. */ - if (tq->tq_nactive == tq->tq_nthreads && - taskq_member_impl(tq, current)) - (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1); + if (tq->tq_nactive == tq->tq_nthreads) + (void) taskq_thread_spawn(tq); spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags); } EXPORT_SYMBOL(taskq_dispatch_ent); @@ -756,15 +753,14 @@ taskq_thread_spawn_task(void *arg) * which is also a dynamic taskq cannot be safely used for this. */ static int -taskq_thread_spawn(taskq_t *tq, int seq_tasks) +taskq_thread_spawn(taskq_t *tq) { int spawning = 0; if (!(tq->tq_flags & TASKQ_DYNAMIC)) return (0); - if ((seq_tasks > spl_taskq_thread_sequential) && - (tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) && + if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) && (tq->tq_flags & TASKQ_ACTIVE)) { spawning = (++tq->tq_nspawn); taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task, @@ -898,7 +894,8 @@ taskq_thread(void *args) } /* Spawn additional taskq threads if required. */ - if (taskq_thread_spawn(tq, ++seq_tasks)) + if ((++seq_tasks) > spl_taskq_thread_sequential && + taskq_thread_spawn(tq)) seq_tasks = 0; tqt->tqt_id = 0;