From 076821eaff146a56aa6a66916d70eda7db3347ed Mon Sep 17 00:00:00 2001
From: Tim Chase
Date: Thu, 27 Aug 2015 11:13:20 -0500
Subject: [PATCH] Create a new thread during recursive taskq dispatch if
 necessary

When dynamic taskq is enabled and all threads for a taskq are occupied,
a recursive dispatch can cause a deadlock if the calling thread depends
on the recursively-dispatched thread for its return condition.

This patch attempts to create a new thread for recursive dispatch when
none are available.

Signed-off-by: Tim Chase
Signed-off-by: Brian Behlendorf
Closes #472
---
 module/spl/spl-taskq.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 7a756af37a..2202aced78 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -53,6 +53,7 @@ EXPORT_SYMBOL(system_taskq);
 /* Private dedicated taskq for creating new taskq threads on demand. */
 static taskq_t *dynamic_taskq;
 static taskq_thread_t *taskq_thread_create(taskq_t *);
+static int taskq_thread_spawn(taskq_t *tq, int seq_tasks);
 
 static int
 task_km_flags(uint_t flags)
@@ -533,6 +534,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 {
 	taskq_ent_t *t;
 	taskqid_t rc = 0;
+	boolean_t threadlimit = B_FALSE;
 
 	ASSERT(tq);
 	ASSERT(func);
@@ -574,7 +576,13 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 
 	wake_up(&tq->tq_work_waitq);
 out:
+	threadlimit = (tq->tq_nactive == tq->tq_nthreads);
 	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+	/* Spawn additional taskq threads if required. */
+	if (threadlimit && taskq_member(tq, current))
+		(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
 	return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch);
@@ -585,6 +593,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 {
 	taskqid_t rc = 0;
 	taskq_ent_t *t;
+	boolean_t threadlimit = B_FALSE;
 
 	ASSERT(tq);
 	ASSERT(func);
@@ -617,7 +626,13 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 
 	spin_unlock(&t->tqent_lock);
 out:
+	threadlimit = (tq->tq_nactive == tq->tq_nthreads);
 	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+	/* Spawn additional taskq threads if required. */
+	if (threadlimit && taskq_member(tq, current))
+		(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
 	return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch_delay);
@@ -626,6 +641,8 @@ void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
     taskq_ent_t *t)
 {
+	boolean_t threadlimit = B_FALSE;
+
 	ASSERT(tq);
 	ASSERT(func);
 
@@ -661,7 +678,12 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
 
 	wake_up(&tq->tq_work_waitq);
 out:
+	threadlimit = (tq->tq_nactive == tq->tq_nthreads);
 	spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+	/* Spawn additional taskq threads if required. */
+	if (threadlimit && taskq_member(tq, current))
+		(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
 }
 EXPORT_SYMBOL(taskq_dispatch_ent);
 
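
Note on the failure mode (not part of the patch): the deadlock described in the
commit message arises when a task running on a dynamic taskq dispatches a second
task onto the same taskq and then blocks until that second task completes. If
every existing worker thread is occupied by such a parent, no thread is left to
run the children and nothing ever wakes the parents. The sketch below is a
minimal, hypothetical reproducer using the standard SPL taskq interfaces
(taskq_create(), taskq_dispatch(), taskq_wait_id(), taskq_wait()); the taskq
name, the thread counts, and the parent_func()/child_func()/example() helpers
are illustrative assumptions, not code from this change.

#include <sys/taskq.h>

/* Hypothetical child task; each parent blocks until its child has run. */
static void
child_func(void *arg)
{
}

/*
 * Hypothetical parent task: it recursively dispatches child_func() onto its
 * own taskq and waits for it.  If every worker thread is already busy inside
 * parent_func(), no thread remains to run the child and, without the fix
 * above, taskq_wait_id() never returns.
 */
static void
parent_func(void *arg)
{
	taskq_t *tq = arg;
	taskqid_t id;

	id = taskq_dispatch(tq, child_func, NULL, TQ_SLEEP);
	if (id != 0)
		taskq_wait_id(tq, id);	/* parent depends on child */
}

/* Occupy the workers of a dynamic taskq with parent tasks. */
static void
example(void)
{
	taskq_t *tq;
	int i;

	/* "example_tq" and the thread counts are illustrative values. */
	tq = taskq_create("example_tq", 4, maxclsyspri, 4, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	for (i = 0; i < 4; i++)
		(void) taskq_dispatch(tq, parent_func, tq, TQ_SLEEP);

	taskq_wait(tq);
	taskq_destroy(tq);
}

With the patch applied, the recursive taskq_dispatch() in parent_func() notices
that tq_nactive == tq_nthreads while the caller is a member of the taskq and
asks taskq_thread_spawn() for another worker, so the child tasks can still make
progress and the parents eventually return.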