From 58bf6afd3f065274e0111ab9bab44828dd72c726 Mon Sep 17 00:00:00 2001 From: Paul Dagnelie Date: Tue, 2 Nov 2021 09:23:48 -0700 Subject: [PATCH] Fix cpu hotplug atomic sleep issue We move the spinlock unlock before the thread creation. This should be safe because the thread creation code doesn't actually manipulate any taskq data structures; that's done by the thread once it's created. We also remove the assertion that the maxthreads is the current threads plus one; that assertion could fail if multiple hotplug events come in quick succession, and the first new taskq thread hasn't had a chance to start processing yet. Reviewed-by: Brian Behlendorf Reviewed-by: Matthew Ahrens Reviewed-by: Tony Nguyen Signed-off-by: Paul Dagnelie Closes #12714 --- module/os/linux/spl/spl-taskq.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/module/os/linux/spl/spl-taskq.c b/module/os/linux/spl/spl-taskq.c index 61631256c8..fb25a41544 100644 --- a/module/os/linux/spl/spl-taskq.c +++ b/module/os/linux/spl/spl-taskq.c @@ -1298,8 +1298,10 @@ spl_taskq_expand(unsigned int cpu, struct hlist_node *node) ASSERT(tq); spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class); - if (!(tq->tq_flags & TASKQ_ACTIVE)) - goto out; + if (!(tq->tq_flags & TASKQ_ACTIVE)) { + spin_unlock_irqrestore(&tq->tq_lock, flags); + return (err); + } ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT); int nthreads = MIN(tq->tq_cpu_pct, 100); @@ -1308,13 +1310,12 @@ spl_taskq_expand(unsigned int cpu, struct hlist_node *node) if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) && tq->tq_maxthreads > tq->tq_nthreads) { - ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads + 1); + spin_unlock_irqrestore(&tq->tq_lock, flags); taskq_thread_t *tqt = taskq_thread_create(tq); if (tqt == NULL) err = -1; + return (err); } - -out: spin_unlock_irqrestore(&tq->tq_lock, flags); return (err); }