diff --git a/include/os/linux/spl/sys/taskq.h b/include/os/linux/spl/sys/taskq.h
index b73dab631e..8051de36ba 100644
--- a/include/os/linux/spl/sys/taskq.h
+++ b/include/os/linux/spl/sys/taskq.h
@@ -20,6 +20,10 @@
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
+/*
+ * Copyright (c) 2024, Klara Inc.
+ * Copyright (c) 2024, Syneto
+ */
#ifndef _SPL_TASKQ_H
#define _SPL_TASKQ_H
@@ -33,6 +37,9 @@
#include
#include
#include
+#include <sys/wmsum.h>
+
+typedef struct kstat_s kstat_t;
#define TASKQ_NAMELEN 31
@@ -74,6 +81,32 @@ typedef enum tq_lock_role {
typedef unsigned long taskqid_t;
typedef void (task_func_t)(void *);
+typedef struct taskq_sums {
+ /* gauges (inc/dec counters, current value) */
+ wmsum_t tqs_threads_active; /* threads running a task */
+ wmsum_t tqs_threads_idle; /* threads waiting for work */
+ wmsum_t tqs_threads_total; /* total threads */
+ wmsum_t tqs_tasks_pending; /* tasks waiting to execute */
+ wmsum_t tqs_tasks_priority; /* hi-pri tasks waiting */
+ wmsum_t tqs_tasks_total; /* total waiting tasks */
+ wmsum_t tqs_tasks_delayed; /* tasks deferred to future */
+ wmsum_t tqs_entries_free; /* task entries on free list */
+
+ /* counters (inc only, since taskq creation) */
+ wmsum_t tqs_threads_created; /* threads created */
+ wmsum_t tqs_threads_destroyed; /* threads destroyed */
+ wmsum_t tqs_tasks_dispatched; /* tasks dispatched */
+ wmsum_t tqs_tasks_dispatched_delayed; /* tasks delayed to future */
+ wmsum_t tqs_tasks_executed_normal; /* normal pri tasks executed */
+ wmsum_t tqs_tasks_executed_priority; /* high pri tasks executed */
+ wmsum_t tqs_tasks_executed; /* total tasks executed */
+ wmsum_t tqs_tasks_delayed_requeued; /* delayed tasks requeued */
+ wmsum_t tqs_tasks_cancelled; /* tasks cancelled before run */
+ wmsum_t tqs_thread_wakeups; /* total thread wakeups */
+ wmsum_t tqs_thread_wakeups_nowork; /* thread woken but no tasks */
+ wmsum_t tqs_thread_sleeps; /* total thread sleeps */
+} taskq_sums_t;
+
typedef struct taskq {
spinlock_t tq_lock; /* protects taskq_t */
char *tq_name; /* taskq name */
@@ -105,6 +138,8 @@ typedef struct taskq {
struct hlist_node tq_hp_cb_node;
boolean_t tq_hp_support;
unsigned long lastspawnstop; /* when to purge dynamic */
+ taskq_sums_t tq_sums;
+ kstat_t *tq_ksp;
} taskq_t;
typedef struct taskq_ent {
@@ -123,6 +158,13 @@ typedef struct taskq_ent {
#define TQENT_FLAG_PREALLOC 0x1
#define TQENT_FLAG_CANCEL 0x2
+/* bits 2-3 are which list tqent is on */
+#define TQENT_LIST_NONE 0x0
+#define TQENT_LIST_PENDING 0x4
+#define TQENT_LIST_PRIORITY 0x8
+#define TQENT_LIST_DELAY 0xc
+#define TQENT_LIST_MASK 0xc
+
typedef struct taskq_thread {
struct list_head tqt_thread_list;
struct list_head tqt_active_list;
diff --git a/module/os/linux/spl/spl-taskq.c b/module/os/linux/spl/spl-taskq.c
index e7b812c3b5..61012bfb36 100644
--- a/module/os/linux/spl/spl-taskq.c
+++ b/module/os/linux/spl/spl-taskq.c
@@ -22,16 +22,98 @@
*
* Solaris Porting Layer (SPL) Task Queue Implementation.
*/
+/*
+ * Copyright (c) 2024, Klara Inc.
+ * Copyright (c) 2024, Syneto
+ */
#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
+#include <sys/time.h>
+#include <sys/wmsum.h>
+#include <sys/kstat.h>
#ifdef HAVE_CPU_HOTPLUG
#include
#endif
+typedef struct taskq_kstats {
+ /* static values, for completeness */
+ kstat_named_t tqks_threads_max;
+ kstat_named_t tqks_entry_pool_min;
+ kstat_named_t tqks_entry_pool_max;
+
+ /* gauges (inc/dec counters, current value) */
+ kstat_named_t tqks_threads_active;
+ kstat_named_t tqks_threads_idle;
+ kstat_named_t tqks_threads_total;
+ kstat_named_t tqks_tasks_pending;
+ kstat_named_t tqks_tasks_priority;
+ kstat_named_t tqks_tasks_total;
+ kstat_named_t tqks_tasks_delayed;
+ kstat_named_t tqks_entries_free;
+
+ /* counters (inc only, since taskq creation) */
+ kstat_named_t tqks_threads_created;
+ kstat_named_t tqks_threads_destroyed;
+ kstat_named_t tqks_tasks_dispatched;
+ kstat_named_t tqks_tasks_dispatched_delayed;
+ kstat_named_t tqks_tasks_executed_normal;
+ kstat_named_t tqks_tasks_executed_priority;
+ kstat_named_t tqks_tasks_executed;
+ kstat_named_t tqks_tasks_delayed_requeued;
+ kstat_named_t tqks_tasks_cancelled;
+ kstat_named_t tqks_thread_wakeups;
+ kstat_named_t tqks_thread_wakeups_nowork;
+ kstat_named_t tqks_thread_sleeps;
+} taskq_kstats_t;
+
+static taskq_kstats_t taskq_kstats_template = {
+ { "threads_max", KSTAT_DATA_UINT64 },
+ { "entry_pool_min", KSTAT_DATA_UINT64 },
+ { "entry_pool_max", KSTAT_DATA_UINT64 },
+ { "threads_active", KSTAT_DATA_UINT64 },
+ { "threads_idle", KSTAT_DATA_UINT64 },
+ { "threads_total", KSTAT_DATA_UINT64 },
+ { "tasks_pending", KSTAT_DATA_UINT64 },
+ { "tasks_priority", KSTAT_DATA_UINT64 },
+ { "tasks_total", KSTAT_DATA_UINT64 },
+ { "tasks_delayed", KSTAT_DATA_UINT64 },
+ { "entries_free", KSTAT_DATA_UINT64 },
+
+ { "threads_created", KSTAT_DATA_UINT64 },
+ { "threads_destroyed", KSTAT_DATA_UINT64 },
+ { "tasks_dispatched", KSTAT_DATA_UINT64 },
+ { "tasks_dispatched_delayed", KSTAT_DATA_UINT64 },
+ { "tasks_executed_normal", KSTAT_DATA_UINT64 },
+ { "tasks_executed_priority", KSTAT_DATA_UINT64 },
+ { "tasks_executed", KSTAT_DATA_UINT64 },
+ { "tasks_delayed_requeued", KSTAT_DATA_UINT64 },
+ { "tasks_cancelled", KSTAT_DATA_UINT64 },
+ { "thread_wakeups", KSTAT_DATA_UINT64 },
+ { "thread_wakeups_nowork", KSTAT_DATA_UINT64 },
+ { "thread_sleeps", KSTAT_DATA_UINT64 },
+};
+
+#define	TQSTAT_INC(tq, stat)	wmsum_add(&(tq)->tq_sums.tqs_##stat, 1)
+#define	TQSTAT_DEC(tq, stat)	wmsum_add(&(tq)->tq_sums.tqs_##stat, -1)
+
+#define _TQSTAT_MOD_LIST(mod, tq, t) do { \
+ switch (t->tqent_flags & TQENT_LIST_MASK) { \
+ case TQENT_LIST_NONE: ASSERT(list_empty(&t->tqent_list)); break;\
+ case TQENT_LIST_PENDING: mod(tq, tasks_pending); break; \
+ case TQENT_LIST_PRIORITY: mod(tq, tasks_priority); break; \
+ case TQENT_LIST_DELAY: mod(tq, tasks_delayed); break; \
+ } \
+} while (0)
+#define TQSTAT_INC_LIST(tq, t) _TQSTAT_MOD_LIST(TQSTAT_INC, tq, t)
+#define TQSTAT_DEC_LIST(tq, t) _TQSTAT_MOD_LIST(TQSTAT_DEC, tq, t)
+
+#define	TQENT_SET_LIST(t, l) \
+	((t)->tqent_flags = ((t)->tqent_flags & ~TQENT_LIST_MASK) | (l))
+
static int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
@@ -134,6 +216,7 @@ retry:
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
+ TQSTAT_DEC(tq, entries_free);
return (t);
}
@@ -204,12 +287,11 @@ task_done(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
+ ASSERT(list_empty(&t->tqent_list));
/* Wake tasks blocked in taskq_wait_id() */
wake_up_all(&t->tqent_waitq);
- list_del_init(&t->tqent_list);
-
if (tq->tq_nalloc <= tq->tq_minalloc) {
t->tqent_id = TASKQID_INVALID;
t->tqent_func = NULL;
@@ -217,6 +299,7 @@ task_done(taskq_t *tq, taskq_ent_t *t)
t->tqent_flags = 0;
list_add_tail(&t->tqent_list, &tq->tq_free_list);
+ TQSTAT_INC(tq, entries_free);
} else {
task_free(tq, t);
}
@@ -263,6 +346,8 @@ task_expire_impl(taskq_ent_t *t)
spin_unlock_irqrestore(&tq->tq_lock, flags);
wake_up(&tq->tq_work_waitq);
+
+ TQSTAT_INC(tq, tasks_delayed_requeued);
}
static void
@@ -534,7 +619,10 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
t = taskq_find(tq, id);
if (t && t != ERR_PTR(-EBUSY)) {
list_del_init(&t->tqent_list);
+ TQSTAT_DEC_LIST(tq, t);
+
t->tqent_flags |= TQENT_FLAG_CANCEL;
+ TQSTAT_INC(tq, tasks_cancelled);
/*
* When canceling the lowest outstanding task id we
@@ -604,13 +692,19 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
spin_lock(&t->tqent_lock);
/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
- if (flags & TQ_NOQUEUE)
+ if (flags & TQ_NOQUEUE) {
+ TQENT_SET_LIST(t, TQENT_LIST_PRIORITY);
list_add(&t->tqent_list, &tq->tq_prio_list);
/* Queue to the priority list instead of the pending list */
- else if (flags & TQ_FRONT)
+ } else if (flags & TQ_FRONT) {
+ TQENT_SET_LIST(t, TQENT_LIST_PRIORITY);
list_add_tail(&t->tqent_list, &tq->tq_prio_list);
- else
+ } else {
+ TQENT_SET_LIST(t, TQENT_LIST_PENDING);
list_add_tail(&t->tqent_list, &tq->tq_pend_list);
+ }
+ TQSTAT_INC_LIST(tq, t);
+ TQSTAT_INC(tq, tasks_total);
t->tqent_id = rc = tq->tq_next_id;
tq->tq_next_id++;
@@ -629,6 +723,8 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
wake_up(&tq->tq_work_waitq);
+ TQSTAT_INC(tq, tasks_dispatched);
+
/* Spawn additional taskq threads if required. */
if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
@@ -662,6 +758,8 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
/* Queue to the delay list for subsequent execution */
list_add_tail(&t->tqent_list, &tq->tq_delay_list);
+ TQENT_SET_LIST(t, TQENT_LIST_DELAY);
+ TQSTAT_INC_LIST(tq, t);
t->tqent_id = rc = tq->tq_next_id;
tq->tq_next_id++;
@@ -676,6 +774,8 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
spin_unlock(&t->tqent_lock);
+ TQSTAT_INC(tq, tasks_dispatched_delayed);
+
/* Spawn additional taskq threads if required. */
if (tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
@@ -724,10 +824,15 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
t->tqent_flags |= TQENT_FLAG_PREALLOC;
/* Queue to the priority list instead of the pending list */
- if (flags & TQ_FRONT)
+ if (flags & TQ_FRONT) {
+ TQENT_SET_LIST(t, TQENT_LIST_PRIORITY);
list_add_tail(&t->tqent_list, &tq->tq_prio_list);
- else
+ } else {
+ TQENT_SET_LIST(t, TQENT_LIST_PENDING);
list_add_tail(&t->tqent_list, &tq->tq_pend_list);
+ }
+ TQSTAT_INC_LIST(tq, t);
+ TQSTAT_INC(tq, tasks_total);
t->tqent_id = tq->tq_next_id;
tq->tq_next_id++;
@@ -742,6 +847,8 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
wake_up(&tq->tq_work_waitq);
+ TQSTAT_INC(tq, tasks_dispatched);
+
/* Spawn additional taskq threads if required. */
if (tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
@@ -908,6 +1015,8 @@ taskq_thread(void *args)
wake_up(&tq->tq_wait_waitq);
set_current_state(TASK_INTERRUPTIBLE);
+ TQSTAT_INC(tq, threads_total);
+
while (!kthread_should_stop()) {
if (list_empty(&tq->tq_pend_list) &&
@@ -919,9 +1028,15 @@ taskq_thread(void *args)
add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
spin_unlock_irqrestore(&tq->tq_lock, flags);
+ TQSTAT_INC(tq, thread_sleeps);
+ TQSTAT_INC(tq, threads_idle);
+
schedule();
seq_tasks = 0;
+ TQSTAT_DEC(tq, threads_idle);
+ TQSTAT_INC(tq, thread_wakeups);
+
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
remove_wait_queue(&tq->tq_work_waitq, &wait);
@@ -931,6 +1046,8 @@ taskq_thread(void *args)
if ((t = taskq_next_ent(tq)) != NULL) {
list_del_init(&t->tqent_list);
+ TQSTAT_DEC_LIST(tq, t);
+ TQSTAT_DEC(tq, tasks_total);
/*
* A TQENT_FLAG_PREALLOC task may be reused or freed
@@ -955,6 +1072,7 @@ taskq_thread(void *args)
tq->tq_nactive++;
spin_unlock_irqrestore(&tq->tq_lock, flags);
+ TQSTAT_INC(tq, threads_active);
DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);
/* Perform the requested task */
@@ -962,8 +1080,17 @@ taskq_thread(void *args)
DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);
+ TQSTAT_DEC(tq, threads_active);
+			if ((tqt->tqt_flags & TQENT_LIST_MASK) ==
+			    TQENT_LIST_PENDING)
+ TQSTAT_INC(tq, tasks_executed_normal);
+ else
+ TQSTAT_INC(tq, tasks_executed_priority);
+ TQSTAT_INC(tq, tasks_executed);
+
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
+
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
tqt->tqt_task = NULL;
@@ -989,7 +1116,8 @@ taskq_thread(void *args)
tqt->tqt_id = TASKQID_INVALID;
tqt->tqt_flags = 0;
wake_up_all(&tq->tq_wait_waitq);
- }
+ } else
+ TQSTAT_INC(tq, thread_wakeups_nowork);
set_current_state(TASK_INTERRUPTIBLE);
@@ -998,6 +1126,10 @@ taskq_thread(void *args)
__set_current_state(TASK_RUNNING);
tq->tq_nthreads--;
list_del_init(&tqt->tqt_thread_list);
+
+ TQSTAT_DEC(tq, threads_total);
+ TQSTAT_INC(tq, threads_destroyed);
+
error:
kmem_free(tqt, sizeof (taskq_thread_t));
spin_unlock_irqrestore(&tq->tq_lock, flags);
@@ -1037,9 +1169,156 @@ taskq_thread_create(taskq_t *tq)
wake_up_process(tqt->tqt_thread);
+ TQSTAT_INC(tq, threads_created);
+
return (tqt);
}
+static void
+taskq_stats_init(taskq_t *tq)
+{
+	taskq_sums_t *tqs = &tq->tq_sums; tq->tq_ksp = NULL;
+ wmsum_init(&tqs->tqs_threads_active, 0);
+ wmsum_init(&tqs->tqs_threads_idle, 0);
+ wmsum_init(&tqs->tqs_threads_total, 0);
+ wmsum_init(&tqs->tqs_tasks_pending, 0);
+ wmsum_init(&tqs->tqs_tasks_priority, 0);
+ wmsum_init(&tqs->tqs_tasks_total, 0);
+ wmsum_init(&tqs->tqs_tasks_delayed, 0);
+ wmsum_init(&tqs->tqs_entries_free, 0);
+ wmsum_init(&tqs->tqs_threads_created, 0);
+ wmsum_init(&tqs->tqs_threads_destroyed, 0);
+ wmsum_init(&tqs->tqs_tasks_dispatched, 0);
+ wmsum_init(&tqs->tqs_tasks_dispatched_delayed, 0);
+ wmsum_init(&tqs->tqs_tasks_executed_normal, 0);
+ wmsum_init(&tqs->tqs_tasks_executed_priority, 0);
+ wmsum_init(&tqs->tqs_tasks_executed, 0);
+ wmsum_init(&tqs->tqs_tasks_delayed_requeued, 0);
+ wmsum_init(&tqs->tqs_tasks_cancelled, 0);
+ wmsum_init(&tqs->tqs_thread_wakeups, 0);
+ wmsum_init(&tqs->tqs_thread_wakeups_nowork, 0);
+ wmsum_init(&tqs->tqs_thread_sleeps, 0);
+}
+
+static void
+taskq_stats_fini(taskq_t *tq)
+{
+ taskq_sums_t *tqs = &tq->tq_sums;
+ wmsum_fini(&tqs->tqs_threads_active);
+ wmsum_fini(&tqs->tqs_threads_idle);
+ wmsum_fini(&tqs->tqs_threads_total);
+ wmsum_fini(&tqs->tqs_tasks_pending);
+ wmsum_fini(&tqs->tqs_tasks_priority);
+ wmsum_fini(&tqs->tqs_tasks_total);
+ wmsum_fini(&tqs->tqs_tasks_delayed);
+ wmsum_fini(&tqs->tqs_entries_free);
+ wmsum_fini(&tqs->tqs_threads_created);
+ wmsum_fini(&tqs->tqs_threads_destroyed);
+ wmsum_fini(&tqs->tqs_tasks_dispatched);
+ wmsum_fini(&tqs->tqs_tasks_dispatched_delayed);
+ wmsum_fini(&tqs->tqs_tasks_executed_normal);
+ wmsum_fini(&tqs->tqs_tasks_executed_priority);
+ wmsum_fini(&tqs->tqs_tasks_executed);
+ wmsum_fini(&tqs->tqs_tasks_delayed_requeued);
+ wmsum_fini(&tqs->tqs_tasks_cancelled);
+ wmsum_fini(&tqs->tqs_thread_wakeups);
+ wmsum_fini(&tqs->tqs_thread_wakeups_nowork);
+ wmsum_fini(&tqs->tqs_thread_sleeps);
+}
+
+static int
+taskq_kstats_update(kstat_t *ksp, int rw)
+{
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+
+ taskq_t *tq = ksp->ks_private;
+ taskq_kstats_t *tqks = ksp->ks_data;
+
+ tqks->tqks_threads_max.value.ui64 = tq->tq_maxthreads;
+ tqks->tqks_entry_pool_min.value.ui64 = tq->tq_minalloc;
+ tqks->tqks_entry_pool_max.value.ui64 = tq->tq_maxalloc;
+
+ taskq_sums_t *tqs = &tq->tq_sums;
+
+ tqks->tqks_threads_active.value.ui64 =
+ wmsum_value(&tqs->tqs_threads_active);
+ tqks->tqks_threads_idle.value.ui64 =
+ wmsum_value(&tqs->tqs_threads_idle);
+ tqks->tqks_threads_total.value.ui64 =
+ wmsum_value(&tqs->tqs_threads_total);
+ tqks->tqks_tasks_pending.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_pending);
+ tqks->tqks_tasks_priority.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_priority);
+ tqks->tqks_tasks_total.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_total);
+ tqks->tqks_tasks_delayed.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_delayed);
+ tqks->tqks_entries_free.value.ui64 =
+ wmsum_value(&tqs->tqs_entries_free);
+ tqks->tqks_threads_created.value.ui64 =
+ wmsum_value(&tqs->tqs_threads_created);
+ tqks->tqks_threads_destroyed.value.ui64 =
+ wmsum_value(&tqs->tqs_threads_destroyed);
+ tqks->tqks_tasks_dispatched.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_dispatched);
+ tqks->tqks_tasks_dispatched_delayed.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_dispatched_delayed);
+ tqks->tqks_tasks_executed_normal.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_executed_normal);
+ tqks->tqks_tasks_executed_priority.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_executed_priority);
+ tqks->tqks_tasks_executed.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_executed);
+ tqks->tqks_tasks_delayed_requeued.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_delayed_requeued);
+ tqks->tqks_tasks_cancelled.value.ui64 =
+ wmsum_value(&tqs->tqs_tasks_cancelled);
+ tqks->tqks_thread_wakeups.value.ui64 =
+ wmsum_value(&tqs->tqs_thread_wakeups);
+ tqks->tqks_thread_wakeups_nowork.value.ui64 =
+ wmsum_value(&tqs->tqs_thread_wakeups_nowork);
+ tqks->tqks_thread_sleeps.value.ui64 =
+ wmsum_value(&tqs->tqs_thread_sleeps);
+
+ return (0);
+}
+
+static void
+taskq_kstats_init(taskq_t *tq)
+{
+ char name[TASKQ_NAMELEN+5]; /* 5 for dot, 3x instance digits, null */
+ snprintf(name, sizeof (name), "%s.%d", tq->tq_name, tq->tq_instance);
+
+ kstat_t *ksp = kstat_create("taskq", 0, name, "misc",
+ KSTAT_TYPE_NAMED, sizeof (taskq_kstats_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (ksp == NULL)
+ return;
+
+ ksp->ks_private = tq;
+ ksp->ks_update = taskq_kstats_update;
+ ksp->ks_data = kmem_alloc(sizeof (taskq_kstats_t), KM_SLEEP);
+ memcpy(ksp->ks_data, &taskq_kstats_template, sizeof (taskq_kstats_t));
+ kstat_install(ksp);
+
+ tq->tq_ksp = ksp;
+}
+
+static void
+taskq_kstats_fini(taskq_t *tq)
+{
+ if (tq->tq_ksp == NULL)
+ return;
+
+ kmem_free(tq->tq_ksp->ks_data, sizeof (taskq_kstats_t));
+ kstat_delete(tq->tq_ksp);
+
+ tq->tq_ksp = NULL;
+}
+
taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
int minalloc, int maxalloc, uint_t flags)
@@ -1104,6 +1383,7 @@ taskq_create(const char *name, int threads_arg, pri_t pri,
init_waitqueue_head(&tq->tq_wait_waitq);
tq->tq_lock_class = TQ_LOCK_GENERAL;
INIT_LIST_HEAD(&tq->tq_taskqs);
+ taskq_stats_init(tq);
if (flags & TASKQ_PREPOPULATE) {
spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
@@ -1137,14 +1417,17 @@ taskq_create(const char *name, int threads_arg, pri_t pri,
if (rc) {
taskq_destroy(tq);
- tq = NULL;
- } else {
- down_write(&tq_list_sem);
- tq->tq_instance = taskq_find_by_name(name) + 1;
- list_add_tail(&tq->tq_taskqs, &tq_list);
- up_write(&tq_list_sem);
+ return (NULL);
}
+ down_write(&tq_list_sem);
+ tq->tq_instance = taskq_find_by_name(name) + 1;
+ list_add_tail(&tq->tq_taskqs, &tq_list);
+ up_write(&tq_list_sem);
+
+ /* Install kstats late, because the name includes tq_instance */
+ taskq_kstats_init(tq);
+
return (tq);
}
EXPORT_SYMBOL(taskq_create);
@@ -1177,6 +1460,8 @@ taskq_destroy(taskq_t *tq)
taskq_wait(tq);
+ taskq_kstats_fini(tq);
+
/* remove taskq from global list used by the kstats */
down_write(&tq_list_sem);
list_del(&tq->tq_taskqs);
@@ -1230,6 +1515,7 @@ taskq_destroy(taskq_t *tq)
spin_unlock_irqrestore(&tq->tq_lock, flags);
+ taskq_stats_fini(tq);
kmem_strfree(tq->tq_name);
kmem_free(tq, sizeof (taskq_t));
}