diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index c3472ab37e..facb2354a8 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -140,7 +140,7 @@ typedef struct ztest_args {
 	spa_t *za_spa;
 	objset_t *za_os;
 	zilog_t *za_zilog;
-	pthread_t za_thread;
+	kthread_t *za_thread;
 	uint64_t za_instance;
 	uint64_t za_random;
 	uint64_t za_diroff;
@@ -231,7 +231,7 @@ ztest_info_t ztest_info[] = {
  * The callbacks are ordered by txg number.
  */
 typedef struct ztest_cb_list {
-	pthread_mutex_t zcl_callbacks_lock;
+	kmutex_t zcl_callbacks_lock;
 	list_t zcl_callbacks;
 } ztest_cb_list_t;
 
@@ -239,8 +239,8 @@ typedef struct ztest_cb_list {
  * Stuff we need to share writably between parent and child.
 */
 typedef struct ztest_shared {
-	pthread_mutex_t zs_vdev_lock;
-	pthread_rwlock_t zs_name_lock;
+	kmutex_t zs_vdev_lock;
+	krwlock_t zs_name_lock;
 	uint64_t zs_vdev_primaries;
 	uint64_t zs_vdev_aux;
 	uint64_t zs_enospc_count;
@@ -249,7 +249,7 @@ typedef struct ztest_shared {
 	uint64_t zs_alloc;
 	uint64_t zs_space;
 	ztest_info_t zs_info[ZTEST_FUNCS];
-	pthread_mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
+	kmutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
 	uint64_t zs_seq[ZTEST_SYNC_LOCKS];
 	ztest_cb_list_t zs_cb_list;
 } ztest_shared_t;
@@ -825,7 +825,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
 	 * Attempt to create an existing pool. It shouldn't matter
 	 * what's in the nvroot; we should fail with EEXIST.
 	 */
-	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+	rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
 	error = spa_create(za->za_pool, nvroot, NULL, NULL, NULL);
 	nvlist_free(nvroot);
@@ -841,7 +841,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
 		fatal(0, "spa_destroy() = %d", error);
 
 	spa_close(spa, FTAG);
-	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+	rw_exit(&ztest_shared->zs_name_lock);
 }
 
 static vdev_t *
@@ -871,7 +871,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
 	nvlist_t *nvroot;
 	int error;
 
-	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+	mutex_enter(&ztest_shared->zs_vdev_lock);
 
 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 
@@ -889,7 +889,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
 	error = spa_vdev_add(spa, nvroot);
 	nvlist_free(nvroot);
 
-	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+	mutex_exit(&ztest_shared->zs_vdev_lock);
 
 	if (error == ENOSPC)
 		ztest_record_enospc("spa_vdev_add");
@@ -918,7 +918,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
 		aux = ZPOOL_CONFIG_L2CACHE;
 	}
 
-	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+	mutex_enter(&ztest_shared->zs_vdev_lock);
 
 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 
@@ -974,7 +974,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
 			fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
 	}
 
-	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+	mutex_exit(&ztest_shared->zs_vdev_lock);
 }
 
 /*
@@ -1000,7 +1000,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
 	int oldvd_is_log;
 	int error, expected_error;
 
-	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+	mutex_enter(&ztest_shared->zs_vdev_lock);
 
 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 
@@ -1060,7 +1060,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
 		if (error != 0 && error != ENODEV && error != EBUSY &&
 		    error != ENOTSUP)
 			fatal(0, "detach (%s) returned %d", oldpath, error);
-		(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+		mutex_exit(&ztest_shared->zs_vdev_lock);
 		return;
 	}
 
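The ztest.c changes above are mechanical: each pthread_mutex_lock()/pthread_mutex_unlock() pair on shared test state becomes the Solaris-style mutex_enter()/mutex_exit() interface that libzpool already emulates for the rest of the ZFS code. As a reference point only, a minimal sketch of that facade pattern over pthreads might look like the following (the my_* names are hypothetical, not libzpool's actual implementation):

```c
/* Minimal sketch of a Solaris-style mutex facade over pthreads. */
#include <pthread.h>
#include <assert.h>

typedef struct my_kmutex {
	pthread_mutex_t km_lock;
} my_kmutex_t;

static void
my_mutex_init(my_kmutex_t *mp)
{
	/* MUTEX_DEFAULT semantics; no interrupt-block cookie in userland. */
	int rc = pthread_mutex_init(&mp->km_lock, NULL);
	assert(rc == 0);
	(void) rc;
}

static void
my_mutex_enter(my_kmutex_t *mp)
{
	int rc = pthread_mutex_lock(&mp->km_lock);
	assert(rc == 0);
	(void) rc;
}

static void
my_mutex_exit(my_kmutex_t *mp)
{
	int rc = pthread_mutex_unlock(&mp->km_lock);
	assert(rc == 0);
	(void) rc;
}

static void
my_mutex_destroy(my_kmutex_t *mp)
{
	int rc = pthread_mutex_destroy(&mp->km_lock);
	assert(rc == 0);
	(void) rc;
}
```

Because the wrapper checks the return value internally, callers never need the `(void)` casts that litter the pthread version, which is why the converted hunks read shorter.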
@@ -1153,7 +1153,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
 		    (longlong_t)newsize, replacing, error,
 		    expected_error);
 	}
-	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+	mutex_exit(&ztest_shared->zs_vdev_lock);
 }
 
 /*
@@ -1256,7 +1256,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
 	size_t psize, newsize;
 	uint64_t spa_newsize, spa_cursize, ms_count;
 
-	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);
+	mutex_enter(&ztest_shared->zs_vdev_lock);
 	mutex_enter(&spa_namespace_lock);
 	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
 
@@ -1284,7 +1284,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
 	if (psize == 0 || psize >= 4 * zopt_vdev_size) {
 		spa_config_exit(spa, SCL_STATE, spa);
 		mutex_exit(&spa_namespace_lock);
-		(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+		mutex_exit(&ztest_shared->zs_vdev_lock);
 		return;
 	}
 	ASSERT(psize > 0);
@@ -1313,7 +1313,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
 		}
 		(void) spa_config_exit(spa, SCL_STATE, spa);
 		mutex_exit(&spa_namespace_lock);
-		(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+		mutex_exit(&ztest_shared->zs_vdev_lock);
 		return;
 	}
 
@@ -1353,7 +1353,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
 		    spa->spa_name, oldnumbuf, newnumbuf);
 	}
 	spa_config_exit(spa, SCL_STATE, spa);
-	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
+	mutex_exit(&ztest_shared->zs_vdev_lock);
 }
 
 /* ARGSUSED */
@@ -1452,7 +1452,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
 	uint64_t seq;
 	uint64_t objects;
 
-	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+	rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 	(void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
 	    (u_longlong_t)za->za_instance);
 
@@ -1495,7 +1495,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
 	if (error) {
 		if (error == ENOSPC) {
 			ztest_record_enospc("dmu_objset_create");
-			(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+			rw_exit(&ztest_shared->zs_name_lock);
 			return;
 		}
 		fatal(0, "dmu_objset_create(%s) = %d", name, error);
@@ -1577,7 +1577,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
 	if (error)
 		fatal(0, "dmu_objset_destroy(%s) = %d", name, error);
 
-	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+	rw_exit(&ztest_shared->zs_name_lock);
 }
 
 /*
@@ -1591,7 +1591,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
 	char snapname[100];
 	char osname[MAXNAMELEN];
 
-	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+	rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 	dmu_objset_name(os, osname);
 	(void) snprintf(snapname, 100, "%s@%llu", osname,
 	    (u_longlong_t)za->za_instance);
@@ -1605,7 +1605,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
 		ztest_record_enospc("dmu_take_snapshot");
 	else if (error != 0 && error != EEXIST)
 		fatal(0, "dmu_take_snapshot() = %d", error);
-	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+	rw_exit(&ztest_shared->zs_name_lock);
 }
 
 /*
@@ -1662,7 +1662,7 @@ ztest_dsl_dataset_promote_busy(ztest_args_t *za)
 	char osname[MAXNAMELEN];
 	uint64_t curval = za->za_instance;
 
-	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+	rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 
 	dmu_objset_name(os, osname);
 	ztest_dsl_dataset_cleanup(osname, curval);
@@ -1747,7 +1747,7 @@ ztest_dsl_dataset_promote_busy(ztest_args_t *za)
 out:
 	ztest_dsl_dataset_cleanup(osname, curval);
 
-	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+	rw_exit(&ztest_shared->zs_name_lock);
 }
 
 /*
@@ -2555,7 +2555,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	int bs = ZTEST_DIROBJ_BLOCKSIZE;
 	int do_free = 0;
 	uint64_t off, txg, txg_how;
-	pthread_mutex_t *lp;
+	kmutex_t *lp;
 	char osname[MAXNAMELEN];
 	char iobuf[SPA_MAXBLOCKSIZE];
 	blkptr_t blk = { 0 };
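The zs_name_lock conversions above follow the same pattern for reader/writer locks: pthread_rwlock_rdlock()/wrlock()/unlock() become rw_enter(..., RW_READER or RW_WRITER) and rw_exit(). A standalone sketch of such a krwlock-style facade, again with hypothetical my_* names rather than libzpool's real one:

```c
/* Minimal sketch of a Solaris-style rwlock facade over pthreads. */
#include <pthread.h>
#include <assert.h>

typedef enum { MY_RW_READER, MY_RW_WRITER } my_krw_t;

typedef struct my_krwlock {
	pthread_rwlock_t rw_lock;
} my_krwlock_t;

static void
my_rw_init(my_krwlock_t *rwp)
{
	int rc = pthread_rwlock_init(&rwp->rw_lock, NULL);
	assert(rc == 0);
	(void) rc;
}

/* One entry point selects reader or writer mode, as rw_enter() does. */
static void
my_rw_enter(my_krwlock_t *rwp, my_krw_t how)
{
	int rc = (how == MY_RW_WRITER) ?
	    pthread_rwlock_wrlock(&rwp->rw_lock) :
	    pthread_rwlock_rdlock(&rwp->rw_lock);
	assert(rc == 0);
	(void) rc;
}

static void
my_rw_exit(my_krwlock_t *rwp)
{
	int rc = pthread_rwlock_unlock(&rwp->rw_lock);
	assert(rc == 0);
	(void) rc;
}
```

Readers such as the snapshot and objset tests take the name lock as RW_READER and can run concurrently; ztest_spa_rename() (further down) is the one caller that needs RW_WRITER.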
@@ -2617,7 +2617,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	txg = dmu_tx_get_txg(tx);
 
 	lp = &ztest_shared->zs_sync_lock[b];
-	(void) pthread_mutex_lock(lp);
+	mutex_enter(lp);
 
 	wbt->bt_objset = dmu_objset_id(os);
 	wbt->bt_object = ZTEST_DIROBJ;
@@ -2674,7 +2674,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 		dmu_buf_rele(bonus_db, FTAG);
 	}
 
-	(void) pthread_mutex_unlock(lp);
+	mutex_exit(lp);
 
 	if (ztest_random(1000) == 0)
 		(void) poll(NULL, 0, 1); /* open dn_notxholds window */
@@ -2693,13 +2693,13 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	/*
 	 * dmu_sync() the block we just wrote.
 	 */
-	(void) pthread_mutex_lock(lp);
+	mutex_enter(lp);
 
 	blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
 	error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db);
 	za->za_dbuf = db;
 	if (error) {
-		(void) pthread_mutex_unlock(lp);
+		mutex_exit(lp);
 		return;
 	}
 	blkoff = off - blkoff;
@@ -2708,18 +2708,18 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	za->za_dbuf = NULL;
 
 	if (error) {
-		(void) pthread_mutex_unlock(lp);
+		mutex_exit(lp);
 		return;
 	}
 
 	if (blk.blk_birth == 0) {	/* concurrent free */
-		(void) pthread_mutex_unlock(lp);
+		mutex_exit(lp);
 		return;
 	}
 
 	txg_suspend(dmu_objset_pool(os));
 
-	(void) pthread_mutex_unlock(lp);
+	mutex_exit(lp);
 
 	ASSERT(blk.blk_fill == 1);
 	ASSERT3U(BP_GET_TYPE(&blk), ==, DMU_OT_UINT64_OTHER);
@@ -3130,9 +3130,9 @@ ztest_commit_callback(void *arg, int error)
 	ASSERT3U(data->zcd_txg, !=, 0);
 
 	/* Remove our callback from the list */
-	(void) pthread_mutex_lock(&zcl->zcl_callbacks_lock);
+	mutex_enter(&zcl->zcl_callbacks_lock);
 	list_remove(&zcl->zcl_callbacks, data);
-	(void) pthread_mutex_unlock(&zcl->zcl_callbacks_lock);
+	mutex_exit(&zcl->zcl_callbacks_lock);
 
 out:
 	umem_free(data, sizeof (ztest_cb_data_t));
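ztest_dmu_write_parallel() (above) serializes concurrent writers on the same block by indexing into the small zs_sync_lock[] array rather than taking one global lock, so writers to unrelated blocks rarely contend. A standalone sketch of that hashed-lock-array idea, with hypothetical names and an assumed bucket count:

```c
/* Sketch: serialize work per block with a small hashed array of locks. */
#include <pthread.h>
#include <stdint.h>

#define	MY_SYNC_LOCKS	16	/* assumed bucket count for illustration */

static pthread_mutex_t my_sync_lock[MY_SYNC_LOCKS];

static void
my_sync_locks_init(void)
{
	for (int i = 0; i < MY_SYNC_LOCKS; i++)
		(void) pthread_mutex_init(&my_sync_lock[i], NULL);
}

/*
 * Two callers working on the same block hash to the same bucket and
 * serialize; callers on different blocks usually proceed in parallel.
 */
static void
my_write_block(uint64_t blkid, void (*do_write)(uint64_t))
{
	pthread_mutex_t *lp = &my_sync_lock[blkid % MY_SYNC_LOCKS];

	(void) pthread_mutex_lock(lp);
	do_write(blkid);
	(void) pthread_mutex_unlock(lp);
}
```

The bucket count trades memory for contention; ztest keeps its own count in ZTEST_SYNC_LOCKS and stores the array in the shared segment so parent and child processes agree on it.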
@@ -3220,8 +3220,8 @@ ztest_dmu_commit_callbacks(ztest_args_t *za)
 	/*
 	 * Read existing data to make sure there isn't a future leak.
 	 */
-	VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t),
-	    &old_txg));
+	VERIFY(0 == dmu_read(os, ZTEST_DIROBJ, za->za_diroff,
+	    sizeof (uint64_t), &old_txg, DMU_READ_PREFETCH));
 
 	if (old_txg > txg)
 		fatal(0, "future leak: got %llx, open txg is %llx", old_txg,
@@ -3229,7 +3229,7 @@ ztest_dmu_commit_callbacks(ztest_args_t *za)
 
 	dmu_write(os, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t), &txg, tx);
 
-	(void) pthread_mutex_lock(&zcl->zcl_callbacks_lock);
+	mutex_enter(&zcl->zcl_callbacks_lock);
 
 	/*
 	 * Since commit callbacks don't have any ordering requirement and since
@@ -3274,7 +3274,7 @@ ztest_dmu_commit_callbacks(ztest_args_t *za)
 		tmp_cb = cb_data[i];
 	}
 
-	(void) pthread_mutex_unlock(&zcl->zcl_callbacks_lock);
+	mutex_exit(&zcl->zcl_callbacks_lock);
 
 	dmu_tx_commit(tx);
 }
@@ -3290,7 +3290,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
 	char osname[MAXNAMELEN];
 	int error;
 
-	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+	rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 
 	dmu_objset_name(os, osname);
 
@@ -3329,7 +3329,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
 		}
 	}
 
-	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+	rw_exit(&ztest_shared->zs_name_lock);
 }
 
 /*
@@ -3493,7 +3493,7 @@ ztest_spa_rename(ztest_args_t *za)
 	int error;
 	spa_t *spa;
 
-	(void) pthread_rwlock_wrlock(&ztest_shared->zs_name_lock);
+	rw_enter(&ztest_shared->zs_name_lock, RW_WRITER);
 
 	oldname = za->za_pool;
 	newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
@@ -3545,7 +3545,7 @@ ztest_spa_rename(ztest_args_t *za)
 
 	umem_free(newname, strlen(newname) + 1);
 
-	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+	rw_exit(&ztest_shared->zs_name_lock);
 }
 
 
@@ -3884,19 +3884,19 @@ ztest_run(char *pool)
 	ztest_args_t *za;
 	spa_t *spa;
 	char name[100];
-	pthread_t resume_tid;
+	kthread_t *resume_thread;
 
 	ztest_exiting = B_FALSE;
 
-	(void) pthread_mutex_init(&zs->zs_vdev_lock, NULL);
-	(void) pthread_rwlock_init(&zs->zs_name_lock, NULL);
-	(void) pthread_mutex_init(&zs->zs_cb_list.zcl_callbacks_lock, NULL);
+	mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+	rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL);
+	mutex_init(&zs->zs_cb_list.zcl_callbacks_lock,NULL,MUTEX_DEFAULT,NULL);
 
 	list_create(&zs->zs_cb_list.zcl_callbacks, sizeof (ztest_cb_data_t),
 	    offsetof(ztest_cb_data_t, zcd_node));
 
 	for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
-		(void) pthread_mutex_init(&zs->zs_sync_lock[t], NULL);
+		mutex_init(&zs->zs_sync_lock[t], NULL, MUTEX_DEFAULT, NULL);
 
 	/*
 	 * Destroy one disk before we even start.
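The remaining ztest_run() hunks below swap pthread_create()/pthread_join() for the kernel-style thread_create()/thr_join() pair. A minimal sketch of that mapping over pthreads, with hypothetical my_* names; the real emulation (in the zfs_context.h and kernel.c hunks further down) additionally registers every thread so that curthread can be resolved:

```c
/* Sketch: kernel-style thread_create()/thr_join() on top of pthreads. */
#include <pthread.h>
#include <stdlib.h>
#include <assert.h>

typedef struct my_kthread {
	pthread_t t_id;
} my_kthread_t;

typedef void (*my_thread_func_t)(void *);

static my_kthread_t *
my_thread_create(my_thread_func_t func, void *arg)
{
	my_kthread_t *kt = calloc(1, sizeof (*kt));
	int rc;

	assert(kt != NULL);
	/*
	 * The cast mirrors the patch: a void-returning thread function is
	 * passed where pthreads expects void *(*)(void *).
	 */
	rc = pthread_create(&kt->t_id, NULL, (void *(*)(void *))func, arg);
	assert(rc == 0);
	(void) rc;
	return (kt);
}

static int
my_thr_join(my_kthread_t *kt)
{
	int rc = pthread_join(kt->t_id, NULL);

	free(kt);
	return (rc);
}
```

Returning an allocated handle instead of a raw pthread_t is what lets the caller keep a stable kthread_t pointer, which the converted ztest_args_t and taskq code store directly.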
@@ -3963,7 +3963,8 @@ ztest_run(char *pool)
 	/*
 	 * Create a thread to periodically resume suspended I/O.
 	 */
-	VERIFY(pthread_create(&resume_tid, NULL, ztest_resume_thread, spa)==0);
+	resume_thread = thread_create(NULL, 0, ztest_resume_thread, spa,
+	    THR_BOUND, NULL, 0, 0);
 
 	/*
 	 * Verify that we can safely inquire about about any object,
@@ -4012,7 +4013,7 @@ ztest_run(char *pool)
 
 		if (t < zopt_datasets) {
 			int test_future = FALSE;
-			(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+			rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 			(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
 			error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0,
 			    ztest_create_cb, NULL);
@@ -4020,7 +4021,7 @@ ztest_run(char *pool)
 				test_future = TRUE;
 			} else if (error == ENOSPC) {
 				zs->zs_enospc_count++;
-				(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+				rw_exit(&ztest_shared->zs_name_lock);
 				break;
 			} else if (error != 0) {
 				fatal(0, "dmu_objset_create(%s) = %d",
@@ -4031,7 +4032,7 @@ ztest_run(char *pool)
 			if (error)
 				fatal(0, "dmu_objset_open('%s') = %d",
 				    name, error);
-			(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+			rw_exit(&ztest_shared->zs_name_lock);
 			if (test_future)
 				ztest_dmu_check_future_leak(&za[t]);
 			zil_replay(za[d].za_os, za[d].za_os,
@@ -4039,12 +4040,12 @@ ztest_run(char *pool)
 			za[d].za_zilog = zil_open(za[d].za_os, NULL);
 		}
 
-		VERIFY(pthread_create(&za[t].za_thread, NULL, ztest_thread,
-		    &za[t]) == 0);
+		za[t].za_thread = thread_create(NULL, 0, ztest_thread, &za[t],
+		    THR_BOUND, NULL, 0, 0);
 	}
 
 	while (--t >= 0) {
-		VERIFY(pthread_join(za[t].za_thread, NULL) == 0);
+		VERIFY(thr_join(za[t].za_thread, NULL, NULL) == 0);
 		if (t < zopt_datasets) {
 			zil_close(za[t].za_zilog);
 			dmu_objset_close(za[t].za_os);
@@ -4063,7 +4064,7 @@ ztest_run(char *pool)
 	 * If we had out-of-space errors, destroy a random objset.
 	 */
 	if (zs->zs_enospc_count != 0) {
-		(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
+		rw_enter(&ztest_shared->zs_name_lock, RW_READER);
 		d = (int)ztest_random(zopt_datasets);
 		(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
 		if (zopt_verbose >= 3)
@@ -4074,7 +4075,7 @@ ztest_run(char *pool)
 		(void) dmu_objset_find(name, ztest_destroy_cb, &za[d],
 		    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
 
-		(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
+		rw_exit(&ztest_shared->zs_name_lock);
 	}
 
 	txg_wait_synced(spa_get_dsl(spa), 0);
@@ -4083,7 +4084,7 @@ ztest_run(char *pool)
 
 	/* Kill the resume thread */
 	ztest_exiting = B_TRUE;
-	VERIFY(pthread_join(resume_tid, NULL) == 0);
+	VERIFY(thr_join(resume_thread, NULL, NULL) == 0);
 	ztest_resume(spa);
 
 	/*
@@ -4099,9 +4100,9 @@ ztest_run(char *pool)
 
 	list_destroy(&zs->zs_cb_list.zcl_callbacks);
 
-	(void) pthread_mutex_destroy(&zs->zs_cb_list.zcl_callbacks_lock);
-	(void) pthread_rwlock_destroy(&zs->zs_name_lock);
-	(void) pthread_mutex_destroy(&zs->zs_vdev_lock);
+	mutex_destroy(&zs->zs_cb_list.zcl_callbacks_lock);
+	rw_destroy(&zs->zs_name_lock);
+	mutex_destroy(&zs->zs_vdev_lock);
 }
 
 void
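With ztest.c converted, the libzpool side follows: kthread_t becomes a real structure, and curthread is resolved by looking up the calling pthread in a lock-protected registry. A standalone sketch of that lookup idea with hypothetical names (the actual implementation in the kernel.c hunk below uses libzpool's list_t and kmutex_t):

```c
/* Sketch: resolve a "current thread" handle by registry lookup. */
#include <pthread.h>
#include <stddef.h>

typedef struct my_kthread {
	struct my_kthread *t_next;	/* simple singly linked registry */
	pthread_t t_id;
} my_kthread_t;

static pthread_mutex_t my_kthread_lock = PTHREAD_MUTEX_INITIALIZER;
static my_kthread_t *my_kthread_head;

/* Called by the thread-creation wrapper before the new handle is used. */
static void
my_kthread_register(my_kthread_t *kt)
{
	(void) pthread_mutex_lock(&my_kthread_lock);
	kt->t_next = my_kthread_head;
	my_kthread_head = kt;
	(void) pthread_mutex_unlock(&my_kthread_lock);
}

/* The emulated curthread: walk the registry looking for pthread_self(). */
static my_kthread_t *
my_curthread(void)
{
	pthread_t tid = pthread_self();
	my_kthread_t *kt;

	(void) pthread_mutex_lock(&my_kthread_lock);
	for (kt = my_kthread_head; kt != NULL; kt = kt->t_next) {
		if (pthread_equal(kt->t_id, tid))
			break;
	}
	(void) pthread_mutex_unlock(&my_kthread_lock);

	return (kt);
}
```

A linear scan under one global lock is simple and adequate for ztest's thread counts; thread-specific data (pthread_key_create()/pthread_setspecific()) would be an alternative way to make the lookup constant-time.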
diff --git a/lib/libzpool/include/sys/zfs_context.h b/lib/libzpool/include/sys/zfs_context.h
index 1f5849b6b9..15f7665cdd 100644
--- a/lib/libzpool/include/sys/zfs_context.h
+++ b/lib/libzpool/include/sys/zfs_context.h
@@ -192,18 +192,24 @@ _NOTE(CONSTCOND) } while (0)
 /*
  * Threads
 */
-#define curthread ((void *)(uintptr_t)pthread_self())
 #define tsd_get(key) pthread_getspecific(key)
 #define tsd_set(key, val) pthread_setspecific(key, val)
 
-typedef struct kthread kthread_t;
 typedef void (*thread_func_t)(void *);
+typedef struct kthread {
+	list_node_t t_node;
+	pthread_t t_id;
+	pthread_attr_t t_attr;
+} kthread_t;
 
-#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
-	zk_thread_create((thread_func_t)func, arg)
-#define thread_exit() pthread_exit(NULL)
+#define curthread zk_curthread()
+#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
+	zk_thread_create((thread_func_t)func, arg)
+#define thr_join(kt, v1, v2) pthread_join(kt->t_id, v2)
 
+extern kthread_t *zk_curthread(void);
 extern kthread_t *zk_thread_create(thread_func_t func, void *arg);
+extern void thread_exit(void);
 
 #define issig(why) (FALSE)
 #define ISSIG(thr, why) (FALSE)
diff --git a/lib/libzpool/kernel.c b/lib/libzpool/kernel.c
index 4fe4eac54a..161f3d5bac 100644
--- a/lib/libzpool/kernel.c
+++ b/lib/libzpool/kernel.c
@@ -56,20 +56,66 @@ struct utsname utsname = {
  * threads
  * =========================================================================
 */
-/*ARGSUSED*/
+
+kmutex_t kthread_lock;
+list_t kthread_list;
+
+kthread_t *
+zk_curthread(void)
+{
+	kthread_t *kt;
+	pthread_t tid;
+
+	tid = pthread_self();
+	mutex_enter(&kthread_lock);
+	for (kt = list_head(&kthread_list); kt != NULL;
+	    kt = list_next(&kthread_list, kt)) {
+
+		if (kt->t_id == tid) {
+			mutex_exit(&kthread_lock);
+			return kt;
+		}
+	}
+	mutex_exit(&kthread_lock);
+
+	return NULL;
+}
+
 kthread_t *
 zk_thread_create(thread_func_t func, void *arg)
 {
-	pthread_t tid;
+	kthread_t *kt;
 
-	pthread_attr_t attr;
-	VERIFY(pthread_attr_init(&attr) == 0);
-	VERIFY(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+	kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
 
-	VERIFY(pthread_create(&tid, &attr, (void *(*)(void *))func, arg) == 0);
+	VERIFY(pthread_attr_init(&kt->t_attr) == 0);
+	VERIFY(pthread_attr_setdetachstate(&kt->t_attr,
+	    PTHREAD_CREATE_DETACHED) == 0);
+	VERIFY(pthread_create(&kt->t_id, &kt->t_attr,
+	    (void *(*)(void *))func, arg) == 0);
 
-	/* XXX: not portable */
-	return ((void *)(uintptr_t)tid);
+	mutex_enter(&kthread_lock);
+	list_insert_head(&kthread_list, kt);
+	mutex_exit(&kthread_lock);
+
+	return kt;
+}
+
+void
+thread_exit(void)
+{
+	kthread_t *kt;
+
+	VERIFY((kt = curthread) != NULL);
+
+	mutex_enter(&kthread_lock);
+	list_remove(&kthread_list, kt);
+	mutex_exit(&kthread_lock);
+
+	VERIFY(pthread_attr_destroy(&kt->t_attr) == 0);
+	umem_free(kt, sizeof(kthread_t));
+
+	pthread_exit(NULL);
 }
 
 /*
@@ -850,6 +896,10 @@ kernel_init(int mode)
 	VERIFY((random_fd = open("/dev/random", O_RDONLY)) != -1);
 	VERIFY((urandom_fd = open("/dev/urandom", O_RDONLY)) != -1);
 
+	mutex_init(&kthread_lock, NULL, MUTEX_DEFAULT, NULL);
+	list_create(&kthread_list, sizeof (kthread_t),
+	    offsetof(kthread_t, t_node));
+
 	system_taskq_init();
 
 	spa_init(mode);
@@ -860,6 +910,9 @@ kernel_fini(void)
 {
 	spa_fini();
 
+	list_destroy(&kthread_list);
+	mutex_destroy(&kthread_lock);
+
 	close(random_fd);
 	close(urandom_fd);
 
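taskq.c (below) gets the same treatment: the worker list stores kthread_t pointers, so taskq_member() can compare handles directly instead of casting through pthread_t. A standalone sketch of that pointer-based membership test, with hypothetical names; callers are expected to pass the per-thread handle (the emulated curthread):

```c
/* Sketch: pointer-based membership test for a worker-thread pool. */

typedef struct my_kthread my_kthread_t;	/* opaque handle, as sketched above */

typedef struct my_taskq {
	my_kthread_t **tq_threadlist;	/* one handle per worker thread */
	int tq_nthreads;
} my_taskq_t;

/*
 * Returns 1 if "kt" is one of this taskq's workers. Because every thread
 * owns a unique heap-allocated handle, comparing the handle pointers is
 * sufficient; no pthread_t comparison is needed.
 */
static int
my_taskq_member(const my_taskq_t *tq, const my_kthread_t *kt)
{
	for (int i = 0; i < tq->tq_nthreads; i++) {
		if (tq->tq_threadlist[i] == kt)
			return (1);
	}
	return (0);
}
```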
diff --git a/lib/libzpool/taskq.c b/lib/libzpool/taskq.c
index 494e233eae..40dfb67893 100644
--- a/lib/libzpool/taskq.c
+++ b/lib/libzpool/taskq.c
@@ -42,7 +42,7 @@ struct taskq {
 	krwlock_t tq_threadlock;
 	kcondvar_t tq_dispatch_cv;
 	kcondvar_t tq_wait_cv;
-	pthread_t *tq_threadlist;
+	kthread_t **tq_threadlist;
 	int tq_flags;
 	int tq_active;
 	int tq_nthreads;
@@ -198,7 +198,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 	tq->tq_maxalloc = maxalloc;
 	tq->tq_task.task_next = &tq->tq_task;
 	tq->tq_task.task_prev = &tq->tq_task;
-	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (pthread_t), KM_SLEEP);
+	tq->tq_threadlist = kmem_alloc(nthreads*sizeof(kthread_t *), KM_SLEEP);
 
 	if (flags & TASKQ_PREPOPULATE) {
 		mutex_enter(&tq->tq_lock);
@@ -208,8 +208,8 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 	}
 
 	for (t = 0; t < nthreads; t++)
-		VERIFY(pthread_create(&tq->tq_threadlist[t],
-		    NULL, taskq_thread, tq) == 0);
+		VERIFY((tq->tq_threadlist[t] = thread_create(NULL, 0,
+		    taskq_thread, tq, THR_BOUND, NULL, 0, 0)) != NULL);
 
 	return (tq);
 }
@@ -239,9 +239,9 @@ taskq_destroy(taskq_t *tq)
 	mutex_exit(&tq->tq_lock);
 
 	for (t = 0; t < nthreads; t++)
-		VERIFY(pthread_join(tq->tq_threadlist[t], NULL) == 0);
+		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);
 
-	kmem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));
+	kmem_free(tq->tq_threadlist, nthreads * sizeof(kthread_t *));
 
 	rw_destroy(&tq->tq_threadlock);
 	mutex_destroy(&tq->tq_lock);
@@ -260,7 +260,7 @@ taskq_member(taskq_t *tq, void *t)
 		return (1);
 
 	for (i = 0; i < tq->tq_nthreads; i++)
-		if (tq->tq_threadlist[i] == (pthread_t)(uintptr_t)t)
+		if (tq->tq_threadlist[i] == (kthread_t *)t)
			return (1);
 
 	return (0);