Merge branch 'feature-pthreads' into refs/top-bases/feature-branch

commit 3bd93e7103
Author: Brian Behlendorf
Date:   2010-05-27 13:25:20 -07:00
8 changed files with 194 additions and 446 deletions

View File

@ -52,6 +52,7 @@
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#undef ZFS_MAXNAMELEN
#undef verify
#include <libzfs.h>
const char cmdname[] = "zdb";

View File

@ -25,6 +25,8 @@
#include <libzfs.h>
#undef verify /* both libzfs.h and zfs_context.h want to define this */
#include <sys/zfs_context.h>
#include <errno.h>
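Both hunks above exist for the same reason: libzfs.h (through assert.h) and zfs_context.h each want to define verify(), so whichever header is included second must be preceded by an #undef. A self-contained sketch of the collision, with hypothetical macro bodies standing in for the real headers:

#include <assert.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the two headers' definitions. */
#define verify(EX) (void)((EX) || (abort(), 0))	/* "header A" */
#undef verify					/* make room ... */
#define verify(EX) assert(EX)			/* ... for "header B" */

int
main(void)
{
	verify(1 + 1 == 2);	/* uses whichever definition survived */
	return (0);
}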

View File

@ -140,8 +140,7 @@ typedef struct ztest_args {
spa_t *za_spa;
objset_t *za_os;
zilog_t *za_zilog;
kthread_t *za_thread;
kt_did_t za_threadid;
thread_t za_thread;
uint64_t za_instance;
uint64_t za_random;
uint64_t za_diroff;
@ -232,7 +231,7 @@ ztest_info_t ztest_info[] = {
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
kmutex_t zcl_callbacks_lock;
mutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
@ -240,8 +239,8 @@ typedef struct ztest_cb_list {
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
kmutex_t zs_vdev_lock;
krwlock_t zs_name_lock;
mutex_t zs_vdev_lock;
rwlock_t zs_name_lock;
uint64_t zs_vdev_primaries;
uint64_t zs_vdev_aux;
uint64_t zs_enospc_count;
@ -250,7 +249,7 @@ typedef struct ztest_shared {
uint64_t zs_alloc;
uint64_t zs_space;
ztest_info_t zs_info[ZTEST_FUNCS];
kmutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
mutex_t zs_sync_lock[ZTEST_SYNC_LOCKS];
uint64_t zs_seq[ZTEST_SYNC_LOCKS];
ztest_cb_list_t zs_cb_list;
} ztest_shared_t;
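The shared state thus drops the kernel-style kmutex_t/krwlock_t emulation for plain libc synchronization objects. A minimal sketch of the resulting pattern, assuming Solaris <synch.h> (struct and function names below are illustrative):

#include <synch.h>

typedef struct shared_state {
	mutex_t		vdev_lock;	/* stands in for zs_vdev_lock */
	rwlock_t	name_lock;	/* stands in for zs_name_lock */
} shared_state_t;

static void
shared_state_init(shared_state_t *ss)
{
	/* USYNC_THREAD: visible to the threads of one process only. */
	(void) mutex_init(&ss->vdev_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&ss->name_lock, USYNC_THREAD, NULL);
}

static void
shared_state_fini(shared_state_t *ss)
{
	(void) mutex_destroy(&ss->vdev_lock);
	(void) rwlock_destroy(&ss->name_lock);
}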
@ -826,7 +825,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
error = spa_create(za->za_pool, nvroot, NULL, NULL, NULL);
nvlist_free(nvroot);
@ -842,7 +841,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
fatal(0, "spa_destroy() = %d", error);
spa_close(spa, FTAG);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
static vdev_t *
@ -872,7 +871,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
nvlist_t *nvroot;
int error;
mutex_enter(&ztest_shared->zs_vdev_lock);
(void) mutex_lock(&ztest_shared->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@ -890,7 +889,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
error = spa_vdev_add(spa, nvroot);
nvlist_free(nvroot);
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
if (error == ENOSPC)
ztest_record_enospc("spa_vdev_add");
@ -919,7 +918,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
aux = ZPOOL_CONFIG_L2CACHE;
}
mutex_enter(&ztest_shared->zs_vdev_lock);
(void) mutex_lock(&ztest_shared->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@ -975,7 +974,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
}
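The same mechanical substitution repeats through the rest of the file: void-returning kernel wrappers become int-returning libc calls whose results ztest deliberately discards. In isolation, with an illustrative lock name:

#include <synch.h>

static mutex_t vdev_lock;	/* stands in for zs_vdev_lock */

static void
vdev_critical_section(void)
{
	(void) mutex_lock(&vdev_lock);		/* was mutex_enter() */
	/* ... add or remove vdevs under the lock ... */
	(void) mutex_unlock(&vdev_lock);	/* was mutex_exit() */
}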
/*
@ -1001,7 +1000,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
int oldvd_is_log;
int error, expected_error;
mutex_enter(&ztest_shared->zs_vdev_lock);
(void) mutex_lock(&ztest_shared->zs_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@ -1061,7 +1060,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
return;
}
@ -1154,7 +1153,7 @@ ztest_vdev_attach_detach(ztest_args_t *za)
(longlong_t)newsize, replacing, error, expected_error);
}
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
}
/*
@ -1257,7 +1256,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
size_t psize, newsize;
uint64_t spa_newsize, spa_cursize, ms_count;
mutex_enter(&ztest_shared->zs_vdev_lock);
(void) mutex_lock(&ztest_shared->zs_vdev_lock);
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
@ -1285,7 +1284,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
if (psize == 0 || psize >= 4 * zopt_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&spa_namespace_lock);
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
return;
}
ASSERT(psize > 0);
@ -1314,7 +1313,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
}
(void) spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&spa_namespace_lock);
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
return;
}
@ -1354,7 +1353,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_shared->zs_vdev_lock);
(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
}
/* ARGSUSED */
@ -1453,7 +1452,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
uint64_t seq;
uint64_t objects;
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
(void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
(u_longlong_t)za->za_instance);
@ -1496,7 +1495,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_create");
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
return;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
@ -1578,7 +1577,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
if (error)
fatal(0, "dmu_objset_destroy(%s) = %d", name, error);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
/*
@ -1592,7 +1591,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
char snapname[100];
char osname[MAXNAMELEN];
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
dmu_objset_name(os, osname);
(void) snprintf(snapname, 100, "%s@%llu", osname,
(u_longlong_t)za->za_instance);
@ -1606,7 +1605,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
ztest_record_enospc("dmu_take_snapshot");
else if (error != 0 && error != EEXIST)
fatal(0, "dmu_take_snapshot() = %d", error);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
/*
@ -1663,7 +1662,7 @@ ztest_dsl_dataset_promote_busy(ztest_args_t *za)
char osname[MAXNAMELEN];
uint64_t curval = za->za_instance;
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
dmu_objset_name(os, osname);
ztest_dsl_dataset_cleanup(osname, curval);
@ -1748,7 +1747,7 @@ ztest_dsl_dataset_promote_busy(ztest_args_t *za)
out:
ztest_dsl_dataset_cleanup(osname, curval);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
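The reader/writer counterpart of the same rewrite: dataset-name users take rw_rdlock(), while the rename test further below takes rw_wrlock() to exclude them all. A sketch with an illustrative lock:

#include <synch.h>

static rwlock_t name_lock;	/* stands in for zs_name_lock */

static void
dataset_reader(void)
{
	(void) rw_rdlock(&name_lock);	/* was rw_enter(..., RW_READER) */
	/* ... create/destroy objsets under a stable pool name ... */
	(void) rw_unlock(&name_lock);	/* was rw_exit() */
}

static void
pool_renamer(void)
{
	(void) rw_wrlock(&name_lock);	/* was rw_enter(..., RW_WRITER) */
	/* ... rename the pool while no reader holds the name ... */
	(void) rw_unlock(&name_lock);
}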
/*
@ -2556,7 +2555,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
int bs = ZTEST_DIROBJ_BLOCKSIZE;
int do_free = 0;
uint64_t off, txg, txg_how;
kmutex_t *lp;
mutex_t *lp;
char osname[MAXNAMELEN];
char iobuf[SPA_MAXBLOCKSIZE];
blkptr_t blk = { 0 };
@ -2618,7 +2617,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
txg = dmu_tx_get_txg(tx);
lp = &ztest_shared->zs_sync_lock[b];
mutex_enter(lp);
(void) mutex_lock(lp);
wbt->bt_objset = dmu_objset_id(os);
wbt->bt_object = ZTEST_DIROBJ;
@ -2675,7 +2674,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
dmu_buf_rele(bonus_db, FTAG);
}
mutex_exit(lp);
(void) mutex_unlock(lp);
if (ztest_random(1000) == 0)
(void) poll(NULL, 0, 1); /* open dn_notxholds window */
@ -2694,13 +2693,13 @@ ztest_dmu_write_parallel(ztest_args_t *za)
/*
* dmu_sync() the block we just wrote.
*/
mutex_enter(lp);
(void) mutex_lock(lp);
blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db);
za->za_dbuf = db;
if (error) {
mutex_exit(lp);
(void) mutex_unlock(lp);
return;
}
blkoff = off - blkoff;
@ -2709,18 +2708,18 @@ ztest_dmu_write_parallel(ztest_args_t *za)
za->za_dbuf = NULL;
if (error) {
mutex_exit(lp);
(void) mutex_unlock(lp);
return;
}
if (blk.blk_birth == 0) { /* concurrent free */
mutex_exit(lp);
(void) mutex_unlock(lp);
return;
}
txg_suspend(dmu_objset_pool(os));
mutex_exit(lp);
(void) mutex_unlock(lp);
ASSERT(blk.blk_fill == 1);
ASSERT3U(BP_GET_TYPE(&blk), ==, DMU_OT_UINT64_OTHER);
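The zs_sync_lock array above is classic lock striping: each writer maps its block to one of ZTEST_SYNC_LOCKS mutexes so that concurrent writers to different blocks rarely contend. Reduced to a sketch (array size and hash are illustrative):

#include <synch.h>
#include <stdint.h>

#define	SYNC_LOCKS	16	/* stands in for ZTEST_SYNC_LOCKS */

static mutex_t sync_lock[SYNC_LOCKS];

static void
synced_write(uint64_t block)
{
	mutex_t *lp = &sync_lock[block % SYNC_LOCKS];

	(void) mutex_lock(lp);
	/* ... write the block and record its block pointer ... */
	(void) mutex_unlock(lp);
}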
@ -3131,9 +3130,9 @@ ztest_commit_callback(void *arg, int error)
ASSERT3U(data->zcd_txg, !=, 0);
/* Remove our callback from the list */
mutex_enter(&zcl->zcl_callbacks_lock);
(void) mutex_lock(&zcl->zcl_callbacks_lock);
list_remove(&zcl->zcl_callbacks, data);
mutex_exit(&zcl->zcl_callbacks_lock);
(void) mutex_unlock(&zcl->zcl_callbacks_lock);
out:
umem_free(data, sizeof (ztest_cb_data_t));
@ -3230,7 +3229,7 @@ ztest_dmu_commit_callbacks(ztest_args_t *za)
dmu_write(os, ZTEST_DIROBJ, za->za_diroff, sizeof (uint64_t), &txg, tx);
mutex_enter(&zcl->zcl_callbacks_lock);
(void) mutex_lock(&zcl->zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
@ -3275,7 +3274,7 @@ ztest_dmu_commit_callbacks(ztest_args_t *za)
tmp_cb = cb_data[i];
}
mutex_exit(&zcl->zcl_callbacks_lock);
(void) mutex_unlock(&zcl->zcl_callbacks_lock);
dmu_tx_commit(tx);
}
@ -3291,7 +3290,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
char osname[MAXNAMELEN];
int error;
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
dmu_objset_name(os, osname);
@ -3330,7 +3329,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
}
}
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
/*
@ -3494,7 +3493,7 @@ ztest_spa_rename(ztest_args_t *za)
int error;
spa_t *spa;
rw_enter(&ztest_shared->zs_name_lock, RW_WRITER);
(void) rw_wrlock(&ztest_shared->zs_name_lock);
oldname = za->za_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
@ -3546,7 +3545,7 @@ ztest_spa_rename(ztest_args_t *za)
umem_free(newname, strlen(newname) + 1);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
@ -3804,8 +3803,6 @@ ztest_resume_thread(void *arg)
(void) poll(NULL, 0, 1000);
ztest_resume(spa);
}
thread_exit();
return (NULL);
}
@ -3870,7 +3867,6 @@ ztest_thread(void *arg)
break;
}
thread_exit();
return (NULL);
}
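The explicit thread_exit() calls can simply disappear because, under Solaris threads, returning from the start routine is equivalent to calling thr_exit() with that return value. A sketch:

#include <thread.h>

static void *
test_worker(void *arg)
{
	/* ... run test functions until told to stop ... */
	return (NULL);	/* implicit thr_exit(NULL) */
}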
@ -3885,20 +3881,20 @@ ztest_run(char *pool)
ztest_args_t *za;
spa_t *spa;
char name[100];
kthread_t *resume_thread;
kt_did_t resume_id;
thread_t resume_tid;
ztest_exiting = B_FALSE;
mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL);
mutex_init(&zs->zs_cb_list.zcl_callbacks_lock,NULL,MUTEX_DEFAULT,NULL);
(void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL);
(void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL);
(void) _mutex_init(&zs->zs_cb_list.zcl_callbacks_lock, USYNC_THREAD,
NULL);
list_create(&zs->zs_cb_list.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
mutex_init(&zs->zs_sync_lock[t], NULL, MUTEX_DEFAULT, NULL);
(void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL);
/*
* Destroy one disk before we even start.
@ -3965,9 +3961,8 @@ ztest_run(char *pool)
/*
* Create a thread to periodically resume suspended I/O.
*/
VERIFY3P((resume_thread = thread_create(NULL, 0, ztest_resume_thread,
spa, THR_BOUND, NULL, 0, 0)), !=, NULL);
resume_id = resume_thread->t_tid;
VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
&resume_tid) == 0);
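thr_create() in a self-contained sketch, assuming Solaris <thread.h>: THR_BOUND requests a bound (one LWP per thread) thread, and the id written through the last argument is what thr_join() later reaps.

#include <thread.h>
#include <assert.h>

static void *
resume_loop(void *arg)
{
	return (arg);	/* stand-in for the real resume loop */
}

static void
spawn_and_reap(void *spa)
{
	thread_t tid;
	int err;

	err = thr_create(0, 0, resume_loop, spa, THR_BOUND, &tid);
	assert(err == 0);
	/* ... let it run ... */
	err = thr_join(tid, NULL, NULL);
	assert(err == 0);
}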
/*
* Verify that we can safely inquire about any object,
@ -4016,7 +4011,7 @@ ztest_run(char *pool)
if (t < zopt_datasets) {
int test_future = FALSE;
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0,
ztest_create_cb, NULL);
@ -4024,7 +4019,7 @@ ztest_run(char *pool)
test_future = TRUE;
} else if (error == ENOSPC) {
zs->zs_enospc_count++;
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
break;
} else if (error != 0) {
fatal(0, "dmu_objset_create(%s) = %d",
@ -4035,7 +4030,7 @@ ztest_run(char *pool)
if (error)
fatal(0, "dmu_objset_open('%s') = %d",
name, error);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
if (test_future)
ztest_dmu_check_future_leak(&za[t]);
zil_replay(za[d].za_os, za[d].za_os,
@ -4043,13 +4038,12 @@ ztest_run(char *pool)
za[d].za_zilog = zil_open(za[d].za_os, NULL);
}
VERIFY3P((za[t].za_thread = thread_create(NULL, 0, ztest_thread,
&za[t], THR_BOUND, NULL, 0, 0)), !=, NULL);
za[t].za_threadid = za[t].za_thread->t_tid;
VERIFY(thr_create(0, 0, ztest_thread, &za[t], THR_BOUND,
&za[t].za_thread) == 0);
}
while (--t >= 0) {
VERIFY(thread_join(za[t].za_threadid, NULL, NULL) == 0);
VERIFY(thr_join(za[t].za_thread, NULL, NULL) == 0);
if (t < zopt_datasets) {
zil_close(za[t].za_zilog);
dmu_objset_close(za[t].za_os);
@ -4068,7 +4062,7 @@ ztest_run(char *pool)
* If we had out-of-space errors, destroy a random objset.
*/
if (zs->zs_enospc_count != 0) {
rw_enter(&ztest_shared->zs_name_lock, RW_READER);
(void) rw_rdlock(&ztest_shared->zs_name_lock);
d = (int)ztest_random(zopt_datasets);
(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
if (zopt_verbose >= 3)
@ -4079,7 +4073,7 @@ ztest_run(char *pool)
(void) dmu_objset_find(name, ztest_destroy_cb, &za[d],
DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
rw_exit(&ztest_shared->zs_name_lock);
(void) rw_unlock(&ztest_shared->zs_name_lock);
}
txg_wait_synced(spa_get_dsl(spa), 0);
@ -4088,7 +4082,7 @@ ztest_run(char *pool)
/* Kill the resume thread */
ztest_exiting = B_TRUE;
VERIFY(thread_join(resume_id, NULL, NULL) == 0);
VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
ztest_resume(spa);
/*
@ -4104,9 +4098,9 @@ ztest_run(char *pool)
list_destroy(&zs->zs_cb_list.zcl_callbacks);
mutex_destroy(&zs->zs_cb_list.zcl_callbacks_lock);
rw_destroy(&zs->zs_name_lock);
mutex_destroy(&zs->zs_vdev_lock);
(void) _mutex_destroy(&zs->zs_cb_list.zcl_callbacks_lock);
(void) rwlock_destroy(&zs->zs_name_lock);
(void) _mutex_destroy(&zs->zs_vdev_lock);
}
void

View File

@ -37,6 +37,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/debug.h>
#include <thread.h>
#include <unistd.h>
#if !defined(TEXT_DOMAIN)
@ -69,12 +70,11 @@ static va_list uu_panic_args;
static pthread_t uu_panic_thread;
static uint32_t _uu_main_error;
static __thread int _uu_main_thread = 0;
void
uu_set_error(uint_t code)
{
if (_uu_main_thread) {
if (thr_main() != 0) {
_uu_main_error = code;
return;
}
@ -103,7 +103,7 @@ uu_set_error(uint_t code)
uint32_t
uu_error(void)
{
if (_uu_main_thread)
if (thr_main() != 0)
return (_uu_main_error);
if (uu_error_key_setup < 0) /* can't happen? */
@ -251,6 +251,5 @@ uu_release_child(void)
static void
uu_init(void)
{
_uu_main_thread = 1;
(void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
}
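thr_main() is what makes the per-thread error bookkeeping above straightforward: it returns 1 in the main thread, 0 in any other thread, and -1 if the threads library cannot tell, so the main thread can keep its error code in a plain global. A sketch:

#include <thread.h>
#include <stdio.h>

int
main(void)
{
	/* Prints 1: we are the main thread. */
	(void) printf("thr_main() = %d\n", thr_main());
	return (0);
}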

View File

@ -50,7 +50,8 @@ extern "C" {
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <synch.h>
#include <thread.h>
#include <assert.h>
#include <alloca.h>
#include <umem.h>
@ -105,7 +106,6 @@ extern void vpanic(const char *, __va_list);
#define fm_panic panic
/* This definition is copied from assert.h. */
#ifndef verify
#if defined(__STDC__)
#if __STDC_VERSION__ - 0 >= 199901L
#define verify(EX) (void)((EX) || \
@ -116,10 +116,7 @@ extern void vpanic(const char *, __va_list);
#else
#define verify(EX) (void)((EX) || (_assert("EX", __FILE__, __LINE__), 0))
#endif /* __STDC__ */
#endif
#undef VERIFY
#undef ASSERT
#define VERIFY verify
#define ASSERT assert
@ -192,34 +189,15 @@ _NOTE(CONSTCOND) } while (0)
/*
* Threads
*/
#define THR_BOUND 0x00000001
#define TS_RUN 0x00000002
#define curthread ((void *)(uintptr_t)thr_self())
typedef void (*thread_func_t)(void *);
typedef pthread_t kt_did_t;
typedef struct kthread kthread_t;
typedef struct kthread {
list_node_t t_node;
kt_did_t t_tid;
pthread_attr_t t_attr;
} kthread_t;
#define tsd_get(key) pthread_getspecific(key)
#define tsd_set(key, val) pthread_setspecific(key, val)
#define curthread zk_thread_current()
#define thread_exit zk_thread_exit
#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
zk_thread_create(stk, stksize, (thread_func_t)func, arg, \
len, NULL, state, pri)
#define thread_join(tid, dtid, status) \
zk_thread_join(tid, dtid, status)
zk_thread_create(func, arg)
#define thread_exit() thr_exit(NULL)
extern kthread_t *zk_thread_current(void);
extern void zk_thread_exit(void);
extern kthread_t *zk_thread_create(caddr_t stk, size_t stksize,
thread_func_t func, void *arg, size_t len,
void *pp, int state, pri_t pri);
extern int zk_thread_join(kt_did_t tid, kthread_t *dtid, void **status);
extern kthread_t *zk_thread_create(void (*func)(), void *arg);
#define issig(why) (FALSE)
#define ISSIG(thr, why) (FALSE)
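The replacement thread_create() macro keeps every kernel call site compiling because unused macro arguments are discarded during preprocessing; tokens such as TS_RUN and minclsyspri never have to be valid userland expressions at all. A sketch with illustrative worker names:

typedef struct kthread kthread_t;

extern kthread_t *zk_thread_create(void (*func)(), void *arg);
extern void sync_worker(void *);

#define	thread_create(stk, stksize, func, arg, len, pp, state, pri) \
	zk_thread_create(func, arg)

void
start_sync(void *dp)
{
	/* TS_RUN and minclsyspri vanish at macro expansion time. */
	(void) thread_create(NULL, 0, sync_worker, dp, 0, NULL, TS_RUN,
	    minclsyspri);
}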
@ -227,20 +205,28 @@ extern int zk_thread_join(kt_did_t tid, kthread_t *dtid, void **status);
/*
* Mutexes
*/
#define MTX_MAGIC 0x9522f51362a6e326ull
#define MTX_INIT (void *)NULL
#define MTX_DEST (void *)-1UL
typedef struct kmutex {
void *m_owner;
uint64_t m_magic;
pthread_mutex_t m_lock;
boolean_t initialized;
mutex_t m_lock;
} kmutex_t;
#define MUTEX_DEFAULT 0
#define MUTEX_HELD(m) ((m)->m_owner == curthread)
#define MUTEX_DEFAULT USYNC_THREAD
#undef MUTEX_HELD
#define MUTEX_HELD(m) _mutex_held(&(m)->m_lock)
extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
extern void mutex_destroy(kmutex_t *mp);
/*
* Argh -- we have to get cheesy here because the kernel and userland
* have different signatures for the same routine.
*/
extern int _mutex_init(mutex_t *mp, int type, void *arg);
extern int _mutex_destroy(mutex_t *mp);
#define mutex_init(mp, b, c, d) zmutex_init((kmutex_t *)(mp))
#define mutex_destroy(mp) zmutex_destroy((kmutex_t *)(mp))
extern void zmutex_init(kmutex_t *mp);
extern void zmutex_destroy(kmutex_t *mp);
extern void mutex_enter(kmutex_t *mp);
extern void mutex_exit(kmutex_t *mp);
extern int mutex_tryenter(kmutex_t *mp);
@ -249,26 +235,23 @@ extern void *mutex_owner(kmutex_t *mp);
/*
* RW locks
*/
#define RW_MAGIC 0x4d31fb123648e78aull
#define RW_INIT (void *)NULL
#define RW_DEST (void *)-1UL
typedef struct krwlock {
void *rw_owner;
void *rw_wr_owner;
uint64_t rw_magic;
pthread_rwlock_t rw_lock;
uint_t rw_readers;
boolean_t initialized;
rwlock_t rw_lock;
} krwlock_t;
typedef int krw_t;
#define RW_READER 0
#define RW_WRITER 1
#define RW_DEFAULT 0
#define RW_DEFAULT USYNC_THREAD
#define RW_READ_HELD(x) ((x)->rw_readers > 0)
#define RW_WRITE_HELD(x) ((x)->rw_wr_owner == curthread)
#define RW_LOCK_HELD(x) (RW_READ_HELD(x) || RW_WRITE_HELD(x))
#undef RW_READ_HELD
#define RW_READ_HELD(x) _rw_read_held(&(x)->rw_lock)
#undef RW_WRITE_HELD
#define RW_WRITE_HELD(x) _rw_write_held(&(x)->rw_lock)
extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
extern void rw_destroy(krwlock_t *rwlp);
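The new held-lock predicates lean on private libc helpers; whether <synch.h> exposes their prototypes is an implementation detail, so this sketch declares _rw_read_held() by hand:

#include <synch.h>
#include <assert.h>

extern int _rw_read_held(rwlock_t *);	/* private libc predicate */

static rwlock_t lock;	/* zero-initialized: USYNC_THREAD by default */

static void
reader_section(void)
{
	(void) rw_rdlock(&lock);
	assert(_rw_read_held(&lock));	/* what RW_READ_HELD() expands to */
	(void) rw_unlock(&lock);
}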
@ -286,13 +269,9 @@ extern gid_t *crgetgroups(cred_t *cr);
/*
* Condition variables
*/
#define CV_MAGIC 0xd31ea9a83b1b30c4ull
typedef struct kcondvar {
uint64_t cv_magic;
pthread_cond_t cv;
} kcondvar_t;
typedef cond_t kcondvar_t;
#define CV_DEFAULT 0
#define CV_DEFAULT USYNC_THREAD
extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
extern void cv_destroy(kcondvar_t *cv);
@ -353,7 +332,6 @@ extern void taskq_destroy(taskq_t *);
extern void taskq_wait(taskq_t *);
extern int taskq_member(taskq_t *, void *);
extern void system_taskq_init(void);
extern void system_taskq_fini(void);
#define XVA_MAPSIZE 3
#define XVA_MAGIC 0x78766174
@ -468,8 +446,7 @@ extern void delay(clock_t ticks);
#define minclsyspri 60
#define maxclsyspri 99
/* XXX: not portable */
#define CPU_SEQID (pthread_self() & (max_ncpus - 1))
#define CPU_SEQID (thr_self() & (max_ncpus - 1))
#define kcred NULL
#define CRED() NULL

View File

@ -30,14 +30,12 @@
#include <stdlib.h>
#include <string.h>
#include <zlib.h>
#include <sys/signal.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/processor.h>
#include <sys/zfs_context.h>
#include <sys/zmod.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <sys/systeminfo.h>
/*
@ -57,158 +55,16 @@ struct utsname utsname = {
* threads
* =========================================================================
*/
/* NOTE: Tracking each tid on a list and using it for curthread lookups
* is slow at best, but it provides an easy way to layer a kthread-style
* API on top of pthreads. For now we just want ztest to work and to
* validate correctness. Performance is not much of an issue, since that
* is what the in-kernel version is for. That said, reworking this to
* track the kthread_t structure as thread-specific data would probably
* be the best way to speed this up.
*/
pthread_cond_t kthread_cond = PTHREAD_COND_INITIALIZER;
pthread_mutex_t kthread_lock = PTHREAD_MUTEX_INITIALIZER;
list_t kthread_list;
static int
thread_count(void)
{
kthread_t *kt;
int count = 0;
for (kt = list_head(&kthread_list); kt != NULL;
kt = list_next(&kthread_list, kt))
count++;
return count;
}
static void
thread_init(void)
{
kthread_t *kt;
/* Initialize list for tracking kthreads */
list_create(&kthread_list, sizeof (kthread_t),
offsetof(kthread_t, t_node));
/* Create entry for primary kthread */
kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
list_link_init(&kt->t_node);
VERIFY3U(kt->t_tid = pthread_self(), !=, 0);
VERIFY3S(pthread_attr_init(&kt->t_attr), ==, 0);
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
list_insert_head(&kthread_list, kt);
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
}
static void
thread_fini(void)
{
kthread_t *kt;
struct timespec ts = { 0 };
int count;
/* Wait for all threads to exit via thread_exit() */
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
while ((count = thread_count()) > 1) {
clock_gettime(CLOCK_REALTIME, &ts);
ts.tv_sec += 1;
pthread_cond_timedwait(&kthread_cond, &kthread_lock, &ts);
}
ASSERT3S(thread_count(), ==, 1);
kt = list_head(&kthread_list);
list_remove(&kthread_list, kt);
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
VERIFY(pthread_attr_destroy(&kt->t_attr) == 0);
umem_free(kt, sizeof(kthread_t));
/* Cleanup list for tracking kthreads */
list_destroy(&kthread_list);
}
kthread_t *
zk_thread_current(void)
{
kt_did_t tid = pthread_self();
kthread_t *kt;
int count = 1;
/*
* Because a newly created thread may call zk_thread_current()
* before the thread parent has had time to add the thread's tid
* to our lookup list, we loop as long as there are tids which
* have not yet been set, one of which must be ours.
* Yes it's a hack; at some point we can just use native pthreads.
*/
while (count > 0) {
count = 0;
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
for (kt = list_head(&kthread_list); kt != NULL;
kt = list_next(&kthread_list, kt)) {
if (kt->t_tid == tid) {
VERIFY3S(pthread_mutex_unlock(
&kthread_lock), ==, 0);
return kt;
}
if (kt->t_tid == (kt_did_t)-1)
count++;
}
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
}
/* Unreachable */
ASSERT(0);
return NULL;
}
kthread_t *
zk_thread_create(caddr_t stk, size_t stksize, thread_func_t func, void *arg,
size_t len, void *pp, int state, pri_t pri)
{
kthread_t *kt;
kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
kt->t_tid = (kt_did_t)-1;
list_link_init(&kt->t_node);
VERIFY(pthread_attr_init(&kt->t_attr) == 0);
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
list_insert_head(&kthread_list, kt);
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
VERIFY3U(pthread_create(&kt->t_tid, &kt->t_attr,
(void *(*)(void *))func, arg), ==, 0);
return kt;
}
int
zk_thread_join(kt_did_t tid, kthread_t *dtid, void **status)
{
return pthread_join(tid, status);
}
void
zk_thread_exit(void)
{
kthread_t *kt;
VERIFY3P(kt = curthread, !=, NULL);
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
list_remove(&kthread_list, kt);
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
VERIFY(pthread_attr_destroy(&kt->t_attr) == 0);
umem_free(kt, sizeof(kthread_t));
pthread_cond_broadcast(&kthread_cond);
pthread_exit(NULL);
}
/*ARGSUSED*/
kthread_t *
zk_thread_create(void (*func)(), void *arg)
{
thread_t tid;
VERIFY(thr_create(0, 0, (void *(*)(void *))func, arg, THR_DETACHED,
&tid) == 0);
return ((void *)(uintptr_t)tid);
}
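The replacement zk_thread_create(), restated as a self-contained sketch: the thread id travels back as an opaque kthread_t pointer that callers compare but never dereference, and THR_DETACHED means these workers are never joined.

#include <thread.h>
#include <assert.h>
#include <stdint.h>

typedef struct kthread kthread_t;	/* opaque handle, never dereferenced */

static kthread_t *
sketch_thread_create(void *(*func)(void *), void *arg)
{
	thread_t tid;
	int err;

	err = thr_create(0, 0, func, arg, THR_DETACHED, &tid);
	assert(err == 0);

	return ((kthread_t *)(uintptr_t)tid);
}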
/*
@ -240,48 +96,41 @@ kstat_delete(kstat_t *ksp)
* =========================================================================
*/
void
mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
zmutex_init(kmutex_t *mp)
{
ASSERT3S(type, ==, MUTEX_DEFAULT);
ASSERT3P(cookie, ==, NULL);
#ifdef IM_FEELING_LUCKY
ASSERT3U(mp->m_magic, !=, MTX_MAGIC);
#endif
mp->m_owner = MTX_INIT;
mp->m_magic = MTX_MAGIC;
VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
mp->m_owner = NULL;
mp->initialized = B_TRUE;
(void) _mutex_init(&mp->m_lock, USYNC_THREAD, NULL);
}
void
mutex_destroy(kmutex_t *mp)
zmutex_destroy(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mp->m_owner, ==, MTX_INIT);
VERIFY3S(pthread_mutex_destroy(&(mp)->m_lock), ==, 0);
mp->m_owner = MTX_DEST;
mp->m_magic = 0;
ASSERT(mp->initialized == B_TRUE);
ASSERT(mp->m_owner == NULL);
(void) _mutex_destroy(&(mp)->m_lock);
mp->m_owner = (void *)-1UL;
mp->initialized = B_FALSE;
}
void
mutex_enter(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mp->m_owner, !=, MTX_DEST);
ASSERT3P(mp->m_owner, !=, curthread);
VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
ASSERT3P(mp->m_owner, ==, MTX_INIT);
ASSERT(mp->initialized == B_TRUE);
ASSERT(mp->m_owner != (void *)-1UL);
ASSERT(mp->m_owner != curthread);
VERIFY(mutex_lock(&mp->m_lock) == 0);
ASSERT(mp->m_owner == NULL);
mp->m_owner = curthread;
}
int
mutex_tryenter(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mp->m_owner, !=, MTX_DEST);
if (0 == pthread_mutex_trylock(&mp->m_lock)) {
ASSERT3P(mp->m_owner, ==, MTX_INIT);
ASSERT(mp->initialized == B_TRUE);
ASSERT(mp->m_owner != (void *)-1UL);
if (0 == mutex_trylock(&mp->m_lock)) {
ASSERT(mp->m_owner == NULL);
mp->m_owner = curthread;
return (1);
} else {
@ -292,16 +141,16 @@ mutex_tryenter(kmutex_t *mp)
void
mutex_exit(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mutex_owner(mp), ==, curthread);
mp->m_owner = MTX_INIT;
VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
ASSERT(mp->initialized == B_TRUE);
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = NULL;
VERIFY(mutex_unlock(&mp->m_lock) == 0);
}
void *
mutex_owner(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT(mp->initialized == B_TRUE);
return (mp->m_owner);
}
@ -314,48 +163,31 @@ mutex_owner(kmutex_t *mp)
void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
ASSERT3S(type, ==, RW_DEFAULT);
ASSERT3P(arg, ==, NULL);
#ifdef IM_FEELING_LUCKY
ASSERT3U(rwlp->rw_magic, !=, RW_MAGIC);
#endif
VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
rwlp->rw_owner = RW_INIT;
rwlp->rw_wr_owner = RW_INIT;
rwlp->rw_readers = 0;
rwlp->rw_magic = RW_MAGIC;
rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
rwlp->rw_owner = NULL;
rwlp->initialized = B_TRUE;
}
void
rw_destroy(krwlock_t *rwlp)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
rwlp->rw_magic = 0;
rwlock_destroy(&rwlp->rw_lock);
rwlp->rw_owner = (void *)-1UL;
rwlp->initialized = B_FALSE;
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT3P(rwlp->rw_owner, !=, curthread);
ASSERT3P(rwlp->rw_wr_owner, !=, curthread);
ASSERT(!RW_LOCK_HELD(rwlp));
ASSERT(rwlp->initialized == B_TRUE);
ASSERT(rwlp->rw_owner != (void *)-1UL);
ASSERT(rwlp->rw_owner != curthread);
if (rw == RW_READER) {
VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
atomic_inc_uint(&rwlp->rw_readers);
} else {
VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
ASSERT3U(rwlp->rw_readers, ==, 0);
rwlp->rw_wr_owner = curthread;
}
if (rw == RW_READER)
VERIFY(rw_rdlock(&rwlp->rw_lock) == 0);
else
VERIFY(rw_wrlock(&rwlp->rw_lock) == 0);
rwlp->rw_owner = curthread;
}
@ -363,16 +195,11 @@ rw_enter(krwlock_t *rwlp, krw_t rw)
void
rw_exit(krwlock_t *rwlp)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT(RW_LOCK_HELD(rwlp));
ASSERT(rwlp->initialized == B_TRUE);
ASSERT(rwlp->rw_owner != (void *)-1UL);
if (RW_READ_HELD(rwlp))
atomic_dec_uint(&rwlp->rw_readers);
else
rwlp->rw_wr_owner = RW_INIT;
rwlp->rw_owner = RW_INIT;
VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
rwlp->rw_owner = NULL;
VERIFY(rw_unlock(&rwlp->rw_lock) == 0);
}
int
@ -380,29 +207,19 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
int rv;
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT(rwlp->initialized == B_TRUE);
ASSERT(rwlp->rw_owner != (void *)-1UL);
if (rw == RW_READER)
rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
rv = rw_tryrdlock(&rwlp->rw_lock);
else
rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
rv = rw_trywrlock(&rwlp->rw_lock);
if (rv == 0) {
ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
if (rw == RW_READER)
atomic_inc_uint(&rwlp->rw_readers);
else {
ASSERT3U(rwlp->rw_readers, ==, 0);
rwlp->rw_wr_owner = curthread;
}
rwlp->rw_owner = curthread;
return (1);
}
VERIFY3S(rv, ==, EBUSY);
return (0);
}
@ -410,7 +227,8 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
int
rw_tryupgrade(krwlock_t *rwlp)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT(rwlp->initialized == B_TRUE);
ASSERT(rwlp->rw_owner != (void *)-1UL);
return (0);
}
@ -424,34 +242,22 @@ rw_tryupgrade(krwlock_t *rwlp)
void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
ASSERT3S(type, ==, CV_DEFAULT);
#ifdef IM_FEELING_LUCKY
ASSERT3U(cv->cv_magic, !=, CV_MAGIC);
#endif
cv->cv_magic = CV_MAGIC;
VERIFY3S(pthread_cond_init(&cv->cv, NULL), ==, 0);
VERIFY(cond_init(cv, type, NULL) == 0);
}
void
cv_destroy(kcondvar_t *cv)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
VERIFY3S(pthread_cond_destroy(&cv->cv), ==, 0);
cv->cv_magic = 0;
VERIFY(cond_destroy(cv) == 0);
}
void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
ASSERT3P(mutex_owner(mp), ==, curthread);
mp->m_owner = MTX_INIT;
int ret = pthread_cond_wait(&cv->cv, &mp->m_lock);
if (ret != 0)
VERIFY3S(ret, ==, EINTR);
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = NULL;
int ret = cond_wait(cv, &mp->m_lock);
VERIFY(ret == 0 || ret == EINTR);
mp->m_owner = curthread;
}
@ -459,38 +265,29 @@ clock_t
cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
{
int error;
struct timeval tv;
timestruc_t ts;
clock_t delta;
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
top:
delta = abstime - lbolt;
if (delta <= 0)
return (-1);
VERIFY(gettimeofday(&tv, NULL) == 0);
ts.tv_sec = tv.tv_sec + delta / hz;
ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
if (ts.tv_nsec >= NANOSEC) {
ts.tv_sec++;
ts.tv_nsec -= NANOSEC;
}
ts.tv_sec = delta / hz;
ts.tv_nsec = (delta % hz) * (NANOSEC / hz);
ASSERT3P(mutex_owner(mp), ==, curthread);
mp->m_owner = MTX_INIT;
error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = NULL;
error = cond_reltimedwait(cv, &mp->m_lock, &ts);
mp->m_owner = curthread;
if (error == ETIMEDOUT)
if (error == ETIME)
return (-1);
if (error == EINTR)
goto top;
VERIFY3S(error, ==, 0);
ASSERT(error == 0);
return (1);
}
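cond_reltimedwait() differs from pthread_cond_timedwait() in exactly the two ways this hunk exploits: it takes a relative timestruc_t, so the gettimeofday() deadline arithmetic goes away, and it reports a timeout as ETIME rather than ETIMEDOUT. A sketch, assuming Solaris <synch.h> and an illustrative tick rate:

#include <synch.h>

#define	TICK_HZ		119		/* illustrative ticks per second */
#define	NSEC_PER_SEC	1000000000L

/* Returns 0 on wakeup, EINTR on signal, ETIME on timeout. */
static int
wait_ticks(cond_t *cv, mutex_t *mp, long ticks)
{
	timestruc_t ts;

	ts.tv_sec = ticks / TICK_HZ;
	ts.tv_nsec = (ticks % TICK_HZ) * (NSEC_PER_SEC / TICK_HZ);

	return (cond_reltimedwait(cv, mp, &ts));
}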
@ -498,15 +295,13 @@ top:
void
cv_signal(kcondvar_t *cv)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
VERIFY3S(pthread_cond_signal(&cv->cv), ==, 0);
VERIFY(cond_signal(cv) == 0);
}
void
cv_broadcast(kcondvar_t *cv)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
VERIFY3S(pthread_cond_broadcast(&cv->cv), ==, 0);
VERIFY(cond_broadcast(cv) == 0);
}
/*
@ -748,7 +543,7 @@ __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
if (dprintf_find_string("pid"))
(void) printf("%d ", getpid());
if (dprintf_find_string("tid"))
(void) printf("%u ", (uint_t) pthread_self());
(void) printf("%u ", thr_self());
if (dprintf_find_string("cpu"))
(void) printf("%u ", getcpuid());
if (dprintf_find_string("time"))
@ -989,8 +784,8 @@ kernel_init(int mode)
VERIFY((random_fd = open("/dev/random", O_RDONLY)) != -1);
VERIFY((urandom_fd = open("/dev/urandom", O_RDONLY)) != -1);
thread_init();
system_taskq_init();
spa_init(mode);
}
@ -998,8 +793,6 @@ void
kernel_fini(void)
{
spa_fini();
system_taskq_fini();
thread_fini();
close(random_fd);
close(urandom_fd);

View File

@ -42,8 +42,7 @@ struct taskq {
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
kthread_t **tq_threadlist;
kt_did_t *tq_idlist;
thread_t *tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
@ -164,7 +163,6 @@ taskq_thread(void *arg)
tq->tq_nthreads--;
cv_broadcast(&tq->tq_wait_cv);
mutex_exit(&tq->tq_lock);
thread_exit();
return (NULL);
}
@ -200,10 +198,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
tq->tq_maxalloc = maxalloc;
tq->tq_task.task_next = &tq->tq_task;
tq->tq_task.task_prev = &tq->tq_task;
VERIFY3P((tq->tq_threadlist = kmem_alloc(tq->tq_nthreads *
sizeof(kthread_t *), KM_SLEEP)), !=, NULL);
VERIFY3P((tq->tq_idlist = kmem_alloc(tq->tq_nthreads *
sizeof(kt_did_t), KM_SLEEP)), !=, NULL);
tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
if (flags & TASKQ_PREPOPULATE) {
mutex_enter(&tq->tq_lock);
@ -212,11 +207,9 @@ taskq_create(const char *name, int nthreads, pri_t pri,
mutex_exit(&tq->tq_lock);
}
for (t = 0; t < tq->tq_nthreads; t++) {
VERIFY((tq->tq_threadlist[t] = thread_create(NULL, 0,
taskq_thread, tq, THR_BOUND, NULL, 0, 0)) != NULL);
tq->tq_idlist[t] = tq->tq_threadlist[t]->t_tid;
}
for (t = 0; t < nthreads; t++)
(void) thr_create(0, 0, taskq_thread,
tq, THR_BOUND, &tq->tq_threadlist[t]);
return (tq);
}
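The taskq's spawn/reap cycle, reduced to a sketch: one thread_t per slot at creation, joined in order at destroy time.

#include <thread.h>
#include <stdlib.h>

static void *
pool_worker(void *arg)
{
	/* ... pull tasks off the queue until woken for shutdown ... */
	return (NULL);
}

static void
pool_cycle(int nthreads)
{
	thread_t *list = malloc(nthreads * sizeof (thread_t));
	int t;

	for (t = 0; t < nthreads; t++)
		(void) thr_create(0, 0, pool_worker, NULL, THR_BOUND,
		    &list[t]);
	for (t = 0; t < nthreads; t++)
		(void) thr_join(list[t], NULL, NULL);
	free(list);
}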
@ -246,10 +239,9 @@ taskq_destroy(taskq_t *tq)
mutex_exit(&tq->tq_lock);
for (t = 0; t < nthreads; t++)
VERIFY3S(thread_join(tq->tq_idlist[t], NULL, NULL), ==, 0);
(void) thr_join(tq->tq_threadlist[t], NULL, NULL);
kmem_free(tq->tq_threadlist, nthreads * sizeof(kthread_t *));
kmem_free(tq->tq_idlist, nthreads * sizeof(kt_did_t));
kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
rw_destroy(&tq->tq_threadlock);
mutex_destroy(&tq->tq_lock);
@ -268,7 +260,7 @@ taskq_member(taskq_t *tq, void *t)
return (1);
for (i = 0; i < tq->tq_nthreads; i++)
if (tq->tq_threadlist[i] == (kthread_t *)t)
if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
return (1);
return (0);
@ -280,9 +272,3 @@ system_taskq_init(void)
system_taskq = taskq_create("system_taskq", 64, minclsyspri, 4, 512,
TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}
void
system_taskq_fini(void)
{
taskq_destroy(system_taskq);
}

View File

@ -446,8 +446,6 @@ txg_sync_thread(dsl_pool_t *dp)
rw_exit(&tx->tx_suspend);
cv_broadcast(&tx->tx_sync_done_cv);
}
thread_exit();
}
static void
@ -492,8 +490,6 @@ txg_quiesce_thread(dsl_pool_t *dp)
cv_broadcast(&tx->tx_sync_more_cv);
cv_broadcast(&tx->tx_quiesce_done_cv);
}
thread_exit();
}
/*