Merge branch 'fix-branch' into refs/top-bases/zfs-branch
commit 7a34cab76d
@@ -50,8 +50,7 @@ extern "C" {
 #include <errno.h>
 #include <string.h>
 #include <strings.h>
-#include <synch.h>
-#include <thread.h>
+#include <pthread.h>
 #include <assert.h>
 #include <alloca.h>
 #include <umem.h>
@@ -187,13 +186,15 @@ _NOTE(CONSTCOND) } while (0)
 /*
  * Threads
  */
-#define curthread ((void *)(uintptr_t)thr_self())

+/* XXX: not portable */
+#define curthread ((void *)(uintptr_t)pthread_self())

 typedef struct kthread kthread_t;

 #define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
	zk_thread_create(func, arg)
-#define thread_exit() thr_exit(NULL)
+#define thread_exit() pthread_exit(NULL)

 extern kthread_t *zk_thread_create(void (*func)(void), void *arg);

@@ -203,28 +204,18 @@ extern kthread_t *zk_thread_create(void (*func)(void), void *arg);
 /*
  * Mutexes
  */
+#define MTX_MAGIC 0x9522f51362a6e326ull
 typedef struct kmutex {
	void		*m_owner;
-	boolean_t	initialized;
-	mutex_t		m_lock;
+	uint64_t	m_magic;
+	pthread_mutex_t	m_lock;
 } kmutex_t;

-#define MUTEX_DEFAULT USYNC_THREAD
-#undef MUTEX_HELD
-#define MUTEX_HELD(m) _mutex_held(&(m)->m_lock)
+#define MUTEX_DEFAULT 0
+#define MUTEX_HELD(m) ((m)->m_owner == curthread)

-/*
- * Argh -- we have to get cheesy here because the kernel and userland
- * have different signatures for the same routine.
- */
-extern int _mutex_init(mutex_t *mp, int type, void *arg);
-extern int _mutex_destroy(mutex_t *mp);
-
-#define mutex_init(mp, b, c, d) zmutex_init((kmutex_t *)(mp))
-#define mutex_destroy(mp) zmutex_destroy((kmutex_t *)(mp))
-
-extern void zmutex_init(kmutex_t *mp);
-extern void zmutex_destroy(kmutex_t *mp);
+extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
+extern void mutex_destroy(kmutex_t *mp);
 extern void mutex_enter(kmutex_t *mp);
 extern void mutex_exit(kmutex_t *mp);
 extern int mutex_tryenter(kmutex_t *mp);
@@ -233,23 +224,24 @@ extern void *mutex_owner(kmutex_t *mp);
 /*
  * RW locks
  */
+#define RW_MAGIC 0x4d31fb123648e78aull
 typedef struct krwlock {
-	void		*rw_owner;
-	boolean_t	initialized;
-	rwlock_t	rw_lock;
+	void			*rw_owner;
+	void			*rw_wr_owner;
+	uint64_t		rw_magic;
+	pthread_rwlock_t	rw_lock;
+	uint_t			rw_readers;
 } krwlock_t;

 typedef int krw_t;

 #define RW_READER 0
 #define RW_WRITER 1
-#define RW_DEFAULT USYNC_THREAD
+#define RW_DEFAULT 0

-#undef RW_READ_HELD
-#define RW_READ_HELD(x) _rw_read_held(&(x)->rw_lock)
-
-#undef RW_WRITE_HELD
-#define RW_WRITE_HELD(x) _rw_write_held(&(x)->rw_lock)
+#define RW_READ_HELD(x) ((x)->rw_readers > 0)
+#define RW_WRITE_HELD(x) ((x)->rw_wr_owner == curthread)
+#define RW_LOCK_HELD(x) (RW_READ_HELD(x) || RW_WRITE_HELD(x))

 extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
 extern void rw_destroy(krwlock_t *rwlp);
@@ -267,9 +259,13 @@ extern gid_t *crgetgroups(cred_t *cr);
 /*
  * Condition variables
  */
-typedef cond_t kcondvar_t;
+#define CV_MAGIC 0xd31ea9a83b1b30c4ull
+typedef struct kcondvar {
+	uint64_t	cv_magic;
+	pthread_cond_t	cv;
+} kcondvar_t;

-#define CV_DEFAULT USYNC_THREAD
+#define CV_DEFAULT 0

 extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
 extern void cv_destroy(kcondvar_t *cv);
@@ -443,7 +439,8 @@ extern void delay(clock_t ticks);
 #define minclsyspri 60
 #define maxclsyspri 99

-#define CPU_SEQID (thr_self() & (max_ncpus - 1))
+/* XXX: not portable */
+#define CPU_SEQID (pthread_self() & (max_ncpus - 1))

 #define kcred NULL
 #define CRED() NULL

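For reference (not part of the commit): the header above wraps pthread primitives in kernel-style types and tracks the owner by hand, so MUTEX_HELD() becomes a plain owner comparison. A minimal standalone sketch of that wrapper pattern, using names that mirror the header; the program itself is illustrative only.

/* Illustrative sketch only -- not part of the commit. */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct kmutex {
	void		*m_owner;	/* tracked by hand; pthreads hides its own */
	pthread_mutex_t	m_lock;
} kmutex_t;

/* Same cast trick as the header; not strictly portable, as the XXX notes. */
#define	curthread	((void *)(uintptr_t)pthread_self())
#define	MUTEX_HELD(m)	((m)->m_owner == curthread)

static void
mutex_enter(kmutex_t *mp)
{
	assert(mp->m_owner != curthread);	/* no recursive entry */
	(void) pthread_mutex_lock(&mp->m_lock);
	mp->m_owner = curthread;
}

static void
mutex_exit(kmutex_t *mp)
{
	assert(MUTEX_HELD(mp));
	mp->m_owner = NULL;
	(void) pthread_mutex_unlock(&mp->m_lock);
}

int
main(void)
{
	kmutex_t m = { NULL, PTHREAD_MUTEX_INITIALIZER };

	mutex_enter(&m);
	printf("held by caller: %d\n", MUTEX_HELD(&m));
	mutex_exit(&m);
	return (0);
}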
@@ -34,8 +34,8 @@
 #include <sys/stat.h>
 #include <sys/processor.h>
 #include <sys/zfs_context.h>
-#include <sys/zmod.h>
 #include <sys/utsname.h>
+#include <sys/time.h>

 /*
  * Emulation of kernel services in userland.
@@ -58,11 +58,15 @@ struct utsname utsname = {
 kthread_t *
 zk_thread_create(void (*func)(), void *arg)
 {
-	thread_t tid;
+	pthread_t tid;

-	VERIFY(thr_create(0, 0, (void *(*)(void *))func, arg, THR_DETACHED,
-	    &tid) == 0);
+	pthread_attr_t attr;
+	VERIFY(pthread_attr_init(&attr) == 0);
+	VERIFY(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0);
+
+	VERIFY(pthread_create(&tid, &attr, (void *(*)(void *))func, arg) == 0);

+	/* XXX: not portable */
 	return ((void *)(uintptr_t)tid);
 }

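For reference (not part of the commit): zk_thread_create() above now builds a detached thread with an explicit attribute object instead of thr_create(..., THR_DETACHED, ...). A self-contained sketch of that pattern, illustrative only:

/* Illustrative sketch only: create a detached POSIX thread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	printf("detached worker running\n");
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	pthread_attr_t attr;

	if (pthread_attr_init(&attr) != 0)
		abort();
	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		abort();

	if (pthread_create(&tid, &attr, worker, NULL) != 0)
		abort();
	(void) pthread_attr_destroy(&attr);

	sleep(1);	/* detached threads cannot be joined; wait crudely */
	return (0);
}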
@@ -95,30 +99,37 @@ kstat_delete(kstat_t *ksp)
  * =========================================================================
  */
 void
-zmutex_init(kmutex_t *mp)
+mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
 {
+	ASSERT(type == MUTEX_DEFAULT);
+	ASSERT(cookie == NULL);
+
+#ifdef IM_FEELING_LUCKY
+	ASSERT(mp->m_magic != MTX_MAGIC);
+#endif
+
 	mp->m_owner = NULL;
-	mp->initialized = B_TRUE;
-	(void) _mutex_init(&mp->m_lock, USYNC_THREAD, NULL);
+	mp->m_magic = MTX_MAGIC;
+	VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
 }

 void
-zmutex_destroy(kmutex_t *mp)
+mutex_destroy(kmutex_t *mp)
 {
-	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_magic == MTX_MAGIC);
 	ASSERT(mp->m_owner == NULL);
-	(void) _mutex_destroy(&(mp)->m_lock);
+	VERIFY3S(pthread_mutex_destroy(&(mp)->m_lock), ==, 0);
 	mp->m_owner = (void *)-1UL;
-	mp->initialized = B_FALSE;
+	mp->m_magic = 0;
 }

 void
 mutex_enter(kmutex_t *mp)
 {
-	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_magic == MTX_MAGIC);
 	ASSERT(mp->m_owner != (void *)-1UL);
 	ASSERT(mp->m_owner != curthread);
-	VERIFY(mutex_lock(&mp->m_lock) == 0);
+	VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
 	ASSERT(mp->m_owner == NULL);
 	mp->m_owner = curthread;
 }
@@ -126,9 +137,9 @@ mutex_enter(kmutex_t *mp)
 int
 mutex_tryenter(kmutex_t *mp)
 {
-	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_magic == MTX_MAGIC);
 	ASSERT(mp->m_owner != (void *)-1UL);
-	if (0 == mutex_trylock(&mp->m_lock)) {
+	if (0 == pthread_mutex_trylock(&mp->m_lock)) {
 		ASSERT(mp->m_owner == NULL);
 		mp->m_owner = curthread;
 		return (1);
@@ -140,16 +151,16 @@ mutex_tryenter(kmutex_t *mp)
 void
 mutex_exit(kmutex_t *mp)
 {
-	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_magic == MTX_MAGIC);
 	ASSERT(mutex_owner(mp) == curthread);
 	mp->m_owner = NULL;
-	VERIFY(mutex_unlock(&mp->m_lock) == 0);
+	VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
 }

 void *
 mutex_owner(kmutex_t *mp)
 {
-	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_magic == MTX_MAGIC);
 	return (mp->m_owner);
 }

@@ -162,31 +173,48 @@ mutex_owner(kmutex_t *mp)
 void
 rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
 {
-	rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
+	ASSERT(type == RW_DEFAULT);
+	ASSERT(arg == NULL);
+
+#ifdef IM_FEELING_LUCKY
+	ASSERT(rwlp->rw_magic != RW_MAGIC);
+#endif
+
+	VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
 	rwlp->rw_owner = NULL;
-	rwlp->initialized = B_TRUE;
+	rwlp->rw_wr_owner = NULL;
+	rwlp->rw_readers = 0;
+	rwlp->rw_magic = RW_MAGIC;
 }

 void
 rw_destroy(krwlock_t *rwlp)
 {
-	rwlock_destroy(&rwlp->rw_lock);
-	rwlp->rw_owner = (void *)-1UL;
-	rwlp->initialized = B_FALSE;
+	ASSERT(rwlp->rw_magic == RW_MAGIC);
+
+	VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
+	rwlp->rw_magic = 0;
 }

 void
 rw_enter(krwlock_t *rwlp, krw_t rw)
 {
 	ASSERT(!RW_LOCK_HELD(rwlp));
-	ASSERT(rwlp->initialized == B_TRUE);
-	ASSERT(rwlp->rw_owner != (void *)-1UL);
+	ASSERT(rwlp->rw_magic == RW_MAGIC);
 	ASSERT(rwlp->rw_owner != curthread);
+	ASSERT(rwlp->rw_wr_owner != curthread);

-	if (rw == RW_READER)
-		VERIFY(rw_rdlock(&rwlp->rw_lock) == 0);
-	else
-		VERIFY(rw_wrlock(&rwlp->rw_lock) == 0);
+	if (rw == RW_READER) {
+		VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
+		ASSERT(rwlp->rw_wr_owner == NULL);
+
+		atomic_inc_uint(&rwlp->rw_readers);
+	} else {
+		VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
+		ASSERT(rwlp->rw_wr_owner == NULL);
+		ASSERT3U(rwlp->rw_readers, ==, 0);
+
+		rwlp->rw_wr_owner = curthread;
+	}

 	rwlp->rw_owner = curthread;
 }
@@ -194,11 +222,16 @@ rw_enter(krwlock_t *rwlp, krw_t rw)
 void
 rw_exit(krwlock_t *rwlp)
 {
-	ASSERT(rwlp->initialized == B_TRUE);
-	ASSERT(rwlp->rw_owner != (void *)-1UL);
+	ASSERT(rwlp->rw_magic == RW_MAGIC);
+	ASSERT(RW_LOCK_HELD(rwlp));
+
+	if (RW_READ_HELD(rwlp))
+		atomic_dec_uint(&rwlp->rw_readers);
+	else
+		rwlp->rw_wr_owner = NULL;

 	rwlp->rw_owner = NULL;
-	VERIFY(rw_unlock(&rwlp->rw_lock) == 0);
+	VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
 }

 int
@@ -206,19 +239,29 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
 {
 	int rv;

-	ASSERT(rwlp->initialized == B_TRUE);
-	ASSERT(rwlp->rw_owner != (void *)-1UL);
+	ASSERT(rwlp->rw_magic == RW_MAGIC);

 	if (rw == RW_READER)
-		rv = rw_tryrdlock(&rwlp->rw_lock);
+		rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
 	else
-		rv = rw_trywrlock(&rwlp->rw_lock);
+		rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);

 	if (rv == 0) {
+		ASSERT(rwlp->rw_wr_owner == NULL);
+
+		if (rw == RW_READER)
+			atomic_inc_uint(&rwlp->rw_readers);
+		else {
+			ASSERT3U(rwlp->rw_readers, ==, 0);
+			rwlp->rw_wr_owner = curthread;
+		}
+
 		rwlp->rw_owner = curthread;
 		return (1);
 	}

+	VERIFY3S(rv, ==, EBUSY);
+
 	return (0);
 }

@@ -226,8 +269,7 @@ rw_tryenter(krwlock_t *rwlp, krw_t rw)
 int
 rw_tryupgrade(krwlock_t *rwlp)
 {
-	ASSERT(rwlp->initialized == B_TRUE);
-	ASSERT(rwlp->rw_owner != (void *)-1UL);
+	ASSERT(rwlp->rw_magic == RW_MAGIC);

 	return (0);
 }
@@ -241,22 +283,34 @@ rw_tryupgrade(krwlock_t *rwlp)
 void
 cv_init(kcondvar_t *cv, char *name, int type, void *arg)
 {
-	VERIFY(cond_init(cv, type, NULL) == 0);
+	ASSERT(type == CV_DEFAULT);
+
+#ifdef IM_FEELING_LUCKY
+	ASSERT(cv->cv_magic != CV_MAGIC);
+#endif
+
+	cv->cv_magic = CV_MAGIC;
+
+	VERIFY3S(pthread_cond_init(&cv->cv, NULL), ==, 0);
 }

 void
 cv_destroy(kcondvar_t *cv)
 {
-	VERIFY(cond_destroy(cv) == 0);
+	ASSERT(cv->cv_magic == CV_MAGIC);
+	VERIFY3S(pthread_cond_destroy(&cv->cv), ==, 0);
+	cv->cv_magic = 0;
 }

 void
 cv_wait(kcondvar_t *cv, kmutex_t *mp)
 {
+	ASSERT(cv->cv_magic == CV_MAGIC);
 	ASSERT(mutex_owner(mp) == curthread);
 	mp->m_owner = NULL;
-	int ret = cond_wait(cv, &mp->m_lock);
-	VERIFY(ret == 0 || ret == EINTR);
+	int ret = pthread_cond_wait(&cv->cv, &mp->m_lock);
+	if (ret != 0)
+		VERIFY3S(ret, ==, EINTR);
 	mp->m_owner = curthread;
 }

@@ -264,29 +318,38 @@ clock_t
 cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
 {
 	int error;
+	struct timeval tv;
 	timestruc_t ts;
 	clock_t delta;

+	ASSERT(cv->cv_magic == CV_MAGIC);
+
 top:
 	delta = abstime - lbolt;
 	if (delta <= 0)
 		return (-1);

-	ts.tv_sec = delta / hz;
-	ts.tv_nsec = (delta % hz) * (NANOSEC / hz);
+	VERIFY(gettimeofday(&tv, NULL) == 0);
+
+	ts.tv_sec = tv.tv_sec + delta / hz;
+	ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
+	if (ts.tv_nsec >= NANOSEC) {
+		ts.tv_sec++;
+		ts.tv_nsec -= NANOSEC;
+	}

 	ASSERT(mutex_owner(mp) == curthread);
 	mp->m_owner = NULL;
-	error = cond_reltimedwait(cv, &mp->m_lock, &ts);
+	error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
 	mp->m_owner = curthread;

-	if (error == ETIME)
+	if (error == ETIMEDOUT)
 		return (-1);

 	if (error == EINTR)
 		goto top;

-	ASSERT(error == 0);
+	VERIFY3S(error, ==, 0);

 	return (1);
 }
@@ -294,13 +357,15 @@ top:
 void
 cv_signal(kcondvar_t *cv)
 {
-	VERIFY(cond_signal(cv) == 0);
+	ASSERT(cv->cv_magic == CV_MAGIC);
+	VERIFY3S(pthread_cond_signal(&cv->cv), ==, 0);
 }

 void
 cv_broadcast(kcondvar_t *cv)
 {
-	VERIFY(cond_broadcast(cv) == 0);
+	ASSERT(cv->cv_magic == CV_MAGIC);
+	VERIFY3S(pthread_cond_broadcast(&cv->cv), ==, 0);
 }

 /*
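For reference (not part of the commit): pthread_cond_timedwait() takes an absolute deadline and reports ETIMEDOUT, so cv_timedwait() above now adds the relative tick delay to gettimeofday() and normalizes the nanosecond field, where cond_reltimedwait() used to accept the relative value directly. A reduced sketch of just that conversion; HZ and NANOSEC are stand-ins defined locally for the example:

/* Illustrative sketch only: relative ticks -> absolute timespec. */
#include <sys/time.h>
#include <time.h>

#define	HZ	119			/* stand-in tick rate for the example */
#define	NANOSEC	1000000000L

static struct timespec
ticks_to_abstime(long delta)
{
	struct timeval tv;
	struct timespec ts;

	(void) gettimeofday(&tv, NULL);

	ts.tv_sec = tv.tv_sec + delta / HZ;
	ts.tv_nsec = tv.tv_usec * 1000 + (delta % HZ) * (NANOSEC / HZ);
	if (ts.tv_nsec >= NANOSEC) {	/* at most one carry is possible here */
		ts.tv_sec++;
		ts.tv_nsec -= NANOSEC;
	}
	return (ts);
}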
@@ -542,7 +607,7 @@ __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
 	if (dprintf_find_string("pid"))
 		(void) printf("%d ", getpid());
 	if (dprintf_find_string("tid"))
-		(void) printf("%u ", thr_self());
+		(void) printf("%u ", (uint_t) pthread_self());
 	if (dprintf_find_string("cpu"))
 		(void) printf("%u ", getcpuid());
 	if (dprintf_find_string("time"))
@@ -800,31 +865,6 @@ kernel_fini(void)
 	urandom_fd = -1;
 }

-int
-z_uncompress(void *dst, size_t *dstlen, const void *src, size_t srclen)
-{
-	int ret;
-	uLongf len = *dstlen;
-
-	if ((ret = uncompress(dst, &len, src, srclen)) == Z_OK)
-		*dstlen = (size_t)len;
-
-	return (ret);
-}
-
-int
-z_compress_level(void *dst, size_t *dstlen, const void *src, size_t srclen,
-    int level)
-{
-	int ret;
-	uLongf len = *dstlen;
-
-	if ((ret = compress2(dst, &len, src, srclen, level)) == Z_OK)
-		*dstlen = (size_t)len;
-
-	return (ret);
-}
-
 uid_t
 crgetuid(cred_t *cr)
 {

@@ -42,7 +42,7 @@ struct taskq {
	krwlock_t	tq_threadlock;
	kcondvar_t	tq_dispatch_cv;
	kcondvar_t	tq_wait_cv;
-	thread_t	*tq_threadlist;
+	pthread_t	*tq_threadlist;
	int		tq_flags;
	int		tq_active;
	int		tq_nthreads;
@@ -185,7 +185,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 	tq->tq_maxalloc = maxalloc;
 	tq->tq_task.task_next = &tq->tq_task;
 	tq->tq_task.task_prev = &tq->tq_task;
-	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
+	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (pthread_t), KM_SLEEP);

 	if (flags & TASKQ_PREPOPULATE) {
 		mutex_enter(&tq->tq_lock);
@@ -195,8 +195,8 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 	}

 	for (t = 0; t < nthreads; t++)
-		(void) thr_create(0, 0, taskq_thread,
-		    tq, THR_BOUND, &tq->tq_threadlist[t]);
+		VERIFY(pthread_create(&tq->tq_threadlist[t],
+		    NULL, taskq_thread, tq) == 0);

 	return (tq);
 }
@@ -226,9 +226,9 @@ taskq_destroy(taskq_t *tq)
 	mutex_exit(&tq->tq_lock);

 	for (t = 0; t < nthreads; t++)
-		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);
+		VERIFY(pthread_join(tq->tq_threadlist[t], NULL) == 0);

-	kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
+	kmem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));

 	rw_destroy(&tq->tq_threadlock);
 	mutex_destroy(&tq->tq_lock);
@@ -247,7 +247,7 @@ taskq_member(taskq_t *tq, void *t)
 		return (1);

 	for (i = 0; i < tq->tq_nthreads; i++)
-		if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
+		if (tq->tq_threadlist[i] == (pthread_t)(uintptr_t)t)
 			return (1);

 	return (0);

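For reference (not part of the commit): the taskq changes above swap thr_create()/thr_join() over a thread_t array for pthread_create()/pthread_join() over a pthread_t array. A stripped-down sketch of that create/join pattern, illustrative only:

/* Illustrative sketch only: create and join a small pool of pthreads. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	NTHREADS	4

static void *
worker(void *arg)
{
	printf("worker %ld running\n", (long)(intptr_t)arg);
	return (NULL);
}

int
main(void)
{
	pthread_t *tids;
	int t;

	tids = malloc(NTHREADS * sizeof (pthread_t));
	if (tids == NULL)
		abort();

	for (t = 0; t < NTHREADS; t++)
		if (pthread_create(&tids[t], NULL, worker, (void *)(intptr_t)t) != 0)
			abort();

	for (t = 0; t < NTHREADS; t++)
		if (pthread_join(tids[t], NULL) != 0)
			abort();

	free(tids);
	return (0);
}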
@@ -37,7 +37,6 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/debug.h>
-#include <thread.h>
 #include <unistd.h>

 #if !defined(TEXT_DOMAIN)
@@ -70,11 +69,12 @@ static va_list uu_panic_args;
 static pthread_t uu_panic_thread;

 static uint32_t _uu_main_error;
+static __thread int _uu_main_thread = 0;

 void
 uu_set_error(uint_t code)
 {
-	if (thr_main() != 0) {
+	if (_uu_main_thread) {
 		_uu_main_error = code;
 		return;
 	}
@@ -103,7 +103,7 @@ uu_set_error(uint_t code)
 uint32_t
 uu_error(void)
 {
-	if (thr_main() != 0)
+	if (_uu_main_thread)
 		return (_uu_main_error);

 	if (uu_error_key_setup < 0)	/* can't happen? */
@@ -261,5 +261,6 @@ uu_init(void) __attribute__((constructor));
 static void
 uu_init(void)
 {
+	_uu_main_thread = 1;
 	(void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
 }

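For reference (not part of the commit): libuutil can no longer use thr_main() to ask "am I the main thread?", so the change above introduces a thread-local flag that a constructor sets before main() runs. A standalone sketch of the same trick, illustrative only:

/* Illustrative sketch only: detect the main thread without thr_main(). */
#include <pthread.h>
#include <stdio.h>

static __thread int main_thread = 0;	/* thread-local, defaults to 0 */

static void set_main_flag(void) __attribute__((constructor));

static void
set_main_flag(void)
{
	main_thread = 1;	/* constructors run on the initial thread */
}

static void *
report(void *arg)
{
	printf("worker thread: main_thread = %d\n", main_thread);	/* prints 0 */
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	printf("main thread:   main_thread = %d\n", main_thread);	/* prints 1 */
	(void) pthread_create(&tid, NULL, report, NULL);
	(void) pthread_join(tid, NULL);
	return (0);
}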
@@ -123,7 +123,7 @@ namespace_reload(libzfs_handle_t *hdl)
 			return (no_memory(hdl));
 	}

-	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, 32768) != 0)
 		return (-1);

 	for (;;) {

@@ -1528,7 +1528,7 @@ arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
 	mutex_exit(&state->arcs_mtx);

 	if (bytes_evicted < bytes)
-		dprintf("only evicted %lld bytes from %x",
+		dprintf("only evicted %lld bytes from %x\n",
 		    (longlong_t)bytes_evicted, state);

 	if (skipped)
@@ -1628,7 +1628,7 @@ top:
 	}

 	if (bytes_deleted < bytes)
-		dprintf("only deleted %lld bytes from %p",
+		dprintf("only deleted %lld bytes from %p\n",
 		    (longlong_t)bytes_deleted, state);
 }

@@ -1892,7 +1892,7 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 static void
 arc_reclaim_thread(void)
 {
-	clock_t			growtime = 0;
+	int64_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

@@ -1915,12 +1915,12 @@ arc_reclaim_thread(void)
 		}

 		/* reset the growth delay for every reclaim */
-		growtime = lbolt + (arc_grow_retry * hz);
+		growtime = lbolt64 + (arc_grow_retry * hz);

 		arc_kmem_reap_now(last_reclaim);
 		arc_warm = B_TRUE;

-	} else if (arc_no_grow && lbolt >= growtime) {
+	} else if (arc_no_grow && lbolt64 >= growtime) {
 		arc_no_grow = FALSE;
 	}

@@ -3491,6 +3491,7 @@ arc_fini(void)
 	mutex_destroy(&arc_mru_ghost->arcs_mtx);
 	mutex_destroy(&arc_mfu->arcs_mtx);
 	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
+	mutex_destroy(&arc_l2c_only->arcs_mtx);

 	mutex_destroy(&zfs_write_limit_lock);

@@ -1900,7 +1900,11 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
 	}
 }

-static void
+/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
+ * is critical the we not allow the compiler to inline this function in to
+ * dbuf_sync_list() thereby drastically bloating the stack usage.
+ */
+noinline static void
 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 {
 	dmu_buf_impl_t *db = dr->dr_dbuf;
@@ -1940,7 +1944,11 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 	zio_nowait(zio);
 }

-static void
+/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
+ * critical the we not allow the compiler to inline this function in to
+ * dbuf_sync_list() thereby drastically bloating the stack usage.
+ */
+noinline static void
 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 {
 	arc_buf_t **datap = &dr->dt.dl.dr_data;
@@ -1993,6 +2001,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
 		drp = &(*drp)->dr_next;
 	ASSERT(dr->dr_next == NULL);
 	*drp = dr->dr_next;
+	if (dr->dr_dbuf->db_level != 0) {
+		mutex_destroy(&dr->dt.di.dr_mtx);
+		list_destroy(&dr->dt.di.dr_children);
+	}
 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
 	ASSERT(db->db_dirtycnt > 0);
 	db->db_dirtycnt -= 1;

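For reference (not part of the commit): the dbuf hunks mark the recursively-invoked sync helpers noinline so their locals are not folded into dbuf_sync_list()'s frame at every recursion level. A toy example of the same idea; the names are invented and noinline is assumed to be a GCC-style attribute macro:

/* Illustrative sketch only (invented names): a recursive list walker
 * whose per-item helper has large locals. Keeping the helper out of
 * line keeps those locals out of every recursive frame of the walker. */
#include <stdio.h>

#define	noinline	__attribute__((noinline))	/* assumed GCC-style macro */

noinline static int
sync_leaf(int item)
{
	char scratch[512];	/* large working space, needed only here */

	scratch[0] = (char)item;
	return (scratch[0]);
}

static int
sync_list(int depth)
{
	int total = 0;

	if (depth <= 0)
		return (0);
	/* If sync_leaf() were inlined, its 512 bytes would sit in this
	 * frame at every recursion level below. */
	total += sync_leaf(depth);
	total += sync_list(depth - 1);	/* one frame per level */
	return (total);
}

int
main(void)
{
	printf("%d\n", sync_list(16));
	return (0);
}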
@@ -58,6 +58,8 @@ dnode_cons(void *arg, void *unused, int kmflag)
 	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
 	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
+	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);

 	refcount_create(&dn->dn_holds);
 	refcount_create(&dn->dn_tx_holds);

@@ -86,6 +88,7 @@ dnode_dest(void *arg, void *unused)
 	rw_destroy(&dn->dn_struct_rwlock);
 	mutex_destroy(&dn->dn_mtx);
 	mutex_destroy(&dn->dn_dbufs_mtx);
+	cv_destroy(&dn->dn_notxholds);
 	refcount_destroy(&dn->dn_holds);
 	refcount_destroy(&dn->dn_tx_holds);

@@ -274,7 +277,6 @@ dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
     uint64_t object)
 {
 	dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
-	(void) dnode_cons(dn, NULL, 0);	/* XXX */

 	dn->dn_objset = os;
 	dn->dn_object = object;

@@ -409,9 +409,13 @@ dnode_evict_dbufs(dnode_t *dn)
 		if (evicting)
 			delay(1);
 		pass++;
-		ASSERT(pass < 100); /* sanity check */
+		if ((pass % 100) == 0)
+			dprintf("Exceeded %d passes evicting dbufs\n", pass);
 	} while (progress);

+	if (pass >= 100)
+		dprintf("Required %d passes to evict dbufs\n", pass);
+
 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
 	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
 		mutex_enter(&dn->dn_bonus->db_mtx);
@@ -444,6 +448,8 @@ dnode_undirty_dbufs(list_t *list)
 		} else {
 			mutex_exit(&db->db_mtx);
 			dnode_undirty_dbufs(&dr->dt.di.dr_children);
+			mutex_destroy(&dr->dt.di.dr_mtx);
+			list_destroy(&dr->dt.di.dr_children);
 		}
 		kmem_free(dr, sizeof (dbuf_dirty_record_t));
 		dbuf_rele(db, (void *)(uintptr_t)txg);

@@ -219,6 +219,7 @@ dsl_pool_close(dsl_pool_t *dp)

 	txg_list_destroy(&dp->dp_dirty_datasets);
 	txg_list_destroy(&dp->dp_dirty_dirs);
+	txg_list_destroy(&dp->dp_sync_tasks);
 	list_destroy(&dp->dp_synced_datasets);

 	arc_flush(dp->dp_spa);

@@ -28,22 +28,35 @@

 #include <sys/debug.h>
 #include <sys/types.h>
-#include <sys/zmod.h>

 #ifdef _KERNEL
+
 #include <sys/systm.h>
-#else
+#include <sys/zmod.h>
+
+typedef size_t zlen_t;
+#define compress_func z_compress_level
+#define uncompress_func z_uncompress
+
+#else /* _KERNEL */
+
 #include <strings.h>
+#include <zlib.h>
+
+typedef uLongf zlen_t;
+#define compress_func compress2
+#define uncompress_func uncompress
+
 #endif

 size_t
 gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 {
-	size_t dstlen = d_len;
+	zlen_t dstlen = d_len;

 	ASSERT(d_len <= s_len);

-	if (z_compress_level(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
+	if (compress_func(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
 		if (d_len != s_len)
 			return (s_len);

@@ -51,18 +64,18 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 		return (s_len);
 	}

-	return (dstlen);
+	return ((size_t) dstlen);
 }

 /*ARGSUSED*/
 int
 gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 {
-	size_t dstlen = d_len;
+	zlen_t dstlen = d_len;

 	ASSERT(d_len >= s_len);

-	if (z_uncompress(d_start, &dstlen, s_start, s_len) != Z_OK)
+	if (uncompress_func(d_start, &dstlen, s_start, s_len) != Z_OK)
 		return (-1);

 	return (0);

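For reference (not part of the commit): in userland this file now calls zlib's compress2()/uncompress() directly through the compress_func/uncompress_func macros, with zlen_t (uLongf) absorbing the size_t/uLongf length mismatch, which is why the z_compress_level()/z_uncompress() shims could be dropped from kernel.c above. A minimal sketch of wrapping those zlib calls behind size_t lengths; illustrative only, link with -lz:

/* Illustrative sketch only: size_t-friendly wrappers over zlib. */
#include <stdio.h>
#include <zlib.h>

static int
my_compress(void *dst, size_t *dstlen, const void *src, size_t srclen, int level)
{
	uLongf len = *dstlen;		/* zlib wants uLongf, not size_t */
	int ret;

	if ((ret = compress2(dst, &len, src, srclen, level)) == Z_OK)
		*dstlen = (size_t)len;
	return (ret);
}

static int
my_uncompress(void *dst, size_t *dstlen, const void *src, size_t srclen)
{
	uLongf len = *dstlen;
	int ret;

	if ((ret = uncompress(dst, &len, src, srclen)) == Z_OK)
		*dstlen = (size_t)len;
	return (ret);
}

int
main(void)
{
	const char msg[] = "hello hello hello hello hello";
	char packed[128], unpacked[128];
	size_t plen = sizeof (packed), ulen = sizeof (unpacked);

	if (my_compress(packed, &plen, msg, sizeof (msg), 6) != Z_OK)
		return (1);
	if (my_uncompress(unpacked, &ulen, packed, plen) != Z_OK)
		return (1);
	printf("%zu -> %zu -> %zu bytes, \"%s\"\n",
	    sizeof (msg), plen, ulen, unpacked);
	return (0);
}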
@@ -63,6 +63,8 @@ space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
 	avl_create(&sm->sm_root, space_map_seg_compare,
 	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

+	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);
+
 	sm->sm_start = start;
 	sm->sm_size = size;
 	sm->sm_shift = shift;
@@ -74,6 +76,7 @@ space_map_destroy(space_map_t *sm)
 {
 	ASSERT(!sm->sm_loaded && !sm->sm_loading);
 	VERIFY3U(sm->sm_space, ==, 0);
+	cv_destroy(&sm->sm_load_cv);
 	avl_destroy(&sm->sm_root);
 }

@@ -63,6 +63,13 @@ txg_init(dsl_pool_t *dp, uint64_t txg)
 	rw_init(&tx->tx_suspend, NULL, RW_DEFAULT, NULL);
 	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

+	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tx->tx_timeout_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);
+
 	tx->tx_open_txg = txg;
 }

@@ -77,6 +84,13 @@ txg_fini(dsl_pool_t *dp)

 	ASSERT(tx->tx_threads == 0);

+	cv_destroy(&tx->tx_sync_more_cv);
+	cv_destroy(&tx->tx_sync_done_cv);
+	cv_destroy(&tx->tx_quiesce_more_cv);
+	cv_destroy(&tx->tx_quiesce_done_cv);
+	cv_destroy(&tx->tx_timeout_cv);
+	cv_destroy(&tx->tx_exit_cv);
+
 	rw_destroy(&tx->tx_suspend);
 	mutex_destroy(&tx->tx_sync_lock);

@@ -388,7 +388,7 @@ zap_create_leaf(zap_t *zap, dmu_tx_t *tx)

 	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

-	rw_init(&l->l_rwlock, 0, 0, 0);
+	rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
 	rw_enter(&l->l_rwlock, RW_WRITER);
 	l->l_blkid = zap_allocate_blocks(zap, 1);
 	l->l_dbuf = NULL;
@@ -446,7 +446,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
 	ASSERT(blkid != 0);

 	l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
-	rw_init(&l->l_rwlock, 0, 0, 0);
+	rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
 	rw_enter(&l->l_rwlock, RW_WRITER);
 	l->l_blkid = blkid;
 	l->l_bs = highbit(db->db_size)-1;

@@ -286,7 +286,7 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
 	ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));

 	zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
-	rw_init(&zap->zap_rwlock, 0, 0, 0);
+	rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
 	rw_enter(&zap->zap_rwlock, RW_WRITER);
 	zap->zap_objset = os;
 	zap->zap_object = obj;

@@ -1574,6 +1574,8 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
 	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
 		zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;

+	/* XXX - This must be destroyed but I'm not quite sure yet so
+	 * I'm just annotating that fact when it's an issue. -Brian */
 	mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
 	list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
 	    offsetof(znode_t, z_link_node));

|
|||
t = ZIO_TYPE_NULL;
|
||||
|
||||
(void) taskq_dispatch(zio->io_spa->spa_zio_taskq[t][q],
|
||||
(task_func_t *)zio_execute, zio, TQ_SLEEP);
|
||||
(task_func_t *)zio_execute, zio, TQ_NOSLEEP);
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
|
|
|
@@ -138,7 +138,7 @@ typedef struct ztest_args {
	spa_t		*za_spa;
	objset_t	*za_os;
	zilog_t		*za_zilog;
-	thread_t	za_thread;
+	pthread_t	za_thread;
	uint64_t	za_instance;
	uint64_t	za_random;
	uint64_t	za_diroff;
@@ -221,18 +221,18 @@ ztest_info_t ztest_info[] = {
  * Stuff we need to share writably between parent and child.
  */
 typedef struct ztest_shared {
-	mutex_t		zs_vdev_lock;
-	rwlock_t	zs_name_lock;
-	uint64_t	zs_vdev_primaries;
-	uint64_t	zs_vdev_aux;
-	uint64_t	zs_enospc_count;
-	hrtime_t	zs_start_time;
-	hrtime_t	zs_stop_time;
-	uint64_t	zs_alloc;
-	uint64_t	zs_space;
-	ztest_info_t	zs_info[ZTEST_FUNCS];
-	mutex_t		zs_sync_lock[ZTEST_SYNC_LOCKS];
-	uint64_t	zs_seq[ZTEST_SYNC_LOCKS];
+	pthread_mutex_t		zs_vdev_lock;
+	pthread_rwlock_t	zs_name_lock;
+	uint64_t		zs_vdev_primaries;
+	uint64_t		zs_vdev_aux;
+	uint64_t		zs_enospc_count;
+	hrtime_t		zs_start_time;
+	hrtime_t		zs_stop_time;
+	uint64_t		zs_alloc;
+	uint64_t		zs_space;
+	ztest_info_t		zs_info[ZTEST_FUNCS];
+	pthread_mutex_t		zs_sync_lock[ZTEST_SYNC_LOCKS];
+	uint64_t		zs_seq[ZTEST_SYNC_LOCKS];
 } ztest_shared_t;

 static char ztest_dev_template[] = "%s/%s.%llua";
@@ -811,7 +811,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
 	 * Attempt to create an existing pool.  It shouldn't matter
 	 * what's in the nvroot; we should fail with EEXIST.
 	 */
-	(void) rw_rdlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
 	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
 	error = spa_create(za->za_pool, nvroot, NULL, NULL, NULL);
 	nvlist_free(nvroot);
@@ -827,7 +827,7 @@ ztest_spa_create_destroy(ztest_args_t *za)
 		fatal(0, "spa_destroy() = %d", error);

 	spa_close(spa, FTAG);
-	(void) rw_unlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 }

 static vdev_t *
@@ -857,7 +857,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
 	nvlist_t *nvroot;
 	int error;

-	(void) mutex_lock(&ztest_shared->zs_vdev_lock);
+	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);

 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

@@ -875,7 +875,7 @@ ztest_vdev_add_remove(ztest_args_t *za)
 	error = spa_vdev_add(spa, nvroot);
 	nvlist_free(nvroot);

-	(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);

 	if (error == ENOSPC)
 		ztest_record_enospc("spa_vdev_add");
@@ -904,7 +904,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
 		aux = ZPOOL_CONFIG_L2CACHE;
 	}

-	(void) mutex_lock(&ztest_shared->zs_vdev_lock);
+	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);

 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

@@ -960,7 +960,7 @@ ztest_vdev_aux_add_remove(ztest_args_t *za)
 		fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
 	}

-	(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
 }

 /*
@@ -1149,7 +1149,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
 	size_t fsize;
 	int fd;

-	(void) mutex_lock(&ztest_shared->zs_vdev_lock);
+	(void) pthread_mutex_lock(&ztest_shared->zs_vdev_lock);

 	/*
 	 * Pick a random leaf vdev.
@@ -1180,7 +1180,7 @@ ztest_vdev_LUN_growth(ztest_args_t *za)
 		(void) close(fd);
 	}

-	(void) mutex_unlock(&ztest_shared->zs_vdev_lock);
+	(void) pthread_mutex_unlock(&ztest_shared->zs_vdev_lock);
 }

 /* ARGSUSED */
@@ -1280,7 +1280,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
 	uint64_t objects;
 	ztest_replay_t zr;

-	(void) rw_rdlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
 	(void) snprintf(name, 100, "%s/%s_temp_%llu", za->za_pool, za->za_pool,
 	    (u_longlong_t)za->za_instance);

@@ -1324,7 +1324,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
 	if (error) {
 		if (error == ENOSPC) {
 			ztest_record_enospc("dmu_objset_create");
-			(void) rw_unlock(&ztest_shared->zs_name_lock);
+			(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 			return;
 		}
 		fatal(0, "dmu_objset_create(%s) = %d", name, error);
@@ -1406,7 +1406,7 @@ ztest_dmu_objset_create_destroy(ztest_args_t *za)
 	if (error)
 		fatal(0, "dmu_objset_destroy(%s) = %d", name, error);

-	(void) rw_unlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 }

 /*
@@ -1420,7 +1420,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
 	char snapname[100];
 	char osname[MAXNAMELEN];

-	(void) rw_rdlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
 	dmu_objset_name(os, osname);
 	(void) snprintf(snapname, 100, "%s@%llu", osname,
 	    (u_longlong_t)za->za_instance);
@@ -1433,7 +1433,7 @@ ztest_dmu_snapshot_create_destroy(ztest_args_t *za)
 		ztest_record_enospc("dmu_take_snapshot");
 	else if (error != 0 && error != EEXIST)
 		fatal(0, "dmu_take_snapshot() = %d", error);
-	(void) rw_unlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 }

 /*
@@ -1931,7 +1931,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	int bs = ZTEST_DIROBJ_BLOCKSIZE;
 	int do_free = 0;
 	uint64_t off, txg, txg_how;
-	mutex_t *lp;
+	pthread_mutex_t *lp;
 	char osname[MAXNAMELEN];
 	char iobuf[SPA_MAXBLOCKSIZE];
 	blkptr_t blk = { 0 };
@@ -1981,7 +1981,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	txg = dmu_tx_get_txg(tx);

 	lp = &ztest_shared->zs_sync_lock[b];
-	(void) mutex_lock(lp);
+	(void) pthread_mutex_lock(lp);

 	wbt->bt_objset = dmu_objset_id(os);
 	wbt->bt_object = ZTEST_DIROBJ;
@@ -2034,7 +2034,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 			dmu_write(os, ZTEST_DIROBJ, off, btsize, wbt, tx);
 	}

-	(void) mutex_unlock(lp);
+	(void) pthread_mutex_unlock(lp);

 	if (ztest_random(1000) == 0)
 		(void) poll(NULL, 0, 1); /* open dn_notxholds window */
@@ -2053,7 +2053,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	/*
 	 * dmu_sync() the block we just wrote.
 	 */
-	(void) mutex_lock(lp);
+	(void) pthread_mutex_lock(lp);

 	blkoff = P2ALIGN_TYPED(off, bs, uint64_t);
 	error = dmu_buf_hold(os, ZTEST_DIROBJ, blkoff, FTAG, &db);
@@ -2061,7 +2061,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	if (error) {
 		dprintf("dmu_buf_hold(%s, %d, %llx) = %d\n",
 		    osname, ZTEST_DIROBJ, blkoff, error);
-		(void) mutex_unlock(lp);
+		(void) pthread_mutex_unlock(lp);
 		return;
 	}
 	blkoff = off - blkoff;
@@ -2069,7 +2069,7 @@ ztest_dmu_write_parallel(ztest_args_t *za)
 	dmu_buf_rele(db, FTAG);
 	za->za_dbuf = NULL;

-	(void) mutex_unlock(lp);
+	(void) pthread_mutex_unlock(lp);

 	if (error) {
 		dprintf("dmu_sync(%s, %d, %llx) = %d\n",
@@ -2452,7 +2452,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
 	char osname[MAXNAMELEN];
 	int error;

-	(void) rw_rdlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);

 	dmu_objset_name(os, osname);

@@ -2491,7 +2491,7 @@ ztest_dsl_prop_get_set(ztest_args_t *za)
 		}
 	}

-	(void) rw_unlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 }

 /*
@@ -2654,7 +2654,7 @@ ztest_spa_rename(ztest_args_t *za)
 	int error;
 	spa_t *spa;

-	(void) rw_wrlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_wrlock(&ztest_shared->zs_name_lock);

 	oldname = za->za_pool;
 	newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
@@ -2706,7 +2706,7 @@ ztest_spa_rename(ztest_args_t *za)

 	umem_free(newname, strlen(newname) + 1);

-	(void) rw_unlock(&ztest_shared->zs_name_lock);
+	(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 }

@@ -3026,15 +3026,16 @@ ztest_run(char *pool)
 	ztest_args_t *za;
 	spa_t *spa;
 	char name[100];
-	thread_t resume_tid;
+	pthread_t resume_tid;

 	ztest_exiting = B_FALSE;

-	(void) _mutex_init(&zs->zs_vdev_lock, USYNC_THREAD, NULL);
-	(void) rwlock_init(&zs->zs_name_lock, USYNC_THREAD, NULL);
+	(void) pthread_mutex_init(&zs->zs_vdev_lock, NULL);
+	(void) pthread_rwlock_init(&zs->zs_name_lock, NULL);

 	for (t = 0; t < ZTEST_SYNC_LOCKS; t++)
-		(void) _mutex_init(&zs->zs_sync_lock[t], USYNC_THREAD, NULL);
+		(void) pthread_mutex_init(&zs->zs_sync_lock[t], NULL);

 	/*
 	 * Destroy one disk before we even start.
@@ -3091,8 +3092,8 @@ ztest_run(char *pool)
 	/*
 	 * Create a thread to periodically resume suspended I/O.
 	 */
-	VERIFY(thr_create(0, 0, ztest_resume, spa, THR_BOUND,
-	    &resume_tid) == 0);
+	VERIFY(pthread_create(&resume_tid, NULL, ztest_suspend_monitor,
+	    NULL) == 0);

 	/*
 	 * Verify that we can safely inquire about about any object,
@@ -3142,7 +3143,7 @@ ztest_run(char *pool)
 		if (t < zopt_datasets) {
 			ztest_replay_t zr;
 			int test_future = FALSE;
-			(void) rw_rdlock(&ztest_shared->zs_name_lock);
+			(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
 			(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
 			error = dmu_objset_create(name, DMU_OST_OTHER, NULL, 0,
 			    ztest_create_cb, NULL);
@@ -3150,7 +3151,7 @@ ztest_run(char *pool)
 				test_future = TRUE;
 			} else if (error == ENOSPC) {
 				zs->zs_enospc_count++;
-				(void) rw_unlock(&ztest_shared->zs_name_lock);
+				(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 				break;
 			} else if (error != 0) {
 				fatal(0, "dmu_objset_create(%s) = %d",
@@ -3161,7 +3162,7 @@ ztest_run(char *pool)
 			if (error)
 				fatal(0, "dmu_objset_open('%s') = %d",
 				    name, error);
-			(void) rw_unlock(&ztest_shared->zs_name_lock);
+			(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 			if (test_future)
 				ztest_dmu_check_future_leak(&za[t]);
 			zr.zr_os = za[d].za_os;
@@ -3170,12 +3171,12 @@ ztest_run(char *pool)
 			za[d].za_zilog = zil_open(za[d].za_os, NULL);
 		}

-		VERIFY(thr_create(0, 0, ztest_thread, &za[t], THR_BOUND,
-		    &za[t].za_thread) == 0);
+		VERIFY(pthread_create(&za[t].za_thread, NULL, ztest_thread,
+		    &za[t]) == 0);
 	}

 	while (--t >= 0) {
-		VERIFY(thr_join(za[t].za_thread, NULL, NULL) == 0);
+		VERIFY(pthread_join(za[t].za_thread, NULL) == 0);
 		if (t < zopt_datasets) {
 			zil_close(za[t].za_zilog);
 			dmu_objset_close(za[t].za_os);
@@ -3194,14 +3195,14 @@ ztest_run(char *pool)
 	 * If we had out-of-space errors, destroy a random objset.
 	 */
 	if (zs->zs_enospc_count != 0) {
-		(void) rw_rdlock(&ztest_shared->zs_name_lock);
+		(void) pthread_rwlock_rdlock(&ztest_shared->zs_name_lock);
 		d = (int)ztest_random(zopt_datasets);
 		(void) snprintf(name, 100, "%s/%s_%d", pool, pool, d);
 		if (zopt_verbose >= 3)
 			(void) printf("Destroying %s to free up space\n", name);
 		(void) dmu_objset_find(name, ztest_destroy_cb, &za[d],
 		    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
-		(void) rw_unlock(&ztest_shared->zs_name_lock);
+		(void) pthread_rwlock_unlock(&ztest_shared->zs_name_lock);
 	}

 	txg_wait_synced(spa_get_dsl(spa), 0);
@@ -3210,7 +3211,7 @@ ztest_run(char *pool)

 	/* Kill the resume thread */
 	ztest_exiting = B_TRUE;
-	VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
+	VERIFY(pthread_join(resume_tid, NULL) == 0);

 	/*
 	 * Right before closing the pool, kick off a bunch of async I/O;

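For reference (not part of the commit): ztest's shared state now uses pthread_mutex_t/pthread_rwlock_t with default (NULL) attributes in place of the USYNC_THREAD Solaris initializers, and the name lock is taken for read around dataset lookups and for write around renames. A small sketch of that reader/writer pattern, illustrative only:

/* Illustrative sketch only: pthread rwlock read/write usage. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t name_lock = PTHREAD_RWLOCK_INITIALIZER;
static char pool_name[64] = "tank";

static void *
reader(void *arg)
{
	(void) pthread_rwlock_rdlock(&name_lock);
	printf("reader sees pool \"%s\"\n", pool_name);
	(void) pthread_rwlock_unlock(&name_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	(void) pthread_rwlock_wrlock(&name_lock);
	snprintf(pool_name, sizeof (pool_name), "%s_renamed", "tank");
	(void) pthread_rwlock_unlock(&name_lock);

	(void) pthread_create(&tid, NULL, reader, NULL);
	(void) pthread_join(tid, NULL);
	return (0);
}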