zfs/lib/libzpool/kernel.c

/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <assert.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>
#include <libgen.h>
#include <sys/signal.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/processor.h>
#include <sys/zfs_context.h>
#include <sys/rrwlock.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <sys/systeminfo.h>
#include <zfs_fletcher.h>
#include <sys/crypto/icp.h>
/*
* Emulation of kernel services in userland.
*/
int aok;
uint64_t physmem;
vnode_t *rootdir = (vnode_t *)0xabcd1234;
char hw_serial[HW_HOSTID_LEN];
struct utsname hw_utsname;
vmem_t *zio_arena = NULL;
/* If set, all blocks read will be copied to the specified directory. */
char *vn_dumpdir = NULL;
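/*
* Illustrative (hypothetical) use: a consumer such as zdb could point
* vn_dumpdir at a scratch directory before calling vn_open(); every block
* subsequently read through vn_rdwr() is then also written, at the same
* offset, to <vn_dumpdir>/<basename of the device path>.
*/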
/* this only exists to have its address taken */
struct proc p0;
/*
* =========================================================================
* threads
* =========================================================================
*/
pthread_cond_t kthread_cond = PTHREAD_COND_INITIALIZER;
pthread_mutex_t kthread_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_key_t kthread_key;
int kthread_nr = 0;
void
thread_init(void)
{
kthread_t *kt;
VERIFY3S(pthread_key_create(&kthread_key, NULL), ==, 0);
/* Create entry for primary kthread */
kt = umem_zalloc(sizeof (kthread_t), UMEM_NOFAIL);
kt->t_tid = pthread_self();
kt->t_func = NULL;
VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
/* Only the main thread should be running at the moment */
ASSERT3S(kthread_nr, ==, 0);
kthread_nr = 1;
}
void
thread_fini(void)
{
kthread_t *kt = curthread;
ASSERT(pthread_equal(kt->t_tid, pthread_self()));
ASSERT3P(kt->t_func, ==, NULL);
umem_free(kt, sizeof (kthread_t));
/* Wait for all threads to exit via thread_exit() */
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
kthread_nr--; /* Main thread is exiting */
while (kthread_nr > 0)
VERIFY0(pthread_cond_wait(&kthread_cond, &kthread_lock));
ASSERT3S(kthread_nr, ==, 0);
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
VERIFY3S(pthread_key_delete(kthread_key), ==, 0);
}
kthread_t *
zk_thread_current(void)
{
kthread_t *kt = pthread_getspecific(kthread_key);
ASSERT3P(kt, !=, NULL);
return (kt);
}
void *
zk_thread_helper(void *arg)
{
kthread_t *kt = (kthread_t *)arg;
VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
kthread_nr++;
VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
(void) setpriority(PRIO_PROCESS, 0, kt->t_pri);
kt->t_tid = pthread_self();
((thread_func_arg_t)kt->t_func)(kt->t_arg);
/* Unreachable, thread must exit with thread_exit() */
abort();
return (NULL);
}
kthread_t *
zk_thread_create(caddr_t stk, size_t stksize, thread_func_t func, void *arg,
size_t len, proc_t *pp, int state, pri_t pri, int detachstate)
{
kthread_t *kt;
pthread_attr_t attr;
char *stkstr;
ASSERT0(state & ~TS_RUN);
kt = umem_zalloc(sizeof (kthread_t), UMEM_NOFAIL);
kt->t_func = func;
kt->t_arg = arg;
kt->t_pri = pri;
VERIFY0(pthread_attr_init(&attr));
VERIFY0(pthread_attr_setdetachstate(&attr, detachstate));
/*
* We allow the default stack size in user space to be specified by
* setting the ZFS_STACK_SIZE environment variable. This allows us
* the convenience of observing and debugging stack overruns in
* user space. Explicitly specified stack sizes will be honored.
* The usage of ZFS_STACK_SIZE is discussed further in the
* ENVIRONMENT VARIABLES section of the ztest(1) man page.
*/
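/*
* Illustrative invocation (the value is an arbitrary example, not a
* recommendation):
*
*     ZFS_STACK_SIZE=262144 ztest
*
* Values smaller than TS_STACK_MIN are raised to TS_STACK_MIN, and the
* result is rounded up to a multiple of PAGESIZE below.
*/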
if (stksize == 0) {
stkstr = getenv("ZFS_STACK_SIZE");
if (stkstr == NULL)
stksize = TS_STACK_MAX;
else
stksize = MAX(atoi(stkstr), TS_STACK_MIN);
}
VERIFY3S(stksize, >, 0);
stksize = P2ROUNDUP(MAX(stksize, TS_STACK_MIN), PAGESIZE);
/*
* If this ever fails, it may be because the stack size is not a
* multiple of system page size.
*/
VERIFY0(pthread_attr_setstacksize(&attr, stksize));
VERIFY0(pthread_attr_setguardsize(&attr, PAGESIZE));
VERIFY0(pthread_create(&kt->t_tid, &attr, &zk_thread_helper, kt));
VERIFY0(pthread_attr_destroy(&attr));
return (kt);
}
void
zk_thread_exit(void)
{
kthread_t *kt = curthread;
ASSERT(pthread_equal(kt->t_tid, pthread_self()));
umem_free(kt, sizeof (kthread_t));
VERIFY0(pthread_mutex_lock(&kthread_lock));
kthread_nr--;
VERIFY0(pthread_mutex_unlock(&kthread_lock));
VERIFY0(pthread_cond_broadcast(&kthread_cond));
pthread_exit((void *)TS_MAGIC);
}
void
zk_thread_join(kt_did_t tid)
{
void *ret;
pthread_join((pthread_t)tid, &ret);
VERIFY3P(ret, ==, (void *)TS_MAGIC);
}
/*
* =========================================================================
* kstats
* =========================================================================
*/
/*ARGSUSED*/
kstat_t *
kstat_create(const char *module, int instance, const char *name,
const char *class, uchar_t type, ulong_t ndata, uchar_t ks_flag)
{
return (NULL);
}
/*ARGSUSED*/
void
kstat_install(kstat_t *ksp)
{}
/*ARGSUSED*/
void
kstat_delete(kstat_t *ksp)
{}
/*ARGSUSED*/
void
kstat_waitq_enter(kstat_io_t *kiop)
{}
/*ARGSUSED*/
void
kstat_waitq_exit(kstat_io_t *kiop)
{}
/*ARGSUSED*/
void
kstat_runq_enter(kstat_io_t *kiop)
{}
/*ARGSUSED*/
void
kstat_runq_exit(kstat_io_t *kiop)
{}
/*ARGSUSED*/
void
kstat_waitq_to_runq(kstat_io_t *kiop)
{}
/*ARGSUSED*/
void
kstat_runq_back_to_waitq(kstat_io_t *kiop)
{}
void
kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index))
{}
/*
* =========================================================================
* mutexes
* =========================================================================
*/
void
mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
{
ASSERT3S(type, ==, MUTEX_DEFAULT);
ASSERT3P(cookie, ==, NULL);
mp->m_owner = MTX_INIT;
mp->m_magic = MTX_MAGIC;
VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
}
void
mutex_destroy(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mp->m_owner, ==, MTX_INIT);
ASSERT0(pthread_mutex_destroy(&(mp)->m_lock));
mp->m_owner = MTX_DEST;
mp->m_magic = 0;
}
void
mutex_enter(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mp->m_owner, !=, MTX_DEST);
ASSERT3P(mp->m_owner, !=, curthread);
VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
ASSERT3P(mp->m_owner, ==, MTX_INIT);
mp->m_owner = curthread;
}
int
mutex_tryenter(kmutex_t *mp)
{
int err;
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mp->m_owner, !=, MTX_DEST);
if (0 == (err = pthread_mutex_trylock(&mp->m_lock))) {
ASSERT3P(mp->m_owner, ==, MTX_INIT);
mp->m_owner = curthread;
return (1);
} else {
VERIFY3S(err, ==, EBUSY);
return (0);
}
}
void
mutex_exit(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
ASSERT3P(mutex_owner(mp), ==, curthread);
mp->m_owner = MTX_INIT;
VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
}
void *
mutex_owner(kmutex_t *mp)
{
ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
return (mp->m_owner);
}
int
mutex_held(kmutex_t *mp)
{
return (mp->m_owner == curthread);
}
/*
* =========================================================================
* rwlocks
* =========================================================================
*/
void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
ASSERT3S(type, ==, RW_DEFAULT);
ASSERT3P(arg, ==, NULL);
VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
rwlp->rw_owner = RW_INIT;
rwlp->rw_wr_owner = RW_INIT;
rwlp->rw_readers = 0;
rwlp->rw_magic = RW_MAGIC;
}
void
rw_destroy(krwlock_t *rwlp)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT(rwlp->rw_readers == 0 && rwlp->rw_wr_owner == RW_INIT);
VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
rwlp->rw_magic = 0;
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT3P(rwlp->rw_owner, !=, curthread);
ASSERT3P(rwlp->rw_wr_owner, !=, curthread);
if (rw == RW_READER) {
VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
atomic_inc_uint(&rwlp->rw_readers);
} else {
VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
ASSERT3U(rwlp->rw_readers, ==, 0);
rwlp->rw_wr_owner = curthread;
}
rwlp->rw_owner = curthread;
}
void
rw_exit(krwlock_t *rwlp)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
ASSERT(RW_LOCK_HELD(rwlp));
if (RW_READ_HELD(rwlp))
atomic_dec_uint(&rwlp->rw_readers);
else
rwlp->rw_wr_owner = RW_INIT;
rwlp->rw_owner = RW_INIT;
VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
}
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
int rv;
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
if (rw == RW_READER)
rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
else
rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
if (rv == 0) {
ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
if (rw == RW_READER)
atomic_inc_uint(&rwlp->rw_readers);
else {
ASSERT3U(rwlp->rw_readers, ==, 0);
rwlp->rw_wr_owner = curthread;
}
rwlp->rw_owner = curthread;
return (1);
}
VERIFY3S(rv, ==, EBUSY);
return (0);
}
int
rw_tryupgrade(krwlock_t *rwlp)
{
ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
return (0);
}
/*
* =========================================================================
* condition variables
* =========================================================================
*/
void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
ASSERT3S(type, ==, CV_DEFAULT);
cv->cv_magic = CV_MAGIC;
VERIFY0(pthread_cond_init(&cv->cv, NULL));
}
void
cv_destroy(kcondvar_t *cv)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
VERIFY0(pthread_cond_destroy(&cv->cv));
cv->cv_magic = 0;
}
void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
ASSERT3P(mutex_owner(mp), ==, curthread);
mp->m_owner = MTX_INIT;
VERIFY0(pthread_cond_wait(&cv->cv, &mp->m_lock));
mp->m_owner = curthread;
}
clock_t
cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
{
int error;
struct timeval tv;
timestruc_t ts;
clock_t delta;
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
delta = abstime - ddi_get_lbolt();
if (delta <= 0)
return (-1);
VERIFY(gettimeofday(&tv, NULL) == 0);
ts.tv_sec = tv.tv_sec + delta / hz;
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % hz) * (NANOSEC / hz);
if (ts.tv_nsec >= NANOSEC) {
ts.tv_sec++;
ts.tv_nsec -= NANOSEC;
}
ASSERT3P(mutex_owner(mp), ==, curthread);
mp->m_owner = MTX_INIT;
error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
mp->m_owner = curthread;
if (error == ETIMEDOUT)
return (-1);
VERIFY0(error);
return (1);
}
/*ARGSUSED*/
clock_t
cv_timedwait_hires(kcondvar_t *cv, kmutex_t *mp, hrtime_t tim, hrtime_t res,
int flag)
{
int error;
struct timeval tv;
timestruc_t ts;
hrtime_t delta;
ASSERT(flag == 0 || flag == CALLOUT_FLAG_ABSOLUTE);
delta = tim;
if (flag & CALLOUT_FLAG_ABSOLUTE)
delta -= gethrtime();
if (delta <= 0)
return (-1);
VERIFY(gettimeofday(&tv, NULL) == 0);
ts.tv_sec = tv.tv_sec + delta / NANOSEC;
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % NANOSEC);
if (ts.tv_nsec >= NANOSEC) {
ts.tv_sec++;
ts.tv_nsec -= NANOSEC;
}
ASSERT(mutex_owner(mp) == curthread);
mp->m_owner = MTX_INIT;
error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
mp->m_owner = curthread;
if (error == ETIMEDOUT)
return (-1);
VERIFY0(error);
return (1);
}
void
cv_signal(kcondvar_t *cv)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
VERIFY0(pthread_cond_signal(&cv->cv));
}
void
cv_broadcast(kcondvar_t *cv)
{
ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
VERIFY0(pthread_cond_broadcast(&cv->cv));
}
/*
* =========================================================================
* vnode operations
* =========================================================================
*/
/*
* Note: for the xxxat() versions of these functions, we assume that the
* starting vp is always rootdir (which is true for spa_directory.c, the only
* ZFS consumer of these interfaces). We assert this is true, and then emulate
* them by adding '/' in front of the path.
*/
/*ARGSUSED*/
int
vn_open(char *path, int x1, int flags, int mode, vnode_t **vpp, int x2, int x3)
{
int fd = -1;
int dump_fd = -1;
vnode_t *vp;
int old_umask = 0;
char *realpath;
struct stat64 st;
int err;
realpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
/*
* If we're accessing a real disk from userland, we need to use
* the character interface to avoid caching. This is particularly
* important if we're trying to look at a real in-kernel storage
* pool from userland, e.g. via zdb, because otherwise we won't
* see the changes occurring under the segmap cache.
* On the other hand, the stupid character device returns zero
* for its size. So -- gag -- we open the block device to get
* its size, and remember it for subsequent VOP_GETATTR().
*/
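/*
* Example of the rewrite done below (illustrative Solaris device path):
* for path "/dev/dsk/c0t0d0s0" the block device is opened once to learn
* its size, and realpath becomes "/dev/rdsk/c0t0d0s0", the raw
* (character) device used for all subsequent I/O.
*/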
#if defined(__sun__) || defined(__sun)
if (strncmp(path, "/dev/", 5) == 0) {
#else
if (0) {
#endif
char *dsk;
fd = open64(path, O_RDONLY);
if (fd == -1) {
err = errno;
umem_free(realpath, MAXPATHLEN);
return (err);
}
if (fstat64(fd, &st) == -1) {
err = errno;
close(fd);
umem_free(realpath, MAXPATHLEN);
return (err);
}
close(fd);
(void) sprintf(realpath, "%s", path);
dsk = strstr(path, "/dsk/");
if (dsk != NULL)
(void) sprintf(realpath + (dsk - path) + 1, "r%s",
dsk + 1);
} else {
(void) sprintf(realpath, "%s", path);
if (!(flags & FCREAT) && stat64(realpath, &st) == -1) {
err = errno;
umem_free(realpath, MAXPATHLEN);
return (err);
}
}
if (!(flags & FCREAT) && S_ISBLK(st.st_mode)) {
#ifdef __linux__
flags |= O_DIRECT;
#endif
/* We shouldn't be writing to block devices in userspace */
VERIFY(!(flags & FWRITE));
}
if (flags & FCREAT)
old_umask = umask(0);
/*
* The construct 'flags - FREAD' conveniently maps combinations of
* FREAD and FWRITE to the corresponding O_RDONLY, O_WRONLY, and O_RDWR.
*/
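/*
* Sketch of that mapping, assuming the usual illumos/POSIX values
* FREAD == 0x01, FWRITE == 0x02, O_RDONLY == 0, O_WRONLY == 1 and
* O_RDWR == 2:
*
*     FREAD          -> 1 - 1 == 0 == O_RDONLY
*     FWRITE         -> 2 - 1 == 1 == O_WRONLY
*     FREAD | FWRITE -> 3 - 1 == 2 == O_RDWR
*/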
fd = open64(realpath, flags - FREAD, mode);
if (fd == -1) {
err = errno;
umem_free(realpath, MAXPATHLEN);
return (err);
}
if (flags & FCREAT)
(void) umask(old_umask);
if (vn_dumpdir != NULL) {
char *dumppath = umem_zalloc(MAXPATHLEN, UMEM_NOFAIL);
(void) snprintf(dumppath, MAXPATHLEN,
"%s/%s", vn_dumpdir, basename(realpath));
dump_fd = open64(dumppath, O_CREAT | O_WRONLY, 0666);
umem_free(dumppath, MAXPATHLEN);
if (dump_fd == -1) {
err = errno;
umem_free(realpath, MAXPATHLEN);
close(fd);
return (err);
}
} else {
dump_fd = -1;
}
umem_free(realpath, MAXPATHLEN);
if (fstat64_blk(fd, &st) == -1) {
err = errno;
close(fd);
if (dump_fd != -1)
close(dump_fd);
return (err);
}
(void) fcntl(fd, F_SETFD, FD_CLOEXEC);
*vpp = vp = umem_zalloc(sizeof (vnode_t), UMEM_NOFAIL);
vp->v_fd = fd;
vp->v_size = st.st_size;
vp->v_path = spa_strdup(path);
vp->v_dump_fd = dump_fd;
return (0);
}
/*ARGSUSED*/
int
vn_openat(char *path, int x1, int flags, int mode, vnode_t **vpp, int x2,
int x3, vnode_t *startvp, int fd)
{
char *realpath = umem_alloc(strlen(path) + 2, UMEM_NOFAIL);
int ret;
ASSERT(startvp == rootdir);
(void) sprintf(realpath, "/%s", path);
/* fd ignored for now; needed if we want to simulate nbmand support */
ret = vn_open(realpath, x1, flags, mode, vpp, x2, x3);
umem_free(realpath, strlen(path) + 2);
return (ret);
}
/*ARGSUSED*/
int
vn_rdwr(int uio, vnode_t *vp, void *addr, ssize_t len, offset_t offset,
int x1, int x2, rlim64_t x3, void *x4, ssize_t *residp)
{
ssize_t rc, done = 0, split;
if (uio == UIO_READ) {
rc = pread64(vp->v_fd, addr, len, offset);
if (vp->v_dump_fd != -1 && rc != -1) {
int status;
status = pwrite64(vp->v_dump_fd, addr, rc, offset);
ASSERT(status != -1);
}
} else {
/*
* To simulate partial disk writes, we split writes into two
* system calls so that the process can be killed in between.
*/
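/*
* Worked example (arbitrary numbers): for len == 8192, i.e. 16 sectors
* of SPA_MINBLOCKSIZE, and rand() % 16 == 5, the first pwrite64() covers
* bytes [0, 2560) and the second covers [2560, 8192), so killing the
* process between the two calls leaves a torn write on disk.
*/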
int sectors = len >> SPA_MINBLOCKSHIFT;
split = (sectors > 0 ? rand() % sectors : 0) <<
SPA_MINBLOCKSHIFT;
rc = pwrite64(vp->v_fd, addr, split, offset);
if (rc != -1) {
done = rc;
rc = pwrite64(vp->v_fd, (char *)addr + split,
len - split, offset + split);
}
}
#ifdef __linux__
if (rc == -1 && errno == EINVAL) {
/*
* Under Linux, this most likely means an alignment issue
* (memory or disk) due to O_DIRECT, so we abort() in order to
* catch the offender.
*/
abort();
}
#endif
if (rc == -1)
return (errno);
done += rc;
if (residp)
*residp = len - done;
else if (done != len)
return (EIO);
return (0);
}
void
vn_close(vnode_t *vp)
{
close(vp->v_fd);
if (vp->v_dump_fd != -1)
close(vp->v_dump_fd);
spa_strfree(vp->v_path);
umem_free(vp, sizeof (vnode_t));
}
/*
* At a minimum we need to update the size since vdev_reopen()
* will no longer call vn_openat().
*/
int
fop_getattr(vnode_t *vp, vattr_t *vap)
{
struct stat64 st;
int err;
if (fstat64_blk(vp->v_fd, &st) == -1) {
err = errno;
close(vp->v_fd);
return (err);
}
vap->va_size = st.st_size;
return (0);
}
/*
* =========================================================================
* Figure out which debugging statements to print
* =========================================================================
*/
static char *dprintf_string;
static int dprintf_print_all;
int
dprintf_find_string(const char *string)
{
char *tmp_str = dprintf_string;
int len = strlen(string);
/*
* Find out if this is a string we want to print.
* String format: file1.c,function_name1,file2.c,file3.c
*/
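/*
* For example (hypothetical setting), with dprintf_string set to
* "dbuf.c,spa_open", both dprintf_find_string("dbuf.c") and
* dprintf_find_string("spa_open") return 1, while
* dprintf_find_string("dbuf") returns 0 because a match must end at a
* ',' or at the terminating NUL.
*/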
while (tmp_str != NULL) {
if (strncmp(tmp_str, string, len) == 0 &&
(tmp_str[len] == ',' || tmp_str[len] == '\0'))
return (1);
tmp_str = strchr(tmp_str, ',');
if (tmp_str != NULL)
tmp_str++; /* Get rid of , */
}
return (0);
}
void
dprintf_setup(int *argc, char **argv)
{
int i, j;
/*
* Debugging can be specified two ways: by setting the
* environment variable ZFS_DEBUG, or by including a
* "debug=..." argument on the command line. The command
* line setting overrides the environment variable.
*/
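/*
* Illustrative invocations (hypothetical pool and file names):
*
*     ZFS_DEBUG=on zdb tank           # print all dprintf output
*     ztest debug=dbuf.c,spa_open     # only these files/functions
*
* Special tokens such as "on", "pid", "tid", "cpu", "time" and "long"
* are recognized here and in __dprintf() below.
*/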
for (i = 1; i < *argc; i++) {
int len = strlen("debug=");
/* First look for a command line argument */
if (strncmp("debug=", argv[i], len) == 0) {
dprintf_string = argv[i] + len;
/* Remove from args */
for (j = i; j < *argc; j++)
argv[j] = argv[j+1];
argv[j] = NULL;
(*argc)--;
}
}
if (dprintf_string == NULL) {
/* Look for ZFS_DEBUG environment variable */
dprintf_string = getenv("ZFS_DEBUG");
}
/*
* Are we just turning on all debugging?
*/
if (dprintf_find_string("on"))
dprintf_print_all = 1;
if (dprintf_string != NULL)
zfs_flags |= ZFS_DEBUG_DPRINTF;
}
/*
* =========================================================================
* debug printfs
* =========================================================================
*/
void
__dprintf(const char *file, const char *func, int line, const char *fmt, ...)
{
const char *newfile;
va_list adx;
/*
* Get rid of annoying "../common/" prefix to filename.
*/
newfile = strrchr(file, '/');
if (newfile != NULL) {
newfile = newfile + 1; /* Get rid of leading / */
} else {
newfile = file;
}
if (dprintf_print_all ||
dprintf_find_string(newfile) ||
dprintf_find_string(func)) {
/* Print out just the function name if requested */
flockfile(stdout);
if (dprintf_find_string("pid"))
(void) printf("%d ", getpid());
if (dprintf_find_string("tid"))
(void) printf("%u ", (uint_t)pthread_self());
if (dprintf_find_string("cpu"))
(void) printf("%u ", getcpuid());
if (dprintf_find_string("time"))
(void) printf("%llu ", gethrtime());
if (dprintf_find_string("long"))
(void) printf("%s, line %d: ", newfile, line);
(void) printf("%s: ", func);
va_start(adx, fmt);
(void) vprintf(fmt, adx);
va_end(adx);
funlockfile(stdout);
}
}
/*
* =========================================================================
* cmn_err() and panic()
* =========================================================================
*/
static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
void
vpanic(const char *fmt, va_list adx)
{
(void) fprintf(stderr, "error: ");
(void) vfprintf(stderr, fmt, adx);
(void) fprintf(stderr, "\n");
abort(); /* think of it as a "user-level crash dump" */
}
void
panic(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vpanic(fmt, adx);
va_end(adx);
}
void
vcmn_err(int ce, const char *fmt, va_list adx)
{
if (ce == CE_PANIC)
vpanic(fmt, adx);
if (ce != CE_NOTE) { /* suppress noise in userland stress testing */
(void) fprintf(stderr, "%s", ce_prefix[ce]);
(void) vfprintf(stderr, fmt, adx);
(void) fprintf(stderr, "%s", ce_suffix[ce]);
}
}
/*PRINTFLIKE2*/
void
cmn_err(int ce, const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(ce, fmt, adx);
va_end(adx);
}
/*
* =========================================================================
* kobj interfaces
* =========================================================================
*/
struct _buf *
kobj_open_file(char *name)
{
struct _buf *file;
vnode_t *vp;
/* set vp as the _fd field of the file */
if (vn_openat(name, UIO_SYSSPACE, FREAD, 0, &vp, 0, 0, rootdir,
-1) != 0)
return ((void *)-1UL);
file = umem_zalloc(sizeof (struct _buf), UMEM_NOFAIL);
file->_fd = (intptr_t)vp;
return (file);
}
int
kobj_read_file(struct _buf *file, char *buf, unsigned size, unsigned off)
{
ssize_t resid = 0;
if (vn_rdwr(UIO_READ, (vnode_t *)file->_fd, buf, size, (offset_t)off,
UIO_SYSSPACE, 0, 0, 0, &resid) != 0)
return (-1);
return (size - resid);
}
void
kobj_close_file(struct _buf *file)
{
vn_close((vnode_t *)file->_fd);
umem_free(file, sizeof (struct _buf));
}
int
kobj_get_filesize(struct _buf *file, uint64_t *size)
{
struct stat64 st;
vnode_t *vp = (vnode_t *)file->_fd;
if (fstat64(vp->v_fd, &st) == -1) {
vn_close(vp);
return (errno);
}
*size = st.st_size;
return (0);
}
/*
* =========================================================================
* misc routines
* =========================================================================
*/
void
delay(clock_t ticks)
{
(void) poll(0, 0, ticks * (1000 / hz));
}
/*
* Find highest one bit set.
* Returns bit number + 1 of highest bit that is set, otherwise returns 0.
* High order bit is 63.
*/
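/*
* For example, highbit64(0x50ULL) returns 7 (bit 6 is the highest bit
* set) and highbit64(0) returns 0.
*/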
int
highbit64(uint64_t i)
{
register int h = 1;
if (i == 0)
return (0);
if (i & 0xffffffff00000000ULL) {
h += 32; i >>= 32;
}
if (i & 0xffff0000) {
h += 16; i >>= 16;
}
if (i & 0xff00) {
h += 8; i >>= 8;
}
if (i & 0xf0) {
h += 4; i >>= 4;
}
if (i & 0xc) {
h += 2; i >>= 2;
}
if (i & 0x2) {
h += 1;
}
return (h);
}
/*
* Find lowest one bit set.
* Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
* This is basically a reimplementation of ffsll(), which is GNU specific.
*/
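/*
* For example, lowbit64(0x50ULL) returns 5 (bit 4 is the lowest bit
* set), matching GNU ffsll(0x50), and lowbit64(0) returns 0.
*/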
int
lowbit64(uint64_t i)
{
register int h = 64;
if (i == 0)
return (0);
if (i & 0x00000000ffffffffULL)
h -= 32;
else
i >>= 32;
if (i & 0x0000ffff)
h -= 16;
else
i >>= 16;
if (i & 0x00ff)
h -= 8;
else
i >>= 8;
if (i & 0x0f)
h -= 4;
else
i >>= 4;
if (i & 0x3)
h -= 2;
else
i >>= 2;
if (i & 0x1)
h -= 1;
return (h);
}
/*
* Find highest one bit set.
* Returns bit number + 1 of highest bit that is set, otherwise returns 0.
* High order bit is 31 (or 63 in _LP64 kernel).
*/
int
highbit(ulong_t i)
{
register int h = 1;
if (i == 0)
return (0);
#ifdef _LP64
if (i & 0xffffffff00000000ul) {
h += 32; i >>= 32;
}
#endif
if (i & 0xffff0000) {
h += 16; i >>= 16;
}
if (i & 0xff00) {
h += 8; i >>= 8;
}
if (i & 0xf0) {
h += 4; i >>= 4;
}
if (i & 0xc) {
h += 2; i >>= 2;
}
if (i & 0x2) {
h += 1;
}
return (h);
}
/*
* Find lowest one bit set.
* Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
* Low order bit is 0.
*/
int
lowbit(ulong_t i)
{
register int h = 1;
if (i == 0)
return (0);
#ifdef _LP64
if (!(i & 0xffffffff)) {
h += 32; i >>= 32;
}
#endif
if (!(i & 0xffff)) {
h += 16; i >>= 16;
}
if (!(i & 0xff)) {
h += 8; i >>= 8;
}
if (!(i & 0xf)) {
h += 4; i >>= 4;
}
if (!(i & 0x3)) {
h += 2; i >>= 2;
}
if (!(i & 0x1)) {
h += 1;
}
return (h);
}
static int random_fd = -1, urandom_fd = -1;
void
random_init(void)
{
VERIFY((random_fd = open("/dev/random", O_RDONLY)) != -1);
VERIFY((urandom_fd = open("/dev/urandom", O_RDONLY)) != -1);
}
void
random_fini(void)
{
close(random_fd);
close(urandom_fd);
random_fd = -1;
urandom_fd = -1;
}
static int
random_get_bytes_common(uint8_t *ptr, size_t len, int fd)
{
size_t resid = len;
ssize_t bytes;
ASSERT(fd != -1);
while (resid != 0) {
bytes = read(fd, ptr, resid);
ASSERT3S(bytes, >=, 0);
ptr += bytes;
resid -= bytes;
}
return (0);
}
int
random_get_bytes(uint8_t *ptr, size_t len)
{
return (random_get_bytes_common(ptr, len, random_fd));
}
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
return (random_get_bytes_common(ptr, len, urandom_fd));
}
int
ddi_strtoul(const char *hw_serial, char **nptr, int base, unsigned long *result)
{
char *end;
*result = strtoul(hw_serial, &end, base);
if (*result == 0)
return (errno);
return (0);
}
int
ddi_strtoull(const char *str, char **nptr, int base, u_longlong_t *result)
{
char *end;
*result = strtoull(str, &end, base);
if (*result == 0)
return (errno);
return (0);
}
utsname_t *
utsname(void)
{
return (&hw_utsname);
}
/*
* =========================================================================
* kernel emulation setup & teardown
* =========================================================================
*/
static int
umem_out_of_memory(void)
{
char errmsg[] = "out of memory -- generating core dump\n";
(void) fprintf(stderr, "%s", errmsg);
abort();
return (0);
}
static unsigned long
get_spl_hostid(void)
{
FILE *f;
unsigned long hostid;
f = fopen("/sys/module/spl/parameters/spl_hostid", "r");
if (!f)
return (0);
if (fscanf(f, "%lu", &hostid) != 1)
hostid = 0;
fclose(f);
return (hostid & 0xffffffff);
}
unsigned long
get_system_hostid(void)
{
unsigned long system_hostid = get_spl_hostid();
if (system_hostid == 0)
system_hostid = gethostid() & 0xffffffff;
return (system_hostid);
}
void
kernel_init(int mode)
{
extern uint_t rrw_tsd_key;
umem_nofail_callback(umem_out_of_memory);
physmem = sysconf(_SC_PHYS_PAGES);
dprintf("physmem = %llu pages (%.2f GB)\n", physmem,
(double)physmem * sysconf(_SC_PAGE_SIZE) / (1ULL << 30));
(void) snprintf(hw_serial, sizeof (hw_serial), "%ld",
(mode & FWRITE) ? get_system_hostid() : 0);
random_init();
VERIFY0(uname(&hw_utsname));
thread_init();
system_taskq_init();
icp_init();
spa_init(mode);
fletcher_4_init();
tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
}
void
kernel_fini(void)
{
fletcher_4_fini();
spa_fini();
icp_fini();
system_taskq_fini();
thread_fini();
random_fini();
}
uid_t
crgetuid(cred_t *cr)
{
return (0);
}
uid_t
crgetruid(cred_t *cr)
{
return (0);
}
gid_t
crgetgid(cred_t *cr)
{
return (0);
}
int
crgetngroups(cred_t *cr)
{
return (0);
}
gid_t *
crgetgroups(cred_t *cr)
{
return (NULL);
}
int
zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
{
return (0);
}
int
zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
{
return (0);
}
int
zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
{
return (0);
}
int
secpolicy_zfs(const cred_t *cr)
{
return (0);
}
ksiddomain_t *
ksid_lookupdomain(const char *dom)
{
ksiddomain_t *kd;
kd = umem_zalloc(sizeof (ksiddomain_t), UMEM_NOFAIL);
kd->kd_name = spa_strdup(dom);
return (kd);
}
void
ksiddomain_rele(ksiddomain_t *ksid)
{
spa_strfree(ksid->kd_name);
umem_free(ksid, sizeof (ksiddomain_t));
}
char *
kmem_vasprintf(const char *fmt, va_list adx)
{
char *buf = NULL;
va_list adx_copy;
va_copy(adx_copy, adx);
VERIFY(vasprintf(&buf, fmt, adx_copy) != -1);
va_end(adx_copy);
return (buf);
}
char *
kmem_asprintf(const char *fmt, ...)
{
char *buf = NULL;
va_list adx;
va_start(adx, fmt);
VERIFY(vasprintf(&buf, fmt, adx) != -1);
va_end(adx);
return (buf);
}
/* ARGSUSED */
int
zfs_onexit_fd_hold(int fd, minor_t *minorp)
{
*minorp = 0;
return (0);
}
/* ARGSUSED */
void
zfs_onexit_fd_rele(int fd)
{
}
/* ARGSUSED */
int
zfs_onexit_add_cb(minor_t minor, void (*func)(void *), void *data,
uint64_t *action_handle)
{
return (0);
}
/* ARGSUSED */
int
zfs_onexit_del_cb(minor_t minor, uint64_t action_handle, boolean_t fire)
{
return (0);
}
/* ARGSUSED */
int
zfs_onexit_cb_data(minor_t minor, uint64_t action_handle, void **data)
{
return (0);
}
fstrans_cookie_t
spl_fstrans_mark(void)
{
return ((fstrans_cookie_t)0);
}
void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
}
int
spl_fstrans_check(void)
{
return (0);
}
void *zvol_tag = "zvol_tag";
void
zvol_create_minors(spa_t *spa, const char *name, boolean_t async)
{
}
void
zvol_remove_minor(spa_t *spa, const char *name, boolean_t async)
{
}
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
}
void
zvol_rename_minors(spa_t *spa, const char *oldname, const char *newname,
boolean_t async)
{
}