Add libtpool (thread pools)

OpenZFS provides a library called tpool which implements thread
pools for user space applications.  Porting this library means
the zpool utility no longer needs to borrow the kernel mutex and
taskq interfaces from libzpool.  The zpool code was updated to
use the tpool library, which behaves in a very similar fashion.
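
As a rough illustration, the converted zpool code now follows the
pattern sketched below.  This is only a hedged example: run_one(),
NUM_JOBS, and main() are illustrative stand-ins, while tpool_create(),
tpool_dispatch(), tpool_wait(), and tpool_destroy() are the libtpool
entry points added by this change.

    /* Minimal usage sketch of the tpool API (illustrative only). */
    #include <thread_pool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NUM_JOBS 16

    static void
    run_one(void *arg)
    {
        /* Each dispatched job runs on one of the pool's worker threads. */
        printf("job %ld\n", (long)(uintptr_t)arg);
    }

    int
    main(void)
    {
        /* Up to 5 workers per online CPU, no linger, default attributes. */
        tpool_t *t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
            0, NULL);
        if (t == NULL)
            return (1);

        for (long i = 0; i < NUM_JOBS; i++)
            (void) tpool_dispatch(t, run_one, (void *)(uintptr_t)i);

        tpool_wait(t);      /* block until every dispatched job completes */
        tpool_destroy(t);
        return (0);
    }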

Porting libtpool was relatively straightforward, and only minimal
modifications were needed.  The core changes were:

* Fully converted the library to use pthreads.
* Updated signal handling.
* Converted lmalloc()/lfree() to calloc() and free().
* Implemented a portable pthread_attr_clone() function (an abridged
  sketch follows below).
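
The portable pthread_attr_clone() mentioned above has no standard
libc equivalent, so it is built from the ordinary attribute
getter/setter pairs.  An abridged sketch of the idea follows; the
complete version, which also copies affinity, scheduling, scope, and
stack settings, is in lib/libtpool/thread_pool.c further down.

    #include <pthread.h>

    /* Abridged sketch; only two attributes are copied here. */
    static int
    pthread_attr_clone(pthread_attr_t *attr, const pthread_attr_t *old_attr)
    {
        int error = pthread_attr_init(attr);
        if (error || old_attr == NULL)
            return (error);

        int detachstate;
        error = pthread_attr_getdetachstate(old_attr, &detachstate);
        if (error == 0)
            error = pthread_attr_setdetachstate(attr, detachstate);

        size_t guardsize;
        if (error == 0)
            error = pthread_attr_getguardsize(old_attr, &guardsize);
        if (error == 0)
            error = pthread_attr_setguardsize(attr, guardsize);

        if (error != 0)
            (void) pthread_attr_destroy(attr);
        return (error);
    }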

Finally, the build system was updated so that libzpool.so is no
longer linked into zfs(8), zpool(8), etc.  All that is required is
libzfs, to which the zcommon sources were added (which is the way
it always should have been).  Removing the libzpool dependency
resulted in several build issues which needed to be resolved:

* Moved zfeature support to module/zcommon/zfeature_common.c
* Moved ratelimiting to module/zfs/zfs_ratelimit.c
* Moved get_system_hostid() to lib/libspl/gethostid.c
* Removed use of cmn_err() in zcommon source
* Removed dprintf_setup() call from zpool_main.c and zfs_main.c
* Removed highbit() and lowbit()
* Removed unnecessary library dependencies from Makefiles
* Removed fletcher-4 kstat in user space
* Added sha2 support explicitly to libzfs
* Added highbit64() and lowbit64() to zpool_util.c

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #6442
Brian Behlendorf 2017-08-09 15:31:08 -07:00 committed by GitHub
parent 5146d802b4
commit 46364cb2f3
46 changed files with 1191 additions and 410 deletions


@ -16,7 +16,4 @@ mount_zfs_SOURCES = \
 mount_zfs_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
-	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
+	$(top_builddir)/lib/libzfs/libzfs.la


@ -15,8 +15,6 @@ raidz_test_SOURCES = \
 	raidz_bench.c
 raidz_test_LDADD = \
-	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
 	$(top_builddir)/lib/libzpool/libzpool.la
 raidz_test_LDADD += -lm -ldl


@ -14,7 +14,5 @@ zdb_SOURCES = \
 zdb_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
 	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
+	$(top_builddir)/lib/libzpool/libzpool.la


@ -40,15 +40,12 @@ FMA_SRC = \
 zed_SOURCES = $(ZED_SRC) $(FMA_SRC)
 zed_LDADD = \
-	$(top_builddir)/lib/libavl/libavl.la \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libspl/libspl.la \
 	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
-	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
-zed_LDFLAGS = -lrt -pthread
+	$(top_builddir)/lib/libzfs/libzfs.la
+zed_LDADD += -lrt
+zed_LDFLAGS = -pthread
 zedconfdir = $(sysconfdir)/zfs/zed.d


@ -15,8 +15,5 @@ zfs_SOURCES = \
 zfs_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
 	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
 	$(top_builddir)/lib/libzfs/libzfs.la \
 	$(top_builddir)/lib/libzfs_core/libzfs_core.la
-zfs_LDFLAGS = -pthread


@ -55,6 +55,7 @@
 #include <sys/mount.h>
 #include <sys/stat.h>
 #include <sys/fs/zfs.h>
+#include <sys/systeminfo.h>
 #include <sys/types.h>
 #include <time.h>
@ -7045,8 +7046,6 @@ main(int argc, char **argv)
 	(void) setlocale(LC_ALL, "");
 	(void) textdomain(TEXT_DOMAIN);
-	dprintf_setup(&argc, argv);
 	opterr = 0;
 	/*


@ -11,7 +11,5 @@ zhack_SOURCES = \
 zhack_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
 	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
+	$(top_builddir)/lib/libzpool/libzpool.la


@ -13,7 +13,5 @@ zinject_SOURCES = \
 zinject_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
 	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
+	$(top_builddir)/lib/libzpool/libzpool.la


@ -16,10 +16,9 @@ zpool_SOURCES = \
 zpool_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
 	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
-	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la \
-	-lm $(LIBBLKID)
+	$(top_builddir)/lib/libzfs/libzfs.la
+zpool_LDADD += -lm $(LIBBLKID)
 zpoolconfdir = $(sysconfdir)/zfs/zpool.d
 zpoolexecdir = $(libexecdir)/zfs/zpool.d


@ -33,6 +33,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <strings.h>
+#include <thread_pool.h>
 #include <libzfs.h>
 #include <sys/zfs_context.h>
@ -668,34 +669,21 @@ all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
 static void
 all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
 {
-	taskq_t *t;
-	int i;
-	/* 5 * boot_ncpus selfishly chosen since it works best on LLNL's HW */
-	int max_threads = 5 * boot_ncpus;
-	/*
-	 * Under Linux we use a taskq to parallelize running a command
-	 * on each vdev. It is therefore necessary to initialize this
-	 * functionality for the duration of the threads.
-	 */
-	thread_init();
-	t = taskq_create("z_pool_cmd", max_threads, defclsyspri, max_threads,
-	    INT_MAX, 0);
+	tpool_t *t;
+	t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
 	if (t == NULL)
 		return;
 	/* Spawn off the command for each vdev */
-	for (i = 0; i < vcdl->count; i++) {
-		(void) taskq_dispatch(t, vdev_run_cmd_thread,
-		    (void *) &vcdl->data[i], TQ_SLEEP);
+	for (int i = 0; i < vcdl->count; i++) {
+		(void) tpool_dispatch(t, vdev_run_cmd_thread,
+		    (void *) &vcdl->data[i]);
 	}
 	/* Wait for threads to finish */
-	taskq_wait(t);
-	taskq_destroy(t);
-	thread_fini();
+	tpool_wait(t);
+	tpool_destroy(t);
 }
 /*


@ -50,6 +50,7 @@
 #include <zfs_prop.h>
 #include <sys/fs/zfs.h>
 #include <sys/stat.h>
+#include <sys/systeminfo.h>
 #include <sys/fm/fs/zfs.h>
 #include <sys/fm/util.h>
 #include <sys/fm/protocol.h>
@ -2645,15 +2646,7 @@ zpool_do_import(int argc, char **argv)
 	idata.cachefile = cachefile;
 	idata.scan = do_scan;
-	/*
-	 * Under Linux the zpool_find_import_impl() function leverages the
-	 * taskq implementation to parallelize device scanning. It is
-	 * therefore necessary to initialize this functionality for the
-	 * duration of the zpool_search_import() function.
-	 */
-	thread_init();
 	pools = zpool_search_import(g_zfs, &idata);
-	thread_fini();
 	if (pools != NULL && idata.exists &&
 	    (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
@ -7968,8 +7961,6 @@ main(int argc, char **argv)
 	(void) textdomain(TEXT_DOMAIN);
 	srand(time(NULL));
-	dprintf_setup(&argc, argv);
 	opterr = 0;
 	/*


@ -111,3 +111,29 @@ isnumber(char *str)
 	return (1);
 }
+
+/*
+ * Find highest one bit set.
+ * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
+ */
+int
+highbit64(uint64_t i)
+{
+	if (i == 0)
+		return (0);
+
+	return (NBBY * sizeof (uint64_t) - __builtin_clzll(i));
+}
+
+/*
+ * Find lowest one bit set.
+ * Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
+ */
+int
+lowbit64(uint64_t i)
+{
+	if (i == 0)
+		return (0);
+
+	return (__builtin_ffsll(i));
+}


@ -43,6 +43,8 @@ void zpool_no_memory(void);
 uint_t num_logs(nvlist_t *nv);
 uint64_t array64_max(uint64_t array[], unsigned int len);
 int isnumber(char *str);
+int highbit64(uint64_t i);
+int lowbit64(uint64_t i);
 /*
  * Misc utility functions


@ -11,7 +11,4 @@ zstreamdump_SOURCES = \
 zstreamdump_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
-	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
+	$(top_builddir)/lib/libzfs/libzfs.la


@ -16,9 +16,8 @@ ztest_SOURCES = \
 ztest_LDADD = \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(top_builddir)/lib/libzpool/libzpool.la \
 	$(top_builddir)/lib/libzfs/libzfs.la \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la
+	$(top_builddir)/lib/libzpool/libzpool.la
 ztest_LDADD += -lm
+ztest_LDFLAGS = -pthread


@ -87,6 +87,7 @@ AC_CONFIG_FILES([
 	lib/libefi/Makefile
 	lib/libicp/Makefile
 	lib/libnvpair/Makefile
+	lib/libtpool/Makefile
 	lib/libunicode/Makefile
 	lib/libuutil/Makefile
 	lib/libzpool/Makefile


@ -19,7 +19,8 @@ USER_H = \
 	$(top_srcdir)/include/libuutil_impl.h \
 	$(top_srcdir)/include/libzfs.h \
 	$(top_srcdir)/include/libzfs_core.h \
-	$(top_srcdir)/include/libzfs_impl.h
+	$(top_srcdir)/include/libzfs_impl.h \
+	$(top_srcdir)/include/thread_pool.h
 EXTRA_DIST = $(COMMON_H) $(KERNEL_H) $(USER_H)


@ -368,7 +368,6 @@ typedef enum {
 	ZPOOL_STATUS_OK
 } zpool_status_t;
-extern unsigned long get_system_hostid(void);
 extern zpool_status_t zpool_get_status(zpool_handle_t *, char **,
     zpool_errata_t *);
 extern zpool_status_t zpool_import_status(nvlist_t *, char **,


@ -668,15 +668,11 @@ extern uint64_t physmem;
 extern int highbit64(uint64_t i);
 extern int lowbit64(uint64_t i);
-extern int highbit(ulong_t i);
-extern int lowbit(ulong_t i);
 extern int random_get_bytes(uint8_t *ptr, size_t len);
 extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len);
 extern void kernel_init(int);
 extern void kernel_fini(void);
-extern void thread_init(void);
-extern void thread_fini(void);
 extern void random_init(void);
 extern void random_fini(void);

include/thread_pool.h Normal file

@ -0,0 +1,72 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _THREAD_POOL_H_
#define _THREAD_POOL_H_
#include <sys/types.h>
#include <thread.h>
#include <pthread.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct tpool tpool_t; /* opaque thread pool descriptor */
#if defined(__STDC__)
extern tpool_t *tpool_create(uint_t min_threads, uint_t max_threads,
uint_t linger, pthread_attr_t *attr);
extern int tpool_dispatch(tpool_t *tpool,
void (*func)(void *), void *arg);
extern void tpool_destroy(tpool_t *tpool);
extern void tpool_abandon(tpool_t *tpool);
extern void tpool_wait(tpool_t *tpool);
extern void tpool_suspend(tpool_t *tpool);
extern int tpool_suspended(tpool_t *tpool);
extern void tpool_resume(tpool_t *tpool);
extern int tpool_member(tpool_t *tpool);
#else /* Non ANSI */
extern tpool_t *tpool_create();
extern int tpool_dispatch();
extern void tpool_destroy();
extern void tpool_abandon();
extern void tpool_wait();
extern void tpool_suspend();
extern int tpool_suspended();
extern void tpool_resume();
extern int tpool_member();
#endif /* __STDC__ */
#ifdef __cplusplus
}
#endif
#endif /* _THREAD_POOL_H_ */


@ -1,6 +1,6 @@
 # NB: GNU Automake Manual, Chapter 8.3.5: Libtool Convenience Libraries
 # These six libraries are intermediary build components.
-SUBDIRS = libspl libavl libefi libshare libunicode libicp
+SUBDIRS = libavl libefi libicp libshare libspl libtpool libunicode
 # These four libraries, which are installed as the final build product,
 # incorporate the six convenience libraries given above.


@ -25,9 +25,7 @@ nodist_libnvpair_la_SOURCES = \
 	$(USER_C) \
 	$(KERNEL_C)
-libnvpair_la_LIBADD = \
-	$(top_builddir)/lib/libuutil/libuutil.la \
-	$(LIBTIRPC)
+libnvpair_la_LIBADD = $(LIBTIRPC)
 libnvpair_la_LDFLAGS = -version-info 1:1:0
 EXTRA_DIST = $(USER_C)


@ -19,6 +19,7 @@ noinst_LTLIBRARIES = libspl.la
 USER_C = \
 	getexecname.c \
+	gethostid.c \
 	gethrtime.c \
 	gethrestime.c \
 	getmntany.c \

lib/libspl/gethostid.c Normal file

@ -0,0 +1,86 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
*/
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
static unsigned long
get_spl_hostid(void)
{
FILE *f;
unsigned long hostid;
char *env;
/*
* Allow the hostid to be subverted for testing.
*/
env = getenv("ZFS_HOSTID");
if (env) {
hostid = strtoull(env, NULL, 0);
return (hostid & HOSTID_MASK);
}
f = fopen("/sys/module/spl/parameters/spl_hostid", "r");
if (!f)
return (0);
if (fscanf(f, "%lu", &hostid) != 1)
hostid = 0;
fclose(f);
return (hostid & HOSTID_MASK);
}
unsigned long
get_system_hostid(void)
{
unsigned long system_hostid = get_spl_hostid();
/*
* We do not use the library call gethostid() because
* it generates a hostid value that the kernel is
* unaware of, if the spl_hostid module parameter has not
* been set and there is no system hostid file (e.g.
* /etc/hostid). The kernel and userspace must agree.
* See comments above hostid_read() in the SPL.
*/
if (system_hostid == 0) {
int fd, rc;
unsigned long hostid;
int hostid_size = 4; /* 4 bytes regardless of arch */
fd = open("/etc/hostid", O_RDONLY);
if (fd >= 0) {
rc = read(fd, &hostid, hostid_size);
if (rc > 0)
system_hostid = (hostid & HOSTID_MASK);
close(fd);
}
}
return (system_hostid);
}


@ -27,11 +27,12 @@
 #ifndef _LIBSPL_SYS_SYSTEMINFO_H
 #define _LIBSPL_SYS_SYSTEMINFO_H
+#define HOSTID_MASK 0xFFFFFFFF
 #define HW_INVALID_HOSTID 0xFFFFFFFF /* an invalid hostid */
 #define HW_HOSTID_LEN 11 /* minimum buffer size needed */
                          /* to hold a decimal or hex */
                          /* hostid string */
-#define sysinfo(cmd, buf, cnt) (-1)
+unsigned long get_system_hostid(void);
 #endif

lib/libtpool/Makefile.am Normal file

@ -0,0 +1,26 @@
include $(top_srcdir)/config/Rules.am
AM_CFLAGS += $(DEBUG_STACKFLAGS)
DEFAULT_INCLUDES += \
-I$(top_srcdir)/include \
-I$(top_srcdir)/lib/libspl/include
noinst_LTLIBRARIES = libtpool.la
USER_C = \
thread_pool.c \
thread_pool_impl.h
KERNEL_C =
nodist_libtpool_la_SOURCES = \
$(USER_C) \
$(KERNEL_C)
libtpool_la_LIBADD = \
$(top_builddir)/lib/libspl/libspl.la
libtpool_la_LDFLAGS = -pthread
EXTRA_DIST = $(USER_C)

lib/libtpool/thread_pool.c Normal file

@ -0,0 +1,650 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <stdlib.h>
#include <signal.h>
#include <errno.h>
#include <assert.h>
#include "thread_pool_impl.h"
static pthread_mutex_t thread_pool_lock = PTHREAD_MUTEX_INITIALIZER;
static tpool_t *thread_pools = NULL;
static void
delete_pool(tpool_t *tpool)
{
tpool_job_t *job;
ASSERT(tpool->tp_current == 0 && tpool->tp_active == NULL);
/*
* Unlink the pool from the global list of all pools.
*/
(void) pthread_mutex_lock(&thread_pool_lock);
if (thread_pools == tpool)
thread_pools = tpool->tp_forw;
if (thread_pools == tpool)
thread_pools = NULL;
else {
tpool->tp_back->tp_forw = tpool->tp_forw;
tpool->tp_forw->tp_back = tpool->tp_back;
}
pthread_mutex_unlock(&thread_pool_lock);
/*
* There should be no pending jobs, but just in case...
*/
for (job = tpool->tp_head; job != NULL; job = tpool->tp_head) {
tpool->tp_head = job->tpj_next;
free(job);
}
(void) pthread_attr_destroy(&tpool->tp_attr);
free(tpool);
}
/*
* Worker thread is terminating.
*/
static void
worker_cleanup(void *arg)
{
tpool_t *tpool = (tpool_t *)arg;
if (--tpool->tp_current == 0 &&
(tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
if (tpool->tp_flags & TP_ABANDON) {
pthread_mutex_unlock(&tpool->tp_mutex);
delete_pool(tpool);
return;
}
if (tpool->tp_flags & TP_DESTROY)
(void) pthread_cond_broadcast(&tpool->tp_busycv);
}
pthread_mutex_unlock(&tpool->tp_mutex);
}
static void
notify_waiters(tpool_t *tpool)
{
if (tpool->tp_head == NULL && tpool->tp_active == NULL) {
tpool->tp_flags &= ~TP_WAIT;
(void) pthread_cond_broadcast(&tpool->tp_waitcv);
}
}
/*
* Called by a worker thread on return from a tpool_dispatch()d job.
*/
static void
job_cleanup(void *arg)
{
tpool_t *tpool = (tpool_t *)arg;
pthread_t my_tid = pthread_self();
tpool_active_t *activep;
tpool_active_t **activepp;
pthread_mutex_lock(&tpool->tp_mutex);
/* CSTYLED */
for (activepp = &tpool->tp_active;; activepp = &activep->tpa_next) {
activep = *activepp;
if (activep->tpa_tid == my_tid) {
*activepp = activep->tpa_next;
break;
}
}
if (tpool->tp_flags & TP_WAIT)
notify_waiters(tpool);
}
static void *
tpool_worker(void *arg)
{
tpool_t *tpool = (tpool_t *)arg;
int elapsed;
tpool_job_t *job;
void (*func)(void *);
tpool_active_t active;
pthread_mutex_lock(&tpool->tp_mutex);
pthread_cleanup_push(worker_cleanup, tpool);
/*
* This is the worker's main loop.
* It will only be left if a timeout or an error has occured.
*/
active.tpa_tid = pthread_self();
for (;;) {
elapsed = 0;
tpool->tp_idle++;
if (tpool->tp_flags & TP_WAIT)
notify_waiters(tpool);
while ((tpool->tp_head == NULL ||
(tpool->tp_flags & TP_SUSPEND)) &&
!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
if (tpool->tp_current <= tpool->tp_minimum ||
tpool->tp_linger == 0) {
(void) pthread_cond_wait(&tpool->tp_workcv,
&tpool->tp_mutex);
} else {
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
ts.tv_sec += tpool->tp_linger;
if (pthread_cond_timedwait(&tpool->tp_workcv,
&tpool->tp_mutex, &ts) != 0) {
elapsed = 1;
break;
}
}
}
tpool->tp_idle--;
if (tpool->tp_flags & TP_DESTROY)
break;
if (tpool->tp_flags & TP_ABANDON) {
/* can't abandon a suspended pool */
if (tpool->tp_flags & TP_SUSPEND) {
tpool->tp_flags &= ~TP_SUSPEND;
(void) pthread_cond_broadcast(
&tpool->tp_workcv);
}
if (tpool->tp_head == NULL)
break;
}
if ((job = tpool->tp_head) != NULL &&
!(tpool->tp_flags & TP_SUSPEND)) {
elapsed = 0;
func = job->tpj_func;
arg = job->tpj_arg;
tpool->tp_head = job->tpj_next;
if (job == tpool->tp_tail)
tpool->tp_tail = NULL;
tpool->tp_njobs--;
active.tpa_next = tpool->tp_active;
tpool->tp_active = &active;
pthread_mutex_unlock(&tpool->tp_mutex);
pthread_cleanup_push(job_cleanup, tpool);
free(job);
sigset_t maskset;
(void) pthread_sigmask(SIG_SETMASK, NULL, &maskset);
/*
* Call the specified function.
*/
func(arg);
/*
* We don't know what this thread has been doing,
* so we reset its signal mask and cancellation
* state back to the values prior to calling func().
*/
(void) pthread_sigmask(SIG_SETMASK, &maskset, NULL);
(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED,
NULL);
(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE,
NULL);
pthread_cleanup_pop(1);
}
if (elapsed && tpool->tp_current > tpool->tp_minimum) {
/*
* We timed out and there is no work to be done
* and the number of workers exceeds the minimum.
* Exit now to reduce the size of the pool.
*/
break;
}
}
pthread_cleanup_pop(1);
return (arg);
}
/*
* Create a worker thread, with default signals blocked.
*/
static int
create_worker(tpool_t *tpool)
{
pthread_t thread;
sigset_t oset;
int error;
(void) pthread_sigmask(SIG_SETMASK, NULL, &oset);
error = pthread_create(&thread, &tpool->tp_attr, tpool_worker, tpool);
(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);
return (error);
}
/*
* pthread_attr_clone: make a copy of a pthread_attr_t. When old_attr
* is NULL initialize the cloned attr using default values.
*/
static int
pthread_attr_clone(pthread_attr_t *attr, const pthread_attr_t *old_attr)
{
int error;
error = pthread_attr_init(attr);
if (error || (old_attr == NULL))
return (error);
cpu_set_t cpuset;
size_t cpusetsize = sizeof (cpuset);
error = pthread_attr_getaffinity_np(old_attr, cpusetsize, &cpuset);
if (error == 0)
error = pthread_attr_setaffinity_np(attr, cpusetsize, &cpuset);
if (error)
goto error;
int detachstate;
error = pthread_attr_getdetachstate(old_attr, &detachstate);
if (error == 0)
error = pthread_attr_setdetachstate(attr, detachstate);
if (error)
goto error;
size_t guardsize;
error = pthread_attr_getguardsize(old_attr, &guardsize);
if (error == 0)
error = pthread_attr_setguardsize(attr, guardsize);
if (error)
goto error;
int inheritsched;
error = pthread_attr_getinheritsched(old_attr, &inheritsched);
if (error == 0)
error = pthread_attr_setinheritsched(attr, inheritsched);
if (error)
goto error;
struct sched_param param;
error = pthread_attr_getschedparam(old_attr, &param);
if (error == 0)
error = pthread_attr_setschedparam(attr, &param);
if (error)
goto error;
int policy;
error = pthread_attr_getschedpolicy(old_attr, &policy);
if (error == 0)
error = pthread_attr_setschedpolicy(attr, policy);
if (error)
goto error;
int scope;
error = pthread_attr_getscope(old_attr, &scope);
if (error == 0)
error = pthread_attr_setscope(attr, scope);
if (error)
goto error;
void *stackaddr;
size_t stacksize;
error = pthread_attr_getstack(old_attr, &stackaddr, &stacksize);
if (error == 0)
error = pthread_attr_setstack(attr, stackaddr, stacksize);
if (error)
goto error;
return (0);
error:
pthread_attr_destroy(attr);
return (error);
}
tpool_t *
tpool_create(uint_t min_threads, uint_t max_threads, uint_t linger,
pthread_attr_t *attr)
{
tpool_t *tpool;
void *stackaddr;
size_t stacksize;
size_t minstack;
int error;
if (min_threads > max_threads || max_threads < 1) {
errno = EINVAL;
return (NULL);
}
if (attr != NULL) {
if (pthread_attr_getstack(attr, &stackaddr, &stacksize) != 0) {
errno = EINVAL;
return (NULL);
}
/*
* Allow only one thread in the pool with a specified stack.
* Require threads to have at least the minimum stack size.
*/
minstack = PTHREAD_STACK_MIN;
if (stackaddr != NULL) {
if (stacksize < minstack || max_threads != 1) {
errno = EINVAL;
return (NULL);
}
} else if (stacksize != 0 && stacksize < minstack) {
errno = EINVAL;
return (NULL);
}
}
tpool = calloc(1, sizeof (*tpool));
if (tpool == NULL) {
errno = ENOMEM;
return (NULL);
}
(void) pthread_mutex_init(&tpool->tp_mutex, NULL);
(void) pthread_cond_init(&tpool->tp_busycv, NULL);
(void) pthread_cond_init(&tpool->tp_workcv, NULL);
(void) pthread_cond_init(&tpool->tp_waitcv, NULL);
tpool->tp_minimum = min_threads;
tpool->tp_maximum = max_threads;
tpool->tp_linger = linger;
/*
* We cannot just copy the attribute pointer.
* We need to initialize a new pthread_attr_t structure
* with the values from the user-supplied pthread_attr_t.
* If the attribute pointer is NULL, we need to initialize
* the new pthread_attr_t structure with default values.
*/
error = pthread_attr_clone(&tpool->tp_attr, attr);
if (error) {
free(tpool);
errno = error;
return (NULL);
}
/* make all pool threads be detached daemon threads */
(void) pthread_attr_setdetachstate(&tpool->tp_attr,
PTHREAD_CREATE_DETACHED);
/* insert into the global list of all thread pools */
pthread_mutex_lock(&thread_pool_lock);
if (thread_pools == NULL) {
tpool->tp_forw = tpool;
tpool->tp_back = tpool;
thread_pools = tpool;
} else {
thread_pools->tp_back->tp_forw = tpool;
tpool->tp_forw = thread_pools;
tpool->tp_back = thread_pools->tp_back;
thread_pools->tp_back = tpool;
}
pthread_mutex_unlock(&thread_pool_lock);
return (tpool);
}
/*
* Dispatch a work request to the thread pool.
* If there are idle workers, awaken one.
* Else, if the maximum number of workers has
* not been reached, spawn a new worker thread.
* Else just return with the job added to the queue.
*/
int
tpool_dispatch(tpool_t *tpool, void (*func)(void *), void *arg)
{
tpool_job_t *job;
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
if ((job = calloc(1, sizeof (*job))) == NULL)
return (-1);
job->tpj_next = NULL;
job->tpj_func = func;
job->tpj_arg = arg;
pthread_mutex_lock(&tpool->tp_mutex);
if (tpool->tp_head == NULL)
tpool->tp_head = job;
else
tpool->tp_tail->tpj_next = job;
tpool->tp_tail = job;
tpool->tp_njobs++;
if (!(tpool->tp_flags & TP_SUSPEND)) {
if (tpool->tp_idle > 0)
(void) pthread_cond_signal(&tpool->tp_workcv);
else if (tpool->tp_current < tpool->tp_maximum &&
create_worker(tpool) == 0)
tpool->tp_current++;
}
pthread_mutex_unlock(&tpool->tp_mutex);
return (0);
}
static void
tpool_cleanup(void *arg)
{
tpool_t *tpool = (tpool_t *)arg;
pthread_mutex_unlock(&tpool->tp_mutex);
}
/*
* Assumes: by the time tpool_destroy() is called no one will use this
* thread pool in any way and no one will try to dispatch entries to it.
* Calling tpool_destroy() from a job in the pool will cause deadlock.
*/
void
tpool_destroy(tpool_t *tpool)
{
tpool_active_t *activep;
ASSERT(!tpool_member(tpool));
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
pthread_cleanup_push(tpool_cleanup, tpool);
/* mark the pool as being destroyed; wakeup idle workers */
tpool->tp_flags |= TP_DESTROY;
tpool->tp_flags &= ~TP_SUSPEND;
(void) pthread_cond_broadcast(&tpool->tp_workcv);
/* cancel all active workers */
for (activep = tpool->tp_active; activep; activep = activep->tpa_next)
(void) pthread_cancel(activep->tpa_tid);
/* wait for all active workers to finish */
while (tpool->tp_active != NULL) {
tpool->tp_flags |= TP_WAIT;
(void) pthread_cond_wait(&tpool->tp_waitcv, &tpool->tp_mutex);
}
/* the last worker to terminate will wake us up */
while (tpool->tp_current != 0)
(void) pthread_cond_wait(&tpool->tp_busycv, &tpool->tp_mutex);
pthread_cleanup_pop(1); /* pthread_mutex_unlock(&tpool->tp_mutex); */
delete_pool(tpool);
}
/*
* Like tpool_destroy(), but don't cancel workers or wait for them to finish.
* The last worker to terminate will delete the pool.
*/
void
tpool_abandon(tpool_t *tpool)
{
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
if (tpool->tp_current == 0) {
/* no workers, just delete the pool */
pthread_mutex_unlock(&tpool->tp_mutex);
delete_pool(tpool);
} else {
/* wake up all workers, last one will delete the pool */
tpool->tp_flags |= TP_ABANDON;
tpool->tp_flags &= ~TP_SUSPEND;
(void) pthread_cond_broadcast(&tpool->tp_workcv);
pthread_mutex_unlock(&tpool->tp_mutex);
}
}
/*
* Wait for all jobs to complete.
* Calling tpool_wait() from a job in the pool will cause deadlock.
*/
void
tpool_wait(tpool_t *tpool)
{
ASSERT(!tpool_member(tpool));
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
pthread_cleanup_push(tpool_cleanup, tpool);
while (tpool->tp_head != NULL || tpool->tp_active != NULL) {
tpool->tp_flags |= TP_WAIT;
(void) pthread_cond_wait(&tpool->tp_waitcv, &tpool->tp_mutex);
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
}
pthread_cleanup_pop(1); /* pthread_mutex_unlock(&tpool->tp_mutex); */
}
void
tpool_suspend(tpool_t *tpool)
{
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
tpool->tp_flags |= TP_SUSPEND;
pthread_mutex_unlock(&tpool->tp_mutex);
}
int
tpool_suspended(tpool_t *tpool)
{
int suspended;
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
suspended = (tpool->tp_flags & TP_SUSPEND) != 0;
pthread_mutex_unlock(&tpool->tp_mutex);
return (suspended);
}
void
tpool_resume(tpool_t *tpool)
{
int excess;
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
if (!(tpool->tp_flags & TP_SUSPEND)) {
pthread_mutex_unlock(&tpool->tp_mutex);
return;
}
tpool->tp_flags &= ~TP_SUSPEND;
(void) pthread_cond_broadcast(&tpool->tp_workcv);
excess = tpool->tp_njobs - tpool->tp_idle;
while (excess-- > 0 && tpool->tp_current < tpool->tp_maximum) {
if (create_worker(tpool) != 0)
break; /* pthread_create() failed */
tpool->tp_current++;
}
pthread_mutex_unlock(&tpool->tp_mutex);
}
int
tpool_member(tpool_t *tpool)
{
pthread_t my_tid = pthread_self();
tpool_active_t *activep;
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
pthread_mutex_lock(&tpool->tp_mutex);
for (activep = tpool->tp_active; activep; activep = activep->tpa_next) {
if (activep->tpa_tid == my_tid) {
pthread_mutex_unlock(&tpool->tp_mutex);
return (1);
}
}
pthread_mutex_unlock(&tpool->tp_mutex);
return (0);
}
void
postfork1_child_tpool(void)
{
pthread_t my_tid = pthread_self();
tpool_t *tpool;
tpool_job_t *job;
/*
* All of the thread pool workers are gone, except possibly
* for the current thread, if it is a thread pool worker thread.
* Retain the thread pools, but make them all empty. Whatever
* jobs were queued or running belong to the parent process.
*/
top:
if ((tpool = thread_pools) == NULL)
return;
do {
tpool_active_t *activep;
(void) pthread_mutex_init(&tpool->tp_mutex, NULL);
(void) pthread_cond_init(&tpool->tp_busycv, NULL);
(void) pthread_cond_init(&tpool->tp_workcv, NULL);
(void) pthread_cond_init(&tpool->tp_waitcv, NULL);
for (job = tpool->tp_head; job; job = tpool->tp_head) {
tpool->tp_head = job->tpj_next;
free(job);
}
tpool->tp_tail = NULL;
tpool->tp_njobs = 0;
for (activep = tpool->tp_active; activep;
activep = activep->tpa_next) {
if (activep->tpa_tid == my_tid) {
activep->tpa_next = NULL;
break;
}
}
tpool->tp_idle = 0;
tpool->tp_current = 0;
if ((tpool->tp_active = activep) != NULL)
tpool->tp_current = 1;
tpool->tp_flags &= ~TP_WAIT;
if (tpool->tp_flags & (TP_DESTROY | TP_ABANDON)) {
tpool->tp_flags &= ~TP_DESTROY;
tpool->tp_flags |= TP_ABANDON;
if (tpool->tp_current == 0) {
delete_pool(tpool);
goto top; /* start over */
}
}
} while ((tpool = tpool->tp_forw) != thread_pools);
}

lib/libtpool/thread_pool_impl.h Normal file

@ -0,0 +1,93 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _THREAD_POOL_IMPL_H
#define _THREAD_POOL_IMPL_H
#include <thread_pool.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Thread pool implementation definitions.
* See <thread_pool.h> for interface declarations.
*/
/*
* FIFO queued job
*/
typedef struct tpool_job tpool_job_t;
struct tpool_job {
tpool_job_t *tpj_next; /* list of jobs */
void (*tpj_func)(void *); /* function to call */
void *tpj_arg; /* its argument */
};
/*
* List of active threads, linked through their stacks.
*/
typedef struct tpool_active tpool_active_t;
struct tpool_active {
tpool_active_t *tpa_next; /* list of active threads */
pthread_t tpa_tid; /* active thread id */
};
/*
* The thread pool.
*/
struct tpool {
tpool_t *tp_forw; /* circular list of all thread pools */
tpool_t *tp_back;
pthread_mutex_t tp_mutex; /* protects the pool data */
pthread_cond_t tp_busycv; /* synchronization in tpool_dispatch */
pthread_cond_t tp_workcv; /* synchronization with workers */
pthread_cond_t tp_waitcv; /* synchronization in tpool_wait() */
tpool_active_t *tp_active; /* threads performing work */
tpool_job_t *tp_head; /* FIFO job queue */
tpool_job_t *tp_tail;
pthread_attr_t tp_attr; /* attributes of the workers */
int tp_flags; /* see below */
uint_t tp_linger; /* seconds before idle workers exit */
int tp_njobs; /* number of jobs in job queue */
int tp_minimum; /* minimum number of worker threads */
int tp_maximum; /* maximum number of worker threads */
int tp_current; /* current number of worker threads */
int tp_idle; /* number of idle workers */
};
/* tp_flags */
#define TP_WAIT 0x01 /* waiting in tpool_wait() */
#define TP_SUSPEND 0x02 /* pool is being suspended */
#define TP_DESTROY 0x04 /* pool is being destroyed */
#define TP_ABANDON 0x08 /* pool is abandoned (auto-destroy) */
#ifdef __cplusplus
}
#endif
#endif /* _THREAD_POOL_IMPL_H */


@ -28,8 +28,7 @@ nodist_libuutil_la_SOURCES = \
 libuutil_la_LIBADD = \
 	$(top_builddir)/lib/libavl/libavl.la \
-	$(top_builddir)/lib/libspl/libspl.la \
-	$(top_builddir)/lib/libefi/libefi.la
+	$(top_builddir)/lib/libspl/libspl.la
 libuutil_la_LDFLAGS = -pthread -version-info 1:1:0


@ -1,10 +1,16 @@
 include $(top_srcdir)/config/Rules.am
+VPATH = \
+	$(top_srcdir)/module/icp \
+	$(top_srcdir)/module/zcommon \
+	$(top_srcdir)/lib/libzfs
 libzfs_pcdir = $(datarootdir)/pkgconfig
 libzfs_pc_DATA = libzfs.pc libzfs_core.pc
 DEFAULT_INCLUDES += \
 	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/module/icp/include \
 	-I$(top_srcdir)/lib/libspl/include
 lib_LTLIBRARIES = libzfs.la
@ -23,17 +29,35 @@ USER_C = \
 	libzfs_status.c \
 	libzfs_util.c
-KERNEL_C =
+KERNEL_C = \
+	algs/sha2/sha2.c \
+	zfeature_common.c \
+	zfs_comutil.c \
+	zfs_deleg.c \
+	zfs_fletcher.c \
+	zfs_fletcher_aarch64_neon.c \
+	zfs_fletcher_avx512.c \
+	zfs_fletcher_intel.c \
+	zfs_fletcher_sse.c \
+	zfs_fletcher_superscalar.c \
+	zfs_fletcher_superscalar4.c \
+	zfs_namecheck.c \
+	zfs_prop.c \
+	zfs_uio.c \
+	zpool_prop.c \
+	zprop_common.c
 nodist_libzfs_la_SOURCES = \
 	$(USER_C) \
 	$(KERNEL_C)
 libzfs_la_LIBADD = \
-	$(top_builddir)/lib/libzfs_core/libzfs_core.la \
-	$(top_builddir)/lib/libshare/libshare.la \
+	$(top_builddir)/lib/libefi/libefi.la \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libzpool/libzpool.la
+	$(top_builddir)/lib/libshare/libshare.la \
+	$(top_builddir)/lib/libtpool/libtpool.la \
+	$(top_builddir)/lib/libuutil/libuutil.la \
+	$(top_builddir)/lib/libzfs_core/libzfs_core.la
 libzfs_la_LIBADD += -lm $(LIBBLKID) $(LIBUDEV)
 libzfs_la_LDFLAGS = -version-info 2:0:0


@ -60,6 +60,7 @@
 #include <sys/vtoc.h>
 #include <sys/dktp/fdisk.h>
 #include <sys/efi_partition.h>
+#include <thread_pool.h>
 #include <sys/vdev_impl.h>
 #include <blkid/blkid.h>
 #include "libzfs.h"
@ -1388,7 +1389,7 @@ typedef struct rdsk_node {
 	nvlist_t *rn_config;	/* Label config */
 	avl_tree_t *rn_avl;
 	avl_node_t rn_node;
-	kmutex_t *rn_lock;
+	pthread_mutex_t *rn_lock;
 	boolean_t rn_labelpaths;
 } rdsk_node_t;
@ -1603,14 +1604,14 @@ zpool_open_func(void *arg)
 		slice->rn_hdl = hdl;
 		slice->rn_order = IMPORT_ORDER_PREFERRED_1;
 		slice->rn_labelpaths = B_FALSE;
-		mutex_enter(rn->rn_lock);
+		pthread_mutex_lock(rn->rn_lock);
 		if (avl_find(rn->rn_avl, slice, &where)) {
-			mutex_exit(rn->rn_lock);
+			pthread_mutex_unlock(rn->rn_lock);
 			free(slice->rn_name);
 			free(slice);
 		} else {
 			avl_insert(rn->rn_avl, slice, where);
-			mutex_exit(rn->rn_lock);
+			pthread_mutex_unlock(rn->rn_lock);
 			zpool_open_func(slice);
 		}
 	}
@ -1629,14 +1630,14 @@ zpool_open_func(void *arg)
 		slice->rn_hdl = hdl;
 		slice->rn_order = IMPORT_ORDER_PREFERRED_2;
 		slice->rn_labelpaths = B_FALSE;
-		mutex_enter(rn->rn_lock);
+		pthread_mutex_lock(rn->rn_lock);
 		if (avl_find(rn->rn_avl, slice, &where)) {
-			mutex_exit(rn->rn_lock);
+			pthread_mutex_unlock(rn->rn_lock);
 			free(slice->rn_name);
 			free(slice);
 		} else {
 			avl_insert(rn->rn_avl, slice, where);
-			mutex_exit(rn->rn_lock);
+			pthread_mutex_unlock(rn->rn_lock);
 			zpool_open_func(slice);
 		}
 	}
@ -1679,7 +1680,7 @@ zpool_clear_label(int fd)
  * Scan a list of directories for zfs devices.
  */
 static int
-zpool_find_import_scan(libzfs_handle_t *hdl, kmutex_t *lock,
+zpool_find_import_scan(libzfs_handle_t *hdl, pthread_mutex_t *lock,
     avl_tree_t **slice_cache, char **dir, int dirs)
 {
 	avl_tree_t *cache;
@ -1735,9 +1736,9 @@ zpool_find_import_scan(libzfs_handle_t *hdl, pthread_mutex_t *lock,
 			slice->rn_hdl = hdl;
 			slice->rn_order = i + IMPORT_ORDER_SCAN_OFFSET;
 			slice->rn_labelpaths = B_FALSE;
-			mutex_enter(lock);
+			pthread_mutex_lock(lock);
 			avl_add(cache, slice);
-			mutex_exit(lock);
+			pthread_mutex_unlock(lock);
 		}
 		(void) closedir(dirp);
@ -1761,7 +1762,7 @@ error:
  * Use libblkid to quickly enumerate all known zfs devices.
  */
 static int
-zpool_find_import_blkid(libzfs_handle_t *hdl, kmutex_t *lock,
+zpool_find_import_blkid(libzfs_handle_t *hdl, pthread_mutex_t *lock,
     avl_tree_t **slice_cache)
 {
 	rdsk_node_t *slice;
@ -1815,14 +1816,14 @@ zpool_find_import_blkid(libzfs_handle_t *hdl, pthread_mutex_t *lock,
 		else
 			slice->rn_order = IMPORT_ORDER_DEFAULT;
-		mutex_enter(lock);
+		pthread_mutex_lock(lock);
 		if (avl_find(*slice_cache, slice, &where)) {
 			free(slice->rn_name);
 			free(slice);
 		} else {
 			avl_insert(*slice_cache, slice, where);
 		}
-		mutex_exit(lock);
+		pthread_mutex_unlock(lock);
 	}
 	blkid_dev_iterate_end(iter);
@ -1860,14 +1861,14 @@ zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
 	vdev_entry_t *ve, *venext;
 	config_entry_t *ce, *cenext;
 	name_entry_t *ne, *nenext;
-	kmutex_t lock;
+	pthread_mutex_t lock;
 	avl_tree_t *cache;
 	rdsk_node_t *slice;
 	void *cookie;
-	taskq_t *t;
+	tpool_t *t;
 	verify(iarg->poolname == NULL || iarg->guid == 0);
-	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
+	pthread_mutex_init(&lock, NULL);
 	/*
 	 * Locate pool member vdevs using libblkid or by directory scanning.
@ -1896,15 +1897,13 @@ zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
 	 * validating labels, a large number of threads can be used due to
 	 * minimal contention.
 	 */
-	t = taskq_create("z_import", 2 * boot_ncpus, defclsyspri,
-	    2 * boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
+	t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
 	for (slice = avl_first(cache); slice;
 	    (slice = avl_walk(cache, slice, AVL_AFTER)))
-		(void) taskq_dispatch(t, zpool_open_func, slice, TQ_SLEEP);
-	taskq_wait(t);
-	taskq_destroy(t);
+		(void) tpool_dispatch(t, zpool_open_func, slice);
+	tpool_wait(t);
+	tpool_destroy(t);
 	/*
 	 * Process the cache filtering out any entries which are not
@ -1974,7 +1973,7 @@ zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
 	}
 	avl_destroy(cache);
 	free(cache);
-	mutex_destroy(&lock);
+	pthread_mutex_destroy(&lock);
 	ret = get_configs(hdl, &pools, iarg->can_be_active);


@ -40,6 +40,7 @@
 #include <zone.h>
 #include <sys/stat.h>
 #include <sys/efi_partition.h>
+#include <sys/systeminfo.h>
 #include <sys/vtoc.h>
 #include <sys/zfs_ioctl.h>
 #include <dlfcn.h>


@ -44,6 +44,7 @@
 #include <libzfs.h>
 #include <string.h>
 #include <unistd.h>
+#include <sys/systeminfo.h>
 #include "libzfs_impl.h"
 #include "zfeature_common.h"


@ -19,13 +19,14 @@ USER_C = \
 	util.c
 KERNEL_C = \
+	zfeature_common.c \
 	zfs_comutil.c \
 	zfs_deleg.c \
 	zfs_fletcher.c \
+	zfs_fletcher_aarch64_neon.c \
+	zfs_fletcher_avx512.c \
 	zfs_fletcher_intel.c \
 	zfs_fletcher_sse.c \
-	zfs_fletcher_avx512.c \
-	zfs_fletcher_aarch64_neon.c \
 	zfs_fletcher_superscalar.c \
 	zfs_fletcher_superscalar4.c \
 	zfs_namecheck.c \
@ -115,13 +116,13 @@ KERNEL_C = \
 	zap_leaf.c \
 	zap_micro.c \
 	zfeature.c \
-	zfeature_common.c \
 	zfs_byteswap.c \
 	zfs_debug.c \
 	zfs_fm.c \
 	zfs_fuid.c \
 	zfs_sa.c \
 	zfs_znode.c \
+	zfs_ratelimit.c \
 	zfs_rlock.c \
 	zil.c \
 	zio.c \
@ -136,12 +137,13 @@ nodist_libzpool_la_SOURCES = \
 	$(KERNEL_C)
 libzpool_la_LIBADD = \
-	$(top_builddir)/lib/libunicode/libunicode.la \
-	$(top_builddir)/lib/libuutil/libuutil.la \
+	$(top_builddir)/lib/libavl/libavl.la \
+	$(top_builddir)/lib/libicp/libicp.la \
 	$(top_builddir)/lib/libnvpair/libnvpair.la \
-	$(top_builddir)/lib/libicp/libicp.la
+	$(top_builddir)/lib/libspl/libspl.la \
+	$(top_builddir)/lib/libunicode/libunicode.la
 libzpool_la_LIBADD += $(ZLIB) -ldl
-libzpool_la_LDFLAGS = -version-info 2:0:0
+libzpool_la_LDFLAGS = -pthread -version-info 2:0:0
 EXTRA_DIST = $(USER_C)


@ -71,7 +71,7 @@ pthread_mutex_t kthread_lock = PTHREAD_MUTEX_INITIALIZER;
 pthread_key_t kthread_key;
 int kthread_nr = 0;
-void
+static void
 thread_init(void)
 {
 	kthread_t *kt;
@ -90,7 +90,7 @@ thread_init(void)
 	kthread_nr = 1;
 }
-void
+static void
 thread_fini(void)
 {
 	kthread_t *kt = curthread;
@ -1051,149 +1051,30 @@ delay(clock_t ticks)
 /*
  * Find highest one bit set.
  * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
- * High order bit is 31 (or 63 in _LP64 kernel).
+ * The __builtin_clzll() function is supported by both GCC and Clang.
  */
 int
 highbit64(uint64_t i)
 {
-	register int h = 1;
 	if (i == 0)
 		return (0);
-	if (i & 0xffffffff00000000ULL) {
-		h += 32; i >>= 32;
-	}
-	if (i & 0xffff0000) {
-		h += 16; i >>= 16;
-	}
-	if (i & 0xff00) {
-		h += 8; i >>= 8;
-	}
-	if (i & 0xf0) {
-		h += 4; i >>= 4;
-	}
-	if (i & 0xc) {
-		h += 2; i >>= 2;
-	}
-	if (i & 0x2) {
-		h += 1;
-	}
-	return (h);
+	return (NBBY * sizeof (uint64_t) - __builtin_clzll(i));
 }
 /*
  * Find lowest one bit set.
  * Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
- * This is basically a reimplementation of ffsll(), which is GNU specific.
+ * The __builtin_ffsll() function is supported by both GCC and Clang.
  */
 int
 lowbit64(uint64_t i)
 {
-	register int h = 64;
 	if (i == 0)
 		return (0);
-	if (i & 0x00000000ffffffffULL)
-		h -= 32;
-	else
-		i >>= 32;
-	if (i & 0x0000ffff)
-		h -= 16;
-	else
-		i >>= 16;
-	if (i & 0x00ff)
-		h -= 8;
-	else
-		i >>= 8;
-	if (i & 0x0f)
-		h -= 4;
-	else
-		i >>= 4;
-	if (i & 0x3)
-		h -= 2;
-	else
-		i >>= 2;
-	if (i & 0x1)
-		h -= 1;
-	return (h);
+	return (__builtin_ffsll(i));
 }
-/*
- * Find highest one bit set.
- * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
- * High order bit is 31 (or 63 in _LP64 kernel).
- */
-int
-highbit(ulong_t i)
-{
-	register int h = 1;
-	if (i == 0)
-		return (0);
-#ifdef _LP64
-	if (i & 0xffffffff00000000ul) {
-		h += 32; i >>= 32;
-	}
-#endif
-	if (i & 0xffff0000) {
-		h += 16; i >>= 16;
-	}
-	if (i & 0xff00) {
-		h += 8; i >>= 8;
-	}
-	if (i & 0xf0) {
-		h += 4; i >>= 4;
-	}
-	if (i & 0xc) {
-		h += 2; i >>= 2;
-	}
-	if (i & 0x2) {
-		h += 1;
-	}
-	return (h);
-}
-/*
- * Find lowest one bit set.
- * Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
- * Low order bit is 0.
- */
-int
-lowbit(ulong_t i)
-{
-	register int h = 1;
-	if (i == 0)
-		return (0);
-#ifdef _LP64
-	if (!(i & 0xffffffff)) {
-		h += 32; i >>= 32;
-	}
-#endif
-	if (!(i & 0xffff)) {
-		h += 16; i >>= 16;
-	}
-	if (!(i & 0xff)) {
-		h += 8; i >>= 8;
-	}
-	if (!(i & 0xf)) {
-		h += 4; i >>= 4;
-	}
-	if (!(i & 0x3)) {
-		h += 2; i >>= 2;
-	}
-	if (!(i & 0x1)) {
-		h += 1;
-	}
-	return (h);
-}
 static int random_fd = -1, urandom_fd = -1;
@ -1288,64 +1169,6 @@ umem_out_of_memory(void)
 	return (0);
 }
-#define HOSTID_MASK 0xffffffff
-static unsigned long
-get_spl_hostid(void)
-{
-	FILE *f;
-	unsigned long hostid;
-	char *env;
-	/*
-	 * Allow the hostid to be subverted for testing.
-	 */
-	env = getenv("ZFS_HOSTID");
-	if (env) {
-		hostid = strtoull(env, NULL, 0);
-		return (hostid & HOSTID_MASK);
-	}
-	f = fopen("/sys/module/spl/parameters/spl_hostid", "r");
-	if (!f)
-		return (0);
-	if (fscanf(f, "%lu", &hostid) != 1)
-		hostid = 0;
-	fclose(f);
-	return (hostid & HOSTID_MASK);
-}
-unsigned long
-get_system_hostid(void)
-{
-	unsigned long system_hostid = get_spl_hostid();
-	/*
-	 * We do not use the library call gethostid() because
-	 * it generates a hostid value that the kernel is
-	 * unaware of, if the spl_hostid module parameter has not
-	 * been set and there is no system hostid file (e.g.
-	 * /etc/hostid). The kernel and userspace must agree.
-	 * See comments above hostid_read() in the SPL.
-	 */
-	if (system_hostid == 0) {
-		int fd, rc;
-		unsigned long hostid;
-		int hostid_size = 4;	/* 4 bytes regardless of arch */
-		fd = open("/etc/hostid", O_RDONLY);
-		if (fd >= 0) {
-			rc = read(fd, &hostid, hostid_size);
-			if (rc > 0)
-				system_hostid = (hostid & HOSTID_MASK);
-			close(fd);
-		}
-	}
-	return (system_hostid);
-}
 void
 kernel_init(int mode)
 {


@ -52,7 +52,7 @@
 static void Encode(uint8_t *, uint32_t *, size_t);
 static void Encode64(uint8_t *, uint64_t *, size_t);
-#if defined(__amd64)
+#if defined(__amd64) && defined(_KERNEL)
 #define SHA512Transform(ctx, in) SHA512TransformBlocks((ctx), (in), 1)
 #define SHA256Transform(ctx, in) SHA256TransformBlocks((ctx), (in), 1)
@ -62,7 +62,7 @@ void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num);
 #else
 static void SHA256Transform(SHA2_CTX *, const uint8_t *);
 static void SHA512Transform(SHA2_CTX *, const uint8_t *);
-#endif /* __amd64 */
+#endif /* __amd64 && _KERNEL */
 static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
@ -142,7 +142,7 @@ static uint8_t PADDING[128] = { 0x80, /* all zeros */ };
 #endif /* _BIG_ENDIAN */
-#if !defined(__amd64)
+#if !defined(__amd64) || !defined(_KERNEL)
 /* SHA256 Transform */
 static void
@ -600,7 +600,7 @@ SHA512Transform(SHA2_CTX *ctx, const uint8_t *blk)
 	ctx->state.s64[7] += h;
 }
-#endif /* !__amd64 */
+#endif /* !__amd64 || !_KERNEL */
 /*
@ -783,10 +783,6 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 	uint32_t i, buf_index, buf_len, buf_limit;
 	const uint8_t *input = inptr;
 	uint32_t algotype = ctx->algotype;
-#if defined(__amd64)
-	uint32_t block_count;
-#endif /* !__amd64 */
 	/* check for noop */
 	if (input_len == 0)
@ -842,7 +838,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 		i = buf_len;
 	}
-#if !defined(__amd64)
+#if !defined(__amd64) || !defined(_KERNEL)
 	if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
 		for (; i + buf_limit - 1 < input_len; i += buf_limit) {
 			SHA256Transform(ctx, &input[i]);
@ -854,6 +850,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 	}
 #else
+	uint32_t block_count;
 	if (algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) {
 		block_count = (input_len - i) >> 6;
 		if (block_count > 0) {
@ -869,7 +866,7 @@ SHA2Update(SHA2_CTX *ctx, const void *inptr, size_t input_len)
 			i += block_count << 7;
 		}
 	}
-#endif /* !__amd64 */
+#endif /* !__amd64 || !_KERNEL */
 	/*
 	 * general optimization:
@ -951,8 +948,6 @@ SHA2Final(void *digest, SHA2_CTX *ctx)
 	bzero(ctx, sizeof (*ctx));
 }
 #ifdef _KERNEL
 EXPORT_SYMBOL(SHA2Init);
 EXPORT_SYMBOL(SHA2Update);


@ -312,7 +312,7 @@ mod_hash_create_ptrhash(char *name, size_t nchains,
 	 * The high bits, which are also unused, will get taken out when
 	 * mod_hash takes hashkey % nchains.
 	 */
-	rshift = highbit(key_elem_size);
+	rshift = highbit64(key_elem_size);
 	return mod_hash_create_extended(name, nchains, mod_hash_null_keydtor,
 	    val_dtor, mod_hash_byptr, (void *)rshift, mod_hash_ptrkey_cmp,


@ -7,16 +7,17 @@ EXTRA_CFLAGS = $(ZFS_MODULE_CFLAGS) @KERNELCPPFLAGS@
 obj-$(CONFIG_ZFS) := $(MODULE).o
-$(MODULE)-objs += zfs_deleg.o
-$(MODULE)-objs += zfs_prop.o
-$(MODULE)-objs += zprop_common.o
-$(MODULE)-objs += zfs_namecheck.o
+$(MODULE)-objs += zfeature_common.o
 $(MODULE)-objs += zfs_comutil.o
+$(MODULE)-objs += zfs_deleg.o
 $(MODULE)-objs += zfs_fletcher.o
-$(MODULE)-objs += zfs_uio.o
-$(MODULE)-objs += zpool_prop.o
 $(MODULE)-objs += zfs_fletcher_superscalar.o
 $(MODULE)-objs += zfs_fletcher_superscalar4.o
+$(MODULE)-objs += zfs_namecheck.o
+$(MODULE)-objs += zfs_prop.o
+$(MODULE)-objs += zfs_uio.o
+$(MODULE)-objs += zpool_prop.o
+$(MODULE)-objs += zprop_common.o
 $(MODULE)-$(CONFIG_X86) += zfs_fletcher_intel.o
 $(MODULE)-$(CONFIG_X86) += zfs_fletcher_sse.o


@@ -319,3 +319,12 @@ zpool_feature_init(void)
userobj_accounting_deps);
}
}
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(zfeature_lookup_name);
+EXPORT_SYMBOL(zfeature_is_supported);
+EXPORT_SYMBOL(zfeature_is_valid_guid);
+EXPORT_SYMBOL(zfeature_depends_on);
+EXPORT_SYMBOL(zpool_feature_init);
+EXPORT_SYMBOL(spa_feature_table);
+#endif


@@ -207,85 +207,10 @@ const char *zfs_history_event_names[ZFS_NUM_LEGACY_HISTORY_EVENTS] = {
"pool split",
};
-/*
- * Initialize rate limit struct
- *
- * rl: zfs_ratelimit_t struct
- * burst: Number to allow in an interval before rate limiting
- * interval: Interval time in seconds
- */
-void
-zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int burst,
-unsigned int interval)
-{
-rl->count = 0;
-rl->start = 0;
-rl->interval = interval;
-rl->burst = burst;
-mutex_init(&rl->lock, NULL, MUTEX_DEFAULT, NULL);
-}
-/*
- * Finalize rate limit struct
- *
- * rl: zfs_ratelimit_t struct
- */
-void
-zfs_ratelimit_fini(zfs_ratelimit_t *rl)
-{
-mutex_destroy(&rl->lock);
-}
-/*
- * Re-implementation of the kernel's __ratelimit() function
- *
- * We had to write our own rate limiter because the kernel's __ratelimit()
- * function annoyingly prints out how many times it rate limited to the kernel
- * logs (and there's no way to turn it off):
- *
- * __ratelimit: 59 callbacks suppressed
- *
- * If the kernel ever allows us to disable these prints, we should go back to
- * using __ratelimit() instead.
- *
- * Return values are the same as __ratelimit():
- *
- * 0: If we're rate limiting
- * 1: If we're not rate limiting.
- */
-int
-zfs_ratelimit(zfs_ratelimit_t *rl)
-{
-hrtime_t now;
-hrtime_t elapsed;
-int rc = 1;
-mutex_enter(&rl->lock);
-now = gethrtime();
-elapsed = now - rl->start;
-rl->count++;
-if (NSEC2SEC(elapsed) >= rl->interval) {
-rl->start = now;
-rl->count = 0;
-} else {
-if (rl->count >= rl->burst) {
-rc = 0; /* We're ratelimiting */
-}
-}
-mutex_exit(&rl->lock);
-return (rc);
-}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_allocatable_devs);
EXPORT_SYMBOL(zpool_get_rewind_policy);
EXPORT_SYMBOL(zfs_zpl_version_map);
EXPORT_SYMBOL(zfs_spa_version_map);
EXPORT_SYMBOL(zfs_history_event_names);
-EXPORT_SYMBOL(zfs_ratelimit_init);
-EXPORT_SYMBOL(zfs_ratelimit_fini);
-EXPORT_SYMBOL(zfs_ratelimit);
#endif


@@ -233,7 +233,7 @@ zfs_deleg_whokey(char *attr, zfs_deleg_who_type_t type,
ZFS_DELEG_FIELD_SEP_CHR);
break;
default:
-cmn_err(CE_PANIC, "bad zfs_deleg_who_type_t %d", type);
+ASSERT(!"bad zfs_deleg_who_type_t");
}
}


@@ -212,7 +212,9 @@ static struct fletcher_4_impl_selector {
{ "scalar", IMPL_SCALAR }
};
+#if defined(_KERNEL)
static kstat_t *fletcher_4_kstat;
+#endif
static struct fletcher_4_kstat {
uint64_t native;
@@ -589,7 +591,7 @@ fletcher_4_incremental_byteswap(void *buf, size_t size, void *data)
return (0);
}
+#if defined(_KERNEL)
/* Fletcher 4 kstats */
static int
@@ -642,6 +644,7 @@ fletcher_4_kstat_addr(kstat_t *ksp, loff_t n)
return (ksp->ks_private);
}
+#endif
#define FLETCHER_4_FASTEST_FN_COPY(type, src) \
{ \
@@ -753,6 +756,7 @@ fletcher_4_init(void)
vmem_free(databuf, data_size);
+#if defined(_KERNEL)
/* install kstats for all implementations */
fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
@@ -765,6 +769,7 @@ fletcher_4_init(void)
fletcher_4_kstat_addr);
kstat_install(fletcher_4_kstat);
}
+#endif
/* Finish initialization */
fletcher_4_initialized = B_TRUE;
@@ -773,10 +778,12 @@ fletcher_4_init(void)
void
fletcher_4_fini(void)
{
+#if defined(_KERNEL)
if (fletcher_4_kstat != NULL) {
kstat_delete(fletcher_4_kstat);
fletcher_4_kstat = NULL;
}
+#endif
}
/* ABD adapters */


@@ -82,7 +82,6 @@ $(MODULE)-objs += zap.o
$(MODULE)-objs += zap_leaf.o
$(MODULE)-objs += zap_micro.o
$(MODULE)-objs += zfeature.o
-$(MODULE)-objs += zfeature_common.o
$(MODULE)-objs += zfs_acl.o
$(MODULE)-objs += zfs_byteswap.o
$(MODULE)-objs += zfs_ctldir.o
@@ -93,6 +92,7 @@ $(MODULE)-objs += zfs_fuid.o
$(MODULE)-objs += zfs_ioctl.o
$(MODULE)-objs += zfs_log.o
$(MODULE)-objs += zfs_onexit.o
+$(MODULE)-objs += zfs_ratelimit.o
$(MODULE)-objs += zfs_replay.o
$(MODULE)-objs += zfs_rlock.o
$(MODULE)-objs += zfs_sa.o


@@ -0,0 +1,99 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
*/
#include <sys/zfs_ratelimit.h>
/*
* Initialize rate limit struct
*
* rl: zfs_ratelimit_t struct
* burst: Number to allow in an interval before rate limiting
* interval: Interval time in seconds
*/
void
zfs_ratelimit_init(zfs_ratelimit_t *rl, unsigned int burst,
unsigned int interval)
{
rl->count = 0;
rl->start = 0;
rl->interval = interval;
rl->burst = burst;
mutex_init(&rl->lock, NULL, MUTEX_DEFAULT, NULL);
}
/*
* Finalize rate limit struct
*
* rl: zfs_ratelimit_t struct
*/
void
zfs_ratelimit_fini(zfs_ratelimit_t *rl)
{
mutex_destroy(&rl->lock);
}
/*
* Re-implementation of the kernel's __ratelimit() function
*
* We had to write our own rate limiter because the kernel's __ratelimit()
* function annoyingly prints out how many times it rate limited to the kernel
* logs (and there's no way to turn it off):
*
* __ratelimit: 59 callbacks suppressed
*
* If the kernel ever allows us to disable these prints, we should go back to
* using __ratelimit() instead.
*
* Return values are the same as __ratelimit():
*
* 0: If we're rate limiting
* 1: If we're not rate limiting.
*/
int
zfs_ratelimit(zfs_ratelimit_t *rl)
{
hrtime_t now;
hrtime_t elapsed;
int error = 1;
mutex_enter(&rl->lock);
now = gethrtime();
elapsed = now - rl->start;
rl->count++;
if (NSEC2SEC(elapsed) >= rl->interval) {
rl->start = now;
rl->count = 0;
} else {
if (rl->count >= rl->burst) {
error = 0; /* We're ratelimiting */
}
}
mutex_exit(&rl->lock);
return (error);
}
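The block comment above fixes the contract: zfs_ratelimit() returns 0 when the caller should throttle and 1 when it may proceed, and the counting window resets once interval seconds have elapsed. A minimal usage sketch under assumed names (event_rl, event_post(), and the 20-per-60-seconds policy are hypothetical, not part of this commit):

#include <sys/zfs_ratelimit.h>

/* Hypothetical example: allow a burst of 20 events per 60-second interval. */
static zfs_ratelimit_t event_rl;

static void
event_setup(void)
{
        zfs_ratelimit_init(&event_rl, 20, 60);  /* burst = 20, interval = 60 s */
}

static void
event_post(const char *msg)
{
        if (!zfs_ratelimit(&event_rl))
                return;         /* 0: rate limited, silently drop this event */

        /* 1: under the burst limit; emit or handle the event here. */
        (void) msg;
}

static void
event_teardown(void)
{
        zfs_ratelimit_fini(&event_rl);
}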


@@ -19,14 +19,6 @@ s:usr/src/test/zfs-tests/runfiles:tests/runfiles:g
s:usr/src/test/zfs-tests/tests/functional:tests/zfs-tests/tests/functional:g
s:usr/src/test/zfs-tests/tests/perf:tests/zfs-tests/tests/perf:g
s:usr/src/test/test-runner/cmd/run.py:tests/test-runner/cmd/test-runner.py:g
-#
-# The usr/src/common/zfs/ files go in a couple different dirs.
-# usr/src/common/zfs/zfeature_common.c goes in module/zfs
-#
-s:usr/src/common/zfs/zfeature_common.c:module/zfs/zfeature_common.c:g
-# ...but most of the rest of the C files go in module/zcommon
s/usr\/src\/common\/zfs\/\(.*\)\.c/module\/zcommon\/\1.c/g
# crypto framework


@@ -13,11 +13,7 @@ DEFAULT_INCLUDES += \
-I$(top_srcdir)/lib/libspl/include
many_fds_LDADD = \
-$(top_builddir)/lib/libnvpair/libnvpair.la \
+$(top_builddir)/lib/libzfs/libzfs.la
-$(top_builddir)/lib/libuutil/libuutil.la \
-$(top_builddir)/lib/libzpool/libzpool.la \
-$(top_builddir)/lib/libzfs/libzfs.la \
-$(top_builddir)/lib/libzfs_core/libzfs_core.la
pkgexec_PROGRAMS = many_fds
many_fds_SOURCES = many_fds.c