Compare commits

zfs-0.7-release..zfs-0.7.12

No commits in common. "zfs-0.7-release" and "zfs-0.7.12" have entirely different histories.

54 changed files with 327 additions and 982 deletions

View File

@ -161,7 +161,7 @@ coding convention.
### Commit Message Formats
#### New Changes
Commit messages for new changes must meet the following guidelines:
* In 72 characters or less, provide a summary of the change as the
* In 50 characters or less, provide a summary of the change as the
first line in the commit message.
* A body which provides a description of the change. If necessary,
please summarize important information such as why the proposed
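As a rough illustration of the 50-character rule introduced above, a conforming commit could be created as follows; the subject and body text here are hypothetical:

    $ git commit -s \
        -m "Fix vdev_id slot mapping for sas_direct" \
        -m "Explain why the change is needed and summarize any important
    implementation details, wrapping the body at 72 columns."

The first -m supplies the summary line (under 50 characters), the second becomes the body paragraph, and -s appends the Signed-off-by trailer.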

META
View File

@ -1,7 +1,7 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 0.7.13
Version: 0.7.12
Release: 1
Release-Tags: relext
License: CDDL

View File

@ -7,8 +7,6 @@ DEFAULT_INCLUDES += \
#
# Ignore the prefix for the mount helper. It must be installed in /sbin/
# because this path is hardcoded in the mount(8) for security reasons.
# However, if needed, the configure option --with-mounthelperdir= can be used
# to override the default install location.
#
sbindir=$(mounthelperdir)
sbin_PROGRAMS = mount.zfs
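The comment shown above documents the configure knob that relocates the mount helper; a minimal sketch of overriding it, where the target directory is an arbitrary example:

    # Install mount.zfs somewhere other than the default /sbin
    $ ./configure --with-mounthelperdir=/usr/local/sbin
    $ make && sudo make install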

View File

@ -100,11 +100,10 @@ usage() {
cat << EOF
Usage: vdev_id [-h]
vdev_id <-d device> [-c config_file] [-p phys_per_port]
[-g sas_direct|sas_switch|scsi] [-m]
[-g sas_direct|sas_switch] [-m]
-c specify name of alternate config file [default=$CONFIG]
-d specify basename of device (i.e. sda)
-e Create enclosure device symlinks only (/dev/by-enclosure)
-g Storage network topology [default="$TOPOLOGY"]
-m Run in multipath mode
-p number of phy's per switch port [default=$PHYS_PER_PORT]
@ -136,7 +135,7 @@ map_channel() {
MAPPED_CHAN=`awk "\\$1 == \"channel\" && \\$2 == ${PORT} \
{ print \\$3; exit }" $CONFIG`
;;
"sas_direct"|"scsi")
"sas_direct")
MAPPED_CHAN=`awk "\\$1 == \"channel\" && \
\\$2 == \"${PCI_ID}\" && \\$3 == ${PORT} \
{ print \\$4; exit }" $CONFIG`
@ -277,23 +276,6 @@ sas_handler() {
d=$(eval echo \${$i})
SLOT=`echo $d | sed -e 's/^.*://'`
;;
"ses")
# look for this SAS path in all SCSI Enclosure Services
# (SES) enclosures
sas_address=`cat $end_device_dir/sas_address 2>/dev/null`
enclosures=`lsscsi -g | \
sed -n -e '/enclosu/s/^.* \([^ ][^ ]*\) *$/\1/p'`
for enclosure in $enclosures; do
set -- $(sg_ses -p aes $enclosure | \
awk "/device slot number:/{slot=\$12} \
/SAS address: $sas_address/\
{print slot}")
SLOT=$1
if [ -n "$SLOT" ] ; then
break
fi
done
;;
esac
if [ -z "$SLOT" ] ; then
return
@ -307,156 +289,6 @@ sas_handler() {
echo ${CHAN}${SLOT}${PART}
}
scsi_handler() {
if [ -z "$FIRST_BAY_NUMBER" ] ; then
FIRST_BAY_NUMBER=`awk "\\$1 == \"first_bay_number\" \
{print \\$2; exit}" $CONFIG`
fi
FIRST_BAY_NUMBER=${FIRST_BAY_NUMBER:-0}
if [ -z "$PHYS_PER_PORT" ] ; then
PHYS_PER_PORT=`awk "\\$1 == \"phys_per_port\" \
{print \\$2; exit}" $CONFIG`
fi
PHYS_PER_PORT=${PHYS_PER_PORT:-4}
if ! echo $PHYS_PER_PORT | grep -q -E '^[0-9]+$' ; then
echo "Error: phys_per_port value $PHYS_PER_PORT is non-numeric"
exit 1
fi
if [ -z "$MULTIPATH_MODE" ] ; then
MULTIPATH_MODE=`awk "\\$1 == \"multipath\" \
{print \\$2; exit}" $CONFIG`
fi
# Use first running component device if we're handling a dm-mpath device
if [ "$MULTIPATH_MODE" = "yes" ] ; then
# If udev didn't tell us the UUID via DM_NAME, check /dev/mapper
if [ -z "$DM_NAME" ] ; then
DM_NAME=`ls -l --full-time /dev/mapper |
awk "/\/$DEV$/{print \\$9}"`
fi
# For raw disks udev exports DEVTYPE=partition when
# handling partitions, and the rules can be written to
# take advantage of this to append a -part suffix. For
# dm devices we get DEVTYPE=disk even for partitions so
# we have to append the -part suffix directly in the
# helper.
if [ "$DEVTYPE" != "partition" ] ; then
PART=`echo $DM_NAME | awk -Fp '/p/{print "-part"$2}'`
fi
# Strip off partition information.
DM_NAME=`echo $DM_NAME | sed 's/p[0-9][0-9]*$//'`
if [ -z "$DM_NAME" ] ; then
return
fi
# Get the raw scsi device name from multipath -ll. Strip off
# leading pipe symbols to make field numbering consistent.
DEV=`multipath -ll $DM_NAME |
awk '/running/{gsub("^[|]"," "); print $3 ; exit}'`
if [ -z "$DEV" ] ; then
return
fi
fi
if echo $DEV | grep -q ^/devices/ ; then
sys_path=$DEV
else
sys_path=`udevadm info -q path -p /sys/block/$DEV 2>/dev/null`
fi
# expect sys_path like this, for example:
# /devices/pci0000:00/0000:00:0b.0/0000:09:00.0/0000:0a:05.0/0000:0c:00.0/host3/target3:1:0/3:1:0:21/block/sdv
# Use positional parameters as an ad-hoc array
set -- $(echo "$sys_path" | tr / ' ')
num_dirs=$#
scsi_host_dir="/sys"
# Get path up to /sys/.../hostX
i=1
while [ $i -le $num_dirs ] ; do
d=$(eval echo \${$i})
scsi_host_dir="$scsi_host_dir/$d"
echo $d | grep -q -E '^host[0-9]+$' && break
i=$(($i + 1))
done
if [ $i = $num_dirs ] ; then
return
fi
PCI_ID=$(eval echo \${$(($i -1))} | awk -F: '{print $2":"$3}')
# In scsi mode, the directory two levels beneath
# /sys/.../hostX reveals the port and slot.
port_dir=$scsi_host_dir
j=$(($i + 2))
i=$(($i + 1))
while [ $i -le $j ] ; do
port_dir="$port_dir/$(eval echo \${$i})"
i=$(($i + 1))
done
set -- $(echo $port_dir | sed -e 's/^.*:\([^:]*\):\([^:]*\)$/\1 \2/')
PORT=$1
SLOT=$(($2 + $FIRST_BAY_NUMBER))
if [ -z "$SLOT" ] ; then
return
fi
CHAN=`map_channel $PCI_ID $PORT`
SLOT=`map_slot $SLOT $CHAN`
if [ -z "$CHAN" ] ; then
return
fi
echo ${CHAN}${SLOT}${PART}
}
# Figure out the name for the enclosure symlink
enclosure_handler () {
# We get all the info we need from udev's DEVPATH variable:
#
# DEVPATH=/sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/subsystem/devices/0:0:0:0/scsi_generic/sg0
# Get the enclosure ID ("0:0:0:0")
ENC=$(basename $(readlink -m "/sys/$DEVPATH/../.."))
if [ ! -d /sys/class/enclosure/$ENC ] ; then
# Not an enclosure, bail out
return
fi
# Get the long sysfs device path to our enclosure. Looks like:
# /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0/ ... /enclosure/0:0:0:0
ENC_DEVICE=$(readlink /sys/class/enclosure/$ENC)
# Grab the full path to the hosts port dir:
# /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0
PORT_DIR=$(echo $ENC_DEVICE | grep -Eo '.+host[0-9]+/port-[0-9]+:[0-9]+')
# Get the port number
PORT_ID=$(echo $PORT_DIR | grep -Eo "[0-9]+$")
# The PCI directory is two directories up from the port directory
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0
PCI_ID_LONG=$(basename $(readlink -m "/sys/$PORT_DIR/../.."))
# Strip down the PCI address from 0000:05:00.0 to 05:00.0
PCI_ID=$(echo "$PCI_ID_LONG" | sed -r 's/^[0-9]+://g')
# Name our device according to vdev_id.conf (like "L0" or "U1").
NAME=$(awk "/channel/{if (\$1 == \"channel\" && \$2 == \"$PCI_ID\" && \
\$3 == \"$PORT_ID\") {print \$4int(count[\$4])}; count[\$4]++}" $CONFIG)
echo "${NAME}"
}
alias_handler () {
# Special handling is needed to correctly append a -part suffix
# to partitions of device mapper devices. The DEVTYPE attribute
@ -512,7 +344,7 @@ alias_handler () {
done
}
while getopts 'c:d:eg:mp:h' OPTION; do
while getopts 'c:d:g:mp:h' OPTION; do
case ${OPTION} in
c)
CONFIG=${OPTARG}
@ -520,16 +352,6 @@ while getopts 'c:d:eg:mp:h' OPTION; do
d)
DEV=${OPTARG}
;;
e)
# When udev sees a scsi_generic device, it calls this script with -e to
# create the enclosure device symlinks only. We also need
# "enclosure_symlinks yes" set in vdev_id.config to actually create the
# symlink.
ENCLOSURE_MODE=$(awk '{if ($1 == "enclosure_symlinks") print $2}' $CONFIG)
if [ "$ENCLOSURE_MODE" != "yes" ] ; then
exit 0
fi
;;
g)
TOPOLOGY=$OPTARG
;;
@ -549,7 +371,7 @@ if [ ! -r $CONFIG ] ; then
exit 0
fi
if [ -z "$DEV" -a -z "$ENCLOSURE_MODE" ] ; then
if [ -z "$DEV" ] ; then
echo "Error: missing required option -d"
exit 1
fi
@ -562,37 +384,16 @@ if [ -z "$BAY" ] ; then
BAY=`awk "\\$1 == \"slot\" {print \\$2; exit}" $CONFIG`
fi
TOPOLOGY=${TOPOLOGY:-sas_direct}
# Should we create /dev/by-enclosure symlinks?
if [ "$ENCLOSURE_MODE" = "yes" -a "$TOPOLOGY" = "sas_direct" ] ; then
ID_ENCLOSURE=$(enclosure_handler)
if [ -z "$ID_ENCLOSURE" ] ; then
exit 0
fi
# Just create the symlinks to the enclosure devices and then exit.
ENCLOSURE_PREFIX=$(awk '/enclosure_symlinks_prefix/{print $2}' $CONFIG)
if [ -z "$ENCLOSURE_PREFIX" ] ; then
ENCLOSURE_PREFIX="enc"
fi
echo "ID_ENCLOSURE=$ID_ENCLOSURE"
echo "ID_ENCLOSURE_PATH=by-enclosure/$ENCLOSURE_PREFIX-$ID_ENCLOSURE"
exit 0
fi
# First check if an alias was defined for this device.
ID_VDEV=`alias_handler`
if [ -z "$ID_VDEV" ] ; then
BAY=${BAY:-bay}
TOPOLOGY=${TOPOLOGY:-sas_direct}
case $TOPOLOGY in
sas_direct|sas_switch)
ID_VDEV=`sas_handler`
;;
scsi)
ID_VDEV=`scsi_handler`
;;
*)
echo "Error: unknown topology $TOPOLOGY"
exit 1
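For orientation, this is roughly how the helper is exercised once a channel mapping exists in vdev_id.conf; the device name, config path, and resulting alias below are assumptions for illustration only:

    # Hypothetical manual run against a sas_direct mapping
    $ /lib/udev/vdev_id -d sdb -c /etc/zfs/vdev_id.conf -g sas_direct
    ID_VDEV=A0
    ID_VDEV_PATH=disk/by-vdev/A0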

View File

@ -171,8 +171,8 @@ typedef struct ztest_shared_opts {
} ztest_shared_opts_t;
static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_pool = "ztest",
.zo_dir = "/tmp",
.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
.zo_dir = { '/', 't', 'm', 'p', '\0' },
.zo_alt_ztest = { '\0' },
.zo_alt_libpath = { '\0' },
.zo_vdevs = 5,

View File

@ -1,21 +0,0 @@
dnl #
dnl # Linux 5.0: access_ok() drops 'type' parameter:
dnl #
dnl # - access_ok(type, addr, size)
dnl # + access_ok(addr, size)
dnl #
AC_DEFUN([ZFS_AC_KERNEL_ACCESS_OK_TYPE], [
AC_MSG_CHECKING([whether access_ok() has 'type' parameter])
ZFS_LINUX_TRY_COMPILE([
#include <linux/uaccess.h>
],[
const void __user __attribute__((unused)) *addr = (void *) 0xdeadbeef;
unsigned long __attribute__((unused)) size = 1;
int error __attribute__((unused)) = access_ok(0, addr, size);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ACCESS_OK_TYPE, 1, [kernel has access_ok with 'type' parameter])
],[
AC_MSG_RESULT(no)
])
])

View File

@ -1,10 +1,10 @@
dnl #
dnl # Linux 4.14 API,
dnl #
dnl # The bio_set_dev() helper macro was introduced as part of the transition
dnl # The bio_set_dev() helper was introduced as part of the transition
dnl # to have struct gendisk in struct bio.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV_MACRO], [
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV], [
AC_MSG_CHECKING([whether bio_set_dev() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/bio.h>
@ -20,34 +20,3 @@ AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV_MACRO], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 5.0 API,
dnl #
dnl # The bio_set_dev() helper macro was updated to internally depend on
dnl # bio_associate_blkg() symbol which is exported GPL-only.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV_GPL_ONLY], [
AC_MSG_CHECKING([whether bio_set_dev() is GPL-only])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/fs.h>
MODULE_LICENSE("$ZFS_META_LICENSE");
],[
struct block_device *bdev = NULL;
struct bio *bio = NULL;
bio_set_dev(bio, bdev);
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_SET_DEV_GPL_ONLY, 1,
[bio_set_dev() GPL-only])
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV], [
ZFS_AC_KERNEL_BIO_SET_DEV_MACRO
ZFS_AC_KERNEL_BIO_SET_DEV_GPL_ONLY
])

View File

@ -1,41 +1,18 @@
dnl #
dnl # Handle differences in kernel FPU code.
dnl #
dnl # Kernel
dnl # 5.0: All kernel fpu functions are GPL only, so we can't use them.
dnl # (nothing defined)
dnl #
dnl # 4.2: Use __kernel_fpu_{begin,end}()
dnl # HAVE_UNDERSCORE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl #
dnl # Pre-4.2: Use kernel_fpu_{begin,end}()
dnl # HAVE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl # 4.2 API change
dnl # asm/i387.h is replaced by asm/fpu/api.h
dnl #
AC_DEFUN([ZFS_AC_KERNEL_FPU], [
AC_MSG_CHECKING([which kernel_fpu function to use])
AC_MSG_CHECKING([whether asm/fpu/api.h exists])
ZFS_LINUX_TRY_COMPILE([
#include <asm/i387.h>
#include <asm/xcr.h>
#include <linux/kernel.h>
#include <asm/fpu/api.h>
],[
kernel_fpu_begin();
kernel_fpu_end();
__kernel_fpu_begin();
],[
AC_MSG_RESULT(kernel_fpu_*)
AC_DEFINE(HAVE_KERNEL_FPU, 1, [kernel has kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1, [kernel exports FPU functions])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FPU_API_H, 1, [kernel has <asm/fpu/api.h> interface])
],[
ZFS_LINUX_TRY_COMPILE([
#include <linux/kernel.h>
#include <asm/fpu/api.h>
],[
__kernel_fpu_begin();
__kernel_fpu_end();
],[
AC_MSG_RESULT(__kernel_fpu_*)
AC_DEFINE(HAVE_UNDERSCORE_KERNEL_FPU, 1, [kernel has __kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1, [kernel exports FPU functions])
],[
AC_MSG_RESULT(not exported)
])
AC_MSG_RESULT(no)
])
])

View File

@ -1,26 +0,0 @@
dnl #
dnl # Determine an available miscellaneous minor number which can be used
dnl # for the /dev/zfs device. This is needed because kernel module
dnl # auto-loading depends on registering a reserved non-conflicting minor
dnl # number. Start with a large known available unreserved minor and work
dnl # our way down to lower value if a collision is detected.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_MISC_MINOR], [
AC_MSG_CHECKING([for available /dev/zfs minor])
for i in $(seq 249 -1 200); do
if ! grep -q "^#define\s\+.*_MINOR\s\+.*$i" \
${LINUX}/include/linux/miscdevice.h; then
ZFS_DEVICE_MINOR="$i"
AC_MSG_RESULT($ZFS_DEVICE_MINOR)
AC_DEFINE_UNQUOTED([ZFS_DEVICE_MINOR],
[$ZFS_DEVICE_MINOR], [/dev/zfs minor])
break
fi
done
AS_IF([ test -z "$ZFS_DEVICE_MINOR"], [
AC_MSG_ERROR([
*** No available misc minor numbers available for use.])
])
])
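The macro removed above is essentially a small shell scan of the kernel headers. The same logic can be sketched standalone, assuming the kernel source tree sits under /usr/src/linux:

    # Find an unreserved misc minor for /dev/zfs, counting down from 249
    LINUX=/usr/src/linux
    for i in $(seq 249 -1 200); do
        if ! grep -q "^#define\s\+.*_MINOR\s\+.*$i" \
            "$LINUX/include/linux/miscdevice.h"; then
            echo "available /dev/zfs minor: $i"
            break
        fi
    done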

View File

@ -5,9 +5,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL
ZFS_AC_SPL
ZFS_AC_QAT
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_TEST_MODULE
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_CONFIG
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
@ -257,7 +255,7 @@ AC_DEFUN([ZFS_AC_KERNEL], [
AS_IF([test "$utsrelease"], [
kernsrcver=`(echo "#include <$utsrelease>";
echo "kernsrcver=UTS_RELEASE") |
${CPP} -I $kernelbuild/include - |
cpp -I $kernelbuild/include |
grep "^kernsrcver=" | cut -d \" -f 2`
AS_IF([test -z "$kernsrcver"], [

View File

@ -122,9 +122,6 @@ AC_CONFIG_FILES([
contrib/dracut/02zfsexpandknowledge/Makefile
contrib/dracut/90zfs/Makefile
contrib/initramfs/Makefile
contrib/initramfs/hooks/Makefile
contrib/initramfs/scripts/Makefile
contrib/initramfs/scripts/local-top/Makefile
module/Makefile
module/avl/Makefile
module/nvpair/Makefile

View File

@ -24,7 +24,6 @@ $(pkgdracut_SCRIPTS):%:%.in
-e 's,@udevruledir\@,$(udevruledir),g' \
-e 's,@sysconfdir\@,$(sysconfdir),g' \
-e 's,@systemdunitdir\@,$(systemdunitdir),g' \
-e 's,@mounthelperdir\@,$(mounthelperdir),g' \
$< >'$@'
distclean-local::

View File

@ -5,7 +5,7 @@ check() {
[ "${1}" = "-d" ] && return 0
# Verify the zfs tool chain
for tool in "@sbindir@/zpool" "@sbindir@/zfs" "@mounthelperdir@/mount.zfs" ; do
for tool in "@sbindir@/zpool" "@sbindir@/zfs" "@sbindir@/mount.zfs" ; do
test -x "$tool" || return 1
done
# Verify grep exists
@ -53,7 +53,7 @@ install() {
# Fallback: Guess the path and include all matches
dracut_install /usr/lib/gcc/*/*/libgcc_s.so*
fi
dracut_install @mounthelperdir@/mount.zfs
dracut_install @sbindir@/mount.zfs
dracut_install @udevdir@/vdev_id
dracut_install awk
dracut_install head

View File

@ -3,11 +3,12 @@ initrddir = $(datarootdir)/initramfs-tools
initrd_SCRIPTS = \
conf.d/zfs conf-hooks.d/zfs hooks/zfs scripts/zfs scripts/local-top/zfs
SUBDIRS = hooks scripts
EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/conf.d/zfs \
$(top_srcdir)/contrib/initramfs/conf-hooks.d/zfs \
$(top_srcdir)/contrib/initramfs/hooks/zfs \
$(top_srcdir)/contrib/initramfs/scripts/zfs \
$(top_srcdir)/contrib/initramfs/scripts/local-top/zfs \
$(top_srcdir)/contrib/initramfs/README.initramfs.markdown
install-initrdSCRIPTS: $(EXTRA_DIST)

View File

@ -1 +0,0 @@
zfs

View File

@ -1,21 +0,0 @@
hooksdir = $(datarootdir)/initramfs-tools/hooks
hooks_SCRIPTS = \
zfs
EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/hooks/zfs.in
$(hooks_SCRIPTS):%:%.in
-$(SED) -e 's,@sbindir\@,$(sbindir),g' \
-e 's,@sysconfdir\@,$(sysconfdir),g' \
-e 's,@udevdir\@,$(udevdir),g' \
-e 's,@udevruledir\@,$(udevruledir),g' \
-e 's,@mounthelperdir\@,$(mounthelperdir),g' \
$< >'$@'
clean-local::
-$(RM) $(hooks_SCRIPTS)
distclean-local::
-$(RM) $(hooks_SCRIPTS)

View File

@ -8,13 +8,11 @@ PREREQ="zdev"
# These prerequisites are provided by the zfsutils package. The zdb utility is
# not strictly required, but it can be useful at the initramfs recovery prompt.
COPY_EXEC_LIST="@sbindir@/zdb @sbindir@/zpool @sbindir@/zfs"
COPY_EXEC_LIST="$COPY_EXEC_LIST @mounthelperdir@/mount.zfs @udevdir@/vdev_id"
COPY_FILE_LIST="/etc/hostid @sysconfdir@/zfs/zpool.cache"
COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/default/zfs"
COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/zfs/zfs-functions"
COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/zfs/vdev_id.conf"
COPY_FILE_LIST="$COPY_FILE_LIST @udevruledir@/69-vdev.rules"
COPY_EXEC_LIST="/sbin/zdb /sbin/zpool /sbin/zfs /sbin/mount.zfs"
COPY_EXEC_LIST="$COPY_EXEC_LIST /usr/bin/dirname /lib/udev/vdev_id"
COPY_FILE_LIST="/etc/hostid /etc/zfs/zpool.cache /etc/default/zfs"
COPY_FILE_LIST="$COPY_FILE_LIST /etc/zfs/zfs-functions /etc/zfs/vdev_id.conf"
COPY_FILE_LIST="$COPY_FILE_LIST /lib/udev/rules.d/69-vdev.rules"
# These prerequisites are provided by the base system.
COPY_EXEC_LIST="$COPY_EXEC_LIST /usr/bin/dirname /bin/hostname /sbin/blkid"
@ -85,7 +83,7 @@ else
fi
for ii in zfs zfs.conf spl spl.conf
do
do
if [ -f "/etc/modprobe.d/$ii" ]; then
if [ ! -d "$DESTDIR/etc/modprobe.d" ]; then
mkdir -p $DESTDIR/etc/modprobe.d

View File

@ -1 +0,0 @@
zfs

View File

@ -1,20 +0,0 @@
scriptsdir = $(datarootdir)/initramfs-tools/scripts
scripts_SCRIPTS = \
zfs
SUBDIRS = local-top
EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/scripts/zfs.in
$(scripts_SCRIPTS):%:%.in
-$(SED) -e 's,@sbindir\@,$(sbindir),g' \
-e 's,@sysconfdir\@,$(sysconfdir),g' \
$< >'$@'
clean-local::
-$(RM) $(scripts_SCRIPTS)
distclean-local::
-$(RM) $(scripts_SCRIPTS)

View File

@ -1,3 +0,0 @@
localtopdir = $(datarootdir)/initramfs-tools/scripts/local-top
EXTRA_DIST = zfs

View File

@ -11,9 +11,9 @@
# Paths to what we need - in the initrd, these paths are hardcoded,
# so override the defines in zfs-functions.
ZFS="@sbindir@/zfs"
ZPOOL="@sbindir@/zpool"
ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
ZFS="/sbin/zfs"
ZPOOL="/sbin/zpool"
ZPOOL_CACHE="/etc/zfs/zpool.cache"
export ZFS ZPOOL ZPOOL_CACHE
# This runs any scripts that should run before we start importing
@ -616,7 +616,7 @@ setup_snapshot_booting()
# Separate the full snapshot ('$snap') into its filesystem and
# snapshot names. Would have been nice with a split() function..
rootfs="${snap%%@*}"
snapname="${snap##*@}"
snapname="${snap##*@}"
ZFS_BOOTFS="${rootfs}_${snapname}"
if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline

View File

@ -1,3 +1,3 @@
# The default behavior is to allow udev to load the kernel modules on demand.
# Uncomment the following line to unconditionally load them at boot.
# Always load kernel modules at boot. The default behavior is to load the
# kernel modules in the zfs-import-*.service or when blkid(8) detects a pool.
#zfs

View File

@ -12,6 +12,7 @@ ConditionPathExists=@sysconfdir@/zfs/zpool.cache
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=-/sbin/modprobe zfs
ExecStart=@sbindir@/zpool import -c @sysconfdir@/zfs/zpool.cache -aN
[Install]

View File

@ -11,6 +11,7 @@ ConditionPathExists=!@sysconfdir@/zfs/zpool.cache
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=-/sbin/modprobe zfs
ExecStart=@sbindir@/zpool import -aN -o cachefile=none
[Install]
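Both unit changes above prepend a module load to the pool import. Done by hand, and assuming the default sysconfdir of /etc, the cache-file variant amounts to roughly:

    # Manual equivalent of zfs-import-cache.service on 0.7.12
    $ sudo modprobe zfs
    $ sudo zpool import -c /etc/zfs/zpool.cache -aN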

View File

@ -4,7 +4,6 @@ pkgsysconf_DATA = \
vdev_id.conf.alias.example \
vdev_id.conf.sas_direct.example \
vdev_id.conf.sas_switch.example \
vdev_id.conf.multipath.example \
vdev_id.conf.scsi.example
vdev_id.conf.multipath.example
EXTRA_DIST = $(pkgsysconf_DATA)

View File

@ -2,9 +2,6 @@ multipath no
topology sas_direct
phys_per_port 4
# Additionally create /dev/by-enclosure/ symlinks for enclosure devices
enclosure_symlinks yes
# PCI_ID HBA PORT CHANNEL NAME
channel 85:00.0 1 A
channel 85:00.0 0 B

View File

@ -1,9 +0,0 @@
multipath no
topology scsi
phys_per_port 1
# Usually scsi disks are numbered from 0, but this can be offset, to
# match the physical bay numbers, as follows:
first_bay_number 1
# PCI_ID HBA PORT CHANNEL NAME
channel 0c:00.0 0 Y

View File

@ -27,7 +27,6 @@
#define _ZFS_KMAP_H
#include <linux/highmem.h>
#include <linux/uaccess.h>
#ifdef HAVE_1ARG_KMAP_ATOMIC
/* 2.6.37 API change */
@ -38,11 +37,4 @@
#define zfs_kunmap_atomic(addr, km_type) kunmap_atomic(addr, km_type)
#endif
/* 5.0 API change - no more 'type' argument for access_ok() */
#ifdef HAVE_ACCESS_OK_TYPE
#define zfs_access_ok(type, addr, size) access_ok(type, addr, size)
#else
#define zfs_access_ok(type, addr, size) access_ok(addr, size)
#endif
#endif /* _ZFS_KMAP_H */

View File

@ -81,7 +81,7 @@
#endif
#if defined(_KERNEL)
#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#if defined(HAVE_FPU_API_H)
#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#define kfpu_begin() \
@ -94,18 +94,12 @@
__kernel_fpu_end(); \
preempt_enable(); \
}
#elif defined(HAVE_KERNEL_FPU)
#else
#include <asm/i387.h>
#include <asm/xcr.h>
#define kfpu_begin() kernel_fpu_begin()
#define kfpu_end() kernel_fpu_end()
#else
/* Kernel doesn't export any kernel_fpu_* functions */
#include <asm/fpu/internal.h> /* For kernel xgetbv() */
#define kfpu_begin() panic("This code should never run")
#define kfpu_end() panic("This code should never run")
#endif /* defined(HAVE_KERNEL_FPU) */
#endif /* defined(HAVE_FPU_API_H) */
#else
/*
* fpu dummy methods for userspace
@ -284,13 +278,11 @@ __simd_state_enabled(const uint64_t state)
boolean_t has_osxsave;
uint64_t xcr0;
#if defined(_KERNEL)
#if defined(X86_FEATURE_OSXSAVE) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_OSXSAVE)
has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
#elif defined(_KERNEL) && !defined(X86_FEATURE_OSXSAVE)
has_osxsave = B_FALSE;
#endif
#elif !defined(_KERNEL)
#else
has_osxsave = __cpuid_has_osxsave();
#endif
@ -315,12 +307,8 @@ static inline boolean_t
zfs_sse_available(void)
{
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
return (!!boot_cpu_has(X86_FEATURE_XMM));
#else
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
return (__cpuid_has_sse());
#endif
}
@ -332,12 +320,8 @@ static inline boolean_t
zfs_sse2_available(void)
{
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
return (!!boot_cpu_has(X86_FEATURE_XMM2));
#else
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
return (__cpuid_has_sse2());
#endif
}
@ -349,12 +333,8 @@ static inline boolean_t
zfs_sse3_available(void)
{
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
return (!!boot_cpu_has(X86_FEATURE_XMM3));
#else
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
return (__cpuid_has_sse3());
#endif
}
@ -366,12 +346,8 @@ static inline boolean_t
zfs_ssse3_available(void)
{
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
return (!!boot_cpu_has(X86_FEATURE_SSSE3));
#else
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
return (__cpuid_has_ssse3());
#endif
}
@ -383,12 +359,8 @@ static inline boolean_t
zfs_sse4_1_available(void)
{
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
#else
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
return (__cpuid_has_sse4_1());
#endif
}
@ -400,12 +372,8 @@ static inline boolean_t
zfs_sse4_2_available(void)
{
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
#else
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
return (__cpuid_has_sse4_2());
#endif
}
@ -418,12 +386,8 @@ zfs_avx_available(void)
{
boolean_t has_avx;
#if defined(_KERNEL)
#if defined(KERNEL_EXPORTS_X86_FPU)
has_avx = !!boot_cpu_has(X86_FEATURE_AVX);
#else
has_avx = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx = __cpuid_has_avx();
#endif
@ -437,13 +401,11 @@ static inline boolean_t
zfs_avx2_available(void)
{
boolean_t has_avx2;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX2) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX2)
has_avx2 = !!boot_cpu_has(X86_FEATURE_AVX2);
#else
#elif defined(_KERNEL) && !defined(X86_FEATURE_AVX2)
has_avx2 = B_FALSE;
#endif
#elif !defined(_KERNEL)
#else
has_avx2 = __cpuid_has_avx2();
#endif
@ -456,13 +418,11 @@ zfs_avx2_available(void)
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(_KERNEL)
#if defined(X86_FEATURE_BMI1) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_BMI1)
return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
#elif defined(_KERNEL) && !defined(X86_FEATURE_BMI1)
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
#else
return (__cpuid_has_bmi1());
#endif
}
@ -473,17 +433,16 @@ zfs_bmi1_available(void)
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(_KERNEL)
#if defined(X86_FEATURE_BMI2) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_BMI2)
return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
#elif defined(_KERNEL) && !defined(X86_FEATURE_BMI2)
return (B_FALSE);
#endif
#elif !defined(_KERNEL)
#else
return (__cpuid_has_bmi2());
#endif
}
/*
* AVX-512 family of instruction sets:
*
@ -507,12 +466,8 @@ zfs_avx512f_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512F) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512F)
has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512f();
#endif
@ -526,13 +481,9 @@ zfs_avx512cd_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512CD) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512CD)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512CD);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512cd();
#endif
@ -546,13 +497,9 @@ zfs_avx512er_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512ER) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512ER)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512ER);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512er();
#endif
@ -566,13 +513,9 @@ zfs_avx512pf_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512PF) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512PF)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512PF);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512pf();
#endif
@ -586,13 +529,9 @@ zfs_avx512bw_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512BW) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512BW)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512BW);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512bw();
#endif
@ -606,13 +545,9 @@ zfs_avx512dq_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512DQ) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512DQ)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512DQ);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512dq();
#endif
@ -626,13 +561,9 @@ zfs_avx512vl_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512VL) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512VL)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VL);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512vl();
#endif
@ -646,13 +577,9 @@ zfs_avx512ifma_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512IFMA) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512IFMA)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512IFMA);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512ifma();
#endif
@ -666,13 +593,9 @@ zfs_avx512vbmi_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(_KERNEL)
#if defined(X86_FEATURE_AVX512VBMI) && defined(KERNEL_EXPORTS_X86_FPU)
#if defined(_KERNEL) && defined(X86_FEATURE_AVX512VBMI)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VBMI);
#else
has_avx512 = B_FALSE;
#endif
#elif !defined(_KERNEL)
has_avx512 = __cpuid_has_avx512f() &&
__cpuid_has_avx512vbmi();

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
@ -294,7 +294,7 @@ boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
uint64_t dbuf_refcount(dmu_buf_impl_t *db);
void dbuf_rele(dmu_buf_impl_t *db, void *tag);
void dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting);
void dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag);
dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
uint64_t blkid);

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
@ -339,7 +339,7 @@ int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots,
void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, void *ref);
void dnode_rele(dnode_t *dn, void *ref);
void dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting);
void dnode_rele_and_unlock(dnode_t *dn, void *tag);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,

View File

@ -42,7 +42,7 @@
#include <sys/uio.h>
extern int uiomove(void *, size_t, enum uio_rw, uio_t *);
extern int uio_prefaultpages(ssize_t, uio_t *);
extern void uio_prefaultpages(ssize_t, uio_t *);
extern int uiocopy(void *, size_t, enum uio_rw, uio_t *, size_t *);
extern void uioskip(uio_t *, size_t);

View File

@ -963,15 +963,14 @@ libzfs_load_module(const char *module)
load = 0;
}
if (load) {
if (libzfs_run_process("/sbin/modprobe", argv, 0))
return (ENOEXEC);
if (!libzfs_module_loaded(module))
return (ENXIO);
}
if (load && libzfs_run_process("/sbin/modprobe", argv, 0))
return (ENOEXEC);
}
/* Module loading is synchronous it must be available */
if (!libzfs_module_loaded(module))
return (ENXIO);
/*
* Device creation by udev is asynchronous and waiting may be
* required. Busy wait for 10ms and then fall back to polling every
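The comment above notes that /dev/zfs appears asynchronously once the module loads; a loose shell sketch of the sequence the library performs (the real code busy-waits and then polls rather than calling udevadm):

    # Approximate userland equivalent of libzfs_load_module()
    $ sudo /sbin/modprobe zfs
    $ udevadm settle                 # let udev finish creating device nodes
    $ test -c /dev/zfs && echo "/dev/zfs is ready"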

View File

@ -38,19 +38,6 @@ defined by udev. This may be an absolute path or the base filename.
Maps a physical path to a channel name (typically representing a single
disk enclosure).
.TP
\fIenclosure_symlinks\fR <yes|no>
Additionally create /dev/by-enclosure symlinks to the disk enclosure
sg devices using the naming scheme from vdev_id.conf.
\fIenclosure_symlinks\fR is only allowed for sas_direct mode.
.TP
\fIenclosure_symlinks_prefix\fR <prefix>
Specify the prefix for the enclosure symlinks in the form of:
/dev/by-enclosure/<prefix>-<channel><num>
Defaults to "enc" if not specified.
.TP
\fIpci_slot\fR - specifies the PCI SLOT of the HBA
hosting the disk enclosure being mapped, as found in the output of
.BR lspci (8).
@ -103,7 +90,7 @@ internally uses this value to determine which HBA or switch port a
device is connected to. The default is 4.
.TP
\fIslot\fR <bay|phy|port|id|lun|ses>
\fIslot\fR <bay|phy|port|id|lun>
Specifies from which element of a SAS identifier the slot number is
taken. The default is bay.
@ -116,12 +103,6 @@ taken. The default is bay.
\fIid\fR - use the scsi id as the slot number.
\fIlun\fR - use the scsi lun as the slot number.
\fIses\fR - use the SCSI Enclosure Services (SES) enclosure device slot number,
as reported by
.BR sg_ses (8).
This is intended for use only on systems where \fIbay\fR is unsupported,
noting that \fIport\fR and \fIid\fR may be unstable across disk replacement.
.SH EXAMPLES
A non-multipath configuration with direct-attached SAS enclosures and an
arbitrary slot re-mapping.
@ -182,27 +163,6 @@ definitions - one per physical path.
channel 86:00.0 0 B
.fi
.P
A configuration with enclosure_symlinks enabled.
.P
.nf
multipath yes
enclosure_symlinks yes
# PCI_ID HBA PORT CHANNEL NAME
channel 05:00.0 1 U
channel 05:00.0 0 L
channel 06:00.0 1 U
channel 06:00.0 0 L
.fi
In addition to the disk symlinks, this configuration will create:
.P
.nf
/dev/by-enclosure/enc-L0
/dev/by-enclosure/enc-L1
/dev/by-enclosure/enc-U0
/dev/by-enclosure/enc-U1
.fi
.P
A configuration using device link aliases.
.P
.nf

View File

@ -29,7 +29,7 @@
.\" Copyright 2016 Nexenta Systems, Inc.
.\" Copyright 2016 Richard Laager. All rights reserved.
.\"
.Dd Jan 05, 2019
.Dd July 13, 2018
.Dt ZFS 8 SMM
.Os Linux
.Sh NAME
@ -3981,9 +3981,9 @@ renames the remaining snapshots, and then creates a new snapshot, as follows:
# zfs destroy -r pool/users@7daysago
# zfs rename -r pool/users@6daysago @7daysago
# zfs rename -r pool/users@5daysago @6daysago
# zfs rename -r pool/users@4daysago @5daysago
# zfs rename -r pool/users@3daysago @4daysago
# zfs rename -r pool/users@2daysago @3daysago
# zfs rename -r pool/users@yesterday @5daysago
# zfs rename -r pool/users@yesterday @4daysago
# zfs rename -r pool/users@yesterday @3daysago
# zfs rename -r pool/users@yesterday @2daysago
# zfs rename -r pool/users@today @yesterday
# zfs snapshot -r pool/users@today

View File

@ -36,12 +36,12 @@ modules:
list='$(SUBDIR_TARGETS)'; for targetdir in $$list; do \
$(MAKE) -C $$targetdir; \
done
$(MAKE) -C @LINUX_OBJ@ M=`pwd` @KERNELMAKE_PARAMS@ CONFIG_ZFS=m $@
$(MAKE) -C @LINUX_OBJ@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ CONFIG_ZFS=m $@
clean:
@# Only cleanup the kernel build directories when CONFIG_KERNEL
@# is defined. This indicates that kernel modules should be built.
@CONFIG_KERNEL_TRUE@ $(MAKE) -C @LINUX_OBJ@ M=`pwd` @KERNELMAKE_PARAMS@ $@
@CONFIG_KERNEL_TRUE@ $(MAKE) -C @LINUX_OBJ@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ $@
if [ -f @SPL_SYMBOLS@ ]; then $(RM) @SPL_SYMBOLS@; fi
if [ -f @LINUX_SYMBOLS@ ]; then $(RM) @LINUX_SYMBOLS@; fi
@ -49,7 +49,7 @@ clean:
modules_install:
@# Install the kernel modules
$(MAKE) -C @LINUX_OBJ@ M=`pwd` $@ \
$(MAKE) -C @LINUX_OBJ@ SUBDIRS=`pwd` $@ \
INSTALL_MOD_PATH=$(DESTDIR)$(INSTALL_MOD_PATH) \
INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) \
KERNELRELEASE=@LINUX_VERSION@
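The hunks above swap the kbuild M= variable for the older SUBDIRS= spelling. Invoking kbuild directly for an out-of-tree build looks roughly like this, assuming the running kernel's build directory:

    # Build the ZFS modules against the running kernel's build tree
    $ make -C /lib/modules/$(uname -r)/build M=$PWD CONFIG_ZFS=m modules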

View File

@ -50,7 +50,6 @@
#include <sys/types.h>
#include <sys/uio_impl.h>
#include <linux/kmap_compat.h>
#include <linux/uaccess.h>
/*
* Move "n" bytes at byte address "p"; "rw" indicates the direction
@ -78,23 +77,8 @@ uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio)
if (copy_to_user(iov->iov_base+skip, p, cnt))
return (EFAULT);
} else {
if (uio->uio_fault_disable) {
if (!zfs_access_ok(VERIFY_READ,
(iov->iov_base + skip), cnt)) {
return (EFAULT);
}
pagefault_disable();
if (__copy_from_user_inatomic(p,
(iov->iov_base + skip), cnt)) {
pagefault_enable();
return (EFAULT);
}
pagefault_enable();
} else {
if (copy_from_user(p,
(iov->iov_base + skip), cnt))
return (EFAULT);
}
if (copy_from_user(p, iov->iov_base+skip, cnt))
return (EFAULT);
}
break;
case UIO_SYSSPACE:
@ -172,7 +156,7 @@ EXPORT_SYMBOL(uiomove);
* error will terminate the process as this is only a best attempt to get
* the pages resident.
*/
int
void
uio_prefaultpages(ssize_t n, struct uio *uio)
{
const struct iovec *iov;
@ -186,7 +170,7 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
switch (uio->uio_segflg) {
case UIO_SYSSPACE:
case UIO_BVEC:
return (0);
return;
case UIO_USERSPACE:
case UIO_USERISPACE:
break;
@ -210,7 +194,7 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
p = iov->iov_base + skip;
while (cnt) {
if (fuword8((uint8_t *)p, &tmp))
return (EFAULT);
return;
incr = MIN(cnt, PAGESIZE);
p += incr;
cnt -= incr;
@ -220,10 +204,8 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
*/
p--;
if (fuword8((uint8_t *)p, &tmp))
return (EFAULT);
return;
}
return (0);
}
EXPORT_SYMBOL(uio_prefaultpages);

View File

@ -4004,9 +4004,9 @@ arc_all_memory(void)
{
#ifdef _KERNEL
#ifdef CONFIG_HIGHMEM
return (ptob(zfs_totalram_pages - totalhigh_pages));
return (ptob(totalram_pages - totalhigh_pages));
#else
return (ptob(zfs_totalram_pages));
return (ptob(totalram_pages));
#endif /* CONFIG_HIGHMEM */
#else
return (ptob(physmem) / 2);

View File

@ -72,6 +72,8 @@ static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
uint_t zfs_dbuf_evict_key;
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
@ -503,6 +505,14 @@ dbuf_evict_one(void)
dmu_buf_impl_t *db;
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
/*
* Set the thread's tsd to indicate that it's processing evictions.
* Once a thread stops evicting from the dbuf cache it will
* reset its tsd to NULL.
*/
ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);
db = multilist_sublist_tail(mls);
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
@ -520,6 +530,7 @@ dbuf_evict_one(void)
} else {
multilist_sublist_unlock(mls);
}
(void) tsd_set(zfs_dbuf_evict_key, NULL);
}
/*
@ -572,6 +583,29 @@ dbuf_evict_thread(void)
static void
dbuf_evict_notify(void)
{
/*
* We use thread specific data to track when a thread has
* started processing evictions. This allows us to avoid deeply
* nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
* The dbuf_eviction_thread will always have its tsd set until
* that thread exits. All other threads will only set their tsd
* if they are participating in the eviction process. This only
* happens if the eviction thread is unable to process evictions
* fast enough. To keep the dbuf cache size in check, other threads
* can evict from the dbuf cache directly. Those threads will set
* their tsd values so that we ensure that they only evict one dbuf
* from the dbuf cache.
*/
if (tsd_get(zfs_dbuf_evict_key) != NULL)
return;
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
@ -647,6 +681,7 @@ retry:
dbuf_cache_multilist_index_func);
zfs_refcount_create(&dbuf_cache_size);
tsd_create(&zfs_dbuf_evict_key, NULL);
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
@ -683,6 +718,7 @@ dbuf_fini(void)
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
tsd_destroy(&zfs_dbuf_evict_key);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
@ -968,7 +1004,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
db->db_state = DB_UNCACHED;
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL, B_FALSE);
dbuf_rele_and_unlock(db, NULL);
}
static int
@ -2142,8 +2178,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, db, B_TRUE);
dnode_rele(dn, db);
db->db_dnode_handle = NULL;
dbuf_hash_remove(db);
@ -2169,10 +2204,8 @@ dbuf_destroy(dmu_buf_impl_t *db)
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb) {
mutex_enter(&parent->db_mtx);
dbuf_rele_and_unlock(parent, db, B_TRUE);
}
if (parent && parent != dndb)
dbuf_rele(parent, db);
}
/*
@ -2879,7 +2912,7 @@ void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
dbuf_rele_and_unlock(db, tag);
}
void
@ -2890,19 +2923,10 @@ dmu_buf_rele(dmu_buf_t *db, void *tag)
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically. The 'evicting'
* argument should be set if we are already in the dbuf-evicting code
* path, in which case we don't want to recursively evict. This allows us to
* avoid deeply nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
* db_dirtycnt and db_holds to be updated atomically.
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
int64_t holds;
@ -2997,8 +3021,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
db->db.db_size, db);
mutex_exit(&db->db_mtx);
if (!evicting)
dbuf_evict_notify();
dbuf_evict_notify();
}
if (do_arc_evict)
@ -3291,7 +3314,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
return;
}
@ -3647,7 +3670,7 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
}
static void

View File

@ -1533,11 +1533,11 @@ void
dnode_rele(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
dnode_rele_and_unlock(dn, tag);
}
void
dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
dnode_rele_and_unlock(dnode_t *dn, void *tag)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
@ -1568,8 +1568,7 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, dnh, evicting);
dbuf_rele(db, dnh);
}
}

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
@ -429,19 +429,6 @@ dnode_evict_dbufs(dnode_t *dn)
avl_insert_here(&dn->dn_dbufs, db_marker, db,
AVL_BEFORE);
/*
* We need to use the "marker" dbuf rather than
* simply getting the next dbuf, because
* dbuf_destroy() may actually remove multiple dbufs.
* It can call itself recursively on the parent dbuf,
* which may also be removed from dn_dbufs. The code
* flow would look like:
*
* dbuf_destroy():
* dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
* if (!cacheable || pending_evict)
* dbuf_destroy()
*/
dbuf_destroy(db);
db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
@ -502,7 +489,7 @@ dnode_undirty_dbufs(list_t *list)
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}
}

View File

@ -35,7 +35,6 @@
#include <sys/zio.h>
#include <sys/sunldi.h>
#include <linux/mod_compat.h>
#include <linux/vfs_compat.h>
char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;
@ -77,7 +76,7 @@ vdev_bdev_mode(int smode)
ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
if ((smode & FREAD) && !(smode & FWRITE))
mode = SB_RDONLY;
mode = MS_RDONLY;
return (mode);
}
@ -502,38 +501,13 @@ vdev_submit_bio_impl(struct bio *bio)
#endif
}
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
* The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
* GPL-only bio_associate_blkg() symbol thus inadvertently converting
* the entire macro. Provide a minimal version which always assigns the
* request queue's root_blkg to the bio.
*/
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
ASSERT3P(q, !=, NULL);
ASSERT3P(q->root_blkg, !=, NULL);
ASSERT3P(bio->bi_blkg, ==, NULL);
if (blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
}
#define bio_associate_blkg vdev_bio_associate_blkg
#endif
#else
/*
* Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
*/
#ifndef HAVE_BIO_SET_DEV
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
#endif /* !HAVE_BIO_SET_DEV */
static inline void
vdev_submit_bio(struct bio *bio)

View File

@ -6634,14 +6634,11 @@ static const struct file_operations zfsdev_fops = {
};
static struct miscdevice zfs_misc = {
.minor = ZFS_DEVICE_MINOR,
.minor = MISC_DYNAMIC_MINOR,
.name = ZFS_DRIVER,
.fops = &zfsdev_fops,
};
MODULE_ALIAS_MISCDEV(ZFS_DEVICE_MINOR);
MODULE_ALIAS("devname:zfs");
static int
zfs_attach(void)
{
@ -6652,24 +6649,12 @@ zfs_attach(void)
zfsdev_state_list->zs_minor = -1;
error = misc_register(&zfs_misc);
if (error == -EBUSY) {
/*
* Fallback to dynamic minor allocation in the event of a
* collision with a reserved minor in linux/miscdevice.h.
* In this case the kernel modules must be manually loaded.
*/
printk(KERN_INFO "ZFS: misc_register() with static minor %d "
"failed %d, retrying with MISC_DYNAMIC_MINOR\n",
ZFS_DEVICE_MINOR, error);
zfs_misc.minor = MISC_DYNAMIC_MINOR;
error = misc_register(&zfs_misc);
if (error != 0) {
printk(KERN_INFO "ZFS: misc_register() failed %d\n", error);
return (error);
}
if (error)
printk(KERN_INFO "ZFS: misc_register() failed %d\n", error);
return (error);
return (0);
}
static void

View File

@ -66,7 +66,6 @@
#include <sys/dmu_objset.h>
#include <sys/spa_boot.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
enum {
@ -260,7 +259,7 @@ zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
return (!!(zfsvfs->z_sb->s_flags & MS_RDONLY));
}
/*ARGSUSED*/
@ -354,15 +353,15 @@ acltype_changed_cb(void *arg, uint64_t newval)
switch (newval) {
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
break;
case ZFS_ACLTYPE_POSIXACL:
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIXACL;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
zfsvfs->z_sb->s_flags |= MS_POSIXACL;
#else
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
default:
@ -391,9 +390,9 @@ readonly_changed_cb(void *arg, uint64_t newval)
return;
if (newval)
sb->s_flags |= SB_RDONLY;
sb->s_flags |= MS_RDONLY;
else
sb->s_flags &= ~SB_RDONLY;
sb->s_flags &= ~MS_RDONLY;
}
static void
@ -421,9 +420,9 @@ nbmand_changed_cb(void *arg, uint64_t newval)
return;
if (newval == TRUE)
sb->s_flags |= SB_MANDLOCK;
sb->s_flags |= MS_MANDLOCK;
else
sb->s_flags &= ~SB_MANDLOCK;
sb->s_flags &= ~MS_MANDLOCK;
}
static void
@ -1764,8 +1763,8 @@ zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
int error;
if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
!(*flags & SB_RDONLY)) {
*flags |= SB_RDONLY;
!(*flags & MS_RDONLY)) {
*flags |= MS_RDONLY;
return (EROFS);
}

View File

@ -675,10 +675,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
xuio = (xuio_t *)uio;
else
#endif
if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFAULT));
}
uio_prefaultpages(MIN(n, max_blksz), uio);
/*
* If in append mode, set the io offset pointer to eof.
@ -823,19 +820,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
if (abuf == NULL) {
tx_bytes = uio->uio_resid;
uio->uio_fault_disable = B_TRUE;
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
if (error == EFAULT) {
dmu_tx_commit(tx);
if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
break;
}
continue;
} else if (error != 0) {
dmu_tx_commit(tx);
break;
}
tx_bytes -= uio->uio_resid;
} else {
tx_bytes = nbytes;
@ -935,12 +921,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
ASSERT(tx_bytes == nbytes);
n -= nbytes;
if (!xuio && n > 0) {
if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
error = EFAULT;
break;
}
}
if (!xuio && n > 0)
uio_prefaultpages(MIN(n, max_blksz), uio);
}
zfs_inode_update(zp);

View File

@ -195,9 +195,6 @@ chmod u+x ${RPM_BUILD_ROOT}%{kmodinstdir_prefix}/*/extra/*/*/*
rm -rf $RPM_BUILD_ROOT
%changelog
* Fri Feb 22 2019 Tony Hutter <hutter2@llnl.gov> - 0.7.13-1
- Released 0.7.13-1, detailed release notes are available at:
- https://github.com/zfsonlinux/zfs/releases/tag/zfs-0.7.13
* Thu Nov 08 2018 Tony Hutter <hutter2@llnl.gov> - 0.7.12-1
- Released 0.7.12-1, detailed release notes are available at:
- https://github.com/zfsonlinux/zfs/releases/tag/zfs-0.7.12

View File

@ -283,15 +283,6 @@ fi
%endif
exit 0
# On RHEL/CentOS 7 the static nodes aren't refreshed by default after
# installing a package. This is the default behavior for Fedora.
%posttrans
%if 0%{?rhel} == 7 || 0%{?centos} == 7
systemctl restart kmod-static-nodes
systemctl restart systemd-tmpfiles-setup-dev
udevadm trigger
%endif
%preun
%if 0%{?_systemd}
%if 0%{?systemd_preun:1}
@ -381,9 +372,6 @@ systemctl --system daemon-reload >/dev/null || true
%endif
%changelog
* Fri Feb 22 2019 Tony Hutter <hutter2@llnl.gov> - 0.7.13-1
- Released 0.7.13-1, detailed release notes are available at:
- https://github.com/zfsonlinux/zfs/releases/tag/zfs-0.7.13
* Thu Nov 08 2018 Tony Hutter <hutter2@llnl.gov> - 0.7.12-1
- Released 0.7.12-1, detailed release notes are available at:
- https://github.com/zfsonlinux/zfs/releases/tag/zfs-0.7.12

View File

@ -50,10 +50,10 @@ function new_change_commit()
{
error=0
# subject is not longer than 72 characters
long_subject=$(git log -n 1 --pretty=%s "$REF" | grep -E -m 1 '.{73}')
# subject is not longer than 50 characters
long_subject=$(git log -n 1 --pretty=%s "$REF" | grep -E -m 1 '.{51}')
if [ -n "$long_subject" ]; then
echo "error: commit subject over 72 characters"
echo "error: commit subject over 50 characters"
error=1
fi

View File

@ -65,10 +65,6 @@ PRE_BUILD="configure
then
echo --enable-debug-dmu-tx
fi
if [[ \${ZFS_DKMS_ENABLE_DEBUGINFO,,} == @(y|yes) ]]
then
echo --enable-debuginfo
fi
}
)
"

View File

@ -12,19 +12,13 @@
#
#
# Copyright (c) 2012, 2018 by Delphix. All rights reserved.
# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
# Copyright (c) 2017 Datto Inc.
#
# some python 2.7 systems don't have a configparser shim
try:
import configparser
except ImportError:
import ConfigParser as configparser
import ConfigParser
import os
import sys
import logging
from datetime import datetime
from optparse import OptionParser
from pwd import getpwnam
@ -32,6 +26,8 @@ from pwd import getpwuid
from select import select
from subprocess import PIPE
from subprocess import Popen
from sys import argv
from sys import maxint
from threading import Timer
from time import time
@ -40,10 +36,6 @@ TESTDIR = '/usr/share/zfs/'
KILL = 'kill'
TRUE = 'true'
SUDO = 'sudo'
LOG_FILE = 'LOG_FILE'
LOG_OUT = 'LOG_OUT'
LOG_ERR = 'LOG_ERR'
LOG_FILE_OBJ = None
class Result(object):
@ -87,7 +79,7 @@ class Output(object):
"""
def __init__(self, stream):
self.stream = stream
self._buf = b''
self._buf = ''
self.lines = []
def fileno(self):
@ -112,15 +104,15 @@ class Output(object):
buf = os.read(fd, 4096)
if not buf:
return None
if b'\n' not in buf:
if '\n' not in buf:
self._buf += buf
return []
buf = self._buf + buf
tmp, rest = buf.rsplit(b'\n', 1)
tmp, rest = buf.rsplit('\n', 1)
self._buf = rest
now = datetime.now()
rows = tmp.split(b'\n')
rows = tmp.split('\n')
self.lines += [(now, r) for r in rows]
@ -212,23 +204,23 @@ class Cmd(object):
if needed. Run the command, and update the result object.
"""
if options.dryrun is True:
print(self)
print self
return
privcmd = self.update_cmd_privs(self.pathname, self.user)
try:
old = os.umask(0)
if not os.path.isdir(self.outputdir):
os.makedirs(self.outputdir, mode=0o777)
os.makedirs(self.outputdir, mode=0777)
os.umask(old)
except OSError as e:
except OSError, e:
fail('%s' % e)
self.result.starttime = time()
proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
# Allow a special timeout value of 0 to mean infinity
if int(self.timeout) == 0:
self.timeout = sys.maxsize
self.timeout = maxint
t = Timer(int(self.timeout), self.kill_cmd, [proc])
try:
@ -255,52 +247,50 @@ class Cmd(object):
self.result.runtime = '%02d:%02d' % (m, s)
self.result.result = 'SKIP'
def log(self, options):
def log(self, logger, options):
"""
This function is responsible for writing all output. This includes
the console output, the logfile of all results (with timestamped
merged stdout and stderr), and for each test, the unmodified
stdout/stderr/merged in it's own file.
"""
if logger is None:
return
logname = getpwuid(os.getuid()).pw_name
user = ' (run as %s)' % (self.user if len(self.user) else logname)
msga = 'Test: %s%s ' % (self.pathname, user)
msgb = '[%s] [%s]\n' % (self.result.runtime, self.result.result)
msgb = '[%s] [%s]' % (self.result.runtime, self.result.result)
pad = ' ' * (80 - (len(msga) + len(msgb)))
result_line = msga + pad + msgb
# The result line is always written to the log file. If -q was
# specified only failures are written to the console, otherwise
# the result line is written to the console.
write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE)
# If -q is specified, only print a line for tests that didn't pass.
# This means passing tests need to be logged as DEBUG, or the one
# line summary will only be printed in the logfile for failures.
if not options.quiet:
write_log(result_line, LOG_OUT)
elif options.quiet and self.result.result is not 'PASS':
write_log(result_line, LOG_OUT)
logger.info('%s%s%s' % (msga, pad, msgb))
elif self.result.result is not 'PASS':
logger.info('%s%s%s' % (msga, pad, msgb))
else:
logger.debug('%s%s%s' % (msga, pad, msgb))
lines = sorted(self.result.stdout + self.result.stderr,
key=lambda x: x[0])
cmp=lambda x, y: cmp(x[0], y[0]))
# Write timestamped output (stdout and stderr) to the logfile
for dt, line in lines:
timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11],
encoding='utf-8')
write_log(b'%s %s\n' % (timestamp, line), LOG_FILE)
logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line))
# Write the separate stdout/stderr/merged files, if the data exists
if len(self.result.stdout):
with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out:
with open(os.path.join(self.outputdir, 'stdout'), 'w') as out:
for _, line in self.result.stdout:
os.write(out.fileno(), b'%s\n' % line)
os.write(out.fileno(), '%s\n' % line)
if len(self.result.stderr):
with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err:
with open(os.path.join(self.outputdir, 'stderr'), 'w') as err:
for _, line in self.result.stderr:
os.write(err.fileno(), b'%s\n' % line)
os.write(err.fileno(), '%s\n' % line)
if len(self.result.stdout) and len(self.result.stderr):
with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged:
with open(os.path.join(self.outputdir, 'merged'), 'w') as merged:
for _, line in lines:
os.write(merged.fileno(), b'%s\n' % line)
os.write(merged.fileno(), '%s\n' % line)
class Test(Cmd):
@ -328,7 +318,7 @@ class Test(Cmd):
(self.pathname, self.outputdir, self.timeout, self.pre,
pre_user, self.post, post_user, self.user, self.tags)
def verify(self):
def verify(self, logger):
"""
Check the pre/post scripts, user and Test. Omit the Test from this
run if there are any problems.
@ -338,19 +328,19 @@ class Test(Cmd):
for f in [f for f in files if len(f)]:
if not verify_file(f):
write_log("Warning: Test '%s' not added to this run because"
" it failed verification.\n" % f, LOG_ERR)
logger.info("Warning: Test '%s' not added to this run because"
" it failed verification." % f)
return False
for user in [user for user in users if len(user)]:
if not verify_user(user):
write_log("Not adding Test '%s' to this run.\n" %
self.pathname, LOG_ERR)
if not verify_user(user, logger):
logger.info("Not adding Test '%s' to this run." %
self.pathname)
return False
return True
def run(self, options):
def run(self, logger, options):
"""
Create Cmd instances for the pre/post scripts. If the pre script
doesn't pass, skip this Test. Run the post script regardless.
@ -368,18 +358,18 @@ class Test(Cmd):
if len(pretest.pathname):
pretest.run(options)
cont = pretest.result.result is 'PASS'
pretest.log(options)
pretest.log(logger, options)
if cont:
test.run(options)
else:
test.skip()
test.log(options)
test.log(logger, options)
if len(posttest.pathname):
posttest.run(options)
posttest.log(options)
posttest.log(logger, options)
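The control flow described in run()'s docstring is small enough to show on its own: the pre script gates the test, and the post script always runs. A schematic sketch with placeholder callables standing in for the runner's Cmd objects (none of these names exist in test-runner.py):

def run_with_pre_post(pre, test, skip, post):
    # pre/test/post stand in for Cmd.run(); skip stands in for Cmd.skip().
    cont = pre() if pre is not None else True
    if cont:
        test()
    else:
        skip()
    if post is not None:
        post()          # runs even when the test itself was skipped

run_with_pre_post(pre=lambda: False,
                  test=lambda: print('test ran'),
                  skip=lambda: print('test skipped'),
                  post=lambda: print('post ran'))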
class TestGroup(Test):
@@ -403,7 +393,7 @@ class TestGroup(Test):
(self.pathname, self.outputdir, self.tests, self.timeout,
self.pre, pre_user, self.post, post_user, self.user, self.tags)
def verify(self):
def verify(self, logger):
"""
Check the pre/post scripts, user and tests in this TestGroup. Omit
the TestGroup entirely, or simply delete the relevant tests in the
@@ -421,34 +411,34 @@ class TestGroup(Test):
for f in [f for f in auxfiles if len(f)]:
if self.pathname != os.path.dirname(f):
write_log("Warning: TestGroup '%s' not added to this run. "
"Auxiliary script '%s' exists in a different "
"directory.\n" % (self.pathname, f), LOG_ERR)
logger.info("Warning: TestGroup '%s' not added to this run. "
"Auxiliary script '%s' exists in a different "
"directory." % (self.pathname, f))
return False
if not verify_file(f):
write_log("Warning: TestGroup '%s' not added to this run. "
"Auxiliary script '%s' failed verification.\n" %
(self.pathname, f), LOG_ERR)
logger.info("Warning: TestGroup '%s' not added to this run. "
"Auxiliary script '%s' failed verification." %
(self.pathname, f))
return False
for user in [user for user in users if len(user)]:
if not verify_user(user):
write_log("Not adding TestGroup '%s' to this run.\n" %
self.pathname, LOG_ERR)
if not verify_user(user, logger):
logger.info("Not adding TestGroup '%s' to this run." %
self.pathname)
return False
# If one of the tests is invalid, delete it, log it, and drive on.
for test in self.tests:
if not verify_file(os.path.join(self.pathname, test)):
del self.tests[self.tests.index(test)]
write_log("Warning: Test '%s' removed from TestGroup '%s' "
"because it failed verification.\n" %
(test, self.pathname), LOG_ERR)
logger.info("Warning: Test '%s' removed from TestGroup '%s' "
"because it failed verification." %
(test, self.pathname))
return len(self.tests) is not 0
def run(self, options):
def run(self, logger, options):
"""
Create Cmd instances for the pre/post scripts. If the pre script
doesn't pass, skip all the tests in this TestGroup. Run the post
@@ -469,7 +459,7 @@ class TestGroup(Test):
if len(pretest.pathname):
pretest.run(options)
cont = pretest.result.result is 'PASS'
pretest.log(options)
pretest.log(logger, options)
for fname in self.tests:
test = Cmd(os.path.join(self.pathname, fname),
@@ -480,11 +470,11 @@ class TestGroup(Test):
else:
test.skip()
test.log(options)
test.log(logger, options)
if len(posttest.pathname):
posttest.run(options)
posttest.log(options)
posttest.log(logger, options)
class TestRun(object):
@@ -496,7 +486,7 @@ class TestRun(object):
self.starttime = time()
self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
self.outputdir = os.path.join(options.outputdir, self.timestamp)
self.setup_logging(options)
self.logger = self.setup_logging(options)
self.defaults = [
('outputdir', BASEDIR),
('quiet', False),
@@ -529,7 +519,7 @@ class TestRun(object):
for prop in Test.props:
setattr(test, prop, getattr(options, prop))
if test.verify():
if test.verify(self.logger):
self.tests[pathname] = test
def addtestgroup(self, dirname, filenames, options):
@@ -551,9 +541,9 @@ class TestRun(object):
self.testgroups[dirname] = testgroup
self.testgroups[dirname].tests = sorted(filenames)
testgroup.verify()
testgroup.verify(self.logger)
def read(self, options):
def read(self, logger, options):
"""
Read in the specified runfile, and apply the TestRun properties
listed in the 'DEFAULT' section to our TestRun. Then read each
@@ -562,7 +552,7 @@ class TestRun(object):
in the 'DEFAULT' section. If the Test or TestGroup passes
verification, add it to the TestRun.
"""
config = configparser.RawConfigParser()
config = ConfigParser.RawConfigParser()
if not len(config.read(options.runfile)):
fail("Coulnd't read config file %s" % options.runfile)
@@ -594,7 +584,7 @@ class TestRun(object):
# Repopulate tests using eval to convert the string to a list
testgroup.tests = eval(config.get(section, 'tests'))
if testgroup.verify():
if testgroup.verify(logger):
self.testgroups[section] = testgroup
else:
test = Test(section)
@@ -603,7 +593,7 @@ class TestRun(object):
if config.has_option(sect, prop):
setattr(test, prop, config.get(sect, prop))
if test.verify():
if test.verify(logger):
self.tests[section] = test
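The runfile that read() parses is plain INI: properties in [DEFAULT] apply to every Test and TestGroup section unless the section overrides them. A small illustration with an invented runfile; Python 3's configparser is used for the sketch, while the 0.7.12 code reads a file path through the Python 2 ConfigParser module in the same way:

import configparser

RUNFILE_TEXT = """
[DEFAULT]
quiet = False
timeout = 60

[/opt/tests/functional/sample_test]
timeout = 120
"""

config = configparser.RawConfigParser()
config.read_string(RUNFILE_TEXT)          # read() passes a file path instead
section = '/opt/tests/functional/sample_test'
print(config.get(section, 'timeout'))     # 120 -- per-section override
print(config.get(section, 'quiet'))       # False -- inherited from [DEFAULT]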
def write(self, options):
@@ -618,7 +608,7 @@ class TestRun(object):
defaults = dict([(prop, getattr(options, prop)) for prop, _ in
self.defaults])
config = configparser.RawConfigParser(defaults)
config = ConfigParser.RawConfigParser(defaults)
for test in sorted(self.tests.keys()):
config.add_section(test)
@@ -647,15 +637,14 @@ class TestRun(object):
"""
done = False
components = 0
tmp_dict = dict(list(self.tests.items()) +
list(self.testgroups.items()))
tmp_dict = dict(self.tests.items() + self.testgroups.items())
total = len(tmp_dict)
base = self.outputdir
while not done:
paths = []
components -= 1
for testfile in list(tmp_dict.keys()):
for testfile in tmp_dict.keys():
uniq = '/'.join(testfile.split('/')[components:]).lstrip('/')
if uniq not in paths:
paths.append(uniq)
@@ -666,23 +655,42 @@ class TestRun(object):
def setup_logging(self, options):
"""
This function creates the output directory and gets a file object
for the logfile. This function must be called before write_log()
can be used.
Two loggers are set up here. The first is for the logfile which
will contain one line summarizing the test, including the test
name, result, and running time. This logger will also capture the
timestamped combined stdout and stderr of each run. The second
logger is optional console output, which will contain only the one
line summary. The loggers are initialized at two different levels
to facilitate segregating the output.
"""
if options.dryrun is True:
return
global LOG_FILE_OBJ
testlogger = logging.getLogger(__name__)
testlogger.setLevel(logging.DEBUG)
if options.cmd is not 'wrconfig':
try:
old = os.umask(0)
os.makedirs(self.outputdir, mode=0o777)
os.makedirs(self.outputdir, mode=0777)
os.umask(old)
filename = os.path.join(self.outputdir, 'log')
LOG_FILE_OBJ = open(filename, buffering=0, mode='wb')
except OSError as e:
except OSError, e:
fail('%s' % e)
filename = os.path.join(self.outputdir, 'log')
logfile = logging.FileHandler(filename)
logfile.setLevel(logging.DEBUG)
logfilefmt = logging.Formatter('%(message)s')
logfile.setFormatter(logfilefmt)
testlogger.addHandler(logfile)
cons = logging.StreamHandler()
cons.setLevel(logging.INFO)
consfmt = logging.Formatter('%(message)s')
cons.setFormatter(consfmt)
testlogger.addHandler(cons)
return testlogger
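The handler levels are what actually segregate the output described in the docstring: INFO records (the one-line summaries) reach both the console and the log file, while DEBUG records (per-test timestamped output, and passing tests under -q) land only in the file. A standalone sketch of the same pattern, using an invented logger name and a /tmp path rather than the runner's real output directory:

import logging

logger = logging.getLogger('example')                 # invented name for the sketch
logger.setLevel(logging.DEBUG)

logfile = logging.FileHandler('/tmp/example.log')     # assumed path, not the runner's
logfile.setLevel(logging.DEBUG)
logfile.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(logfile)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(console)

logger.info('appears on the console and in the file')
logger.debug('appears in the file only')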
def run(self, options):
"""
@@ -699,31 +707,31 @@ class TestRun(object):
if not os.path.exists(logsymlink):
os.symlink(self.outputdir, logsymlink)
else:
write_log('Could not make a symlink to directory %s\n' %
self.outputdir, LOG_ERR)
print 'Could not make a symlink to directory %s' % (
self.outputdir)
iteration = 0
while iteration < options.iterations:
for test in sorted(self.tests.keys()):
self.tests[test].run(options)
self.tests[test].run(self.logger, options)
for testgroup in sorted(self.testgroups.keys()):
self.testgroups[testgroup].run(options)
self.testgroups[testgroup].run(self.logger, options)
iteration += 1
def summary(self):
if Result.total is 0:
return 2
print('\nResults Summary')
for key in list(Result.runresults.keys()):
print '\nResults Summary'
for key in Result.runresults.keys():
if Result.runresults[key] is not 0:
print('%s\t% 4d' % (key, Result.runresults[key]))
print '%s\t% 4d' % (key, Result.runresults[key])
m, s = divmod(time() - self.starttime, 60)
h, m = divmod(m, 60)
print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s))
print('Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
float(Result.total)) * 100))
print('Log directory:\t%s' % self.outputdir)
print '\nRunning Time:\t%02d:%02d:%02d' % (h, m, s)
print 'Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
float(Result.total)) * 100)
print 'Log directory:\t%s' % self.outputdir
if Result.runresults['FAIL'] > 0:
return 1
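The running-time line in summary() is just two divmod passes over the elapsed seconds; a quick worked example with an assumed 3723-second run:

elapsed = 3723.0                  # assumed elapsed time in seconds
m, s = divmod(elapsed, 60)        # 62 minutes, 3 seconds
h, m = divmod(m, 60)              # 1 hour, 2 minutes
print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s))   # 01:02:03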
@@ -734,23 +742,6 @@ class TestRun(object):
return 0
def write_log(msg, target):
"""
Write the provided message to standard out, standard error or
the logfile. If specifying LOG_FILE, then `msg` must be a bytes-like
object. This way we can still handle output from tests that
may be in unexpected encodings.
"""
if target == LOG_OUT:
os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8'))
elif target == LOG_ERR:
os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8'))
elif target == LOG_FILE:
os.write(LOG_FILE_OBJ.fileno(), msg)
else:
fail('log_msg called with unknown target "%s"' % target)
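The write_log() helper removed above distinguishes its targets by type: console targets take str and are encoded on the way out, while the log-file target expects a bytes-like object so that test output in any encoding passes through untouched. A self-contained sketch of that split; the constants and the in-memory "file" below are stand-ins, not the runner's globals:

import io
import sys

LOG_OUT, LOG_ERR, LOG_FILE = 'out', 'err', 'file'   # stand-in target constants
log_file_obj = io.BytesIO()                         # stand-in for the real log file

def demo_write_log(msg, target):
    if target == LOG_OUT:
        sys.stdout.write(msg)          # str: the stream encodes it
    elif target == LOG_ERR:
        sys.stderr.write(msg)
    elif target == LOG_FILE:
        log_file_obj.write(msg)        # bytes: written verbatim, any encoding
    else:
        raise ValueError('unknown target %r' % target)

demo_write_log('result line for the console\n', LOG_OUT)
demo_write_log(b'raw test output \xc3\xa9 in any encoding\n', LOG_FILE)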
def verify_file(pathname):
"""
Verify that the supplied pathname is an executable regular file.
@@ -766,7 +757,7 @@ def verify_file(pathname):
return False
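The verify_file() contract is exactly what its docstring says: a path qualifies only if it is a regular file with the execute bit set for the caller. A minimal equivalent check, written independently of the runner's helper:

import os

def is_executable_regular_file(pathname):
    # Regular file (not a directory, socket or device) that we may execute.
    return os.path.isfile(pathname) and os.access(pathname, os.X_OK)

print(is_executable_regular_file('/bin/sh'))        # normally True
print(is_executable_regular_file('/etc/hostname'))  # normally False: not executable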
def verify_user(user):
def verify_user(user, logger):
"""
Verify that the specified user exists on this system, and can execute
sudo without being prompted for a password.
@@ -779,15 +770,13 @@ def verify_user(user):
try:
getpwnam(user)
except KeyError:
write_log("Warning: user '%s' does not exist.\n" % user,
LOG_ERR)
logger.info("Warning: user '%s' does not exist.", user)
return False
p = Popen(testcmd)
p.wait()
if p.returncode is not 0:
write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user,
LOG_ERR)
logger.info("Warning: user '%s' cannot use passwordless sudo.", user)
return False
else:
Cmd.verified_users.append(user)
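Both checks in verify_user() are cheap to reproduce on their own: a passwd lookup, then a sudo invocation that is forbidden from prompting. The sketch below is independent of the runner; in particular, 'sudo -n -u <user> true' is an assumed probe command, not necessarily the exact testcmd that verify_user() builds:

from pwd import getpwnam
from subprocess import Popen

def can_run_as(user):
    try:
        getpwnam(user)                 # does the account exist at all?
    except KeyError:
        return False
    # -n: never prompt for a password; non-zero exit means no passwordless sudo.
    p = Popen(['sudo', '-n', '-u', user, 'true'])
    p.wait()
    return p.returncode == 0

print(can_run_as('nobody'))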
@@ -815,7 +804,7 @@ def find_tests(testrun, options):
def fail(retstr, ret=1):
print('%s: %s' % (sys.argv[0], retstr))
print '%s: %s' % (argv[0], retstr)
exit(ret)
@@ -905,7 +894,7 @@ def main():
if options.cmd is 'runtests':
find_tests(testrun, options)
elif options.cmd is 'rdconfig':
testrun.read(options)
testrun.read(testrun.logger, options)
elif options.cmd is 'wrconfig':
find_tests(testrun, options)
testrun.write(options)

View File

@@ -31,132 +31,74 @@
#include <string.h>
#include <sys/mman.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
/*
* --------------------------------------------------------------------
* Bug Issue Id: #7512
* The bug time sequence:
* 1. In context #1, zfs_write assigns a txg "n".
* 2. In the same process, context #2 takes an mmap page fault (meaning the
* mm_sem is held); zfs_dirty_inode fails to open a txg and waits for the
* previous txg "n" to complete.
* 3. Context #1 then calls uiomove to write, but uiomove page faults and
* needs the mm_sem, which is held by context #2, so it is stuck and cannot
* complete, and txg "n" therefore never completes.
* Bug Id: 5032643
*
* So context #1 and context #2 are caught in a deadlock.
* Simply writing to a file and mmaping that file at the same time can
* result in deadlock. Nothing perverse like writing from the file's
* own mapping is required.
* --------------------------------------------------------------------
*/
#define NORMAL_WRITE_TH_NUM 2
static void *
normal_writer(void *filename)
mapper(void *fdp)
{
char *file_path = filename;
int fd = -1;
ssize_t write_num = 0;
int page_size = getpagesize();
void *addr;
int fd = *(int *)fdp;
fd = open(file_path, O_RDWR | O_CREAT, 0777);
if (fd == -1) {
err(1, "failed to open %s", file_path);
if ((addr =
mmap(0, 8192, PROT_READ, MAP_SHARED, fd, 0)) == MAP_FAILED) {
perror("mmap");
exit(1);
}
char *buf = malloc(1);
while (1) {
write_num = write(fd, buf, 1);
if (write_num == 0) {
err(1, "write failed!");
break;
}
lseek(fd, page_size, SEEK_CUR);
}
if (buf) {
free(buf);
}
}
static void *
map_writer(void *filename)
{
int fd = -1;
int ret = 0;
char *buf = NULL;
int page_size = getpagesize();
int op_errno = 0;
char *file_path = filename;
while (1) {
ret = access(file_path, F_OK);
if (ret) {
op_errno = errno;
if (op_errno == ENOENT) {
fd = open(file_path, O_RDWR | O_CREAT, 0777);
if (fd == -1) {
err(1, "open file failed");
}
ret = ftruncate(fd, page_size);
if (ret == -1) {
err(1, "truncate file failed");
}
} else {
err(1, "access file failed!");
}
} else {
fd = open(file_path, O_RDWR, 0777);
if (fd == -1) {
err(1, "open file failed");
}
}
if ((buf = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0)) == MAP_FAILED) {
err(1, "map file failed");
}
if (fd != -1)
close(fd);
char s[10] = {0, };
memcpy(buf, s, 10);
ret = munmap(buf, page_size);
if (ret != 0) {
err(1, "unmap file failed");
for (;;) {
if (mmap(addr, 8192, PROT_READ,
MAP_SHARED|MAP_FIXED, fd, 0) == MAP_FAILED) {
perror("mmap");
exit(1);
}
}
/* NOTREACHED */
return ((void *)1);
}
int
main(int argc, char **argv)
{
pthread_t map_write_tid;
pthread_t normal_write_tid[NORMAL_WRITE_TH_NUM];
int i = 0;
int fd;
char buf[1024];
pthread_t tid;
if (argc != 3) {
(void) printf("usage: %s <normal write file name>"
"<map write file name>\n", argv[0]);
memset(buf, 'a', sizeof (buf));
if (argc != 2) {
(void) printf("usage: %s <file name>\n", argv[0]);
exit(1);
}
for (i = 0; i < NORMAL_WRITE_TH_NUM; i++) {
if (pthread_create(&normal_write_tid[i], NULL, normal_writer,
argv[1])) {
err(1, "pthread_create normal_writer failed.");
if ((fd = open(argv[1], O_RDWR|O_CREAT|O_TRUNC, 0666)) == -1) {
perror("open");
exit(1);
}
(void) pthread_setconcurrency(2);
if (pthread_create(&tid, NULL, mapper, &fd) != 0) {
perror("pthread_create");
close(fd);
exit(1);
}
for (;;) {
if (write(fd, buf, sizeof (buf)) == -1) {
perror("write");
close(fd);
exit(1);
}
}
if (pthread_create(&map_write_tid, NULL, map_writer, argv[2])) {
err(1, "pthread_create map_writer failed.");
}
close(fd);
/* NOTREACHED */
pthread_join(map_write_tid, NULL);
return (0);
}

View File

@@ -53,14 +53,12 @@ if ! is_mp; then
fi
log_must chmod 777 $TESTDIR
mmapwrite $TESTDIR/normal_write_file $TESTDIR/map_write_file &
mmapwrite $TESTDIR/test-write-file &
PID_MMAPWRITE=$!
log_note "mmapwrite $TESTDIR/normal_write_file $TESTDIR/map_write_file"\
"pid: $PID_MMAPWRITE"
log_note "mmapwrite $TESTDIR/test-write-file pid: $PID_MMAPWRITE"
log_must sleep 30
log_must kill -9 $PID_MMAPWRITE
log_must ls -l $TESTDIR/normal_write_file
log_must ls -l $TESTDIR/map_write_file
log_must ls -l $TESTDIR/test-write-file
log_pass "write(2) a mmap(2)'ing file succeeded."

View File

@@ -8,7 +8,3 @@ ENV{DEVTYPE}=="partition", IMPORT{program}="@udevdir@/vdev_id -d %k"
KERNEL=="*[!0-9]", ENV{SUBSYSTEM}=="block", ENV{ID_VDEV}=="?*", SYMLINK+="$env{ID_VDEV_PATH}"
KERNEL=="*[0-9]", ENV{SUBSYSTEM}=="block", ENV{DEVTYPE}=="partition", ENV{ID_VDEV}=="?*", SYMLINK+="$env{ID_VDEV_PATH}-part%n"
KERNEL=="dm-[0-9]*", ENV{SUBSYSTEM}=="block", ENV{ID_VDEV}=="?*", SYMLINK+="$env{ID_VDEV_PATH}"
# Enclosure device symlink rules
ENV{SUBSYSTEM}=="scsi_generic", IMPORT{program}="@udevdir@/vdev_id -e"
ENV{SUBSYSTEM}=="scsi_generic", ENV{ID_ENCLOSURE}=="?*", SYMLINK+="$env{ID_ENCLOSURE_PATH}"

View File

@@ -7,6 +7,6 @@ ENV{ID_FS_TYPE}=="zfs_member", RUN+="/sbin/modprobe zfs"
KERNEL=="null", SYMLINK+="root"
SYMLINK=="null", SYMLINK+="root"
KERNEL=="zfs", MODE="0666", OPTIONS+="static_node=zfs"
SUBSYSTEM=="misc", KERNEL=="zfs", MODE="0666"
LABEL="zfs_end"