Compare commits


No commits in common. "zfs-0.7-release" and "zfs-0.7.3" have entirely different histories.

452 changed files with 2960 additions and 9677 deletions


@ -161,7 +161,7 @@ coding convention.
### Commit Message Formats
#### New Changes
Commit messages for new changes must meet the following guidelines:
* In 72 characters or less, provide a summary of the change as the
* In 50 characters or less, provide a summary of the change as the
first line in the commit message.
* A body which provides a description of the change. If necessary,
please summarize important information such as why the proposed
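For illustration only, a hypothetical commit message in that shape (a summary line within the stated limit, a blank line, then a body explaining the what and why):

    Add a brief one-line summary of the change

    Explain in the body what the change does and, more importantly,
    why it is needed, along with any other context reviewers will want.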

.github/codecov.yml

@ -1,30 +0,0 @@
codecov:
notify:
require_ci_to_pass: no
coverage:
precision: 2
round: down
range: "50...100"
status:
project:
default:
threshold: 1%
patch:
default:
threshold: 1%
parsers:
gcov:
branch_detection:
conditional: yes
loop: yes
method: no
macro: no
comment:
layout: "header, sunburst, diff"
behavior: default
require_changes: no


@ -1,3 +1,4 @@
nullPointer:./module/zfs/zfs_vnops.c:839
preprocessorErrorDirective:./module/zfs/vdev_raidz_math_avx512f.c:243
preprocessorErrorDirective:./module/zfs/vdev_raidz_math_sse2.c:266

.gitignore

@ -19,8 +19,6 @@
*.mod.c
*~
*.swp
*.gcno
*.gcda
.deps
.libs
.dirstamp

META

@ -1,7 +1,7 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 0.7.13
Version: 0.7.3
Release: 1
Release-Tags: relext
License: CDDL


@ -23,18 +23,16 @@ EXTRA_DIST = autogen.sh copy-builtin
EXTRA_DIST += config/config.awk config/rpm.am config/deb.am config/tgz.am
EXTRA_DIST += META DISCLAIMER COPYRIGHT README.markdown OPENSOLARIS.LICENSE
@CODE_COVERAGE_RULES@
distclean-local::
-$(RM) -R autom4te*.cache
-find . \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \
-o -name .pc -o -name .hg -o -name .git \) -prune -o \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-o -name '.*.rej' -o -size 0 -o -name '*%' -o -name '.*.cmd' \
-o -name 'core' -o -name 'Makefile' -o -name 'Module.symvers' \
-o -name '*.order' -o -name '*.markers' -o -name '*.gcda' \
-o -name '*.gcno' \) \
-o -name '.*.rej' -o -name '.script-config' -o -size 0 \
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \
-o -name 'Makefile' -o -name 'Module.symvers' \
-o -name '*.order' -o -name '*.markers' \) \
-type f -print | xargs $(RM)
dist-hook:
@ -67,10 +65,10 @@ lint: cppcheck paxcheck
cppcheck:
@if type cppcheck > /dev/null 2>&1; then \
cppcheck --quiet --force --error-exitcode=2 --inline-suppr \
cppcheck --quiet --force --error-exitcode=2 \
--suppressions-list=.github/suppressions.txt \
-UHAVE_SSE2 -UHAVE_AVX512F -UHAVE_UIO_ZEROCOPY \
-UHAVE_DNLC ${top_srcdir}; \
-UHAVE_SSE2 -UHAVE_AVX512F \
${top_srcdir}; \
fi
paxcheck:


@ -1,9 +1,9 @@
![img](http://zfsonlinux.org/images/zfs-linux.png)
<p align="center"><img src="http://zfsonlinux.org/images/zfs-linux.png"/></p>
ZFS is an advanced file system and volume manager which was originally
developed for Solaris and is now maintained by the Illumos community.
ZFS on Linux is an advanced file system and volume manager which was originally
developed for Solaris and is now maintained by the OpenZFS community.
[![codecov](https://codecov.io/gh/zfsonlinux/zfs/branch/master/graph/badge.svg)](https://codecov.io/gh/zfsonlinux/zfs)
ZFS on Linux, which is also known as ZoL, is currently feature complete. It
includes fully functional and stable SPA, DMU, ZVOL, and ZPL layers. And it's native!
# Official Resources
* [Site](http://zfsonlinux.org)
@ -16,4 +16,4 @@ Full documentation for installing ZoL on your favorite Linux distribution can
be found at [our site](http://zfsonlinux.org/).
# Contribute & Develop
We have a separate document with [contribution guidelines](./.github/CONTRIBUTING.md).
We have a separate document with [contribution guidelines](./.github/CONTRIBUTING.md).


@ -32,56 +32,38 @@
# If you are having troubles when using this script from cron(8) please try
# adjusting your PATH before reporting problems.
#
# Note some of this code uses older code (eg getopt instead of argparse,
# subprocess.Popen() instead of subprocess.run()) because we need to support
# some very old versions of Python.
"""Print statistics on the ZFS Adjustable Replacement Cache (ARC)
# /usr/bin & /sbin
#
# Binaries used are:
#
# dc(1), kldstat(8), sed(1), sysctl(8) & vmstat(8)
#
# Binaries that I am working on phasing out are:
#
# dc(1) & sed(1)
Provides basic information on the ARC, its efficiency, the L2ARC (if present),
the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See the
in-source documentation and code at
https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details.
"""
import getopt
import os
import sys
import time
import errno
import getopt
import re
from os import listdir
from subprocess import Popen, PIPE
from decimal import Decimal as D
usetunable = True
show_tunable_descriptions = False
alternate_tunable_layout = False
def handle_Exception(ex_cls, ex, tb):
if ex is IOError:
if ex.errno == errno.EPIPE:
sys.exit()
if ex is KeyboardInterrupt:
sys.exit()
sys.excepthook = handle_Exception
kstat_pobj = re.compile("^([^:]+):\s+(.+)\s*$", flags=re.M)
def get_Kstat():
"""Collect information on the ZFS subsystem from the /proc virtual
file system. The name "kstat" is a holdover from the Solaris utility
of the same name.
"""
def load_proc_kstats(fn, namespace):
"""Collect information on a specific subsystem of the ARC"""
kstats = [line.strip() for line in open(fn)]
del kstats[0:2]
for kstat in kstats:
kstat = kstat.strip()
name, _, value = kstat.split()
name, unused, value = kstat.split()
Kstat[namespace + name] = D(value)
Kstat = {}
@ -95,79 +77,82 @@ def get_Kstat():
return Kstat
def fBytes(b=0):
"""Return human-readable representation of a byte value in
powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal
points. Values smaller than one KiB are returned without
decimal points.
"""
def div1():
sys.stdout.write("\n")
for i in range(18):
sys.stdout.write("%s" % "----")
sys.stdout.write("\n")
prefixes = [
[2**80, "YiB"], # yobibytes (yotta)
[2**70, "ZiB"], # zebibytes (zetta)
[2**60, "EiB"], # exbibytes (exa)
[2**50, "PiB"], # pebibytes (peta)
[2**40, "TiB"], # tebibytes (tera)
[2**30, "GiB"], # gibibytes (giga)
[2**20, "MiB"], # mebibytes (mega)
[2**10, "KiB"]] # kibibytes (kilo)
if b >= 2**10:
def div2():
sys.stdout.write("\n")
for limit, unit in prefixes:
if b >= limit:
value = b / limit
break
result = "%0.2f\t%s" % (value, unit)
def fBytes(Bytes=0, Decimal=2):
kbytes = (2 ** 10)
mbytes = (2 ** 20)
gbytes = (2 ** 30)
tbytes = (2 ** 40)
pbytes = (2 ** 50)
ebytes = (2 ** 60)
zbytes = (2 ** 70)
ybytes = (2 ** 80)
if Bytes >= ybytes:
return str("%0." + str(Decimal) + "f") % (Bytes / ybytes) + "\tYiB"
elif Bytes >= zbytes:
return str("%0." + str(Decimal) + "f") % (Bytes / zbytes) + "\tZiB"
elif Bytes >= ebytes:
return str("%0." + str(Decimal) + "f") % (Bytes / ebytes) + "\tEiB"
elif Bytes >= pbytes:
return str("%0." + str(Decimal) + "f") % (Bytes / pbytes) + "\tPiB"
elif Bytes >= tbytes:
return str("%0." + str(Decimal) + "f") % (Bytes / tbytes) + "\tTiB"
elif Bytes >= gbytes:
return str("%0." + str(Decimal) + "f") % (Bytes / gbytes) + "\tGiB"
elif Bytes >= mbytes:
return str("%0." + str(Decimal) + "f") % (Bytes / mbytes) + "\tMiB"
elif Bytes >= kbytes:
return str("%0." + str(Decimal) + "f") % (Bytes / kbytes) + "\tKiB"
elif Bytes == 0:
return str("%d" % 0) + "\tBytes"
else:
result = "%d\tBytes" % b
return result
return str("%d" % Bytes) + "\tBytes"
def fHits(hits=0):
"""Create a human-readable representation of the number of hits.
The single-letter symbols used are SI to avoid the confusion caused
by the different "short scale" and "long scale" representations in
English, which use the same words for different values. See
https://en.wikipedia.org/wiki/Names_of_large_numbers and
https://physics.nist.gov/cuu/Units/prefixes.html
"""
numbers = [
[10**24, 'Y'], # yotta (septillion)
[10**21, 'Z'], # zetta (sextillion)
[10**18, 'E'], # exa (quintrillion)
[10**15, 'P'], # peta (quadrillion)
[10**12, 'T'], # tera (trillion)
[10**9, 'G'], # giga (billion)
[10**6, 'M'], # mega (million)
[10**3, 'k']] # kilo (thousand)
if hits >= 1000:
for limit, symbol in numbers:
if hits >= limit:
value = hits/limit
break
result = "%0.2f%s" % (value, symbol)
def fHits(Hits=0, Decimal=2):
khits = (10 ** 3)
mhits = (10 ** 6)
bhits = (10 ** 9)
thits = (10 ** 12)
qhits = (10 ** 15)
Qhits = (10 ** 18)
shits = (10 ** 21)
Shits = (10 ** 24)
if Hits >= Shits:
return str("%0." + str(Decimal) + "f") % (Hits / Shits) + "S"
elif Hits >= shits:
return str("%0." + str(Decimal) + "f") % (Hits / shits) + "s"
elif Hits >= Qhits:
return str("%0." + str(Decimal) + "f") % (Hits / Qhits) + "Q"
elif Hits >= qhits:
return str("%0." + str(Decimal) + "f") % (Hits / qhits) + "q"
elif Hits >= thits:
return str("%0." + str(Decimal) + "f") % (Hits / thits) + "t"
elif Hits >= bhits:
return str("%0." + str(Decimal) + "f") % (Hits / bhits) + "b"
elif Hits >= mhits:
return str("%0." + str(Decimal) + "f") % (Hits / mhits) + "m"
elif Hits >= khits:
return str("%0." + str(Decimal) + "f") % (Hits / khits) + "k"
elif Hits == 0:
return str("%d" % 0)
else:
result = "%d" % hits
return result
return str("%d" % Hits)
def fPerc(lVal=0, rVal=0, Decimal=2):
"""Calculate percentage value and return in human-readable format"""
if rVal > 0:
return str("%0." + str(Decimal) + "f") % (100 * (lVal / rVal)) + "%"
else:
@ -175,7 +160,6 @@ def fPerc(lVal=0, rVal=0, Decimal=2):
def get_arc_summary(Kstat):
"""Collect general data on the ARC"""
output = {}
memory_throttle_count = Kstat[
@ -192,18 +176,16 @@ def get_arc_summary(Kstat):
# ARC Misc.
deleted = Kstat["kstat.zfs.misc.arcstats.deleted"]
mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"]
evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"]
# ARC Misc.
output["arc_misc"] = {}
output["arc_misc"]["deleted"] = fHits(deleted)
output["arc_misc"]['mutex_miss'] = fHits(mutex_miss)
output["arc_misc"]['evict_skips'] = fHits(evict_skip)
output["arc_misc"]['evict_skips'] = fHits(mutex_miss)
# ARC Sizing
arc_size = Kstat["kstat.zfs.misc.arcstats.size"]
mru_size = Kstat["kstat.zfs.misc.arcstats.mru_size"]
mfu_size = Kstat["kstat.zfs.misc.arcstats.mfu_size"]
mru_size = Kstat["kstat.zfs.misc.arcstats.p"]
target_max_size = Kstat["kstat.zfs.misc.arcstats.c_max"]
target_min_size = Kstat["kstat.zfs.misc.arcstats.c_min"]
target_size = Kstat["kstat.zfs.misc.arcstats.c"]
@ -248,14 +230,27 @@ def get_arc_summary(Kstat):
]
output['arc_size_break'] = {}
output['arc_size_break']['recently_used_cache_size'] = {
'per': fPerc(mru_size, mru_size + mfu_size),
'num': fBytes(mru_size),
}
output['arc_size_break']['frequently_used_cache_size'] = {
'per': fPerc(mfu_size, mru_size + mfu_size),
'num': fBytes(mfu_size),
}
if arc_size > target_size:
mfu_size = (arc_size - mru_size)
output['arc_size_break']['recently_used_cache_size'] = {
'per': fPerc(mru_size, arc_size),
'num': fBytes(mru_size),
}
output['arc_size_break']['frequently_used_cache_size'] = {
'per': fPerc(mfu_size, arc_size),
'num': fBytes(mfu_size),
}
elif arc_size < target_size:
mfu_size = (target_size - mru_size)
output['arc_size_break']['recently_used_cache_size'] = {
'per': fPerc(mru_size, target_size),
'num': fBytes(mru_size),
}
output['arc_size_break']['frequently_used_cache_size'] = {
'per': fPerc(mfu_size, target_size),
'num': fBytes(mfu_size),
}
# ARC Hash Breakdown
hash_chain_max = Kstat["kstat.zfs.misc.arcstats.hash_chain_max"]
@ -278,8 +273,6 @@ def get_arc_summary(Kstat):
def _arc_summary(Kstat):
"""Print information on the ARC"""
# ARC Sizing
arc = get_arc_summary(Kstat)
@ -295,7 +288,7 @@ def _arc_summary(Kstat):
sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" %
arc['arc_misc']['mutex_miss'])
sys.stdout.write("\tEvict Skips:\t\t\t\t%s\n" %
arc['arc_misc']['evict_skips'])
arc['arc_misc']['mutex_miss'])
sys.stdout.write("\n")
# ARC Sizing
@ -354,8 +347,6 @@ def _arc_summary(Kstat):
def get_arc_efficiency(Kstat):
"""Collect information on the efficiency of the ARC"""
output = {}
arc_hits = Kstat["kstat.zfs.misc.arcstats.hits"]
@ -479,8 +470,6 @@ def get_arc_efficiency(Kstat):
def _arc_efficiency(Kstat):
"""Print information on the efficiency of the ARC"""
arc = get_arc_efficiency(Kstat)
sys.stdout.write("ARC Total accesses:\t\t\t\t\t%s\n" %
@ -591,8 +580,6 @@ def _arc_efficiency(Kstat):
def get_l2arc_summary(Kstat):
"""Collection information on the L2ARC"""
output = {}
l2_abort_lowmem = Kstat["kstat.zfs.misc.arcstats.l2_abort_lowmem"]
@ -687,7 +674,6 @@ def get_l2arc_summary(Kstat):
def _l2arc_summary(Kstat):
"""Print information on the L2ARC"""
arc = get_l2arc_summary(Kstat)
@ -772,8 +758,6 @@ def _l2arc_summary(Kstat):
def get_dmu_summary(Kstat):
"""Collect information on the DMU"""
output = {}
zfetch_hits = Kstat["kstat.zfs.misc.zfetchstats.hits"]
@ -799,7 +783,6 @@ def get_dmu_summary(Kstat):
def _dmu_summary(Kstat):
"""Print information on the DMU"""
arc = get_dmu_summary(Kstat)
@ -821,8 +804,6 @@ def _dmu_summary(Kstat):
def get_vdev_summary(Kstat):
"""Collect information on the VDEVs"""
output = {}
vdev_cache_delegations = \
@ -853,8 +834,6 @@ def get_vdev_summary(Kstat):
def _vdev_summary(Kstat):
"""Print information on the VDEVs"""
arc = get_vdev_summary(Kstat)
if arc['vdev_cache_total'] > 0:
@ -874,12 +853,10 @@ def _vdev_summary(Kstat):
def _tunable_summary(Kstat):
"""Print information on tunables, including descriptions if requested"""
global show_tunable_descriptions
global alternate_tunable_layout
names = os.listdir("/sys/module/zfs/parameters/")
names = listdir("/sys/module/zfs/parameters/")
values = {}
for name in names:
@ -890,21 +867,13 @@ def _tunable_summary(Kstat):
descriptions = {}
if show_tunable_descriptions:
command = ["/sbin/modinfo", "zfs", "-0"]
try:
command = ["/sbin/modinfo", "zfs", "-0"]
p = Popen(command, stdin=PIPE, stdout=PIPE,
stderr=PIPE, shell=False, close_fds=True)
p.wait()
# By default, Python 2 returns a string as the first element of the
# tuple from p.communicate(), while Python 3 returns bytes which
# must be decoded first. The better way to do this would be with
# subprocess.run() or at least .check_output(), but this fails on
# CentOS 6 because of its old version of Python 2
desc = bytes.decode(p.communicate()[0])
description_list = desc.strip().split('\0')
description_list = p.communicate()[0].strip().split('\0')
if p.returncode == 0:
for tunable in description_list:
@ -923,23 +892,19 @@ def _tunable_summary(Kstat):
(sys.argv[0], command[0], e.strerror))
sys.stderr.write("Tunable descriptions will be disabled.\n")
sys.stdout.write("ZFS Tunables:\n")
names.sort()
if alternate_tunable_layout:
fmt = "\t%s=%s\n"
else:
fmt = "\t%-50s%s\n"
sys.stdout.write("ZFS Tunable:\n")
for name in names:
if not name:
continue
format = "\t%-50s%s\n"
if alternate_tunable_layout:
format = "\t%s=%s\n"
if show_tunable_descriptions and name in descriptions:
sys.stdout.write("\t# %s\n" % descriptions[name])
sys.stdout.write(fmt % (name, values[name]))
sys.stdout.write(format % (name, values[name]))
unSub = [
@ -953,18 +918,14 @@ unSub = [
def zfs_header():
"""Print title string with date"""
daydate = time.strftime("%a %b %d %H:%M:%S %Y")
daydate = time.strftime('%a %b %d %H:%M:%S %Y')
sys.stdout.write('\n'+'-'*72+'\n')
sys.stdout.write('ZFS Subsystem Report\t\t\t\t%s' % daydate)
sys.stdout.write('\n')
div1()
sys.stdout.write("ZFS Subsystem Report\t\t\t\t%s" % daydate)
div2()
def usage():
"""Print usage information"""
sys.stdout.write("Usage: arc_summary.py [-h] [-a] [-d] [-p PAGE]\n\n")
sys.stdout.write("\t -h, --help : "
"Print this help message and exit\n")
@ -985,20 +946,12 @@ def usage():
def main():
"""Main function"""
global show_tunable_descriptions
global alternate_tunable_layout
try:
opts, args = getopt.getopt(
sys.argv[1:],
"adp:h", ["alternate", "description", "page=", "help"]
)
except getopt.error as e:
sys.stderr.write("Error: %s\n" % e.msg)
usage()
sys.exit(1)
opts, args = getopt.getopt(
sys.argv[1:], "adp:h", ["alternate", "description", "page=", "help"]
)
args = {}
for opt, arg in opts:
@ -1010,7 +963,7 @@ def main():
args['p'] = arg
if opt in ('-h', '--help'):
usage()
sys.exit(0)
sys.exit()
Kstat = get_Kstat()
@ -1025,14 +978,14 @@ def main():
except IndexError:
sys.stderr.write('the argument to -p must be between 1 and ' +
str(len(unSub)) + '\n')
sys.exit(1)
sys.exit()
else:
pages = unSub
zfs_header()
for page in pages:
page(Kstat)
sys.stdout.write("\n")
div2()
if __name__ == '__main__':


@ -112,6 +112,7 @@ cur = {}
d = {}
out = None
kstat = None
float_pobj = re.compile("^[0-9]+(\.[0-9]+)?$")
def detailed_usage():
@ -218,7 +219,6 @@ def print_values():
sep
))
sys.stdout.write("\n")
sys.stdout.flush()
def print_header():
@ -238,7 +238,7 @@ def get_terminal_lines():
data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
sz = struct.unpack('hh', data)
return sz[0]
except Exception:
except:
pass
@ -279,12 +279,12 @@ def init():
"outfile",
"help",
"verbose",
"separator",
"seperator",
"columns"
]
)
except getopt.error as msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(msg)
usage()
opts = None
@ -298,7 +298,7 @@ def init():
hflag = True
if opt in ('-v', '--verbose'):
vflag = True
if opt in ('-s', '--separator'):
if opt in ('-s', '--seperator'):
sep = arg
i += 1
if opt in ('-f', '--columns'):


@ -474,7 +474,7 @@ def main():
"help",
"infile",
"outfile",
"separator",
"seperator",
"types",
"verbose",
"extended"
@ -499,7 +499,7 @@ def main():
ofile = arg
if opt in ('-r', '--raw'):
raw += 1
if opt in ('-s', '--separator'):
if opt in ('-s', '--seperator'):
sep = arg
if opt in ('-t', '--types'):
tflag = True


@ -7,8 +7,6 @@ DEFAULT_INCLUDES += \
#
# Ignore the prefix for the mount helper. It must be installed in /sbin/
# because this path is hardcoded in the mount(8) for security reasons.
# However, if needed, the configure option --with-mounthelperdir= can be used
# to override the default install location.
#
sbindir=$(mounthelperdir)
sbin_PROGRAMS = mount.zfs


@ -100,11 +100,10 @@ usage() {
cat << EOF
Usage: vdev_id [-h]
vdev_id <-d device> [-c config_file] [-p phys_per_port]
[-g sas_direct|sas_switch|scsi] [-m]
[-g sas_direct|sas_switch] [-m]
-c specify name of alernate config file [default=$CONFIG]
-d specify basename of device (i.e. sda)
-e Create enclose device symlinks only (/dev/by-enclosure)
-g Storage network topology [default="$TOPOLOGY"]
-m Run in multipath mode
-p number of phy's per switch port [default=$PHYS_PER_PORT]
@ -136,7 +135,7 @@ map_channel() {
MAPPED_CHAN=`awk "\\$1 == \"channel\" && \\$2 == ${PORT} \
{ print \\$3; exit }" $CONFIG`
;;
"sas_direct"|"scsi")
"sas_direct")
MAPPED_CHAN=`awk "\\$1 == \"channel\" && \
\\$2 == \"${PCI_ID}\" && \\$3 == ${PORT} \
{ print \\$4; exit }" $CONFIG`
@ -277,23 +276,6 @@ sas_handler() {
d=$(eval echo \${$i})
SLOT=`echo $d | sed -e 's/^.*://'`
;;
"ses")
# look for this SAS path in all SCSI Enclosure Services
# (SES) enclosures
sas_address=`cat $end_device_dir/sas_address 2>/dev/null`
enclosures=`lsscsi -g | \
sed -n -e '/enclosu/s/^.* \([^ ][^ ]*\) *$/\1/p'`
for enclosure in $enclosures; do
set -- $(sg_ses -p aes $enclosure | \
awk "/device slot number:/{slot=\$12} \
/SAS address: $sas_address/\
{print slot}")
SLOT=$1
if [ -n "$SLOT" ] ; then
break
fi
done
;;
esac
if [ -z "$SLOT" ] ; then
return
@ -307,156 +289,6 @@ sas_handler() {
echo ${CHAN}${SLOT}${PART}
}
scsi_handler() {
if [ -z "$FIRST_BAY_NUMBER" ] ; then
FIRST_BAY_NUMBER=`awk "\\$1 == \"first_bay_number\" \
{print \\$2; exit}" $CONFIG`
fi
FIRST_BAY_NUMBER=${FIRST_BAY_NUMBER:-0}
if [ -z "$PHYS_PER_PORT" ] ; then
PHYS_PER_PORT=`awk "\\$1 == \"phys_per_port\" \
{print \\$2; exit}" $CONFIG`
fi
PHYS_PER_PORT=${PHYS_PER_PORT:-4}
if ! echo $PHYS_PER_PORT | grep -q -E '^[0-9]+$' ; then
echo "Error: phys_per_port value $PHYS_PER_PORT is non-numeric"
exit 1
fi
if [ -z "$MULTIPATH_MODE" ] ; then
MULTIPATH_MODE=`awk "\\$1 == \"multipath\" \
{print \\$2; exit}" $CONFIG`
fi
# Use first running component device if we're handling a dm-mpath device
if [ "$MULTIPATH_MODE" = "yes" ] ; then
# If udev didn't tell us the UUID via DM_NAME, check /dev/mapper
if [ -z "$DM_NAME" ] ; then
DM_NAME=`ls -l --full-time /dev/mapper |
awk "/\/$DEV$/{print \\$9}"`
fi
# For raw disks udev exports DEVTYPE=partition when
# handling partitions, and the rules can be written to
# take advantage of this to append a -part suffix. For
# dm devices we get DEVTYPE=disk even for partitions so
# we have to append the -part suffix directly in the
# helper.
if [ "$DEVTYPE" != "partition" ] ; then
PART=`echo $DM_NAME | awk -Fp '/p/{print "-part"$2}'`
fi
# Strip off partition information.
DM_NAME=`echo $DM_NAME | sed 's/p[0-9][0-9]*$//'`
if [ -z "$DM_NAME" ] ; then
return
fi
# Get the raw scsi device name from multipath -ll. Strip off
# leading pipe symbols to make field numbering consistent.
DEV=`multipath -ll $DM_NAME |
awk '/running/{gsub("^[|]"," "); print $3 ; exit}'`
if [ -z "$DEV" ] ; then
return
fi
fi
if echo $DEV | grep -q ^/devices/ ; then
sys_path=$DEV
else
sys_path=`udevadm info -q path -p /sys/block/$DEV 2>/dev/null`
fi
# expect sys_path like this, for example:
# /devices/pci0000:00/0000:00:0b.0/0000:09:00.0/0000:0a:05.0/0000:0c:00.0/host3/target3:1:0/3:1:0:21/block/sdv
# Use positional parameters as an ad-hoc array
set -- $(echo "$sys_path" | tr / ' ')
num_dirs=$#
scsi_host_dir="/sys"
# Get path up to /sys/.../hostX
i=1
while [ $i -le $num_dirs ] ; do
d=$(eval echo \${$i})
scsi_host_dir="$scsi_host_dir/$d"
echo $d | grep -q -E '^host[0-9]+$' && break
i=$(($i + 1))
done
if [ $i = $num_dirs ] ; then
return
fi
PCI_ID=$(eval echo \${$(($i -1))} | awk -F: '{print $2":"$3}')
# In scsi mode, the directory two levels beneath
# /sys/.../hostX reveals the port and slot.
port_dir=$scsi_host_dir
j=$(($i + 2))
i=$(($i + 1))
while [ $i -le $j ] ; do
port_dir="$port_dir/$(eval echo \${$i})"
i=$(($i + 1))
done
set -- $(echo $port_dir | sed -e 's/^.*:\([^:]*\):\([^:]*\)$/\1 \2/')
PORT=$1
SLOT=$(($2 + $FIRST_BAY_NUMBER))
if [ -z "$SLOT" ] ; then
return
fi
CHAN=`map_channel $PCI_ID $PORT`
SLOT=`map_slot $SLOT $CHAN`
if [ -z "$CHAN" ] ; then
return
fi
echo ${CHAN}${SLOT}${PART}
}
# Figure out the name for the enclosure symlink
enclosure_handler () {
# We get all the info we need from udev's DEVPATH variable:
#
# DEVPATH=/sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/subsystem/devices/0:0:0:0/scsi_generic/sg0
# Get the enclosure ID ("0:0:0:0")
ENC=$(basename $(readlink -m "/sys/$DEVPATH/../.."))
if [ ! -d /sys/class/enclosure/$ENC ] ; then
# Not an enclosure, bail out
return
fi
# Get the long sysfs device path to our enclosure. Looks like:
# /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0/ ... /enclosure/0:0:0:0
ENC_DEVICE=$(readlink /sys/class/enclosure/$ENC)
# Grab the full path to the hosts port dir:
# /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0
PORT_DIR=$(echo $ENC_DEVICE | grep -Eo '.+host[0-9]+/port-[0-9]+:[0-9]+')
# Get the port number
PORT_ID=$(echo $PORT_DIR | grep -Eo "[0-9]+$")
# The PCI directory is two directories up from the port directory
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0
PCI_ID_LONG=$(basename $(readlink -m "/sys/$PORT_DIR/../.."))
# Strip down the PCI address from 0000:05:00.0 to 05:00.0
PCI_ID=$(echo "$PCI_ID_LONG" | sed -r 's/^[0-9]+://g')
# Name our device according to vdev_id.conf (like "L0" or "U1").
NAME=$(awk "/channel/{if (\$1 == \"channel\" && \$2 == \"$PCI_ID\" && \
\$3 == \"$PORT_ID\") {print \$4int(count[\$4])}; count[\$4]++}" $CONFIG)
echo "${NAME}"
}
alias_handler () {
# Special handling is needed to correctly append a -part suffix
# to partitions of device mapper devices. The DEVTYPE attribute
@ -512,7 +344,7 @@ alias_handler () {
done
}
while getopts 'c:d:eg:mp:h' OPTION; do
while getopts 'c:d:g:mp:h' OPTION; do
case ${OPTION} in
c)
CONFIG=${OPTARG}
@ -520,16 +352,6 @@ while getopts 'c:d:eg:mp:h' OPTION; do
d)
DEV=${OPTARG}
;;
e)
# When udev sees a scsi_generic device, it calls this script with -e to
# create the enclosure device symlinks only. We also need
# "enclosure_symlinks yes" set in vdev_id.config to actually create the
# symlink.
ENCLOSURE_MODE=$(awk '{if ($1 == "enclosure_symlinks") print $2}' $CONFIG)
if [ "$ENCLOSURE_MODE" != "yes" ] ; then
exit 0
fi
;;
g)
TOPOLOGY=$OPTARG
;;
@ -549,7 +371,7 @@ if [ ! -r $CONFIG ] ; then
exit 0
fi
if [ -z "$DEV" -a -z "$ENCLOSURE_MODE" ] ; then
if [ -z "$DEV" ] ; then
echo "Error: missing required option -d"
exit 1
fi
@ -562,37 +384,16 @@ if [ -z "$BAY" ] ; then
BAY=`awk "\\$1 == \"slot\" {print \\$2; exit}" $CONFIG`
fi
TOPOLOGY=${TOPOLOGY:-sas_direct}
# Should we create /dev/by-enclosure symlinks?
if [ "$ENCLOSURE_MODE" = "yes" -a "$TOPOLOGY" = "sas_direct" ] ; then
ID_ENCLOSURE=$(enclosure_handler)
if [ -z "$ID_ENCLOSURE" ] ; then
exit 0
fi
# Just create the symlinks to the enclosure devices and then exit.
ENCLOSURE_PREFIX=$(awk '/enclosure_symlinks_prefix/{print $2}' $CONFIG)
if [ -z "$ENCLOSURE_PREFIX" ] ; then
ENCLOSURE_PREFIX="enc"
fi
echo "ID_ENCLOSURE=$ID_ENCLOSURE"
echo "ID_ENCLOSURE_PATH=by-enclosure/$ENCLOSURE_PREFIX-$ID_ENCLOSURE"
exit 0
fi
# First check if an alias was defined for this device.
ID_VDEV=`alias_handler`
if [ -z "$ID_VDEV" ] ; then
BAY=${BAY:-bay}
TOPOLOGY=${TOPOLOGY:-sas_direct}
case $TOPOLOGY in
sas_direct|sas_switch)
ID_VDEV=`sas_handler`
;;
scsi)
ID_VDEV=`scsi_handler`
;;
*)
echo "Error: unknown topology $TOPOLOGY"
exit 1


@ -24,7 +24,7 @@
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2017 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
*/
@ -2716,6 +2716,10 @@ dump_label(const char *dev)
exit(1);
}
if (ioctl(fd, BLKFLSBUF) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
@ -2723,10 +2727,6 @@ dump_label(const char *dev)
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && ioctl(fd, BLKFLSBUF) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
@ -3313,7 +3313,7 @@ dump_block_stats(spa_t *spa)
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
int e, c;
bp_embedded_type_t i;
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
@ -3354,7 +3354,7 @@ dump_block_stats(spa_t *spa)
zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
/*
* If we've traversed the data blocks then we need to wait for those
@ -3370,12 +3370,6 @@ dump_block_stats(spa_t *spa)
}
}
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb.zcb_haderrors |= err;
if (zcb.zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
@ -3659,22 +3653,6 @@ dump_simulated_ddt(spa_t *spa)
dump_dedup_ratio(&dds_total);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
static void
dump_zpool(spa_t *spa)
{
@ -3911,6 +3889,13 @@ name:
return (NULL);
}
/* ARGSUSED */
static int
random_get_pseudo_bytes_cb(void *buf, size_t len, void *unused)
{
return (random_get_pseudo_bytes(buf, len));
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
@ -4073,8 +4058,17 @@ zdb_read_block(char *thing, spa_t *spa)
* every decompress function at every inflated blocksize.
*/
enum zio_compress c;
void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
abd_copy_to_buf(pbuf2, pabd, psize);
VERIFY0(abd_iterate_func(pabd, psize, SPA_MAXBLOCKSIZE - psize,
random_get_pseudo_bytes_cb, NULL));
VERIFY0(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
SPA_MAXBLOCKSIZE - psize));
/*
* XXX - On the one hand, with SPA_MAXBLOCKSIZE at 16MB,
* this could take a while and we should let the user know
@ -4084,29 +4078,13 @@ zdb_read_block(char *thing, spa_t *spa)
for (lsize = psize + SPA_MINBLOCKSIZE;
lsize <= SPA_MAXBLOCKSIZE; lsize += SPA_MINBLOCKSIZE) {
for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
/*
* ZLE can easily decompress non zle stream.
* So have an option to disable it.
*/
if (c == ZIO_COMPRESS_ZLE &&
getenv("ZDB_NO_ZLE"))
continue;
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize, (u_longlong_t)lsize,
zio_compress_table[c].ci_name);
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if
* decompression fill exactly to lsize.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(c, pabd,
lbuf, psize, lsize) == 0 &&
zio_decompress_data(c, pabd,
zio_decompress_data_buf(c, pbuf2,
lbuf2, psize, lsize) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
break;
@ -4114,9 +4092,11 @@ zdb_read_block(char *thing, spa_t *spa)
if (c != ZIO_COMPRESS_FUNCTIONS)
break;
}
umem_free(pbuf2, SPA_MAXBLOCKSIZE);
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > SPA_MAXBLOCKSIZE) {
if (lsize <= psize) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
@ -4155,12 +4135,11 @@ zdb_embedded_block(char *thing)
{
blkptr_t bp;
unsigned long long *words = (void *)&bp;
char *buf;
char buf[SPA_MAXBLOCKSIZE];
int err;
buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
memset(&bp, 0, sizeof (blkptr_t));
bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
@ -4178,7 +4157,6 @@ zdb_embedded_block(char *thing)
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
umem_free(buf, SPA_MAXBLOCKSIZE);
}
int
@ -4193,7 +4171,7 @@ main(int argc, char **argv)
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool;
char *target;
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int flags = ZFS_IMPORT_MISSING_LOG;
@ -4396,20 +4374,6 @@ main(int argc, char **argv)
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
@ -4418,36 +4382,48 @@ main(int argc, char **argv)
args.path = searchdirs;
args.can_be_active = B_TRUE;
error = zpool_tryimport(g_zfs, target_pool, &cfg, &args);
error = zpool_tryimport(g_zfs, target, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_REWIND_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
error = spa_import(target, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (target_pool != target)
free(target_pool);
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
}
if (error == 0) {
if (target_is_spa || dump_opt['R']) {
zdb_set_skip_mmp(target);
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
@ -4470,7 +4446,6 @@ main(int argc, char **argv)
}
}
} else {
zdb_set_skip_mmp(target);
error = open_objset(target, DMU_OST_ANY, FTAG, &os);
}
}


@ -69,8 +69,7 @@ dist_zedexec_SCRIPTS = \
zed.d/statechange-notify.sh \
zed.d/vdev_clear-led.sh \
zed.d/vdev_attach-led.sh \
zed.d/pool_import-led.sh \
zed.d/resilver_finish-start-scrub.sh
zed.d/pool_import-led.sh
zedconfdefaults = \
all-syslog.sh \
@ -81,8 +80,7 @@ zedconfdefaults = \
statechange-notify.sh \
vdev_clear-led.sh \
vdev_attach-led.sh \
pool_import-led.sh \
resilver_finish-start-scrub.sh
pool_import-led.sh
install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(zedconfdir)"


@ -10,8 +10,6 @@
: "${ZED_DEBUG_LOG:="${TMPDIR:="/tmp"}/zed.debug.log"}"
zed_exit_if_ignoring_this_event
lockfile="$(basename -- "${ZED_DEBUG_LOG}").lock"
umask 077


@ -5,8 +5,6 @@
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
zed_exit_if_ignoring_this_event
zed_log_msg "eid=${ZEVENT_EID}" "class=${ZEVENT_SUBCLASS}" \
"${ZEVENT_POOL_GUID:+"pool_guid=${ZEVENT_POOL_GUID}"}" \
"${ZEVENT_VDEV_PATH:+"vdev_path=${ZEVENT_VDEV_PATH}"}" \


@ -1,17 +0,0 @@
#!/bin/sh
# resilver_finish-start-scrub.sh
# Run a scrub after a resilver
#
# Exit codes:
# 1: Internal error
# 2: Script wasn't enabled in zed.rc
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
[ "${ZED_SCRUB_AFTER_RESILVER}" = "1" ] || exit 2
[ -n "${ZEVENT_POOL}" ] || exit 1
[ -n "${ZEVENT_SUBCLASS}" ] || exit 1
zed_check_cmd "${ZPOOL}" || exit 1
zed_log_msg "Starting scrub after resilver on ${ZEVENT_POOL}"
"${ZPOOL}" scrub "${ZEVENT_POOL}"


@ -397,7 +397,7 @@ zed_rate_limit()
zed_lock "${lockfile}" "${lockfile_fd}"
time_now="$(date +%s)"
time_prev="$(grep -E "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
time_prev="$(egrep "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
| tail -1 | cut -d\; -f1)"
if [ -n "${time_prev}" ] \
@ -406,7 +406,7 @@ zed_rate_limit()
else
umask_bak="$(umask)"
umask 077
grep -E -v "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
egrep -v "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
> "${statefile}.$$"
echo "${time_now};${tag}" >> "${statefile}.$$"
mv -f "${statefile}.$$" "${statefile}"
@ -438,23 +438,3 @@ zed_guid_to_pool()
$ZPOOL get -H -ovalue,name guid | awk '$1=='"$guid"' {print $2}'
fi
}
# zed_exit_if_ignoring_this_event
#
# Exit the script if we should ignore this event, as determined by
# $ZED_SYSLOG_SUBCLASS_INCLUDE and $ZED_SYSLOG_SUBCLASS_EXCLUDE in zed.rc.
# This function assumes you've imported the normal zed variables.
zed_exit_if_ignoring_this_event()
{
if [ -n "${ZED_SYSLOG_SUBCLASS_INCLUDE}" ]; then
eval "case ${ZEVENT_SUBCLASS} in
${ZED_SYSLOG_SUBCLASS_INCLUDE});;
*) exit 0;;
esac"
elif [ -n "${ZED_SYSLOG_SUBCLASS_EXCLUDE}" ]; then
eval "case ${ZEVENT_SUBCLASS} in
${ZED_SYSLOG_SUBCLASS_EXCLUDE}) exit 0;;
*);;
esac"
fi
}


@ -86,9 +86,6 @@
#
ZED_USE_ENCLOSURE_LEDS=1
##
# Run a scrub after every resilver
#ZED_SCRUB_AFTER_RESILVER=1
##
# The syslog priority (e.g., specified as a "facility.level" pair).
@ -100,14 +97,3 @@ ZED_USE_ENCLOSURE_LEDS=1
#
#ZED_SYSLOG_TAG="zed"
##
# Which set of event subclasses to log
# By default, events from all subclasses are logged.
# If ZED_SYSLOG_SUBCLASS_INCLUDE is set, only subclasses
# matching the pattern are logged. Use the pipe symbol (|)
# or shell wildcards (*, ?) to match multiple subclasses.
# Otherwise, if ZED_SYSLOG_SUBCLASS_EXCLUDE is set, the
# matching subclasses are excluded from logging.
#ZED_SYSLOG_SUBCLASS_INCLUDE="checksum|scrub_*|vdev.*"
#ZED_SYSLOG_SUBCLASS_EXCLUDE="statechange|config_*|history_event"


@ -155,8 +155,6 @@ _zed_conf_display_help(const char *prog, int got_err)
"Run daemon in the foreground.");
fprintf(fp, "%*c%*s %s\n", w1, 0x20, -w2, "-M",
"Lock all pages in memory.");
fprintf(fp, "%*c%*s %s\n", w1, 0x20, -w2, "-P",
"$PATH for ZED to use (only used by ZTS).");
fprintf(fp, "%*c%*s %s\n", w1, 0x20, -w2, "-Z",
"Zero state file.");
fprintf(fp, "\n");
@ -249,7 +247,7 @@ _zed_conf_parse_path(char **resultp, const char *path)
void
zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
{
const char * const opts = ":hLVc:d:p:P:s:vfFMZ";
const char * const opts = ":hLVc:d:p:s:vfFMZ";
int opt;
if (!zcp || !argv || !argv[0])
@ -277,9 +275,6 @@ zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
case 'p':
_zed_conf_parse_path(&zcp->pid_file, optarg);
break;
case 'P':
_zed_conf_parse_path(&zcp->path, optarg);
break;
case 's':
_zed_conf_parse_path(&zcp->state_file, optarg);
break;


@ -37,7 +37,6 @@ struct zed_conf {
int state_fd; /* fd to state file */
libzfs_handle_t *zfs_hdl; /* handle to libzfs */
int zevent_fd; /* fd for access to zevents */
char *path; /* custom $PATH for zedlets to use */
};
struct zed_conf *zed_conf_create(void);


@ -733,14 +733,12 @@ _zed_event_add_nvpair(uint64_t eid, zed_strings_t *zsp, nvpair_t *nvp)
/*
* Restrict various environment variables to safe and sane values
* when constructing the environment for the child process, unless
* we're running with a custom $PATH (like under the ZFS test suite).
* when constructing the environment for the child process.
*
* Reference: Secure Programming Cookbook by Viega & Messier, Section 1.1.
*/
static void
_zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp,
const char *path)
_zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp)
{
const char *env_restrict[][2] = {
{ "IFS", " \t\n" },
@ -755,35 +753,11 @@ _zed_event_add_env_restrict(uint64_t eid, zed_strings_t *zsp,
{ "ZFS_RELEASE", ZFS_META_RELEASE },
{ NULL, NULL }
};
/*
* If we have a custom $PATH, use the default ZFS binary locations
* instead of the hard-coded ones.
*/
const char *env_path[][2] = {
{ "IFS", " \t\n" },
{ "PATH", NULL }, /* $PATH copied in later on */
{ "ZDB", "zdb" },
{ "ZED", "zed" },
{ "ZFS", "zfs" },
{ "ZINJECT", "zinject" },
{ "ZPOOL", "zpool" },
{ "ZFS_ALIAS", ZFS_META_ALIAS },
{ "ZFS_VERSION", ZFS_META_VERSION },
{ "ZFS_RELEASE", ZFS_META_RELEASE },
{ NULL, NULL }
};
const char *(*pa)[2];
assert(zsp != NULL);
pa = path != NULL ? env_path : env_restrict;
for (; *(*pa); pa++) {
/* Use our custom $PATH if we have one */
if (path != NULL && strcmp((*pa)[0], "PATH") == 0)
(*pa)[1] = path;
for (pa = env_restrict; *(*pa); pa++) {
_zed_event_add_var(eid, zsp, NULL, (*pa)[0], "%s", (*pa)[1]);
}
}
@ -928,7 +902,7 @@ zed_event_service(struct zed_conf *zcp)
while ((nvp = nvlist_next_nvpair(nvl, nvp)))
_zed_event_add_nvpair(eid, zsp, nvp);
_zed_event_add_env_restrict(eid, zsp, zcp->path);
_zed_event_add_env_restrict(eid, zsp);
_zed_event_add_env_preserve(eid, zsp);
_zed_event_add_var(eid, zsp, ZED_VAR_PREFIX, "PID",


@ -6072,7 +6072,7 @@ share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
"\"zfs receive -s\", which can be resumed with "
"\"zfs receive -r\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
@ -7041,7 +7041,6 @@ main(int argc, char **argv)
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) textdomain(TEXT_DOMAIN);
@ -7096,26 +7095,17 @@ main(int argc, char **argv)
libzfs_print_on_error(g_zfs, B_TRUE);
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
libzfs_mnttab_cache(g_zfs, B_TRUE);
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
ret = command_table[i].func(argc - 1, argv + 1);
} else if (strchr(cmdname, '=') != NULL) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
ret = command_table[i].func(argc, argv);
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
@ -7123,10 +7113,6 @@ main(int argc, char **argv)
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);


@ -268,7 +268,7 @@ zhack_feature_enable_sync(void *arg, dmu_tx_t *tx)
static void
zhack_do_feature_enable(int argc, char **argv)
{
int c;
char c;
char *desc, *target;
spa_t *spa;
objset_t *mos;
@ -363,7 +363,7 @@ feature_decr_sync(void *arg, dmu_tx_t *tx)
static void
zhack_do_feature_ref(int argc, char **argv)
{
int c;
char c;
char *target;
boolean_t decr = B_FALSE;
spa_t *spa;
@ -483,7 +483,7 @@ main(int argc, char **argv)
char *path[MAX_NUM_PATHS];
const char *subcommand;
int rv = 0;
int c;
char c;
g_importargs.path = path;


@ -525,11 +525,10 @@ run_one(cmd_args_t *args, uint32_t id, uint32_t T, uint32_t N,
memset(cmd, 0, cmd_size);
cmd->cmd_magic = ZPIOS_CMD_MAGIC;
snprintf(cmd->cmd_pool, sizeof (cmd->cmd_pool), "%s", args->pool);
snprintf(cmd->cmd_pre, sizeof (cmd->cmd_pre), "%s", args->pre);
snprintf(cmd->cmd_post, sizeof (cmd->cmd_post), "%s", args->post);
snprintf(cmd->cmd_log, sizeof (cmd->cmd_log), "%s", args->log);
strncpy(cmd->cmd_pool, args->pool, ZPIOS_NAME_SIZE - 1);
strncpy(cmd->cmd_pre, args->pre, ZPIOS_PATH_SIZE - 1);
strncpy(cmd->cmd_post, args->post, ZPIOS_PATH_SIZE - 1);
strncpy(cmd->cmd_log, args->log, ZPIOS_PATH_SIZE - 1);
cmd->cmd_id = id;
cmd->cmd_chunk_size = C;
cmd->cmd_thread_count = T;


@ -60,15 +60,9 @@ dist_zpoolexec_SCRIPTS = \
zpool.d/pend_sec \
zpool.d/off_ucor \
zpool.d/ata_err \
zpool.d/nvme_err \
zpool.d/pwr_cyc \
zpool.d/upath \
zpool.d/vendor \
zpool.d/smart_test \
zpool.d/test_type \
zpool.d/test_status \
zpool.d/test_progress \
zpool.d/test_ended
zpool.d/vendor
zpoolconfdefaults = \
enc \
@ -104,15 +98,9 @@ zpoolconfdefaults = \
pend_sec \
off_ucor \
ata_err \
nvme_err \
pwr_cyc \
upath \
vendor \
smart_test \
test_type \
test_status \
test_progress \
test_ended
vendor
install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(zpoolconfdir)"


@ -1 +0,0 @@
smart


@ -23,45 +23,8 @@ off_ucor: Show SMART offline uncorrectable errors (ATA).
ata_err: Show SMART ATA errors (ATA).
pwr_cyc: Show SMART power cycle count (ATA).
serial: Show disk serial number.
nvme_err: Show SMART NVMe errors (NVMe).
smart_test: Show SMART self-test results summary.
test_type: Show SMART self-test type (short, long... ).
test_status: Show SMART self-test status.
test_progress: Show SMART self-test percentage done.
test_ended: Show when the last SMART self-test ended (if supported).
"
# Hack for developer testing
#
# If you set $samples to a directory containing smartctl output text files,
# we will use them instead of running smartctl on the vdevs. This can be
# useful if you want to test a bunch of different smartctl outputs. Also, if
# $samples is set, and additional 'file' column is added to the zpool output
# showing the filename.
samples=
# get_filename_from_dir DIR
#
# Look in directory DIR and return a filename from it. The filename returned
# is chosen quasi-sequentially (based off our PID). This allows us to return
# a different filename every time this script is invoked (which we do for each
# vdev), without having to maintain state.
get_filename_from_dir()
{
dir=$1
pid="$$"
num_files=$(find "$dir" -maxdepth 1 -type f | wc -l)
mod=$((pid % num_files))
i=0
find "$dir" -type f -printf "%f\n" | while read -r file ; do
if [ "$mod" = "$i" ] ; then
echo "$file"
break
fi
i=$((i+1))
done
}
script=$(basename "$0")
if [ "$1" = "-h" ] ; then
@ -71,18 +34,10 @@ fi
smartctl_path=$(which smartctl)
if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ] || [ -n "$samples" ] ; then
if [ -n "$samples" ] ; then
# cat a smartctl output text file instead of running smartctl
# on a vdev (only used for developer testing).
file=$(get_filename_from_dir $samples)
echo "file=$file"
raw_out=$(cat "$samples/$file")
else
raw_out=$(eval "sudo $smartctl_path -a $VDEV_UPATH")
fi
if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ]; then
raw_out=$(eval "sudo $smartctl_path -a $VDEV_UPATH")
# What kind of drive are we? Look for the right line in smartctl:
# Are we a SAS or ATA drive? Look for the right line in smartctl:
#
# SAS:
# Transport protocol: SAS
@ -90,9 +45,7 @@ if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ] || [ -n "$samples" ] ; then
# SATA:
# ATA Version is: 8
#
# NVMe:
# SMART/Health Information (NVMe Log 0xnn, NSID 0xnn)
#
type=$(echo "$raw_out" | grep -m 1 -Eo '^ATA|SAS$')
out=$(echo "$raw_out" | awk '
# SAS specific
/read:/{print "rrd="$4"\nr_cor="$5"\nr_proc="$7"\nr_ucor="$8}
@ -101,11 +54,10 @@ if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ] || [ -n "$samples" ] ; then
/Elements in grown defect list/{print "defect="$6}
# SAS common
/SAS/{type="sas"}
/Drive Temperature:/{print "temp="$4}
# Status can be a long string, substitute spaces for '_'
/SMART Health Status:/{printf "health="; for(i=4;i<=NF-1;i++){printf "%s_", $i}; printf "%s\n", $i}
/number of hours powered up/{print "hours_on="$7; hours_on=int($7)}
/number of hours powered up/{print "hours_on="$7}
/Serial number:/{print "serial="$3}
# SATA specific
@ -118,111 +70,40 @@ if [ -b "$VDEV_UPATH" ] && [ -x "$smartctl_path" ] || [ -n "$samples" ] ; then
/Power_Cycle_Count/{print "pwr_cyc="$10}
# SATA common
/SATA/{type="sata"}
/Temperature_Celsius/{print "temp="$10}
/Airflow_Temperature_Cel/{print "temp="$10}
/Current Temperature:/{print "temp="$3}
/SMART overall-health self-assessment test result:/{print "health="$6}
/Power_On_Hours/{print "hours_on="$10; hours_on=int($10)}
/Power_On_Hours/{print "hours_on="$10}
/Serial Number:/{print "serial="$3}
# NVMe common
/NVMe/{type="nvme"}
/Temperature:/{print "temp="$2}
/SMART overall-health self-assessment test result:/{print "health="$6}
/Power On Hours:/{gsub("[^0-9]","",$4); print "hours_on="$4}
/Serial Number:/{print "serial="$3}
/Power Cycles:/{print "pwr_cyc="$3}
# NVMe specific
/Media and Data Integrity Errors:/{print "nvme_err="$6}
# SMART self-test info
/Self-test execution status:/{progress=tolower($4)} # SAS
/SMART Self-test log/{test_seen=1} # SAS
/SMART Extended Self-test Log/{test_seen=1} # SATA
/# 1/{
test_type=tolower($3"_"$4);
# Status could be one word ("Completed") or multiple ("Completed: read
# failure"). Look for the ":" to see if we need to grab more words.
if ($5 ~ ":")
status=tolower($5""$6"_"$7)
else
status=tolower($5)
if (status=="self")
status="running";
if (type == "sas") {
hours=int($(NF-4))
} else {
hours=int($(NF-1))
# SATA reports percent remaining, rather than percent done
# Convert it to percent done.
progress=(100-int($(NF-2)))"%"
}
# When we int()-ify "hours", it converts stuff like "NOW" and "-" into
# 0. In those cases, set it to hours_on, so they will cancel out in
# the "hours_ago" calculation later on.
if (hours == 0)
hours=hours_on
if (test_seen) {
print "test="hours_on
print "test_type="test_type
print "test_status="status
print "test_progress="progress
}
# Not all drives report hours_on
if (hours_on && hours) {
total_hours_ago=(hours_on-hours)
days_ago=int(total_hours_ago/24)
hours_ago=(total_hours_ago % 24)
if (days_ago != 0)
ago_str=days_ago"d"
if (hours_ago !=0)
ago_str=ago_str""hours_ago"h"
print "test_ended="ago_str
}
}
END {print "type="type; ORS="\n"; print ""}
END {ORS="\n"; print ""}
');
fi
type=$(echo "$out" | grep '^type=' | cut -d '=' -f 2)
# If type is not set by now, either we don't have a block device
# or smartctl failed. Either way, default to ATA and set $out to
# nothing.
# if type is not set by now, either we don't have a block device
# or smartctl failed. Either way, default to ATA and set out to
# nothing
if [ -z "$type" ]; then
type="sata"
type="ATA"
out=
fi
case $script in
smart)
# Print temperature plus common predictors of drive failure
if [ "$type" = "sas" ] ; then
if [ "$type" = "SAS" ] ; then
scripts="temp|health|r_ucor|w_ucor"
elif [ "$type" = "sata" ] ; then
elif [ "$type" = "ATA" ] ; then
scripts="temp|health|ata_err|realloc|rep_ucor|cmd_to|pend_sec|off_ucor"
elif [ "$type" = "nvme" ] ; then
scripts="temp|health|nvme_err"
fi
;;
smartx)
# Print some other interesting stats
if [ "$type" = "sas" ] ; then
if [ "$type" = "SAS" ] ; then
scripts="hours_on|defect|nonmed|r_proc|w_proc"
elif [ "$type" = "sata" ] ; then
scripts="hours_on|pwr_cyc"
elif [ "$type" = "nvme" ] ; then
elif [ "$type" = "ATA" ] ; then
scripts="hours_on|pwr_cyc"
fi
;;
smart_test)
scripts="test_type|test_status|test_progress|test_ended"
;;
*)
scripts="$script"
esac


@ -1 +0,0 @@
smart


@ -1 +0,0 @@
smart


@ -1 +0,0 @@
smart


@ -1 +0,0 @@
smart


@ -1 +0,0 @@
smart


@ -2211,8 +2211,7 @@ show_import(nvlist_t *config)
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
cb.cb_namewidth = max_width(NULL, nvroot, 0, 0, VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
@ -3493,7 +3492,7 @@ single_histo_average(uint64_t *histo, unsigned int buckets)
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
nvlist_t *newnv, double scale)
{
int i;
uint64_t val;
@ -3523,7 +3522,7 @@ print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
format = ZFS_NICENUM_1024;
for (i = 0; i < ARRAY_SIZE(names); i++) {
val = nva[i].data[0];
val = nva[i].data[0] * scale;
print_one_stat(val, format, column_width, cb->cb_scripted);
}
@ -3532,7 +3531,7 @@ print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
nvlist_t *newnv, double scale)
{
int i;
uint64_t val;
@ -3562,7 +3561,7 @@ print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
val = single_histo_average(nva[i].data, nva[i].count) * scale;
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
@ -3701,9 +3700,9 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
print_iostat_latency(cb, oldnv, newnv, scale);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, oldnv, newnv);
print_iostat_queues(cb, oldnv, newnv, scale);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
@ -3902,7 +3901,7 @@ get_namewidth(zpool_handle_t *zhp, void *data)
&nvroot) == 0);
unsigned int poolname_len = strlen(zpool_get_name(zhp));
if (!cb->cb_verbose)
cb->cb_namewidth = MAX(poolname_len, cb->cb_namewidth);
cb->cb_namewidth = poolname_len;
else
cb->cb_namewidth = MAX(poolname_len,
max_width(zhp, nvroot, 0, cb->cb_namewidth,
@ -6226,8 +6225,7 @@ status_callback(zpool_handle_t *zhp, void *data)
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
(void) printf(gettext(" pool: %s\n"), zpool_get_name(zhp));
(void) printf(gettext(" state: %s\n"), health);
@ -6396,15 +6394,6 @@ status_callback(zpool_handle_t *zhp, void *data)
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
(void) printf(gettext("status: The pool is suspended because "
"multihost writes failed or were delayed;\n\tanother "
"system could import the pool undetected.\n"));
(void) printf(gettext("action: Make sure the pool's devices "
"are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
(void) printf(gettext("status: One or more devices are "
@ -7971,7 +7960,6 @@ main(int argc, char **argv)
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) textdomain(TEXT_DOMAIN);
@ -8006,25 +7994,16 @@ main(int argc, char **argv)
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
ret = command_table[i].func(argc - 1, argv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
ret = command_table[i].func(argc, argv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
@ -8041,10 +8020,6 @@ main(int argc, char **argv)
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);


@ -191,7 +191,6 @@ static vdev_disk_db_entry_t vdev_disk_database[] = {
{"ATA INTEL SSDSC2BP24", 4096},
{"ATA INTEL SSDSC2BP48", 4096},
{"NA SmrtStorSDLKAE9W", 4096},
{"NVMe Amazon EC2 NVMe ", 4096},
/* Imported from Open Solaris */
{"ATA MARVELL SD88SA02", 4096},
/* Advanced format Hard drives */
@ -801,11 +800,8 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
if (is_log)
continue;
/* Ignore holes introduced by removing aux devices */
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_HOLE) == 0)
continue;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
&type) == 0);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
/*
@ -861,11 +857,9 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
/*
* If this is a replacing or spare vdev, then
* get the real first child of the vdev: do this
* in a loop because replacing and spare vdevs
* can be nested.
* get the real first child of the vdev.
*/
while (strcmp(childtype,
if (strcmp(childtype,
VDEV_TYPE_REPLACING) == 0 ||
strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
nvlist_t **rchild;


@ -171,8 +171,8 @@ typedef struct ztest_shared_opts {
} ztest_shared_opts_t;
static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_pool = "ztest",
.zo_dir = "/tmp",
.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
.zo_dir = { '/', 't', 'm', 'p', '\0' },
.zo_alt_ztest = { '\0' },
.zo_alt_libpath = { '\0' },
.zo_vdevs = 5,
@ -197,8 +197,7 @@ extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
extern int metaslab_preload_limit;
extern boolean_t zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern int dmu_object_alloc_chunk_shift;
extern int zfs_abd_scatter_enabled;
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
@ -311,7 +310,6 @@ static ztest_shared_callstate_t *ztest_shared_callstate;
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_object_next_chunk;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
@ -359,7 +357,6 @@ ztest_info_t ztest_info[] = {
ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always),
ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always),
ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always),
ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always),
ZTI_INIT(ztest_zap, 30, &zopt_always),
ZTI_INIT(ztest_zap_parallel, 100, &zopt_always),
@ -1189,7 +1186,7 @@ ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
*/
typedef struct {
list_node_t z_lnode;
zfs_refcount_t z_refcnt;
refcount_t z_refcnt;
uint64_t z_object;
zfs_rlock_t z_range_lock;
} ztest_znode_t;
@ -1205,7 +1202,7 @@ ztest_znode_init(uint64_t object)
ztest_znode_t *zp = umem_alloc(sizeof (*zp), UMEM_NOFAIL);
list_link_init(&zp->z_lnode);
zfs_refcount_create(&zp->z_refcnt);
refcount_create(&zp->z_refcnt);
zp->z_object = object;
zfs_rlock_init(&zp->z_range_lock);
@ -1215,10 +1212,10 @@ ztest_znode_init(uint64_t object)
static void
ztest_znode_fini(ztest_znode_t *zp)
{
ASSERT(zfs_refcount_is_zero(&zp->z_refcnt));
ASSERT(refcount_is_zero(&zp->z_refcnt));
zfs_rlock_destroy(&zp->z_range_lock);
zp->z_object = 0;
zfs_refcount_destroy(&zp->z_refcnt);
refcount_destroy(&zp->z_refcnt);
list_link_init(&zp->z_lnode);
umem_free(zp, sizeof (*zp));
}
@ -1248,13 +1245,13 @@ ztest_znode_get(ztest_ds_t *zd, uint64_t object)
for (zp = list_head(&zll->z_list); (zp);
zp = list_next(&zll->z_list, zp)) {
if (zp->z_object == object) {
zfs_refcount_add(&zp->z_refcnt, RL_TAG);
refcount_add(&zp->z_refcnt, RL_TAG);
break;
}
}
if (zp == NULL) {
zp = ztest_znode_init(object);
zfs_refcount_add(&zp->z_refcnt, RL_TAG);
refcount_add(&zp->z_refcnt, RL_TAG);
list_insert_head(&zll->z_list, zp);
}
mutex_exit(&zll->z_lock);
@ -1268,8 +1265,8 @@ ztest_znode_put(ztest_ds_t *zd, ztest_znode_t *zp)
ASSERT3U(zp->z_object, !=, 0);
zll = &zd->zd_range_lock[zp->z_object & (ZTEST_OBJECT_LOCKS - 1)];
mutex_enter(&zll->z_lock);
zfs_refcount_remove(&zp->z_refcnt, RL_TAG);
if (zfs_refcount_is_zero(&zp->z_refcnt)) {
refcount_remove(&zp->z_refcnt, RL_TAG);
if (refcount_is_zero(&zp->z_refcnt)) {
list_remove(&zll->z_list, zp);
ztest_znode_fini(zp);
}
@ -3930,26 +3927,6 @@ ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
umem_free(od, size);
}
/*
* Rewind the global allocator to verify object allocation backfilling.
*/
void
ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
uint64_t object;
/*
* Rewind the global allocator randomly back to a lower object number
* to force backfilling and reclamation of recently freed dnodes.
*/
mutex_enter(&os->os_obj_lock);
object = ztest_random(os->os_obj_next_chunk);
os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk);
mutex_exit(&os->os_obj_lock);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
@ -5688,9 +5665,6 @@ ztest_reguid(ztest_ds_t *zd, uint64_t id)
uint64_t orig, load;
int error;
if (ztest_opts.zo_mmp_test)
return;
orig = spa_guid(spa);
load = spa_load_guid(spa);

View File

@ -55,12 +55,11 @@ main(int argc, char **argv)
{
int fd, error = 0;
char zvol_name[ZFS_MAX_DATASET_NAME_LEN];
char *zvol_name_part = NULL;
char zvol_name_part[ZFS_MAX_DATASET_NAME_LEN];
char *dev_name;
struct stat64 statbuf;
int dev_minor, dev_part;
int i;
int rc;
if (argc < 2) {
printf("Usage: %s /dev/zvol_device_node\n", argv[0]);
@ -89,13 +88,11 @@ main(int argc, char **argv)
return (errno);
}
if (dev_part > 0)
rc = asprintf(&zvol_name_part, "%s-part%d", zvol_name,
dev_part);
snprintf(zvol_name_part, ZFS_MAX_DATASET_NAME_LEN,
"%s-part%d", zvol_name, dev_part);
else
rc = asprintf(&zvol_name_part, "%s", zvol_name);
if (rc == -1 || zvol_name_part == NULL)
goto error;
snprintf(zvol_name_part, ZFS_MAX_DATASET_NAME_LEN,
"%s", zvol_name);
for (i = 0; i < strlen(zvol_name_part); i++) {
if (isblank(zvol_name_part[i]))
@ -103,8 +100,6 @@ main(int argc, char **argv)
}
printf("%s\n", zvol_name_part);
free(zvol_name_part);
error:
close(fd);
return (error);
}

View File

@ -6,7 +6,6 @@ AM_CFLAGS += ${NO_UNUSED_BUT_SET_VARIABLE}
AM_CFLAGS += ${NO_BOOL_COMPARE}
AM_CFLAGS += -fno-strict-aliasing
AM_CFLAGS += -std=gnu99
AM_CFLAGS += $(CODE_COVERAGE_CFLAGS)
AM_CPPFLAGS = -D_GNU_SOURCE -D__EXTENSIONS__ -D_REENTRANT
AM_CPPFLAGS += -D_POSIX_PTHREAD_SEMANTICS -D_FILE_OFFSET_BITS=64
AM_CPPFLAGS += -D_LARGEFILE64_SOURCE -DHAVE_LARGE_STACKS=1
@ -15,4 +14,3 @@ AM_CPPFLAGS += -DLIBEXECDIR=\"$(libexecdir)\"
AM_CPPFLAGS += -DRUNSTATEDIR=\"$(runstatedir)\"
AM_CPPFLAGS += -DSBINDIR=\"$(sbindir)\"
AM_CPPFLAGS += -DSYSCONFDIR=\"$(sysconfdir)\"
AM_CPPFLAGS += $(CODE_COVERAGE_CPPFLAGS)

View File

@ -1,264 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_code_coverage.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_CODE_COVERAGE()
#
# DESCRIPTION
#
# Defines CODE_COVERAGE_CPPFLAGS, CODE_COVERAGE_CFLAGS,
# CODE_COVERAGE_CXXFLAGS and CODE_COVERAGE_LIBS which should be included
# in the CPPFLAGS, CFLAGS CXXFLAGS and LIBS/LIBADD variables of every
# build target (program or library) which should be built with code
# coverage support. Also defines CODE_COVERAGE_RULES which should be
# substituted in your Makefile; and $enable_code_coverage which can be
# used in subsequent configure output. CODE_COVERAGE_ENABLED is defined
# and substituted, and corresponds to the value of the
# --enable-code-coverage option, which defaults to being disabled.
#
# Test also for gcov program and create GCOV variable that could be
# substituted.
#
# Note that all optimization flags in CFLAGS must be disabled when code
# coverage is enabled.
#
# Usage example:
#
# configure.ac:
#
# AX_CODE_COVERAGE
#
# Makefile.am:
#
# @CODE_COVERAGE_RULES@
# my_program_LIBS = ... $(CODE_COVERAGE_LIBS) ...
# my_program_CPPFLAGS = ... $(CODE_COVERAGE_CPPFLAGS) ...
# my_program_CFLAGS = ... $(CODE_COVERAGE_CFLAGS) ...
# my_program_CXXFLAGS = ... $(CODE_COVERAGE_CXXFLAGS) ...
#
# This results in a "check-code-coverage" rule being added to any
# Makefile.am which includes "@CODE_COVERAGE_RULES@" (assuming the module
# has been configured with --enable-code-coverage). Running `make
# check-code-coverage` in that directory will run the module's test suite
# (`make check`) and build a code coverage report detailing the code which
# was touched, then print the URI for the report.
#
# In earlier versions of this macro, CODE_COVERAGE_LDFLAGS was defined
# instead of CODE_COVERAGE_LIBS. They are both still defined, but use of
# CODE_COVERAGE_LIBS is preferred for clarity; CODE_COVERAGE_LDFLAGS is
# deprecated. They have the same value.
#
# This code was derived from Makefile.decl in GLib, originally licenced
# under LGPLv2.1+.
#
# LICENSE
#
# Copyright (c) 2012, 2016 Philip Withnall
# Copyright (c) 2012 Xan Lopez
# Copyright (c) 2012 Christian Persch
# Copyright (c) 2012 Paolo Borelli
# Copyright (c) 2012 Dan Winship
# Copyright (c) 2015 Bastien ROUCARIES
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or (at
# your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#serial 25
AC_DEFUN([AX_CODE_COVERAGE],[
dnl Check for --enable-code-coverage
AC_REQUIRE([AC_PROG_SED])
# allow to override gcov location
AC_ARG_WITH([gcov],
[AS_HELP_STRING([--with-gcov[=GCOV]], [use given GCOV for coverage (GCOV=gcov).])],
[_AX_CODE_COVERAGE_GCOV_PROG_WITH=$with_gcov],
[_AX_CODE_COVERAGE_GCOV_PROG_WITH=gcov])
AC_MSG_CHECKING([whether to build with code coverage support])
AC_ARG_ENABLE([code-coverage],
AS_HELP_STRING([--enable-code-coverage],
[Whether to enable code coverage support]),,
enable_code_coverage=no)
AM_CONDITIONAL([CODE_COVERAGE_ENABLED], [test x$enable_code_coverage = xyes])
AC_SUBST([CODE_COVERAGE_ENABLED], [$enable_code_coverage])
AC_MSG_RESULT($enable_code_coverage)
AS_IF([ test "$enable_code_coverage" = "yes" ], [
# check for gcov
AC_CHECK_TOOL([GCOV],
[$_AX_CODE_COVERAGE_GCOV_PROG_WITH],
[:])
AS_IF([test "X$GCOV" = "X:"],
[AC_MSG_ERROR([gcov is needed to do coverage])])
AC_SUBST([GCOV])
dnl Check if gcc is being used
AS_IF([ test "$GCC" = "no" ], [
AC_MSG_ERROR([not compiling with gcc, which is required for gcov code coverage])
])
AC_CHECK_PROG([LCOV], [lcov], [lcov])
AC_CHECK_PROG([GENHTML], [genhtml], [genhtml])
AS_IF([ test -z "$LCOV" ], [
AC_MSG_ERROR([To enable code coverage reporting you must have lcov installed])
])
AS_IF([ test -z "$GENHTML" ], [
AC_MSG_ERROR([Could not find genhtml from the lcov package])
])
dnl Build the code coverage flags
dnl Define CODE_COVERAGE_LDFLAGS for backwards compatibility
CODE_COVERAGE_CPPFLAGS=""
CODE_COVERAGE_CFLAGS="-O0 -g -fprofile-arcs -ftest-coverage"
CODE_COVERAGE_CXXFLAGS="-O0 -g -fprofile-arcs -ftest-coverage"
CODE_COVERAGE_LIBS="-lgcov"
CODE_COVERAGE_LDFLAGS="$CODE_COVERAGE_LIBS"
AC_SUBST([CODE_COVERAGE_CPPFLAGS])
AC_SUBST([CODE_COVERAGE_CFLAGS])
AC_SUBST([CODE_COVERAGE_CXXFLAGS])
AC_SUBST([CODE_COVERAGE_LIBS])
AC_SUBST([CODE_COVERAGE_LDFLAGS])
[CODE_COVERAGE_RULES_CHECK='
-$(A''M_V_at)$(MAKE) $(AM_MAKEFLAGS) -k check
$(A''M_V_at)$(MAKE) $(AM_MAKEFLAGS) code-coverage-capture
']
[CODE_COVERAGE_RULES_CAPTURE='
$(code_coverage_v_lcov_cap)$(LCOV) $(code_coverage_quiet) $(addprefix --directory ,$(CODE_COVERAGE_DIRECTORY)) --capture --output-file "$(CODE_COVERAGE_OUTPUT_FILE).tmp" --test-name "$(call code_coverage_sanitize,$(PACKAGE_NAME)-$(PACKAGE_VERSION))" --no-checksum --compat-libtool $(CODE_COVERAGE_LCOV_SHOPTS) $(CODE_COVERAGE_LCOV_OPTIONS)
$(code_coverage_v_lcov_ign)$(LCOV) $(code_coverage_quiet) $(addprefix --directory ,$(CODE_COVERAGE_DIRECTORY)) --remove "$(CODE_COVERAGE_OUTPUT_FILE).tmp" "/tmp/*" $(CODE_COVERAGE_IGNORE_PATTERN) --output-file "$(CODE_COVERAGE_OUTPUT_FILE)" $(CODE_COVERAGE_LCOV_SHOPTS) $(CODE_COVERAGE_LCOV_RMOPTS)
-@rm -f $(CODE_COVERAGE_OUTPUT_FILE).tmp
$(code_coverage_v_genhtml)LANG=C $(GENHTML) $(code_coverage_quiet) $(addprefix --prefix ,$(CODE_COVERAGE_DIRECTORY)) --output-directory "$(CODE_COVERAGE_OUTPUT_DIRECTORY)" --title "$(PACKAGE_NAME)-$(PACKAGE_VERSION) Code Coverage" --legend --show-details "$(CODE_COVERAGE_OUTPUT_FILE)" $(CODE_COVERAGE_GENHTML_OPTIONS)
@echo "file://$(abs_builddir)/$(CODE_COVERAGE_OUTPUT_DIRECTORY)/index.html"
']
[CODE_COVERAGE_RULES_CLEAN='
clean: code-coverage-clean
distclean: code-coverage-clean
code-coverage-clean:
-$(LCOV) --directory $(top_builddir) -z
-rm -rf $(CODE_COVERAGE_OUTPUT_FILE) $(CODE_COVERAGE_OUTPUT_FILE).tmp $(CODE_COVERAGE_OUTPUT_DIRECTORY)
-find . \( -name "*.gcda" -o -name "*.gcno" -o -name "*.gcov" \) -delete
']
], [
[CODE_COVERAGE_RULES_CHECK='
@echo "Need to reconfigure with --enable-code-coverage"
']
CODE_COVERAGE_RULES_CAPTURE="$CODE_COVERAGE_RULES_CHECK"
CODE_COVERAGE_RULES_CLEAN=''
])
[CODE_COVERAGE_RULES='
# Code coverage
#
# Optional:
# - CODE_COVERAGE_DIRECTORY: Top-level directory for code coverage reporting.
# Multiple directories may be specified, separated by whitespace.
# (Default: $(top_builddir))
# - CODE_COVERAGE_OUTPUT_FILE: Filename and path for the .info file generated
# by lcov for code coverage. (Default:
# $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage.info)
# - CODE_COVERAGE_OUTPUT_DIRECTORY: Directory for generated code coverage
# reports to be created. (Default:
# $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage)
# - CODE_COVERAGE_BRANCH_COVERAGE: Set to 1 to enforce branch coverage,
# set to 0 to disable it and leave empty to stay with the default.
# (Default: empty)
# - CODE_COVERAGE_LCOV_SHOPTS_DEFAULT: Extra options shared between both lcov
# instances. (Default: based on $CODE_COVERAGE_BRANCH_COVERAGE)
# - CODE_COVERAGE_LCOV_SHOPTS: Extra options to shared between both lcov
# instances. (Default: $CODE_COVERAGE_LCOV_SHOPTS_DEFAULT)
# - CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH: --gcov-tool pathtogcov
# - CODE_COVERAGE_LCOV_OPTIONS_DEFAULT: Extra options to pass to the
# collecting lcov instance. (Default: $CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH)
# - CODE_COVERAGE_LCOV_OPTIONS: Extra options to pass to the collecting lcov
# instance. (Default: $CODE_COVERAGE_LCOV_OPTIONS_DEFAULT)
# - CODE_COVERAGE_LCOV_RMOPTS_DEFAULT: Extra options to pass to the filtering
# lcov instance. (Default: empty)
# - CODE_COVERAGE_LCOV_RMOPTS: Extra options to pass to the filtering lcov
# instance. (Default: $CODE_COVERAGE_LCOV_RMOPTS_DEFAULT)
# - CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT: Extra options to pass to the
# genhtml instance. (Default: based on $CODE_COVERAGE_BRANCH_COVERAGE)
# - CODE_COVERAGE_GENHTML_OPTIONS: Extra options to pass to the genhtml
# instance. (Default: $CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT)
# - CODE_COVERAGE_IGNORE_PATTERN: Extra glob pattern of files to ignore
#
# The generated report will be titled using the $(PACKAGE_NAME) and
# $(PACKAGE_VERSION). In order to add the current git hash to the title,
# use the git-version-gen script, available online.
# Optional variables
CODE_COVERAGE_DIRECTORY ?= $(top_builddir)
CODE_COVERAGE_OUTPUT_FILE ?= $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage.info
CODE_COVERAGE_OUTPUT_DIRECTORY ?= $(PACKAGE_NAME)-$(PACKAGE_VERSION)-coverage
CODE_COVERAGE_BRANCH_COVERAGE ?=
CODE_COVERAGE_LCOV_SHOPTS_DEFAULT ?= $(if $(CODE_COVERAGE_BRANCH_COVERAGE),\
--rc lcov_branch_coverage=$(CODE_COVERAGE_BRANCH_COVERAGE))
CODE_COVERAGE_LCOV_SHOPTS ?= $(CODE_COVERAGE_LCOV_SHOPTS_DEFAULT)
CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH ?= --gcov-tool "$(GCOV)"
CODE_COVERAGE_LCOV_OPTIONS_DEFAULT ?= $(CODE_COVERAGE_LCOV_OPTIONS_GCOVPATH)
CODE_COVERAGE_LCOV_OPTIONS ?= $(CODE_COVERAGE_LCOV_OPTIONS_DEFAULT)
CODE_COVERAGE_LCOV_RMOPTS_DEFAULT ?=
CODE_COVERAGE_LCOV_RMOPTS ?= $(CODE_COVERAGE_LCOV_RMOPTS_DEFAULT)
CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT ?=\
$(if $(CODE_COVERAGE_BRANCH_COVERAGE),\
--rc genhtml_branch_coverage=$(CODE_COVERAGE_BRANCH_COVERAGE))
CODE_COVERAGE_GENHTML_OPTIONS ?= $(CODE_COVERAGE_GENHTML_OPTIONS_DEFAULT)
CODE_COVERAGE_IGNORE_PATTERN ?=
GITIGNOREFILES ?=
GITIGNOREFILES += $(CODE_COVERAGE_OUTPUT_FILE) $(CODE_COVERAGE_OUTPUT_DIRECTORY)
code_coverage_v_lcov_cap = $(code_coverage_v_lcov_cap_$(V))
code_coverage_v_lcov_cap_ = $(code_coverage_v_lcov_cap_$(AM_DEFAULT_VERBOSITY))
code_coverage_v_lcov_cap_0 = @echo " LCOV --capture"\
$(CODE_COVERAGE_OUTPUT_FILE);
code_coverage_v_lcov_ign = $(code_coverage_v_lcov_ign_$(V))
code_coverage_v_lcov_ign_ = $(code_coverage_v_lcov_ign_$(AM_DEFAULT_VERBOSITY))
code_coverage_v_lcov_ign_0 = @echo " LCOV --remove /tmp/*"\
$(CODE_COVERAGE_IGNORE_PATTERN);
code_coverage_v_genhtml = $(code_coverage_v_genhtml_$(V))
code_coverage_v_genhtml_ = $(code_coverage_v_genhtml_$(AM_DEFAULT_VERBOSITY))
code_coverage_v_genhtml_0 = @echo " GEN " $(CODE_COVERAGE_OUTPUT_DIRECTORY);
code_coverage_quiet = $(code_coverage_quiet_$(V))
code_coverage_quiet_ = $(code_coverage_quiet_$(AM_DEFAULT_VERBOSITY))
code_coverage_quiet_0 = --quiet
# sanitizes the test-name: replaces with underscores: dashes and dots
code_coverage_sanitize = $(subst -,_,$(subst .,_,$(1)))
# Use recursive makes in order to ignore errors during check
check-code-coverage:'"$CODE_COVERAGE_RULES_CHECK"'
# Capture code coverage data
code-coverage-capture: code-coverage-capture-hook'"$CODE_COVERAGE_RULES_CAPTURE"'
# Hook rule executed before code-coverage-capture, overridable by the user
code-coverage-capture-hook:
'"$CODE_COVERAGE_RULES_CLEAN"'
A''M_DISTCHECK_CONFIGURE_FLAGS ?=
A''M_DISTCHECK_CONFIGURE_FLAGS += --disable-code-coverage
.PHONY: check-code-coverage code-coverage-capture code-coverage-capture-hook code-coverage-clean
']
AC_SUBST([CODE_COVERAGE_RULES])
m4_ifdef([_AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE([CODE_COVERAGE_RULES])])
])

View File

@ -2,25 +2,24 @@ deb-local:
@(if test "${HAVE_DPKGBUILD}" = "no"; then \
echo -e "\n" \
"*** Required util ${DPKGBUILD} missing. Please install the\n" \
"*** package for your distribution which provides ${DPKGBUILD},\n" \
"*** package for your distribution which provides ${DPKGBUILD},\n" \
"*** re-run configure, and try again.\n"; \
exit 1; \
exit 1; \
fi; \
if test "${HAVE_ALIEN}" = "no"; then \
echo -e "\n" \
"*** Required util ${ALIEN} missing. Please install the\n" \
"*** package for your distribution which provides ${ALIEN},\n" \
"*** package for your distribution which provides ${ALIEN},\n" \
"*** re-run configure, and try again.\n"; \
exit 1; \
exit 1; \
fi)
deb-kmod: deb-local rpm-kmod
name=${PACKAGE}; \
version=${VERSION}-${RELEASE}; \
arch=`$(RPM) -qp $${name}-kmod-$${version}.src.rpm --qf %{arch} | tail -1`; \
debarch=`$(DPKG) --print-architecture`; \
pkg1=kmod-$${name}*$${version}.$${arch}.rpm; \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb --target=$$debarch $$pkg1; \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb $$pkg1; \
$(RM) $$pkg1
@ -28,16 +27,14 @@ deb-dkms: deb-local rpm-dkms
name=${PACKAGE}; \
version=${VERSION}-${RELEASE}; \
arch=`$(RPM) -qp $${name}-dkms-$${version}.src.rpm --qf %{arch} | tail -1`; \
debarch=`$(DPKG) --print-architecture`; \
pkg1=$${name}-dkms-$${version}.$${arch}.rpm; \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb --target=$$debarch $$pkg1; \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb $$pkg1; \
$(RM) $$pkg1
deb-utils: deb-local rpm-utils
name=${PACKAGE}; \
version=${VERSION}-${RELEASE}; \
arch=`$(RPM) -qp $${name}-$${version}.src.rpm --qf %{arch} | tail -1`; \
debarch=`$(DPKG) --print-architecture`; \
pkg1=$${name}-$${version}.$${arch}.rpm; \
pkg2=libnvpair1-$${version}.$${arch}.rpm; \
pkg3=libuutil1-$${version}.$${arch}.rpm; \
@ -60,7 +57,7 @@ deb-utils: deb-local rpm-utils
## which should NOT be mixed with the alien-generated debs created here
chmod +x $${path_prepend}/dh_shlibdeps; \
env PATH=$${path_prepend}:$${PATH} \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb --target=$$debarch \
fakeroot $(ALIEN) --bump=0 --scripts --to-deb \
$$pkg1 $$pkg2 $$pkg3 $$pkg4 $$pkg5 $$pkg6 $$pkg7 \
$$pkg8 $$pkg9; \
$(RM) $${path_prepend}/dh_shlibdeps; \

View File

@ -1,21 +0,0 @@
dnl #
dnl # Linux 5.0: access_ok() drops 'type' parameter:
dnl #
dnl # - access_ok(type, addr, size)
dnl # + access_ok(addr, size)
dnl #
AC_DEFUN([ZFS_AC_KERNEL_ACCESS_OK_TYPE], [
AC_MSG_CHECKING([whether access_ok() has 'type' parameter])
ZFS_LINUX_TRY_COMPILE([
#include <linux/uaccess.h>
],[
const void __user __attribute__((unused)) *addr = (void *) 0xdeadbeef;
unsigned long __attribute__((unused)) size = 1;
int error __attribute__((unused)) = access_ok(0, addr, size);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ACCESS_OK_TYPE, 1, [kernel has access_ok with 'type' parameter])
],[
AC_MSG_RESULT(no)
])
])
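As a rough sketch of how a consumer of this probe typically bridges the change (the zfs_access_ok wrapper name is hypothetical, not taken from this tree), the 'type' argument is simply passed through or dropped depending on the configure result:

#include <linux/uaccess.h>

/* Illustrative compat wrapper keyed off the probe above. */
#if defined(HAVE_ACCESS_OK_TYPE)
#define	zfs_access_ok(type, addr, size)	access_ok(type, addr, size)
#else
#define	zfs_access_ok(type, addr, size)	access_ok(addr, size)
#endif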

View File

@ -1,20 +0,0 @@
dnl #
dnl # 4.16 kernel: check if struct posix_acl acl.a_refcount is a refcount_t.
dnl # It's an atomic_t on older kernels.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_ACL_HAS_REFCOUNT], [
AC_MSG_CHECKING([whether posix_acl has refcount_t])
ZFS_LINUX_TRY_COMPILE([
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/posix_acl.h>
],[
struct posix_acl acl;
refcount_t *r __attribute__ ((unused)) = &acl.a_refcount;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ACL_REFCOUNT, 1, [posix_acl has refcount_t])
],[
AC_MSG_RESULT(no)
])
])
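A minimal compat sketch for the field type probed above (the accessor name is hypothetical): read the ACL reference count through refcount_t on 4.16+ kernels and through atomic_t on older ones.

#include <linux/posix_acl.h>

#if defined(HAVE_ACL_REFCOUNT)
#include <linux/refcount.h>
#define	zpl_acl_refcount(acl)	refcount_read(&(acl)->a_refcount)
#else
#include <linux/atomic.h>
#define	zpl_acl_refcount(acl)	atomic_read(&(acl)->a_refcount)
#endif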

View File

@ -184,7 +184,6 @@ AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_PERMISSION_WITH_NAMEIDATA], [
AC_MSG_CHECKING([whether iops->permission() wants nameidata])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
#include <linux/sched.h>
int permission_fn(struct inode *inode, int mask,
struct nameidata *nd) { return 0; }

View File

@ -0,0 +1,34 @@
dnl #
dnl # 2.6.x API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BDEV_BLOCK_DEVICE_OPERATIONS], [
AC_MSG_CHECKING([block device operation prototypes])
tmp_flags="$EXTRA_KCFLAGS"
EXTRA_KCFLAGS="${NO_UNUSED_BUT_SET_VARIABLE}"
ZFS_LINUX_TRY_COMPILE([
#include <linux/blkdev.h>
int blk_open(struct block_device *bdev, fmode_t mode)
{ return 0; }
int blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned x, unsigned long y) { return 0; }
int blk_compat_ioctl(struct block_device * bdev, fmode_t mode,
unsigned x, unsigned long y) { return 0; }
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.open = blk_open,
.release = NULL,
.ioctl = blk_ioctl,
.compat_ioctl = blk_compat_ioctl,
};
],[
],[
AC_MSG_RESULT(struct block_device)
AC_DEFINE(HAVE_BDEV_BLOCK_DEVICE_OPERATIONS, 1,
[struct block_device_operations use bdevs])
],[
AC_MSG_RESULT(struct inode)
])
EXTRA_KCFLAGS="$tmp_flags"
])

View File

@ -1,10 +1,10 @@
dnl #
dnl # Linux 4.14 API,
dnl #
dnl # The bio_set_dev() helper macro was introduced as part of the transition
dnl # The bio_set_dev() helper was introduced as part of the transition
dnl # to have struct gendisk in struct bio.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV_MACRO], [
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV], [
AC_MSG_CHECKING([whether bio_set_dev() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/bio.h>
@ -20,34 +20,3 @@ AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV_MACRO], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Linux 5.0 API,
dnl #
dnl # The bio_set_dev() helper macro was updated to internally depend on
dnl # bio_associate_blkg() symbol which is exported GPL-only.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV_GPL_ONLY], [
AC_MSG_CHECKING([whether bio_set_dev() is GPL-only])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/fs.h>
MODULE_LICENSE("$ZFS_META_LICENSE");
],[
struct block_device *bdev = NULL;
struct bio *bio = NULL;
bio_set_dev(bio, bdev);
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BIO_SET_DEV_GPL_ONLY, 1,
[bio_set_dev() GPL-only])
])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV], [
ZFS_AC_KERNEL_BIO_SET_DEV_MACRO
ZFS_AC_KERNEL_BIO_SET_DEV_GPL_ONLY
])
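For reference, a minimal sketch of how callers bridge the 4.14 change, assuming the first probe defines HAVE_BIO_SET_DEV (the helper name is hypothetical); the 5.0 GPL-only case detected by the second probe normally requires a local reimplementation of the macro, which is beyond this sketch:

#include <linux/bio.h>
#include <linux/blkdev.h>

static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_SET_DEV)
	bio_set_dev(bio, bdev);
#else
	/* Pre-4.14 kernels expose the device directly on the bio. */
	bio->bi_bdev = bdev;
#endif
}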

View File

@ -1,38 +0,0 @@
dnl #
dnl # API change
dnl # https://github.com/torvalds/linux/commit/8814ce8
dnl # Introduction of blk_queue_flag_set and blk_queue_flag_clear
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET], [
AC_MSG_CHECKING([whether blk_queue_flag_set() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/kernel.h>
#include <linux/blkdev.h>
],[
struct request_queue *q = NULL;
blk_queue_flag_set(0, q);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_FLAG_SET, 1, [blk_queue_flag_set() exists])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR], [
AC_MSG_CHECKING([whether blk_queue_flag_clear() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/kernel.h>
#include <linux/blkdev.h>
],[
struct request_queue *q = NULL;
blk_queue_flag_clear(0, q);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_FLAG_CLEAR, 1, [blk_queue_flag_clear() exists])
],[
AC_MSG_RESULT(no)
])
])
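A hedged compat sketch for the probes above: older kernels expose the same operation as queue_flag_set()/queue_flag_clear() (an assumption of this sketch; some kernel versions expect the queue lock to be held by the caller).

#include <linux/blkdev.h>

#if !defined(HAVE_BLK_QUEUE_FLAG_SET)
#define	blk_queue_flag_set(flag, q)	queue_flag_set(flag, q)
#endif
#if !defined(HAVE_BLK_QUEUE_FLAG_CLEAR)
#define	blk_queue_flag_clear(flag, q)	queue_flag_clear(flag, q)
#endif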

View File

@ -0,0 +1,29 @@
dnl #
dnl # 3.10.x API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID], [
AC_MSG_CHECKING([whether block_device_operations.release is void])
tmp_flags="$EXTRA_KCFLAGS"
EXTRA_KCFLAGS="${NO_UNUSED_BUT_SET_VARIABLE}"
ZFS_LINUX_TRY_COMPILE([
#include <linux/blkdev.h>
void blk_release(struct gendisk *g, fmode_t mode) { return; }
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.open = NULL,
.release = blk_release,
.ioctl = NULL,
.compat_ioctl = NULL,
};
],[
],[
AC_MSG_RESULT(void)
AC_DEFINE(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID, 1,
[struct block_device_operations.release returns void])
],[
AC_MSG_RESULT(int)
])
EXTRA_KCFLAGS="$tmp_flags"
])

View File

@ -1,57 +0,0 @@
dnl #
dnl # 2.6.38 API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS], [
AC_MSG_CHECKING([whether bops->check_events() exists])
tmp_flags="$EXTRA_KCFLAGS"
EXTRA_KCFLAGS="${NO_UNUSED_BUT_SET_VARIABLE}"
ZFS_LINUX_TRY_COMPILE([
#include <linux/blkdev.h>
unsigned int blk_check_events(struct gendisk *disk,
unsigned int clearing) { return (0); }
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.check_events = blk_check_events,
};
],[
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS, 1,
[bops->check_events() exists])
],[
AC_MSG_RESULT(no)
])
EXTRA_KCFLAGS="$tmp_flags"
])
dnl #
dnl # 3.10.x API change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID], [
AC_MSG_CHECKING([whether bops->release() is void])
tmp_flags="$EXTRA_KCFLAGS"
EXTRA_KCFLAGS="${NO_UNUSED_BUT_SET_VARIABLE}"
ZFS_LINUX_TRY_COMPILE([
#include <linux/blkdev.h>
void blk_release(struct gendisk *g, fmode_t mode) { return; }
static const struct block_device_operations
bops __attribute__ ((unused)) = {
.open = NULL,
.release = blk_release,
.ioctl = NULL,
.compat_ioctl = NULL,
};
],[
],[
AC_MSG_RESULT(void)
AC_DEFINE(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID, 1,
[bops->release() returns void])
],[
AC_MSG_RESULT(int)
])
EXTRA_KCFLAGS="$tmp_flags"
])

View File

@ -5,7 +5,6 @@ AC_DEFUN([ZFS_AC_KERNEL_CREATE_NAMEIDATA], [
AC_MSG_CHECKING([whether iops->create() passes nameidata])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
#include <linux/sched.h>
#ifdef HAVE_MKDIR_UMODE_T
int inode_create(struct inode *inode ,struct dentry *dentry,

View File

@ -1,14 +1,15 @@
dnl #
dnl # 4.9, current_time() added
dnl # 4.18, return type changed from timespec to timespec64
dnl #
AC_DEFUN([ZFS_AC_KERNEL_CURRENT_TIME],
[AC_MSG_CHECKING([whether current_time() exists])
ZFS_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/fs.h>
], [
struct inode ip __attribute__ ((unused));
ip.i_atime = current_time(&ip);
struct inode ip;
struct timespec now __attribute__ ((unused));
now = current_time(&ip);
], [current_time], [fs/inode.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CURRENT_TIME, 1, [current_time() exists])

View File

@ -5,7 +5,6 @@ AC_DEFUN([ZFS_AC_KERNEL_D_REVALIDATE_NAMEIDATA], [
AC_MSG_CHECKING([whether dops->d_revalidate() takes struct nameidata])
ZFS_LINUX_TRY_COMPILE([
#include <linux/dcache.h>
#include <linux/sched.h>
int revalidate (struct dentry *dentry,
struct nameidata *nidata) { return 0; }

View File

@ -1,6 +1,6 @@
dnl #
dnl # 2.6.36 API, exported elevator_change() symbol
dnl # 4.12 API, removed elevator_change() symbol
dnl # 2.6.36 API change
dnl # Verify the elevator_change() symbol is available.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_ELEVATOR_CHANGE], [
AC_MSG_CHECKING([whether elevator_change() is available])

View File

@ -1,41 +1,18 @@
dnl #
dnl # Handle differences in kernel FPU code.
dnl #
dnl # Kernel
dnl # 5.0: All kernel fpu functions are GPL only, so we can't use them.
dnl # (nothing defined)
dnl #
dnl # 4.2: Use __kernel_fpu_{begin,end}()
dnl # HAVE_UNDERSCORE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl #
dnl # Pre-4.2: Use kernel_fpu_{begin,end}()
dnl # HAVE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl # 4.2 API change
dnl # asm/i387.h is replaced by asm/fpu/api.h
dnl #
AC_DEFUN([ZFS_AC_KERNEL_FPU], [
AC_MSG_CHECKING([which kernel_fpu function to use])
AC_MSG_CHECKING([whether asm/fpu/api.h exists])
ZFS_LINUX_TRY_COMPILE([
#include <asm/i387.h>
#include <asm/xcr.h>
#include <linux/kernel.h>
#include <asm/fpu/api.h>
],[
kernel_fpu_begin();
kernel_fpu_end();
__kernel_fpu_begin();
],[
AC_MSG_RESULT(kernel_fpu_*)
AC_DEFINE(HAVE_KERNEL_FPU, 1, [kernel has kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1, [kernel exports FPU functions])
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FPU_API_H, 1, [kernel has <asm/fpu/api.h> interface])
],[
ZFS_LINUX_TRY_COMPILE([
#include <linux/kernel.h>
#include <asm/fpu/api.h>
],[
__kernel_fpu_begin();
__kernel_fpu_end();
],[
AC_MSG_RESULT(__kernel_fpu_*)
AC_DEFINE(HAVE_UNDERSCORE_KERNEL_FPU, 1, [kernel has __kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1, [kernel exports FPU functions])
],[
AC_MSG_RESULT(not exported)
])
AC_MSG_RESULT(no)
])
])
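To illustrate how the cases enumerated above are typically consumed (wrapper names are hypothetical, and the preemption handling for the underscore variants is an assumption of this sketch): when neither pair of symbols is usable, SIMD code paths are simply left out and the generic C implementations are used.

#include <linux/preempt.h>

#if defined(HAVE_KERNEL_FPU)
#include <asm/i387.h>
#define	kfpu_begin()	kernel_fpu_begin()
#define	kfpu_end()	kernel_fpu_end()
#elif defined(HAVE_UNDERSCORE_KERNEL_FPU)
#include <asm/fpu/api.h>
#define	kfpu_begin()	\
	do { preempt_disable(); __kernel_fpu_begin(); } while (0)
#define	kfpu_end()	\
	do { __kernel_fpu_end(); preempt_enable(); } while (0)
#endif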

View File

@ -1,28 +0,0 @@
dnl #
dnl # 2.6.38 API change
dnl # The .get_sb callback has been replaced by a .mount callback
dnl # in the file_system_type structure.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_FST_MOUNT], [
AC_MSG_CHECKING([whether fst->mount() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
static struct dentry *
mount(struct file_system_type *fs_type, int flags,
const char *osname, void *data) {
struct dentry *d = NULL;
return (d);
}
static struct file_system_type fst __attribute__ ((unused)) = {
.mount = mount,
};
],[
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FST_MOUNT, 1, [fst->mount() exists])
],[
AC_MSG_RESULT(no)
])
])

View File

@ -1,19 +0,0 @@
dnl #
dnl # 4.16 API change
dnl # Verify if get_disk_and_module() symbol is available.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_GET_DISK_AND_MODULE],
[AC_MSG_CHECKING([whether get_disk_and_module() is available])
ZFS_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/genhd.h>
], [
struct gendisk *disk = NULL;
(void) get_disk_and_module(disk);
], [get_disk_and_module], [block/genhd.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_DISK_AND_MODULE,
1, [get_disk_and_module() is available])
], [
AC_MSG_RESULT(no)
])
])
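A minimal compat sketch for the probe above: on older kernels the same operation is available under the name get_disk() (an assumption of this sketch, not stated in the diff).

#include <linux/genhd.h>

#if !defined(HAVE_GET_DISK_AND_MODULE)
#define	get_disk_and_module(disk)	get_disk(disk)
#endif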

View File

@ -41,7 +41,7 @@ AC_DEFUN([ZFS_AC_KERNEL_FOLLOW_LINK], [
AC_DEFINE(HAVE_FOLLOW_LINK_NAMEIDATA, 1,
[iops->follow_link() nameidata])
],[
AC_MSG_ERROR(no; please file a bug report)
AC_MSG_ERROR(no; please file a bug report)
])
])
])

View File

@ -1,109 +0,0 @@
dnl #
dnl # 4.8 API change
dnl #
dnl # 75ef71840539 mm, vmstat: add infrastructure for per-node vmstats
dnl # 599d0c954f91 mm, vmscan: move LRU lists to node
dnl #
AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_NODE_PAGE_STATE], [
AC_MSG_CHECKING([whether global_node_page_state() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/mm.h>
#include <linux/vmstat.h>
],[
(void) global_node_page_state(0);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(ZFS_GLOBAL_NODE_PAGE_STATE, 1, [global_node_page_state() exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 4.14 API change
dnl #
dnl # c41f012ade0b mm: rename global_page_state to global_zone_page_state
dnl #
AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE], [
AC_MSG_CHECKING([whether global_zone_page_state() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/mm.h>
#include <linux/vmstat.h>
],[
(void) global_zone_page_state(0);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(ZFS_GLOBAL_ZONE_PAGE_STATE, 1, [global_zone_page_state() exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Create a define and autoconf variable for an enum member
dnl #
AC_DEFUN([ZFS_AC_KERNEL_ENUM_MEMBER], [
AC_MSG_CHECKING([whether enum $2 contains $1])
AS_IF([AC_TRY_COMMAND("${srcdir}/scripts/enum-extract.pl" "$2" "$3" | egrep -qx $1)],[
AC_MSG_RESULT([yes])
AC_DEFINE(m4_join([_], [ZFS_ENUM], m4_toupper($2), $1), 1, [enum $2 contains $1])
m4_join([_], [ZFS_ENUM], m4_toupper($2), $1)=1
],[
AC_MSG_RESULT([no])
])
])
dnl #
dnl # Sanity check helpers
dnl #
AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_ERROR],[
AC_MSG_RESULT(no)
AC_MSG_RESULT([$1 in either node_stat_item or zone_stat_item: $2])
AC_MSG_RESULT([configure needs updating, see: config/kernel-global_page_state.m4])
AC_MSG_FAILURE([SHUT 'ER DOWN CLANCY, SHE'S PUMPIN' MUD!])
])
AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK], [
enum_check_a="m4_join([_], [$ZFS_ENUM_NODE_STAT_ITEM], $1)"
enum_check_b="m4_join([_], [$ZFS_ENUM_ZONE_STAT_ITEM], $1)"
AS_IF([test -n "$enum_check_a" -a -n "$enum_check_b"],[
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_ERROR([$1], [DUPLICATE])
])
AS_IF([test -z "$enum_check_a" -a -z "$enum_check_b"],[
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_ERROR([$1], [NOT FOUND])
])
])
dnl #
dnl # Ensure the config tests are finding one and only one of each enum of interest
dnl #
AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE_SANITY], [
AC_MSG_CHECKING([global_page_state enums are sane])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_FILE_PAGES])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_INACTIVE_ANON])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_INACTIVE_FILE])
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE_ENUM_CHECK([NR_SLAB_RECLAIMABLE])
AC_MSG_RESULT(yes)
])
dnl #
dnl # enum members in which we're interested
dnl #
AC_DEFUN([ZFS_AC_KERNEL_GLOBAL_PAGE_STATE], [
ZFS_AC_KERNEL_GLOBAL_NODE_PAGE_STATE
ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE
ZFS_AC_KERNEL_ENUM_MEMBER([NR_FILE_PAGES], [node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_ANON], [node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_FILE], [node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_SLAB_RECLAIMABLE], [node_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_FILE_PAGES], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_ANON], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_INACTIVE_FILE], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_ENUM_MEMBER([NR_SLAB_RECLAIMABLE], [zone_stat_item], [$LINUX/include/linux/mmzone.h])
ZFS_AC_KERNEL_GLOBAL_ZONE_PAGE_STATE_SANITY
])
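A hedged sketch of how the probes and enum checks above are typically combined (the accessor name is hypothetical): pick the page-state interface that exists and whose enum placement matched.

#include <linux/mm.h>
#include <linux/vmstat.h>

#if defined(ZFS_GLOBAL_NODE_PAGE_STATE) && \
	defined(ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE)
#define	nr_inactive_file_pages()	global_node_page_state(NR_INACTIVE_FILE)
#elif defined(ZFS_GLOBAL_ZONE_PAGE_STATE) && \
	defined(ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE)
#define	nr_inactive_file_pages()	global_zone_page_state(NR_INACTIVE_FILE)
#else
#define	nr_inactive_file_pages()	global_page_state(NR_INACTIVE_FILE)
#endif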

View File

@ -1,20 +0,0 @@
dnl #
dnl # 4.5 API change
dnl # Added in_compat_syscall() which can be overridden on a per-
dnl # architecture basis. Prior to this is_compat_task() was the
dnl # provided interface.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_IN_COMPAT_SYSCALL], [
AC_MSG_CHECKING([whether in_compat_syscall() is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/compat.h>
],[
in_compat_syscall();
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IN_COMPAT_SYSCALL, 1,
[in_compat_syscall() is available])
],[
AC_MSG_RESULT(no)
])
])
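A minimal compat sketch for the probe above, falling back to the older interface named in the comment:

#include <linux/compat.h>

#if !defined(HAVE_IN_COMPAT_SYSCALL)
#define	in_compat_syscall()	is_compat_task()
#endif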

View File

@ -1,19 +0,0 @@
dnl #
dnl # 4.16 API change
dnl # inode_set_iversion introduced to set i_version
dnl #
AC_DEFUN([ZFS_AC_KERNEL_INODE_SET_IVERSION], [
AC_MSG_CHECKING([whether inode_set_iversion() exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/iversion.h>
],[
struct inode inode;
inode_set_iversion(&inode, 1);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_INODE_SET_IVERSION, 1,
[inode_set_iversion() exists])
],[
AC_MSG_RESULT(no)
])
])
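A compat sketch for the probe above (the helper name is hypothetical): before 4.16 the i_version field was assigned directly, which is assumed here rather than taken from the diff.

#include <linux/fs.h>
#if defined(HAVE_INODE_SET_IVERSION)
#include <linux/iversion.h>
#endif

static inline void
zpl_inode_set_iversion(struct inode *ip, u64 val)
{
#if defined(HAVE_INODE_SET_IVERSION)
	inode_set_iversion(ip, val);
#else
	ip->i_version = val;
#endif
}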

View File

@ -5,7 +5,6 @@ AC_DEFUN([ZFS_AC_KERNEL_LOOKUP_NAMEIDATA], [
AC_MSG_CHECKING([whether iops->lookup() passes nameidata])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
#include <linux/sched.h>
struct dentry *inode_lookup(struct inode *inode,
struct dentry *dentry, struct nameidata *nidata)

View File

@ -1,26 +0,0 @@
dnl #
dnl # Determine an available miscellaneous minor number which can be used
dnl # for the /dev/zfs device. This is needed because kernel module
dnl # auto-loading depends on registering a reserved non-conflicting minor
dnl # number. Start with a large known available unreserved minor and work
dnl # our way down to lower value if a collision is detected.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_MISC_MINOR], [
AC_MSG_CHECKING([for available /dev/zfs minor])
for i in $(seq 249 -1 200); do
if ! grep -q "^#define\s\+.*_MINOR\s\+.*$i" \
${LINUX}/include/linux/miscdevice.h; then
ZFS_DEVICE_MINOR="$i"
AC_MSG_RESULT($ZFS_DEVICE_MINOR)
AC_DEFINE_UNQUOTED([ZFS_DEVICE_MINOR],
[$ZFS_DEVICE_MINOR], [/dev/zfs minor])
break
fi
done
AS_IF([ test -z "$ZFS_DEVICE_MINOR"], [
AC_MSG_ERROR([
*** No available misc minor numbers available for use.])
])
])

View File

@ -0,0 +1,20 @@
dnl #
dnl # 2.6.39 API change
dnl # The .get_sb callback has been replaced by a .mount callback
dnl # in the file_system_type structure. When using the new
dnl # interface the caller must now use the mount_nodev() helper.
dnl # This updated callback and helper no longer pass the vfsmount.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_MOUNT_NODEV],
[AC_MSG_CHECKING([whether mount_nodev() is available])
ZFS_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/fs.h>
], [
mount_nodev(NULL, 0, NULL, NULL);
], [mount_nodev], [fs/super.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_MOUNT_NODEV, 1, [mount_nodev() is available])
], [
AC_MSG_RESULT(no)
])
])
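To illustrate the interface described above (all names here are hypothetical sketches, not from this tree): a filesystem registers a .mount callback and builds its superblock through the mount_nodev() helper.

#include <linux/module.h>
#include <linux/fs.h>

static int
zpl_fill_super_sketch(struct super_block *sb, void *data, int silent)
{
	/* A real filesystem would populate sb->s_op, the root dentry, etc. */
	return (0);
}

static struct dentry *
zpl_mount_sketch(struct file_system_type *fs_type, int flags,
    const char *osname, void *data)
{
	return (mount_nodev(fs_type, flags, data, zpl_fill_super_sketch));
}

static struct file_system_type zpl_fs_type_sketch = {
	.owner		= THIS_MODULE,
	.name		= "zfs_sketch",
	.mount		= zpl_mount_sketch,
	.kill_sb	= kill_anon_super,
};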

View File

@ -1,67 +0,0 @@
dnl #
dnl # 2.6.38 API change
dnl # ns_capable() was introduced
dnl #
AC_DEFUN([ZFS_AC_KERNEL_NS_CAPABLE], [
AC_MSG_CHECKING([whether ns_capable exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/capability.h>
],[
ns_capable((struct user_namespace *)NULL, CAP_SYS_ADMIN);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_NS_CAPABLE, 1,
[ns_capable exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.39 API change
dnl # struct user_namespace was added to struct cred_t as
dnl # cred->user_ns member
dnl # Note that current_user_ns() was added in 2.6.28.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_CRED_USER_NS], [
AC_MSG_CHECKING([whether cred_t->user_ns exists])
ZFS_LINUX_TRY_COMPILE([
#include <linux/cred.h>
],[
struct cred cr;
cr.user_ns = (struct user_namespace *)NULL;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CRED_USER_NS, 1,
[cred_t->user_ns exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 3.4 API change
dnl # kuid_has_mapping() and kgid_has_mapping() were added to distinguish
dnl # between internal kernel uids/gids and user namespace uids/gids.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_KUID_HAS_MAPPING], [
AC_MSG_CHECKING([whether kuid_has_mapping/kgid_has_mapping exist])
ZFS_LINUX_TRY_COMPILE([
#include <linux/uidgid.h>
],[
kuid_has_mapping((struct user_namespace *)NULL, KUIDT_INIT(0));
kgid_has_mapping((struct user_namespace *)NULL, KGIDT_INIT(0));
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KUID_HAS_MAPPING, 1,
[kuid_has_mapping/kgid_has_mapping exist])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_USERNS_CAPABILITIES], [
ZFS_AC_KERNEL_NS_CAPABLE
ZFS_AC_KERNEL_CRED_USER_NS
ZFS_AC_KERNEL_KUID_HAS_MAPPING
])
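To illustrate how these probes are commonly combined (the wrapper name is hypothetical): test whether the caller holds a capability relative to the credential's user namespace when the kernel models one, otherwise fall back to the global check.

#include <linux/cred.h>
#include <linux/capability.h>

static inline int
zpl_cred_capable(const struct cred *cr, int cap)
{
#if defined(HAVE_NS_CAPABLE) && defined(HAVE_CRED_USER_NS)
	return (ns_capable(cr->user_ns, cap));
#else
	return (capable(cap));
#endif
}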

View File

@ -23,27 +23,16 @@ AC_DEFUN([ZFS_AC_KERNEL_VFS_ITERATE], [
dnl #
dnl # 3.11 API change
dnl #
dnl # RHEL 7.5 compatibility; the fops.iterate() method was
dnl # added to the file_operations structure but in order to
dnl # maintain KABI compatibility all callers must set
dnl # FMODE_KABI_ITERATE which is checked in iterate_dir().
dnl # When detected ignore this interface and fallback to
dnl # to using fops.readdir() to retain KABI compatibility.
dnl #
AC_MSG_CHECKING([whether fops->iterate() is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
int iterate(struct file *filp,
struct dir_context *context) { return 0; }
int iterate(struct file *filp, struct dir_context * context)
{ return 0; }
static const struct file_operations fops
__attribute__ ((unused)) = {
.iterate = iterate,
};
#if defined(FMODE_KABI_ITERATE)
#error "RHEL 7.5, FMODE_KABI_ITERATE interface"
#endif
],[
],[
AC_MSG_RESULT(yes)
@ -55,8 +44,8 @@ AC_DEFUN([ZFS_AC_KERNEL_VFS_ITERATE], [
AC_MSG_CHECKING([whether fops->readdir() is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
int readdir(struct file *filp, void *entry,
filldir_t func) { return 0; }
int readdir(struct file *filp, void *entry, filldir_t func)
{ return 0; }
static const struct file_operations fops
__attribute__ ((unused)) = {
@ -68,7 +57,7 @@ AC_DEFUN([ZFS_AC_KERNEL_VFS_ITERATE], [
AC_DEFINE(HAVE_VFS_READDIR, 1,
[fops->readdir() is available])
],[
AC_MSG_ERROR(no; file a bug report with ZoL)
AC_MSG_ERROR(no; file a bug report with ZFSOnLinux)
])
])
])

View File

@ -32,23 +32,15 @@ dnl #
dnl # Linux 4.1 API
dnl #
AC_DEFUN([ZFS_AC_KERNEL_NEW_SYNC_READ],
[AC_MSG_CHECKING([whether new_sync_read/write() are available])
[AC_MSG_CHECKING([whether new_sync_read() is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/fs.h>
],[
ssize_t ret __attribute__ ((unused));
struct file *filp = NULL;
char __user *rbuf = NULL;
const char __user *wbuf = NULL;
size_t len = 0;
loff_t ppos;
ret = new_sync_read(filp, rbuf, len, &ppos);
ret = new_sync_write(filp, wbuf, len, &ppos);
new_sync_read(NULL, NULL, 0, NULL);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_NEW_SYNC_READ, 1,
[new_sync_read()/new_sync_write() are available])
[new_sync_read() is available])
],[
AC_MSG_RESULT(no)
])

View File

@ -0,0 +1,22 @@
dnl #
dnl # 4.8 API change
dnl # kernel vm counters change
dnl #
AC_DEFUN([ZFS_AC_KERNEL_VM_NODE_STAT], [
AC_MSG_CHECKING([whether to use vm_node_stat based fn's])
ZFS_LINUX_TRY_COMPILE([
#include <linux/mm.h>
#include <linux/vmstat.h>
],[
int a __attribute__ ((unused)) = NR_VM_NODE_STAT_ITEMS;
long x __attribute__ ((unused)) =
atomic_long_read(&vm_node_stat[0]);
(void) global_node_page_state(0);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(ZFS_GLOBAL_NODE_PAGE_STATE, 1,
[using global_node_page_state()])
],[
AC_MSG_RESULT(no)
])
])

View File

@ -5,16 +5,14 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL
ZFS_AC_SPL
ZFS_AC_QAT
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_TEST_MODULE
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_CONFIG
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
ZFS_AC_KERNEL_CURRENT_BIO_TAIL
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_SUBMIT_BIO
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_CHECK_EVENTS
ZFS_AC_KERNEL_BDEV_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
ZFS_AC_KERNEL_TYPE_FMODE_T
ZFS_AC_KERNEL_3ARG_BLKDEV_GET
@ -37,14 +35,11 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_BIO_RW_BARRIER
ZFS_AC_KERNEL_BIO_RW_DISCARD
ZFS_AC_KERNEL_BLK_QUEUE_BDI
ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR
ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET
ZFS_AC_KERNEL_BLK_QUEUE_FLUSH
ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
ZFS_AC_KERNEL_BLK_QUEUE_HAVE_BIO_RW_UNPLUG
ZFS_AC_KERNEL_BLK_QUEUE_HAVE_BLK_PLUG
ZFS_AC_KERNEL_GET_DISK_AND_MODULE
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GET_GENDISK
ZFS_AC_KERNEL_HAVE_BIO_SET_OP_ATTRS
@ -70,7 +65,6 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL
ZFS_AC_KERNEL_INODE_OPERATIONS_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_GET_ACL_HANDLE_CACHE
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
@ -104,7 +98,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_6ARGS_SECURITY_INODE_INIT_SECURITY
ZFS_AC_KERNEL_CALLBACK_SECURITY_INODE_INIT_SECURITY
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_MOUNT_NODEV
ZFS_AC_KERNEL_SHRINK
ZFS_AC_KERNEL_SHRINK_CONTROL_HAS_NID
ZFS_AC_KERNEL_S_INSTANCES_LIST_HEAD
@ -128,10 +122,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
ZFS_AC_KERNEL_RENAME_WANTS_FLAGS
ZFS_AC_KERNEL_HAVE_GENERIC_SETXATTR
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_ACL_HAS_REFCOUNT
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_VM_NODE_STAT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNELMAKE_PARAMS="$KERNELMAKE_PARAMS O=$LINUX_OBJ"
@ -257,7 +248,7 @@ AC_DEFUN([ZFS_AC_KERNEL], [
AS_IF([test "$utsrelease"], [
kernsrcver=`(echo "#include <$utsrelease>";
echo "kernsrcver=UTS_RELEASE") |
${CPP} -I $kernelbuild/include - |
cpp -I $kernelbuild/include |
grep "^kernsrcver=" | cut -d \" -f 2`
AS_IF([test -z "$kernsrcver"], [
@ -541,10 +532,10 @@ AC_DEFUN([ZFS_AC_QAT], [
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
*** Failed to find icp_qa_al.ko in:
$QAT_OBJ])
])
@ -730,7 +721,7 @@ AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
modpost_flag=''
test "x$enable_linux_builtin" = xyes && modpost_flag='modpost=true' # fake modpost stage
AS_IF(
[AC_TRY_COMMAND(cp conftest.c conftest.h build && make [$2] -C $LINUX_OBJ EXTRA_CFLAGS="-Werror $FRAME_LARGER_THAN $EXTRA_KCFLAGS" $ARCH_UM M=$PWD/build $modpost_flag) >/dev/null && AC_TRY_COMMAND([$3])],
[AC_TRY_COMMAND(cp conftest.c conftest.h build && make [$2] -C $LINUX_OBJ EXTRA_CFLAGS="-Werror $EXTRA_KCFLAGS" $ARCH_UM M=$PWD/build $modpost_flag) >/dev/null && AC_TRY_COMMAND([$3])],
[$4],
[_AC_MSG_LOG_CONFTEST m4_ifvaln([$5],[$5])]
)

View File

@ -2,9 +2,9 @@ tgz-local:
@(if test "${HAVE_ALIEN}" = "no"; then \
echo -e "\n" \
"*** Required util ${ALIEN} missing. Please install the\n" \
"*** package for your distribution which provides ${ALIEN},\n" \
"*** package for your distribution which provides ${ALIEN},\n" \
"*** re-run configure, and try again.\n"; \
exit 1; \
exit 1; \
fi)
tgz-kmod: tgz-local rpm-kmod

View File

@ -1,14 +0,0 @@
dnl #
dnl # Check for libaio - only used for libaiot test cases.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_LIBAIO], [
LIBAIO=
AC_CHECK_HEADER([libaio.h], [
user_libaio=yes
AC_SUBST([LIBAIO], ["-laio"])
AC_DEFINE([HAVE_LIBAIO], 1, [Define if you have libaio])
], [
user_libaio=no
])
])

12
config/user-libattr.m4 Normal file
View File

@ -0,0 +1,12 @@
dnl #
dnl # Check for libattr
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_LIBATTR], [
LIBATTR=
AC_CHECK_HEADER([attr/xattr.h], [], [AC_MSG_FAILURE([
*** attr/xattr.h missing, libattr-devel package required])])
AC_SUBST([LIBATTR], ["-lattr"])
AC_DEFINE([HAVE_LIBATTR], 1, [Define if you have libattr])
])

View File

@ -6,7 +6,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_LIBBLKID], [
LIBBLKID=
AC_CHECK_HEADER([blkid/blkid.h], [], [AC_MSG_FAILURE([
*** blkid.h missing, libblkid-devel package required])])
*** blkid.h missing, libblkid-devel package required])])
AC_SUBST([LIBBLKID], ["-lblkid"])
AC_DEFINE([HAVE_LIBBLKID], 1, [Define if you have libblkid])

View File

@ -2,8 +2,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_SYSTEMD], [
AC_ARG_ENABLE(systemd,
AC_HELP_STRING([--enable-systemd],
[install systemd unit/preset files [[default: yes]]]),
[enable_systemd=$enableval],
[enable_systemd=check])
[],enable_systemd=yes)
AC_ARG_WITH(systemdunitdir,
AC_HELP_STRING([--with-systemdunitdir=DIR],
@ -20,27 +19,16 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_SYSTEMD], [
[install systemd module load files into dir [[/usr/lib/modules-load.d]]]),
systemdmoduleloaddir=$withval,systemdmodulesloaddir=/usr/lib/modules-load.d)
AS_IF([test "x$enable_systemd" = xcheck], [
AS_IF([systemctl --version >/dev/null 2>&1],
[enable_systemd=yes],
[enable_systemd=no])
])
AC_MSG_CHECKING(for systemd support)
AC_MSG_RESULT([$enable_systemd])
AS_IF([test "x$enable_systemd" = xyes], [
AS_IF([test "x$enable_systemd" = xyes],
[
ZFS_INIT_SYSTEMD=systemd
ZFS_MODULE_LOAD=modules-load.d
DEFINE_SYSTEMD='--with systemd --define "_unitdir $(systemdunitdir)" --define "_presetdir $(systemdpresetdir)"'
modulesloaddir=$systemdmodulesloaddir
],[
DEFINE_SYSTEMD='--without systemd'
])
])
AC_SUBST(ZFS_INIT_SYSTEMD)
AC_SUBST(ZFS_MODULE_LOAD)
AC_SUBST(DEFINE_SYSTEMD)
AC_SUBST(systemdunitdir)
AC_SUBST(systemdpresetdir)
AC_SUBST(modulesloaddir)

View File

@ -11,9 +11,9 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [
ZFS_AC_CONFIG_USER_LIBUUID
ZFS_AC_CONFIG_USER_LIBTIRPC
ZFS_AC_CONFIG_USER_LIBBLKID
ZFS_AC_CONFIG_USER_LIBATTR
ZFS_AC_CONFIG_USER_LIBUDEV
ZFS_AC_CONFIG_USER_FRAME_LARGER_THAN
ZFS_AC_CONFIG_USER_LIBAIO
ZFS_AC_CONFIG_USER_RUNSTATEDIR
ZFS_AC_CONFIG_USER_MAKEDEV_IN_SYSMACROS
ZFS_AC_CONFIG_USER_MAKEDEV_IN_MKDEV

View File

@ -6,75 +6,37 @@ AC_DEFUN([ZFS_AC_LICENSE], [
AC_MSG_RESULT([$ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_DEBUG_ENABLE], [
KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG -Werror"
HOSTCFLAGS="${HOSTCFLAGS} -DDEBUG -Werror"
DEBUG_CFLAGS="-DDEBUG -Werror"
DEBUG_ZFS="_with_debug"
AC_DEFINE(ZFS_DEBUG, 1, [zfs debugging enabled])
])
AC_DEFUN([ZFS_AC_DEBUG_DISABLE], [
KERNELCPPFLAGS="${KERNELCPPFLAGS} -DNDEBUG "
HOSTCFLAGS="${HOSTCFLAGS} -DNDEBUG "
DEBUG_CFLAGS="-DNDEBUG"
DEBUG_STACKFLAGS=""
DEBUG_ZFS="_without_debug"
])
AC_DEFUN([ZFS_AC_DEBUG], [
AC_MSG_CHECKING([whether assertion support will be enabled])
AC_MSG_CHECKING([whether debugging is enabled])
AC_ARG_ENABLE([debug],
[AS_HELP_STRING([--enable-debug],
[Enable assertion support @<:@default=no@:>@])],
[Enable generic debug support @<:@default=no@:>@])],
[],
[enable_debug=no])
AS_CASE(["x$enable_debug"],
["xyes"],
[ZFS_AC_DEBUG_ENABLE],
["xno"],
[ZFS_AC_DEBUG_DISABLE],
[AC_MSG_ERROR([Unknown option $enable_debug])])
AS_IF([test "x$enable_debug" = xyes],
[
KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG -Werror"
HOSTCFLAGS="${HOSTCFLAGS} -DDEBUG -Werror"
DEBUG_CFLAGS="-DDEBUG -Werror"
DEBUG_STACKFLAGS="-fstack-check"
DEBUG_ZFS="_with_debug"
AC_DEFINE(ZFS_DEBUG, 1, [zfs debugging enabled])
],
[
KERNELCPPFLAGS="${KERNELCPPFLAGS} -DNDEBUG "
HOSTCFLAGS="${HOSTCFLAGS} -DNDEBUG "
DEBUG_CFLAGS="-DNDEBUG"
DEBUG_STACKFLAGS=""
DEBUG_ZFS="_without_debug"
])
AC_SUBST(DEBUG_CFLAGS)
AC_SUBST(DEBUG_STACKFLAGS)
AC_SUBST(DEBUG_ZFS)
AC_MSG_RESULT([$enable_debug])
])
AC_DEFUN([ZFS_AC_DEBUGINFO_KERNEL], [
KERNELMAKE_PARAMS="$KERNELMAKE_PARAMS CONFIG_DEBUG_INFO=y"
KERNELCPPFLAGS="${KERNELCPPFLAGS} -fno-inline"
])
AC_DEFUN([ZFS_AC_DEBUGINFO_USER], [
DEBUG_CFLAGS="${DEBUG_CFLAGS} -g -fno-inline"
])
AC_DEFUN([ZFS_AC_DEBUGINFO], [
AC_MSG_CHECKING([whether debuginfo support will be forced])
AC_ARG_ENABLE([debuginfo],
[AS_HELP_STRING([--enable-debuginfo],
[Force generation of debuginfo @<:@default=no@:>@])],
[],
[enable_debuginfo=no])
AS_CASE(["x$enable_debuginfo"],
["xyes"],
[ZFS_AC_DEBUGINFO_KERNEL
ZFS_AC_DEBUGINFO_USER],
["xkernel"],
[ZFS_AC_DEBUGINFO_KERNEL],
["xuser"],
[ZFS_AC_DEBUGINFO_USER],
["xno"],
[],
[AC_MSG_ERROR([Unknown option $enable_debug])])
AC_SUBST(DEBUG_CFLAGS)
AC_MSG_RESULT([$enable_debuginfo])
])
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
ZFS_AC_CONFIG_ALWAYS_NO_UNUSED_BUT_SET_VARIABLE
ZFS_AC_CONFIG_ALWAYS_NO_BOOL_COMPARE
@ -117,11 +79,11 @@ AC_DEFUN([ZFS_AC_CONFIG], [
AM_CONDITIONAL([CONFIG_KERNEL],
[test "$ZFS_CONFIG" = kernel -o "$ZFS_CONFIG" = all] &&
[test "x$enable_linux_builtin" != xyes ])
AM_CONDITIONAL([WANT_DEVNAME2DEVID],
[test "x$user_libudev" = xyes ])
AM_CONDITIONAL([CONFIG_QAT],
[test "$ZFS_CONFIG" = kernel -o "$ZFS_CONFIG" = all] &&
[test "x$qatsrc" != x ])
AM_CONDITIONAL([WANT_DEVNAME2DEVID], [test "x$user_libudev" = xyes ])
AM_CONDITIONAL([WANT_MMAP_LIBAIO], [test "x$user_libaio" = xyes ])
])
dnl #
@ -159,45 +121,10 @@ AC_DEFUN([ZFS_AC_RPM], [
])
RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1"'
RPM_DEFINE_UTIL=' --define "_initconfdir $(DEFAULT_INITCONF_DIR)"'
dnl # Make the next three RPM_DEFINE_UTIL additions conditional, since
dnl # their values may not be set when running:
dnl #
dnl # ./configure --with-config=srpm
dnl #
AS_IF([test -n "$dracutdir" ], [
RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)"'
])
AS_IF([test -n "$udevdir" ], [
RPM_DEFINE_UTIL+=' --define "_udevdir $(udevdir)"'
])
AS_IF([test -n "$udevruledir" ], [
RPM_DEFINE_UTIL+=' --define "_udevdir $(udevruledir)"'
])
RPM_DEFINE_UTIL+=' $(DEFINE_INITRAMFS)'
RPM_DEFINE_UTIL+=' $(DEFINE_SYSTEMD)'
RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)" --define "_initconfdir $(DEFAULT_INITCONF_DIR)" $(DEFINE_INITRAMFS)'
RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)" --define "require_spldir $(SPL)" --define "require_splobj $(SPL_OBJ)" --define "ksrc $(LINUX)" --define "kobj $(LINUX_OBJ)"'
RPM_DEFINE_KMOD+=' --define "_wrong_version_format_terminate_build 0"'
RPM_DEFINE_DKMS=
dnl # Override default lib directory on Debian/Ubuntu systems. The provided
dnl # /usr/lib/rpm/platform/<arch>/macros files do not specify the correct
dnl # path for multiarch systems as described by the packaging guidelines.
dnl #
dnl # https://wiki.ubuntu.com/MultiarchSpec
dnl # https://wiki.debian.org/Multiarch/Implementation
dnl #
AS_IF([test "$DEFAULT_PACKAGE" = "deb"], [
MULTIARCH_LIBDIR="lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)"
RPM_DEFINE_UTIL+=' --define "_lib $(MULTIARCH_LIBDIR)"'
AC_SUBST(MULTIARCH_LIBDIR)
])
SRPM_DEFINE_COMMON='--define "build_src_rpm 1"'
SRPM_DEFINE_UTIL=
SRPM_DEFINE_KMOD=

View File

@ -50,13 +50,11 @@ AC_PROG_CC
AC_PROG_LIBTOOL
AM_PROG_AS
AM_PROG_CC_C_O
AX_CODE_COVERAGE
ZFS_AC_LICENSE
ZFS_AC_PACKAGE
ZFS_AC_CONFIG
ZFS_AC_DEBUG
ZFS_AC_DEBUGINFO
AC_CONFIG_FILES([
Makefile
@ -122,9 +120,6 @@ AC_CONFIG_FILES([
contrib/dracut/02zfsexpandknowledge/Makefile
contrib/dracut/90zfs/Makefile
contrib/initramfs/Makefile
contrib/initramfs/hooks/Makefile
contrib/initramfs/scripts/Makefile
contrib/initramfs/scripts/local-top/Makefile
module/Makefile
module/avl/Makefile
module/nvpair/Makefile
@ -156,7 +151,6 @@ AC_CONFIG_FILES([
tests/zfs-tests/callbacks/Makefile
tests/zfs-tests/cmd/Makefile
tests/zfs-tests/cmd/chg_usr_exec/Makefile
tests/zfs-tests/cmd/user_ns_exec/Makefile
tests/zfs-tests/cmd/devname2devid/Makefile
tests/zfs-tests/cmd/dir_rd_update/Makefile
tests/zfs-tests/cmd/file_check/Makefile
@ -168,7 +162,6 @@ AC_CONFIG_FILES([
tests/zfs-tests/cmd/mkfiles/Makefile
tests/zfs-tests/cmd/mktree/Makefile
tests/zfs-tests/cmd/mmap_exec/Makefile
tests/zfs-tests/cmd/mmap_libaio/Makefile
tests/zfs-tests/cmd/mmapwrite/Makefile
tests/zfs-tests/cmd/randfree_file/Makefile
tests/zfs-tests/cmd/readmmap/Makefile
@ -241,7 +234,6 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/cli_user/zpool_iostat/Makefile
tests/zfs-tests/tests/functional/cli_user/zpool_list/Makefile
tests/zfs-tests/tests/functional/compression/Makefile
tests/zfs-tests/tests/functional/cp_files/Makefile
tests/zfs-tests/tests/functional/ctime/Makefile
tests/zfs-tests/tests/functional/delegate/Makefile
tests/zfs-tests/tests/functional/devices/Makefile
@ -256,7 +248,6 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/history/Makefile
tests/zfs-tests/tests/functional/inheritance/Makefile
tests/zfs-tests/tests/functional/inuse/Makefile
tests/zfs-tests/tests/functional/kstat/Makefile
tests/zfs-tests/tests/functional/large_files/Makefile
tests/zfs-tests/tests/functional/largest_pool/Makefile
tests/zfs-tests/tests/functional/link_count/Makefile
@ -291,7 +282,6 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/threadsappend/Makefile
tests/zfs-tests/tests/functional/tmpfile/Makefile
tests/zfs-tests/tests/functional/truncate/Makefile
tests/zfs-tests/tests/functional/user_namespace/Makefile
tests/zfs-tests/tests/functional/userquota/Makefile
tests/zfs-tests/tests/functional/upgrade/Makefile
tests/zfs-tests/tests/functional/vdev_zaps/Makefile

View File

@ -24,7 +24,6 @@ $(pkgdracut_SCRIPTS):%:%.in
-e 's,@udevruledir\@,$(udevruledir),g' \
-e 's,@sysconfdir\@,$(sysconfdir),g' \
-e 's,@systemdunitdir\@,$(systemdunitdir),g' \
-e 's,@mounthelperdir\@,$(mounthelperdir),g' \
$< >'$@'
distclean-local::

View File

@ -5,7 +5,7 @@ check() {
[ "${1}" = "-d" ] && return 0
# Verify the zfs tool chain
for tool in "@sbindir@/zpool" "@sbindir@/zfs" "@mounthelperdir@/mount.zfs" ; do
for tool in "@sbindir@/zpool" "@sbindir@/zfs" "@sbindir@/mount.zfs" ; do
test -x "$tool" || return 1
done
# Verify grep exists
@ -53,7 +53,7 @@ install() {
# Fallback: Guess the path and include all matches
dracut_install /usr/lib/gcc/*/*/libgcc_s.so*
fi
dracut_install @mounthelperdir@/mount.zfs
dracut_install @sbindir@/mount.zfs
dracut_install @udevdir@/vdev_id
dracut_install awk
dracut_install head

View File

@ -34,7 +34,6 @@ info "ZFS: No sysroot.mount exists or zfs-generator did not extend it."
info "ZFS: Mounting root with the traditional mount-zfs.sh instead."
# Delay until all required block devices are present.
modprobe zfs 2>/dev/null
udevadm settle
if [ "${root}" = "zfs:AUTO" ] ; then

View File

@ -1,17 +1,16 @@
initrddir = $(datarootdir)/initramfs-tools
initrd_SCRIPTS = \
conf.d/zfs conf-hooks.d/zfs hooks/zfs scripts/zfs scripts/local-top/zfs
SUBDIRS = hooks scripts
initrd_SCRIPTS = conf-hooks.d/zfs hooks/zfs scripts/zfs scripts/local-top/zfs
EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/conf.d/zfs \
$(top_srcdir)/contrib/initramfs/conf-hooks.d/zfs \
$(top_srcdir)/contrib/initramfs/hooks/zfs \
$(top_srcdir)/contrib/initramfs/scripts/zfs \
$(top_srcdir)/contrib/initramfs/scripts/local-top/zfs \
$(top_srcdir)/contrib/initramfs/README.initramfs.markdown
install-initrdSCRIPTS: $(EXTRA_DIST)
for d in conf.d conf-hooks.d hooks scripts scripts/local-top; do \
for d in conf-hooks.d hooks scripts scripts/local-top; do \
$(MKDIR_P) $(DESTDIR)$(initrddir)/$$d; \
cp $(top_srcdir)/contrib/initramfs/$$d/zfs \
$(DESTDIR)$(initrddir)/$$d/; \

View File

@ -1,8 +0,0 @@
for x in $(cat /proc/cmdline)
do
case $x in
root=ZFS=*|root=zfs:*)
BOOT=zfs
;;
esac
done

View File

@ -1 +0,0 @@
zfs

View File

@ -1,21 +0,0 @@
hooksdir = $(datarootdir)/initramfs-tools/hooks
hooks_SCRIPTS = \
zfs
EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/hooks/zfs.in
$(hooks_SCRIPTS):%:%.in
-$(SED) -e 's,@sbindir\@,$(sbindir),g' \
-e 's,@sysconfdir\@,$(sysconfdir),g' \
-e 's,@udevdir\@,$(udevdir),g' \
-e 's,@udevruledir\@,$(udevruledir),g' \
-e 's,@mounthelperdir\@,$(mounthelperdir),g' \
$< >'$@'
clean-local::
-$(RM) $(hooks_SCRIPTS)
distclean-local::
-$(RM) $(hooks_SCRIPTS)

View File

@ -8,17 +8,14 @@ PREREQ="zdev"
# These prerequisites are provided by the zfsutils package. The zdb utility is
# not strictly required, but it can be useful at the initramfs recovery prompt.
COPY_EXEC_LIST="@sbindir@/zdb @sbindir@/zpool @sbindir@/zfs"
COPY_EXEC_LIST="$COPY_EXEC_LIST @mounthelperdir@/mount.zfs @udevdir@/vdev_id"
COPY_FILE_LIST="/etc/hostid @sysconfdir@/zfs/zpool.cache"
COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/default/zfs"
COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/zfs/zfs-functions"
COPY_FILE_LIST="$COPY_FILE_LIST @sysconfdir@/zfs/vdev_id.conf"
COPY_FILE_LIST="$COPY_FILE_LIST @udevruledir@/69-vdev.rules"
COPY_EXEC_LIST="/sbin/zdb /sbin/zpool /sbin/zfs /sbin/mount.zfs"
COPY_EXEC_LIST="$COPY_EXEC_LIST /usr/bin/dirname /lib/udev/vdev_id"
COPY_FILE_LIST="/etc/hostid /etc/zfs/zpool.cache /etc/default/zfs"
COPY_FILE_LIST="$COPY_FILE_LIST /etc/zfs/zfs-functions /etc/zfs/vdev_id.conf"
COPY_FILE_LIST="$COPY_FILE_LIST /lib/udev/rules.d/69-vdev.rules"
# These prerequisites are provided by the base system.
COPY_EXEC_LIST="$COPY_EXEC_LIST /usr/bin/dirname /bin/hostname /sbin/blkid"
COPY_EXEC_LIST="$COPY_EXEC_LIST /usr/bin/env"
COPY_EXEC_LIST="$COPY_EXEC_LIST /bin/hostname /sbin/blkid"
# Explicitly specify all kernel modules because automatic dependency resolution
# is unreliable on many systems.
@ -85,7 +82,7 @@ else
fi
for ii in zfs zfs.conf spl spl.conf
do
do
if [ -f "/etc/modprobe.d/$ii" ]; then
if [ ! -d "$DESTDIR/etc/modprobe.d" ]; then
mkdir -p $DESTDIR/etc/modprobe.d

View File

@ -1 +0,0 @@
zfs

View File

@ -1,20 +0,0 @@
scriptsdir = $(datarootdir)/initramfs-tools/scripts
scripts_SCRIPTS = \
zfs
SUBDIRS = local-top
EXTRA_DIST = \
$(top_srcdir)/contrib/initramfs/scripts/zfs.in
$(scripts_SCRIPTS):%:%.in
-$(SED) -e 's,@sbindir\@,$(sbindir),g' \
-e 's,@sysconfdir\@,$(sysconfdir),g' \
$< >'$@'
clean-local::
-$(RM) $(scripts_SCRIPTS)
distclean-local::
-$(RM) $(scripts_SCRIPTS)

View File

@ -1,3 +0,0 @@
localtopdir = $(datarootdir)/initramfs-tools/scripts/local-top
EXTRA_DIST = zfs

View File

@ -11,9 +11,9 @@
# Paths to what we need - in the initrd, these paths are hardcoded,
# so override the defines in zfs-functions.
ZFS="@sbindir@/zfs"
ZPOOL="@sbindir@/zpool"
ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
ZFS="/sbin/zfs"
ZPOOL="/sbin/zpool"
ZPOOL_CACHE="/etc/zfs/zpool.cache"
export ZFS ZPOOL ZPOOL_CACHE
# This runs any scripts that should run before we start importing
@ -150,7 +150,7 @@ get_pools()
fi
fi
# Filter out any exceptions...
# Filter out any exceptions...
if [ -n "$ZFS_POOL_EXCEPTIONS" ]
then
local found=""
@ -193,7 +193,7 @@ import_pool()
# Verify that the pool isn't already imported
# Make as sure as we can to not require '-f' to import.
"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0
"${ZPOOL}" status "$pool" > /dev/null 2>&1 && return 0
# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
# to something we can use later with the real import(s). We want to
@ -317,14 +317,6 @@ mount_fs()
"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1
[ "$?" -ne 0 ] && return 1
# Skip filesystems with canmount=off. The root fs should not have
# canmount=off, but ignore it for backwards compatibility just in case.
if [ "$fs" != "${ZFS_BOOTFS}" ]
then
canmount=$(get_fs_value "$fs" canmount)
[ "$canmount" = "off" ] && return 0
fi
# Need the _original_ datasets mountpoint!
mountpoint=$(get_fs_value "$fs" mountpoint)
if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
@ -337,9 +329,11 @@ mount_fs()
"$mountpoint" = "-" ]
then
if [ "$fs" != "${ZFS_BOOTFS}" ]; then
# We don't have a proper mountpoint and this
# isn't the root fs.
return 0
# We don't have a proper mountpoint, this
# isn't the root fs. So extract the root fs
# value from the filesystem, and we should
# (hopefully!) have a mountpoint we can use.
mountpoint="${fs##$ZFS_BOOTFS}"
else
# Last hail-mary: Hope 'rootmnt' is set!
mountpoint=""
@ -478,7 +472,7 @@ destroy_fs()
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
echo "Failed to destroy '$fs'. Please make sure that '$fs' is not availible."
echo "Hint: Try: zfs destroy -Rfn $fs"
echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
/bin/sh
@ -616,7 +610,7 @@ setup_snapshot_booting()
# Separate the full snapshot ('$snap') into it's filesystem and
# snapshot names. Would have been nice with a split() function..
rootfs="${snap%%@*}"
snapname="${snap##*@}"
snapname="${snap##*@}"
ZFS_BOOTFS="${rootfs}_${snapname}"
if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
@ -772,7 +766,6 @@ mountroot()
# root=zfs:<pool>/<dataset> (uses this for rpool - first part, without 'zfs:')
#
# Option <dataset> could also be <snapshot>
# Option <pool> could also be <guid>
# ------------
# Support force option
@ -890,32 +883,6 @@ mountroot()
/bin/sh
fi
# In case the pool was specified as guid, resolve guid to name
pool="$("${ZPOOL}" get name,guid -o name,value -H | \
awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
if [ -n "$pool" ]; then
ZFS_BOOTFS="${pool}/${ZFS_BOOTFS#*/}"
ZFS_RPOOL="${pool}"
fi
# Set elevator=noop on the root pool's vdevs' disks. ZFS already
# does this for wholedisk vdevs (for all pools), so this is only
# important for partitions.
"${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
awk '/^\t / && !/(mirror|raidz)/ {
dev=$1;
sub(/[0-9]+$/, "", dev);
print dev
}' |
while read i
do
if [ -e "/sys/block/$i/queue/scheduler" ]
then
echo noop > "/sys/block/$i/queue/scheduler"
fi
done
# ----------------------------------------------------------------
# P R E P A R E R O O T F I L E S Y S T E M
@ -958,7 +925,7 @@ mountroot()
# NOTE: Mounted in the order specified in the
# ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
# Go through the complete list (recursively) of all filesystems below
# Go through the complete list (recursivly) of all filesystems below
# the real root dataset
filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
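
The removed GUID-resolution hunk above is easiest to read in isolation: `zpool get name,guid -o name,value -H` prints two tab-separated lines per pool (one per requested property), and the awk filter maps a GUID given on the kernel command line back to a pool name. A minimal standalone sketch, with an invented pool name and GUID:

    # Example `zpool get name,guid -o name,value -H` output (values are made up):
    #   rpool   rpool
    #   rpool   1234567890123456789
    ZFS_RPOOL=1234567890123456789
    pool="$(zpool get name,guid -o name,value -H | \
        awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
    echo "$pool"    # would print "rpool" if such a pool were imported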

View File

@ -22,7 +22,7 @@ $(init_SCRIPTS) $(initconf_SCRIPTS) $(initcommon_SCRIPTS):%:%.in
NFS_SRV=nfs; \
fi; \
if [ -e /sbin/openrc-run ]; then \
SHELL=/sbin/openrc-run; \
SHELL=/sbin/runscript; \
else \
SHELL=/bin/sh; \
fi; \

0 etc/init.d/zfs-import.in Normal file → Executable file
View File

0 etc/init.d/zfs-mount.in Normal file → Executable file
View File

0 etc/init.d/zfs-share.in Normal file → Executable file
View File

0 etc/init.d/zfs-zed.in Normal file → Executable file
View File

View File

@ -1,3 +1,3 @@
# The default behavior is to allow udev to load the kernel modules on demand.
# Uncomment the following line to unconditionally load them at boot.
# Always load kernel modules at boot. The default behavior is to load the
# kernel modules in the zfs-import-*.service or when blkid(8) detects a pool.
#zfs

View File

@ -1,7 +1,6 @@
# ZFS is enabled by default
enable zfs-import-cache.service
disable zfs-import-scan.service
enable zfs-import.target
enable zfs-mount.service
enable zfs-share.service
enable zfs-zed.service
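
Preset files like the one above only declare policy; they take effect when presets are applied, typically by the package scripts or by hand. A hedged example of re-applying the presets for the units listed above:

    # Re-apply vendor presets for the ZFS units; safe to repeat.
    systemctl preset zfs-import-cache.service zfs-import-scan.service \
        zfs-mount.service zfs-share.service zfs-zed.service zfs.target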

View File

@ -7,7 +7,6 @@ systemdunit_DATA = \
zfs-import-scan.service \
zfs-mount.service \
zfs-share.service \
zfs-import.target \
zfs.target
EXTRA_DIST = \
@ -16,7 +15,6 @@ EXTRA_DIST = \
$(top_srcdir)/etc/systemd/system/zfs-import-scan.service.in \
$(top_srcdir)/etc/systemd/system/zfs-mount.service.in \
$(top_srcdir)/etc/systemd/system/zfs-share.service.in \
$(top_srcdir)/etc/systemd/system/zfs-import.target.in \
$(top_srcdir)/etc/systemd/system/zfs.target.in \
$(top_srcdir)/etc/systemd/system/50-zfs.preset.in

View File

@ -6,13 +6,14 @@ After=systemd-udev-settle.service
After=cryptsetup.target
After=systemd-remount-fs.service
Before=dracut-mount.service
Before=zfs-import.target
ConditionPathExists=@sysconfdir@/zfs/zpool.cache
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/sbin/modprobe zfs
ExecStart=@sbindir@/zpool import -c @sysconfdir@/zfs/zpool.cache -aN
[Install]
WantedBy=zfs-import.target
WantedBy=zfs-mount.service
WantedBy=zfs.target
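
Note that this unit only runs when the cache file referenced by ConditionPathExists is present; otherwise zfs-import-scan.service takes over. If the cache file is missing or stale, it can be regenerated with zpool itself (the pool name below is an example):

    # Rewrite /etc/zfs/zpool.cache for an example pool named "tank".
    zpool set cachefile=/etc/zfs/zpool.cache tank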

View File

@ -5,13 +5,14 @@ Requires=systemd-udev-settle.service
After=systemd-udev-settle.service
After=cryptsetup.target
Before=dracut-mount.service
Before=zfs-import.target
ConditionPathExists=!@sysconfdir@/zfs/zpool.cache
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/sbin/modprobe zfs
ExecStart=@sbindir@/zpool import -aN -o cachefile=none
[Install]
WantedBy=zfs-import.target
WantedBy=zfs-mount.service
WantedBy=zfs.target

View File

@ -1,6 +0,0 @@
[Unit]
Description=ZFS pool import target
[Install]
WantedBy=zfs-mount.service
WantedBy=zfs.target

View File

@ -2,7 +2,8 @@
Description=Mount ZFS filesystems
DefaultDependencies=no
After=systemd-udev-settle.service
After=zfs-import.target
After=zfs-import-cache.service
After=zfs-import-scan.service
After=systemd-remount-fs.service
Before=local-fs.target
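
Ordering changes like the one above can be verified on a running system; systemctl can list the units that are ordered before a given unit, which makes it easy to confirm whether the import services (or zfs-import.target on newer layouts) precede the mount unit:

    # List units ordered before zfs-mount.service (ordering only, not requirements).
    systemctl list-dependencies --after zfs-mount.service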

View File

@ -4,7 +4,6 @@ pkgsysconf_DATA = \
vdev_id.conf.alias.example \
vdev_id.conf.sas_direct.example \
vdev_id.conf.sas_switch.example \
vdev_id.conf.multipath.example \
vdev_id.conf.scsi.example
vdev_id.conf.multipath.example
EXTRA_DIST = $(pkgsysconf_DATA)

Some files were not shown because too many files have changed in this diff.