diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index 8d3a373047..b8d81d4fad 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -139,7 +139,7 @@ AC_DEFUN([ZFS_AC_RPM], [ ]) RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1" --define "$(DEBUG_DMU_TX) 1"' - RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)"' + RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)" --define "_initconfdir $(DEFAULT_INITCONF_DIR)"' RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)" --define "require_spldir $(SPL)" --define "require_splobj $(SPL_OBJ)" --define "ksrc $(LINUX)" --define "kobj $(LINUX_OBJ)"' RPM_DEFINE_DKMS= @@ -311,6 +311,21 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [ AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT]) AC_SUBST(DEFAULT_INIT_SCRIPT) + + AC_MSG_CHECKING([default init config direectory]) + case "$VENDOR" in + gentoo) DEFAULT_INITCONF_DIR=/etc/conf.d ;; + toss) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + redhat) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + fedora) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + sles) DEFAULT_INITCONF_DIR=/etc/sysconfig ;; + ubuntu) DEFAULT_INITCONF_DIR=/etc/default ;; + debian) DEFAULT_INITCONF_DIR=/etc/default ;; + *) DEFAULT_INITCONF_DIR=/etc/default ;; + esac + + AC_MSG_RESULT([$DEFAULT_INITCONF_DIR]) + AC_SUBST(DEFAULT_INITCONF_DIR) ]) dnl # diff --git a/etc/init.d/.gitignore b/etc/init.d/.gitignore index 73304bc2cd..3f16b08ecc 100644 --- a/etc/init.d/.gitignore +++ b/etc/init.d/.gitignore @@ -1 +1,6 @@ +zfs-functions +zfs-import +zfs-mount +zfs-share +zfs-zed zfs diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am index 5a049dfe14..895ffc1f94 100644 --- a/etc/init.d/Makefile.am +++ b/etc/init.d/Makefile.am @@ -1,22 +1,44 @@ initdir = $(DEFAULT_INIT_DIR) -init_SCRIPTS = zfs +init_SCRIPTS = zfs-import zfs-mount zfs-share zfs-zed + +initcommondir = $(sysconfdir)/zfs +initcommon_SCRIPTS = 
zfs-functions + +initconfdir = $(DEFAULT_INITCONF_DIR) +initconf_SCRIPTS = zfs EXTRA_DIST = \ - $(top_srcdir)/etc/init.d/zfs.fedora.in \ - $(top_srcdir)/etc/init.d/zfs.gentoo.in \ - $(top_srcdir)/etc/init.d/zfs.lsb.in \ - $(top_srcdir)/etc/init.d/zfs.lunar.in \ - $(top_srcdir)/etc/init.d/zfs.redhat.in + $(top_srcdir)/etc/init.d/zfs-functions.in \ + $(top_srcdir)/etc/init.d/zfs-share.in \ + $(top_srcdir)/etc/init.d/zfs-import.in \ + $(top_srcdir)/etc/init.d/zfs-mount.in \ + $(top_srcdir)/etc/init.d/zfs-zed.in \ + $(top_srcdir)/etc/init.d/zfs.in -$(init_SCRIPTS): $(init_SCRIPTS).$(DEFAULT_INIT_SCRIPT).in - -$(SED) -e 's,@bindir\@,$(bindir),g' \ - -e 's,@sbindir\@,$(sbindir),g' \ - -e 's,@udevdir\@,$(udevdir),g' \ - -e 's,@udevruledir\@,$(udevruledir),g' \ - -e 's,@sysconfdir\@,$(sysconfdir),g' \ - -e 's,@initdir\@,$(initdir),g' \ - -e 's,@runstatedir\@,$(runstatedir),g' \ - '$@.$(DEFAULT_INIT_SCRIPT).in' >'$@' +$(init_SCRIPTS) $(initconf_SCRIPTS) $(initcommon_SCRIPTS): + -(if [ -e /etc/debian_version ]; then \ + NFS_SRV=nfs-kernel-server; \ + else \ + NFS_SRV=nfs; \ + fi; \ + if [ -e /etc/gentoo-release ]; then \ + SHELL=/sbin/runscript; \ + else \ + SHELL=/bin/sh; \ + fi; \ + $(SED) -e 's,@bindir\@,$(bindir),g' \ + -e 's,@sbindir\@,$(sbindir),g' \ + -e 's,@udevdir\@,$(udevdir),g' \ + -e 's,@udevruledir\@,$(udevruledir),g' \ + -e 's,@sysconfdir\@,$(sysconfdir),g' \ + -e 's,@initconfdir\@,$(initconfdir),g' \ + -e 's,@initdir\@,$(initdir),g' \ + -e 's,@runstatedir\@,$(runstatedir),g' \ + -e "s,@SHELL\@,$$SHELL,g" \ + -e "s,@NFS_SRV\@,$$NFS_SRV,g" \ + '$@.in' >'$@'; \ + [ '$@' = 'zfs-functions' -o '$@' = 'zfs' ] || \ + chmod +x '$@') distclean-local:: - -$(RM) $(init_SCRIPTS) + -$(RM) $(init_SCRIPTS) $(initcommon_SCRIPTS) $(initconf_SCRIPTS) diff --git a/etc/init.d/README.md b/etc/init.d/README.md new file mode 100644 index 0000000000..a0d3d99470 --- /dev/null +++ b/etc/init.d/README.md @@ -0,0 +1,77 @@ +DESCRIPTION + These script were written with the primary intention 
of being portable and + usable on as many systems as possible. + + This is, in practice, usually not possible. But the intention is there. + And it is a good one. + + They have been tested successfully on: + + * Debian GNU/Linux Wheezy + * Debian GNU/Linux Jessie + * Ubuntu Trusty + * CentOS 6.0 + * CentOS 6.6 + * Gentoo + +SUPPORT + If you find that they don't work for your platform, please report this + at the ZFS On Linux issue tracker at https://github.com/zfsonlinux/zfs/issues. + + Please include: + + * Distribution name + * Distribution version + * Where to find an install CD image + * Architecture + + If you have code to share that fixes the problem, that is much better. + But please remember to try your best to keep portability in mind. If you + suspect that what you're writing/modifying won't work on anything other + than your distribution, please make sure to put that code in appropriate + if/else/fi code. + + It currently MUST be bash (or fully compatible) for this to work. + + If you're making your own distribution and you want the scripts to + work on that, the biggest problem you'll (probably) have is the part + at the beginning of the "zfs-functions.in" file which sets up the + logging output. + +INSTALLING INIT SCRIPT LINKS + To set up the init script links in /etc/rc?.d manually on a Debian GNU/Linux + (or derived) system, run the following commands (the order is important!): + + update-rc.d zfs-zed start 07 S . stop 08 0 1 6 . + update-rc.d zfs-import start 07 S . stop 07 0 1 6 . + update-rc.d zfs-mount start 02 2 3 4 5 . stop 06 0 1 6 . + update-rc.d zfs-share start 27 2 3 4 5 . 
+ + To do the same on RedHat, Fedora and/or CentOS: + + chkconfig zfs-zed + chkconfig zfs-import + chkconfig zfs-mount + chkconfig zfs-share + + On Gentoo: + + rc-update add zfs-zed boot + rc-update add zfs-import boot + rc-update add zfs-mount boot + rc-update add zfs-share default + + + The idea here is to make sure ZED is started before the imports (so that + we can start consuming pool events before pools are imported). + + Then import any/all pools (except the root pool which is mounted in the + initrd before the system even boots - basically before the S (single-user) + mode). + + Then we mount all filesystems before we start any network service (such as + NFSd, AFSd, Samba, iSCSI targets and what not). Even if the share* in ZFS + isn't used, the filesystem must be mounted for the service to start properly. + + Then, at almost the very end, we share filesystems configured with the + share* property in ZFS. diff --git a/etc/init.d/zfs-functions.in b/etc/init.d/zfs-functions.in new file mode 100644 index 0000000000..17805ebec2 --- /dev/null +++ b/etc/init.d/zfs-functions.in @@ -0,0 +1,437 @@ +# This is a script with common functions etc used by zfs-import, zfs-mount, +# zfs-share and zfs-zed. +# +# It is _NOT_ to be called independently +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stanza) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +PATH=/sbin:/bin:/usr/bin:/usr/sbin + +# Source function library +if [ -f /lib/lsb/init-functions ]; then + # LSB, Debian GNU/Linux and derivatives + . /lib/lsb/init-functions +elif [ -f /etc/rc.d/init.d/functions ]; then + # RedHat and derivatives + . /etc/rc.d/init.d/functions +elif [ -L /etc/init.d/functions.sh ]; then + # Gentoo + . 
/etc/init.d/functions.sh +fi + +# Of course the functions we need are called differently +# on different distributions - it would be way too easy +# otherwise!! +if type log_failure_msg > /dev/null 2>&1 ; then + # LSB functions - fall through + zfs_log_begin_msg() { log_begin_msg "$1"; } + zfs_log_end_msg() { log_end_msg "$1"; } + zfs_log_failure_msg() { log_failure_msg "$1"; } + zfs_log_progress_msg() { log_progress_msg "$1"; } +elif type success > /dev/null 2>&1 ; then + # Fedora/RedHat functions + zfs_set_ifs() { + # For some reason, the init function library have a problem + # with a changed IFS, so this function goes around that. + local tIFS="$1" + if [ -n "$tIFS" ] + then + TMP_IFS="$IFS" + IFS="$tIFS" + fi + } + + zfs_log_begin_msg() { echo -n "$1 "; } + zfs_log_end_msg() { + zfs_set_ifs "$OLD_IFS" + if [ "$1" -eq 0 ]; then + success + else + failure + fi + echo + zfs_set_ifs "$TMP_IFS" + } + zfs_log_failure_msg() { + zfs_set_ifs "$OLD_IFS" + failure + echo + zfs_set_ifs "$TMP_IFS" + } + zfs_log_progress_msg() { echo -n $"$1"; } +elif type einfo > /dev/null 2>&1 ; then + # Gentoo functions + zfs_log_begin_msg() { ebegin "$1"; } + zfs_log_end_msg() { eend "$1"; } + zfs_log_failure_msg() { eerror "$1"; } +# zfs_log_progress_msg() { echo -n "$1"; } + zfs_log_progress_msg() { echo -n; } +else + # Unknown - simple substitues. + zfs_log_begin_msg() { echo -n "$1"; } + zfs_log_end_msg() { + ret=$1 + if [ "$ret" -ge 1 ]; then + echo " failed!" + else + echo " success" + fi + return "$ret" + } + zfs_log_failure_msg() { echo "$1"; } + zfs_log_progress_msg() { echo -n "$1"; } +fi + +# Paths to what we need +ZFS="@sbindir@/zfs" +ZED="@sbindir@/zed" +ZPOOL="@sbindir@/zpool" +ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" + +# Sensible defaults +ZFS_MOUNT='yes' +ZFS_UNMOUNT='yes' + +export ZFS ZED ZPOOL ZPOOL_CACHE ZFS_MOUNT ZFS_UNMOUNT + +# Source zfs configuration, overriding the defaults +if [ -f @initconfdir@/zfs ]; then + . 
@initconfdir@/zfs +fi + +# ---------------------------------------------------- + +zfs_action() +{ + local MSG="$1"; shift + local CMD="$*" + local ret + + zfs_log_begin_msg "$MSG " + $CMD + ret=$? + if [ "$ret" -eq 0 ]; then + zfs_log_end_msg $ret + else + zfs_log_failure_msg $ret + fi + + return $ret +} + +# Returns +# 0 if daemon has been started +# 1 if daemon was already running +# 2 if daemon could not be started +# 3 if unsupported +# +zfs_daemon_start() +{ + local PIDFILE="$1"; shift + local DAEMON_BIN="$1"; shift + local DAEMON_ARGS="$*" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --start --quiet --pidfile "$PIDFILE" \ + --exec "$DAEMON_BIN" --test > /dev/null || return 1 + + start-stop-daemon --start --quiet --exec "$DAEMON_BIN" -- \ + $DAEMON_ARGS || return 2 + + # On Debian GNU/Linux, there's a 'sendsigs' script that will + # kill basically everything quite early and zed is stopped + # much later than that. We don't want zed to be among them, + # so add the zed pid to list of pids to ignore. + if [ -f "$PIDFILE" -a -d /run/sendsigs.omit.d ] + then + ln -s "$PIDFILE" /run/sendsigs.omit.d/zed + fi + elif type daemon > /dev/null 2>&1 ; then + # Fedora/RedHat functions + daemon --pidfile "$PIDFILE" "$DAEMON_BIN" $DAEMON_ARGS + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +# Returns +# 0 if daemon has been stopped +# 1 if daemon was already stopped +# 2 if daemon could not be stopped +# 3 if unsupported +# +zfs_daemon_stop() +{ + local PIDFILE="$1" + local DAEMON_BIN="$2" + local DAEMON_NAME="$3" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \ + --pidfile "$PIDFILE" --name "$DAEMON_NAME" + [ "$?" = 0 ] && rm -f "$PIDFILE" + + return $? + elif type killproc > /dev/null 2>&1 ; then + # Fedora/RedHat functions + killproc -p "$PIDFILE" "$DAEMON_NAME" + [ "$?" = 0 ] && rm -f "$PIDFILE" + + return $? 
+ else + # Unsupported + return 3 + fi + + return 0 +} + +# Returns status +zfs_daemon_status() +{ + local PIDFILE="$1" + local DAEMON_BIN="$2" + local DAEMON_NAME="$3" + + if type status_of_proc > /dev/null 2>&1 ; then + # LSB functions + status_of_proc "$DAEMON_NAME" "$DAEMON_BIN" + return $? + elif type status > /dev/null 2>&1 ; then + # Fedora/RedHat functions + status -p "$PIDFILE" "$DAEMON_NAME" + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +zfs_daemon_reload() +{ + local PIDFILE="$1" + local DAEMON_NAME="$2" + + if type start-stop-daemon > /dev/null 2>&1 ; then + # LSB functions + start-stop-daemon --stop -signal 1 --quiet \ + --pidfile "$PIDFILE" --name "$DAEMON_NAME" + return $? + elif type killproc > /dev/null 2>&1 ; then + # Fedora/RedHat functions + killproc -p "$PIDFILE" "$DAEMON_NAME" -HUP + return $? + else + # Unsupported + return 3 + fi + + return 0 +} + +zfs_installed() +{ + if [ ! -x "$ZPOOL" ]; then + return 1 + else + # Test if it works (will catch missing/broken libs etc) + "$ZPOOL" -? > /dev/null 2>&1 + return $? + fi + + if [ ! -x "$ZFS" ]; then + return 2 + else + # Test if it works (will catch missing/broken libs etc) + "$ZFS" -? > /dev/null 2>&1 + return $? + fi + + return 0 +} + +# Trigger udev and wait for it to settle. +udev_trigger() +{ + if [ -x /sbin/udevadm ]; then + /sbin/udevadm trigger --action=change --subsystem-match=block + /sbin/udevadm settle + elif [ -x /sbin/udevsettle ]; then + /sbin/udevtrigger + /sbin/udevsettle + fi +} + +# Do a lot of checks to make sure it's 'safe' to continue with the import. +checksystem() +{ + if grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no|0)( |$)' /proc/cmdline; + then + # Called with zfs=(off|no|0) - bail because we don't + # want anything import, mounted or shared. + # HOWEVER, only do this if we're called at the boot up + # (from init), not if we're running interactivly (as in + # from the shell - we know what we're doing). 
+ [ -n "$init" ] && exit 3 + fi + + # Check if ZFS is installed. + zfs_installed || return 5 + + # Just make sure that /dev/zfs is created. + udev_trigger + + if ! [ "$(uname -m)" = "x86_64" ]; then + echo "Warning: You're not running 64bit. Currently native zfs in"; + echo " Linux is only supported and tested on 64bit."; + # should we break here? People doing this should know what they + # do, thus i'm not breaking here. + fi + + return 0 +} + +reregister_mounts() +{ + local fs mntpnt fstype opts rest tmpdir + tmpdir=removethismountpointhoweverpossible + + while read -r fs mntpnt fstype opts rest ; do + fs=$(printf '%b\n' "$fs") + mntpnt=$(printf '%b\n' "$mntpnt") + if [ "$fstype" = "zfs" ] ; then + if [ "$mntpnt" = "/" ] ; then + mount -f -o zfsutil -t zfs --move / /$tmpdir + umount --fake /$tmpdir + else + umount --fake "$mntpnt" + fi + elif echo "$fs" | grep -qE "^/dev/(zd|zvol)" ; then + if [ "$mntpnt" = "/" ] ; then + mount -f -t "$fstype" --move / /$tmpdir + umount --fake /$tmpdir + else + umount --fake "$mntpnt" + fi + fi + done < /proc/mounts + + while read -r fs mntpnt fstype opts rest ; do + fs=$(printf '%b\n' "$fs") + mntpnt=$(printf '%b\n' "$mntpnt") + if [ "$fstype" = "zfs" ] ; then + mount -f -t zfs -o zfsutil "$fs" "$mntpnt" + elif echo "$fs" | grep -q "^/dev/zd" ; then + mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" + fi + done < /proc/mounts +} + +get_root_pool() +{ + set -- $(mount | grep ' on / ') + [ "$5" = "zfs" ] && echo "${1%%/*}" +} + +check_module_loaded() +{ + [ -r /sys/module/zfs/version ] && return 0 || return 1 +} + +load_module() +{ + # Load the zfs module stack + if ! check_module_loaded; then + if ! 
modprobe zfs; then + return 5 + fi + fi + return 0 +} + +# first parameter is a regular expression that filters mtab +read_mtab() +{ + local match="$1" + local fs mntpnt fstype opts rest TMPFILE + + # Unset all MTAB_* variables + unset $(env | grep ^MTAB_ | sed 's,=.*,,') + + while read -r fs mntpnt fstype opts rest; do + if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then + mntpnt=$(printf '%b\n' "$mntpnt" | sed 's,/,_,g') + eval export MTAB_$mntpnt="$fs" + fi + done < /proc/mounts +} + +in_mtab() +{ + local fs="$(echo "$1" | sed 's,/,_,g')" + local var + + var="$(eval echo MTAB_$fs)" + [ "$(eval echo "$""$var")" != "" ] + return "$?" +} + +# first parameter is a regular expression that filters fstab +read_fstab() +{ + local match="$1" + local i var TMPFILE + + # Unset all FSTAB_* variables + unset $(env | grep ^FSTAB_ | sed 's,=.*,,') + + i=0 + while read -r fs mntpnt fstype opts; do + echo "$fs" | egrep -qE '^#|^$' && continue + + if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then + eval export FSTAB_dev_$i="$fs" + fs=$(printf '%b\n' "$fs" | sed 's,/,_,g') + eval export FSTAB_$i="$mntpnt" + + i=$((i + 1)) + fi + done < /etc/fstab +} + +in_fstab() +{ + local var + + var="$(eval echo FSTAB_$1)" + [ "${var}" != "" ] + return $? +} + +is_mounted() +{ + local mntpt="$1" + local line + + mount | \ + while read line; do + if echo "$line" | grep -q " on $mntpt "; then + return 0 + fi + done + + return 1 +} diff --git a/etc/init.d/zfs-import.in b/etc/init.d/zfs-import.in new file mode 100755 index 0000000000..1bc3ebe9ad --- /dev/null +++ b/etc/init.d/zfs-import.in @@ -0,0 +1,338 @@ +#!@SHELL@ +# +# zfs-import This script will import/export zfs pools. +# +# chkconfig: 2345 01 99 +# description: This script will import/export zfs pools during system +# boot/shutdown. +# It is also responsible for all userspace zfs services. 
+# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-import +# Required-Start: zfs-zed +# Required-Stop: zfs-zed +# Default-Start: S +# Default-Stop: 0 1 6 +# X-Start-Before: checkfs +# X-Stop-After: zfs-mount +# Short-Description: Import ZFS pools +# Description: Run the `zpool import` or `zpool export` commands. +### END INIT INFO +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. @sysconfdir@/zfs/zfs-functions + +# ---------------------------------------------------- + +do_depend() +{ + after sysfs udev zfs-zed + keyword -lxc -openvz -prefix -vserver +} + +# Support function to get a list of all pools, separated with ';' +find_pools() +{ + local CMD="$*" + local pools + + pools=$($CMD 2> /dev/null | \ + grep -E "pool:|^[a-zA-Z0-9]" | \ + sed 's@.*: @@' | \ + sort | \ + while read pool; do \ + echo -n "$pool;" + done) + + echo "${pools%%;}" # Return without the last ';'. +} + +# Import all pools +do_import() +{ + local already_imported available_pools pool npools + local exception dir ZPOOL_IMPORT_PATH RET=0 r=1 + + # In case not shutdown cleanly. + [ -n "$init" ] && rm -f /etc/dfs/sharetab + + # Just simplify code later on. + if [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] + then + # It's something, but not 'yes' so it's no good to us. + unset USE_DISK_BY_ID + fi + + # Find list of already imported pools. + already_imported=$(find_pools "$ZPOOL" list -H -oname) + available_pools=$(find_pools "$ZPOOL" import) + + # Just in case - seen it happen (that a pool isn't visable/found + # with a simple "zpool import" but only when using the "-d" + # option or setting ZPOOL_IMPORT_PATH). 
+ if [ -d "/dev/disk/by-id" ] + then + npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id) + if [ -n "$npools" ] + then + # Because we have found extra pool(s) here, which wasn't + # found 'normaly', we need to force USE_DISK_BY_ID to + # make sure we're able to actually import it/them later. + USE_DISK_BY_ID='yes' + + # Filter out duplicates (pools found with the simpl + # "zpool import" but which is also found with the + # "zpool import -d ..."). + npools=$(echo "$npools" | sed "s,$available_pools,,") + + if [ -n "$available_pools" ] + then + # Add the list to the existing list of + # available pools + available_pools="$available_pools;$npools" + else + available_pools="$npools" + fi + fi + fi + + # Filter out any exceptions... + if [ -n "$ZFS_POOL_EXCEPTIONS" ] + then + local found="" + local apools="" + OLD_IFS="$IFS" ; IFS=";" + + for pool in $available_pools + do + for exception in $ZFS_POOL_EXCEPTIONS + do + [ "$pool" = "$exception" ] && continue 2 + found="$pool" + done + + if [ -n "$found" ] + then + if [ -n "$apools" ] + then + apools="$apools;$pool" + else + apools="$pool" + fi + fi + done + + IFS="$OLD_IFS" + available_pools="$apools" + fi + + # For backwards compability, make sure that ZPOOL_IMPORT_PATH is set + # to something we can use later with the real import(s). We want to + # make sure we find all by* dirs, BUT by-vdev should be first (if it + # exists). + if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ] + then + local dirs + dirs="$(find /dev/disk/by-* -maxdepth 0 -type d | \ + grep -v by-vdev)" + dirs="$(echo "$dirs" | sed 's, ,:,g')" + if [ -d "/dev/disk/by-vdev" ] + then + # Add by-vdev at the beginning. + ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:" + fi + ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev" + fi + + # Needs to be exported for "zpool" to catch it. + [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH + + # Mount all availible pools (except those set in ZFS_POOL_EXCEPTIONS. 
+ # + # If not interactive (run from init - variable init='/sbin/init') + # we get ONE line for all pools being imported, with just a dot + # as status for each pool. + # Example: Importing ZFS pool(s)... [OK] + # + # If it IS interactive (started from the shell manually), then we + # get one line per pool importing. + # Example: Importing ZFS pool pool1 [OK] + # Importing ZFS pool pool2 [OK] + # [etc] + [ -n "$init" ] && zfs_log_begin_msg "Importing ZFS pool(s)" + OLD_IFS="$IFS" ; IFS=";" + for pool in $available_pools + do + [ -z "$pool" ] && continue + + # We have pools that haven't been imported - import them + if [ -n "$init" ] + then + # Not interactive - a dot for each pool. + # Except on Gentoo where this doesn't work. + zfs_log_progress_msg "." + else + # Interactive - one 'Importing ...' line per pool + zfs_log_begin_msg "Importing ZFS pool $pool" + fi + + # Import by using ZPOOL_IMPORT_PATH (either set above or in + # the config file) _or_ with the 'built in' default search + # paths. This is the prefered way. + "$ZPOOL" import -N "$pool" 2> /dev/null + r="$?" ; RET=$((RET + r)) + if [ "$r" -eq 0 ] + then + # Output success and process the next pool + [ -z "$init" ] && zfs_log_end_msg 0 + continue + fi + # We don't want a fail msg here, we're going to try import + # using the cache file soon and that might succeed. + [ ! -f "$ZPOOL_CACHE" ] && zfs_log_end_msg "$RET" + + if [ "$r" -gt 0 -a -f "$ZPOOL_CACHE" ] + then + # Failed to import without a cache file. Try WITH... + if [ -z "$init" -a "$VERBOSE_MOUNT" = 'yes' ] + then + # Interactive + Verbose = more information + zfs_log_progress_msg " using cache file" + fi + + "$ZPOOL" import -c "$ZPOOL_CACHE" -N "$pool" 2> /dev/null + r="$?" 
; RET=$((RET + r)) + if [ "$r" -eq 0 ] + then + [ -z "$init" ] && zfs_log_end_msg 0 + continue 3 # Next pool + fi + zfs_log_end_msg "$RET" + fi + done + [ -n "$init" ] && zfs_log_end_msg "$RET" + + IFS="$OLD_IFS" + [ -n "$already_imported" -a -z "$available_pools" ] && return 0 + + return "$RET" +} + +# Export all pools +do_export() +{ + local pool root_pool RET r + RET=0 + + root_pool=$(get_root_pool) + + [ -n "$init" ] && zfs_log_begin_msg "Exporting ZFS pool(s)" + TMPFILE=$(mktemp --tmpdir=/var/tmp zpool.XXXXX) + "$ZPOOL" list -H -oname > "$TMPFILE" + while read pool; do + [ "$pool" = "$root_pool" ] && continue + + if [ -z "$init" ] + then + # Interactive - one 'Importing ...' line per pool + zfs_log_begin_msg "Exporting ZFS pool $pool" + else + # Not interactive - a dot for each pool. + zfs_log_progress_msg "." + fi + + "$ZPOOL" export "$pool" + r="$?" ; RET=$((RET + r)) + [ -z "$init" ] && zfs_log_end_msg "$r" + done < "$TMPFILE" + rm -f "$TMPFILE" + [ -n "$init" ] && zfs_log_end_msg "$RET" +} + +# Output the status and list of pools +do_status() +{ + check_module_loaded || exit 0 + + "$ZPOOL" status && echo "" && "$ZPOOL" list +} + +do_start() +{ + if [ "$VERBOSE_MOUNT" = 'yes' ] + then + zfs_log_begin_msg "Checking if ZFS userspace tools present" + fi + + if checksystem + then + [ "$VERBOSE_MOUNT" = 'yes' ] && zfs_log_end_msg 0 + + if [ "$VERBOSE_MOUNT" = 'yes' ] + then + zfs_log_begin_msg "Loading kernel ZFS infrastructure" + fi + + if ! load_module + then + [ "$VERBOSE_MOUNT" = 'yes' ] && zfs_log_end_msg 1 + return 5 + fi + [ "$VERBOSE_MOUNT" = 'yes' ] && zfs_log_end_msg 0 + + do_import && udev_trigger # just to make sure we get zvols. + + return 0 + else + return 1 + fi +} + +do_stop() +{ + # Check to see if the module is even loaded. + check_module_loaded || exit 0 + + do_export +} + +# ---------------------------------------------------- + +if [ ! 
-e /etc/gentoo-release ] +then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + status) + do_status + ;; + force-reload|condrestart|reload|restart) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status}" + exit 3 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } + status() { do_status; } +fi diff --git a/etc/init.d/zfs-mount.in b/etc/init.d/zfs-mount.in new file mode 100755 index 0000000000..e108cb153f --- /dev/null +++ b/etc/init.d/zfs-mount.in @@ -0,0 +1,225 @@ +#!@SHELL@ +# +# zfs-mount This script will mount/umount the zfs filesystems. +# +# chkconfig: 2345 06 99 +# description: This script will mount/umount the zfs filesystems during +# system boot/shutdown. Configuration of which filesystems +# should be mounted is handled by the zfs 'mountpoint' and +# 'canmount' properties. See the zfs(8) man page for details. +# It is also responsible for all userspace zfs services. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-mount +# Required-Start: $local_fs zfs-import +# Required-Stop: $local_fs zfs-import +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# X-Stop-After: zfs-share +# Short-Description: Mount ZFS filesystems and volumes +# Description: Run the `zfs mount -a` or `zfs umount -a` commands. +### END INIT INFO +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. 
@sysconfdir@/zfs/zfs-functions + +# ---------------------------------------------------- + +do_depend() +{ + after procfs zfs-import sysfs procps + use mtab + keyword -lxc -openvz -prefix -vserver +} + +# Mount all datasets/filesystems +do_mount() +{ + local verbose overlay i mntpt val + + [ "$VERBOSE_MOUNT" = 'yes' ] && verbose=v + [ "$OVERLAY_MOUNTS" = 'yes' ] && overlay=O + + zfs_action "Mounting ZFS filesystem(s)" \ + "$ZFS" mount -a$verbose$overlay "$MOUNT_EXTRA_OPTIONS" + + # Require each volume/filesytem to have 'noauto' and no fsck + # option. This shouldn't really be necessary, as long as one + # can get zfs-import to run sufficiently early on in the boot + # process - before local mounts. This is just here in case/if + # this isn't possible. + [ "$VERBOSE_MOUNT" = 'yes' ] && \ + zfs_log_begin_msg "Mounting volumes and filesystems registered in fstab" + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + dev=$(eval echo "$"FSTAB_dev_$i) + if ! in_mtab "$mntpt" && ! is_mounted "$mntpt" && [ -e "$dev" ] + then + [ "$VERBOSE_MOUNT" = 'yes' ] && \ + zfs_log_progress_msg "$mntpt " + fsck "$dev" && mount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + + read_mtab "[[:space:]]zfs[[:space:]]" + read_fstab "[[:space:]]zfs[[:space:]]" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + if ! in_mtab "$mntpt" && ! 
is_mounted "$mntpt" + then + [ "$VERBOSE_MOUNT" = 'yes' ] && \ + zfs_log_progress_msg "$mntpt " + mount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + [ "$VERBOSE_MOUNT" = 'yes' ] && zfs_log_end_msg 0 + + return 0 +} + +# Unmount all filesystems +do_unmount() +{ + local i var mntpt + + # This shouldn't really be necessary, as long as one can get + # zfs-import to run sufficiently late in the shutdown/reboot process + # - after unmounting local filesystems. This is just here in case/if + # this isn't possible. + zfs_action "Unmounting ZFS filesystems" "$ZFS" unmount -a + + [ "$VERBOSE_MOUNT" = 'yes' ] && \ + zfs_log_begin_msg "Unmounting volumes and filesystems registered in fstab" + + read_mtab "^/dev/(zd|zvol)" + read_fstab "^/dev/(zd|zvol)" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + dev=$(eval echo "$"FSTAB_dev_$i) + if in_mtab "$mntpt" + then + [ "$VERBOSE_MOUNT" = 'yes' ] && \ + zfs_log_progress_msg "$mntpt " + umount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + + read_mtab "[[:space:]]zfs[[:space:]]" + read_fstab "[[:space:]]zfs[[:space:]]" + i=0; var=$(eval echo FSTAB_$i) + while [ -n "$(eval echo "$""$var")" ] + do + mntpt=$(eval echo "$""$var") + if in_mtab "$mntpt"; then + [ "$VERBOSE_MOUNT" = 'yes' ] && \ + zfs_log_progress_msg "$mntpt " + umount "$mntpt" + fi + + i=$((i + 1)) + var=$(eval echo FSTAB_$i) + done + [ "$VERBOSE_MOUNT" = 'yes' ] && zfs_log_end_msg 0 + + return 0 +} + +do_start() +{ + check_module_loaded || exit 0 + + # fix mtab to include already-mounted fs filesystems, in case there are any + # we ONLY do this if mtab does not point to /proc/mounts + # which is the case in some systems (systemd may bring that soon) + if ! 
readlink /etc/mtab | grep -q /proc ; then + if grep -qE "(^/dev/zd|^/dev/zvol| zfs )" /proc/mounts ; then + zfs_action "Registering already-mounted ZFS filesystems and volumes" \ + reregister_mounts + fi + fi + + # Ensure / exists in /etc/mtab, if not update mtab accordingly. + # This should be handled by rc.sysinit but lets be paranoid. + awk '$2 = "/" { exit 1 }' /etc/mtab + RETVAL=$? + if [ "$RETVAL" -eq 0 ] + then + mount -f / + fi + + case "$ZFS_MOUNT" in + [Oo][Ff][Ff]|[Nn][Oo]|''|0) + exit 3 + ;; + esac + + do_mount +} + +do_stop() +{ + case "$ZFS_UNMOUNT" in + [Oo][Ff][Ff]|[Nn][Oo]|''|0) + exit 0 + ;; + esac + + check_module_loaded || exit 0 + + do_unmount +} + +# ---------------------------------------------------- + +if [ ! -e /etc/gentoo-release ] +then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + force-reload|condrestart|reload|restart|status) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } +fi diff --git a/etc/init.d/zfs-share.in b/etc/init.d/zfs-share.in new file mode 100755 index 0000000000..70439151b3 --- /dev/null +++ b/etc/init.d/zfs-share.in @@ -0,0 +1,93 @@ +#!@SHELL@ +# +# zfs-share This script will network share zfs filesystems and volumes. +# +# chkconfig: 2345 30 99 +# description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. 
+# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-share +# Required-Start: $local_fs $network $remote_fs zfs-mount +# Required-Stop: $local_fs $network $remote_fs zfs-mount +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Should-Start: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 zfs-mount +# Should-Stop: iscsi iscsitarget istgt scst @NFS_SRV@ samba samba4 zfs-mount +# Short-Description: Network share ZFS datasets and volumes. +# Description: Run the `zfs share -a` or `zfs unshare -a` commands +# for controlling iSCSI, NFS, or CIFS network shares. +### END INIT INFO +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stansa) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. @sysconfdir@/zfs/zfs-functions + +# ---------------------------------------------------- + +do_depend() +{ + after sysfs zfs-mount + keyword -lxc -openvz -prefix -vserver +} + +do_start() +{ + case "$ZFS_SHARE" in + [Oo][Ff][Ff]|[Nn][Oo]|''|0) + exit 0 + ;; + esac + + check_module_loaded || exit 0 + + zfs_action "Sharing ZFS filesystems" "$ZFS" share -a +} + +do_stop() +{ + case "$ZFS_UNSHARE" in + [Oo][Ff][Ff]|[Nn][Oo]|''|0) + exit 0 + ;; + esac + + check_module_loaded || exit 0 + + zfs_action "Unsharing ZFS filesystems" "$ZFS" unshare -a +} + +# ---------------------------------------------------- + +if [ ! -e /etc/gentoo-release ]; then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + force-reload|reload|restart|status) + # no-op + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop}" + exit 3 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. 
+ depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } +fi diff --git a/etc/init.d/zfs-zed.in b/etc/init.d/zfs-zed.in new file mode 100755 index 0000000000..1458387f51 --- /dev/null +++ b/etc/init.d/zfs-zed.in @@ -0,0 +1,148 @@ +#!@SHELL@ +# +# zfs-zed +# +# chkconfig: 2345 01 99 +# description: This script will start and stop the ZFS Event Daemon. +# probe: true +# +### BEGIN INIT INFO +# Provides: zfs-zed +# Required-Start: mtab +# Required-Stop: $local_fs mtab +# Default-Start: S +# Default-Stop: 0 1 6 +# X-Start-Before: checkfs +# X-Stop-After: zfs-import +# Short-Description: ZFS Event Daemon +# Description: zed monitors ZFS events. When a zevent is posted, zed +# will run any scripts that have been enabled for the +# corresponding zevent class. +### END INIT INFO +# +# NOTE: Not having '$local_fs' on Required-Start but only on Required-Stop +# is on purpose. If we have '$local_fs' in both (and X-Start-Before=checkfs) +# we get conflicts - zed and import need to be started extremely early, +# but not stopped too late. +# +# Released under the 2-clause BSD license. +# +# The original script that acted as a template for this script came from +# the Debian GNU/Linux kFreeBSD ZFS packages (which did not include a +# licensing stanza) in the commit dated Mar 24, 2011: +# https://github.com/zfsonlinux/pkg-zfs/commit/80a3ae582b59c0250d7912ba794dca9e669e605a + +# Source the common init script +. @sysconfdir@/zfs/zfs-functions + +ZED_NAME="zed" +ZED_PIDFILE="@runstatedir@/$ZED_NAME.pid" + +# Exit if the package is not installed +[ -x "$ZED" ] || exit 0 + +# ---------------------------------------------------- + +do_depend() +{ + # Try to allow people to mix and match fstab with ZFS in a way that makes sense. + if [ "$(mountinfo -s /)" = 'zfs' ] + then + before localmount + else + after localmount + fi + + # bootmisc will log to /var which may be a different zfs than root. 
+ before bootmisc logger zfs-import + after sysfs +} + +do_start() +{ + check_module_loaded || exit 0 + + ZED_ARGS="$ZED_ARGS -p $ZED_PIDFILE" + + zfs_action "Starting ZFS Event Daemon" zfs_daemon_start \ + "$ZED_PIDFILE" "$ZED" "$ZED_ARGS" + return "$?" +} + +do_stop() +{ + local pools RET + check_module_loaded || exit 0 + + zfs_action "Stopping ZFS Event Daemon" zfs_daemon_stop \ + "$ZED_PIDFILE" "$ZED" "$ZED_NAME" + if [ "$?" -eq "0" ] + then + # Let's see if we have any pools imported + pools=$("$ZPOOL" list -H -oname) + if [ -z "$pools" ] + then + # No pools imported, it is/should be safe/possible to + # unload modules. + zfs_action "Unloading modules" rmmod zfs zunicode \ + zavl zcommon znvpair spl + return "$?" + fi + else + return "$?" + fi +} + +do_status() +{ + check_module_loaded || exit 0 + + zfs_daemon_status "$ZED_PIDFILE" "$ZED" "$ZED_NAME" + return "$?" +} + +do_reload() +{ + check_module_loaded || exit 0 + + zfs_action "Reloading ZFS Event Daemon" zfs_daemon_reload \ + "$ZED_PIDFILE" "$ZED_NAME" + return "$?" +} + +# ---------------------------------------------------- + +if [ ! -e /etc/gentoo-release ]; then + case "$1" in + start) + do_start + ;; + stop) + do_stop + ;; + status) + do_status + ;; + reload|force-reload) + do_reload + ;; + restart) + do_stop + do_start + ;; + *) + [ -n "$1" ] && echo "Error: Unknown command $1." + echo "Usage: $0 {start|stop|status|reload|restart}" + exit 1 + ;; + esac + + exit $? +else + # Create wrapper functions since Gentoo don't use the case part. + depend() { do_depend; } + start() { do_start; } + stop() { do_stop; } + status() { do_status; } + reload() { do_reload; } +fi diff --git a/etc/init.d/zfs.fedora.in b/etc/init.d/zfs.fedora.in deleted file mode 100644 index 86f430dce9..0000000000 --- a/etc/init.d/zfs.fedora.in +++ /dev/null @@ -1,243 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. 
-# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 1 -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -# Source function library & LSB routines -. /etc/rc.d/init.d/functions - -# script variables -RETVAL=0 -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -servicename=zfs -LOCKFILE=/var/lock/subsys/$servicename - -# functions -zfs_installed() { - modinfo zfs > /dev/null 2>&1 || return 5 - $ZPOOL > /dev/null 2>&1 - [ $? == 127 ] && return 5 - $ZFS > /dev/null 2>&1 - [ $? 
== 127 ] && return 5 - return 0 -} - -reregister_mounts() { - cat /etc/mtab | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -o zfsutil -t zfs --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - elif echo "$fs" | grep -q "^/dev/zd" ; then - if [ "$mntpnt" == "/" ] ; then - mount -f -t "$fstype" --move / /removethismountpointhoweverpossible - umount --fake /removethismountpointhoweverpossible - else - umount --fake "$mntpnt" - fi - fi - done - cat /proc/mounts | while read -r fs mntpnt fstype opts rest ; do - fs=`printf '%b\n' "$fs"` - mntpnt=`printf '%b\n' "$mntpnt"` - if [ "$fstype" == "zfs" ] ; then - mount -f -t zfs -o zfsutil "$fs" "$mntpnt" - elif echo "$fs" | grep -q "^/dev/zd" ; then - mount -f -t "$fstype" -o "$opts" "$fs" "$mntpnt" - fi - done -} - -# i need a bash guru to simplify this, since this is copy and paste, but donno how -# to correctly dereference variable names in bash, or how to do this right - -declare -A MTAB -declare -A FSTAB - -# first parameter is a regular expression that filters mtab -read_mtab() { - for fs in "${!MTAB[@]}" ; do unset MTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - MTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/mtab) -} - -in_mtab() { - [ "${MTAB[$1]}" != "" ] - return $? -} - -# first parameter is a regular expression that filters fstab -read_fstab() { - for fs in "${!FSTAB[@]}" ; do unset FSTAB["$fs"] ; done - while read -r fs mntpnt fstype opts blah ; do - fs=`printf '%b\n' "$fs"` - FSTAB["$fs"]=$mntpnt - done < <(grep "$1" /etc/fstab) -} - -in_fstab() { - [ "${FSTAB[$1]}" != "" ] - return $? -} - -start() -{ - if [ -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. 
If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # Delay until all required block devices are present. - udevadm settle - - # load kernel module infrastructure - if ! grep -q zfs /proc/modules ; then - action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5 - fi - - # fix mtab to include already-mounted fs filesystems, in case there are any - # we ONLY do this if mtab does not point to /proc/mounts - # which is the case in some systems (systemd may bring that soon) - if ! readlink /etc/mtab | grep -q /proc ; then - if grep -qE "(^/dev/zd| zfs )" /proc/mounts ; then - action $"Registering already-mounted ZFS filesystems and volumes: " reregister_mounts || return 150 - fi - fi - - if [ -f $ZPOOL_CACHE ] ; then - - echo -n $"Importing ZFS pools not yet imported: " - $ZPOOL import -c $ZPOOL_CACHE -aN || true # stupid zpool will fail if all pools are already imported - RETVAL=$? - if [ $RETVAL -ne 0 ]; then - failure "Importing ZFS pools not yet imported: " - return 151 - fi - success "Importing ZFS pools not yet imported: " - - fi - - action $"Mounting ZFS filesystems not yet mounted: " $ZFS mount -a || return 152 - - action $"Exporting ZFS filesystems: " $ZFS share -a || return 153 - - read_mtab "^/dev/zd" - read_fstab "^/dev/zd" - - template=$"Mounting volume %s registered in fstab: " - for volume in "${!FSTAB[@]}" ; do - if in_mtab "$volume" ; then continue ; fi - string=`printf "$template" "$volume"` - action "$string" mount "$volume" - done - - touch "$LOCKFILE" -} - -stop() -{ - if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi - - # check if ZFS is installed. 
If not, comply to FC standards and bail - zfs_installed || { - action $"Checking if ZFS is installed: not installed" /bin/false - return 5 - } - - # the poweroff of the system takes care of this - # but it never unmounts the root filesystem itself - # shit - - action $"Syncing ZFS filesystems: " sync - # about the only thing we can do, and then we - # hope that the umount process will succeed - # unfortunately the umount process does not dismount - # the root file system, there ought to be some way - # we can tell zfs to just flush anything in memory - # when a request to remount,ro comes in - - #echo -n $"Unmounting ZFS filesystems: " - #$ZFS umount -a - #RETVAL=$? - #if [ $RETVAL -ne 0 ]; then - # failure - - # return 8 - #fi - #success - - rm -f "$LOCKFILE" -} - -# See how we are called -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - lsmod | grep -q zfs || RETVAL=3 - $ZPOOL status && echo && $ZFS list || { - [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4 - } - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ] ; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - RETVAL=3 - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.gentoo.in b/etc/init.d/zfs.gentoo.in deleted file mode 100644 index 07fce01ba0..0000000000 --- a/etc/init.d/zfs.gentoo.in +++ /dev/null @@ -1,124 +0,0 @@ -#!/sbin/runscript -# Copyright 1999-2011 Gentoo Foundation -# Released under the 2-clause BSD license. -# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $ - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -depend() -{ - # Try to allow people to mix and match fstab with ZFS in a way that makes sense. - if [ "$(mountinfo -s /)" = 'zfs' ] - then - before localmount - else - after localmount - fi - - # bootmisc will log to /var which may be a different zfs than root. 
- before bootmisc logger - use mtab - keyword -lxc -openvz -prefix -vserver -} - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -ZFS_MODULE=zfs - -checksystem() { - if [ ! -c /dev/zfs ]; then - einfo "Checking if ZFS modules present" - if ! modinfo zfs > /dev/null 2>&1 ; then - eerror "$ZFS_MODULE not found. Is the ZFS package installed?" - return 1 - fi - fi - einfo "Checking if zfs userspace tools present" - if [ ! -x $ZPOOL ]; then - eerror "$ZPOOL binary not found." - return 1 - fi - if [ ! -x $ZFS ]; then - eerror "$ZFS binary not found." - return 1 - fi - return 0 -} - -start() { - ebegin "Starting ZFS" - checksystem || return 1 - - # Delay until all required block devices are present. - udevadm settle - - if [ ! -c /dev/zfs ]; then - modprobe $ZFS_MODULE - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'." - eend $rv - return $rv - fi - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ -f $ZPOOL_CACHE ]; then - einfo "Importing ZFS pools" - # as per fedora script, import can fail if all pools are already imported - # The check for $rv makes no sense...but someday, it will work right. - $ZPOOL import -c $ZPOOL_CACHE -aN 2>/dev/null || true - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to import not-yet imported pools." - eend $rv - return $rv - fi - fi - - einfo "Mounting ZFS filesystems" - $ZFS mount -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to mount ZFS filesystems." - eend $rv - return $rv - fi - - einfo "Exporting ZFS filesystems" - $ZFS share -a - rv=$? - if [ $rv -ne 0 ]; then - eerror "Failed to export ZFS filesystems." - eend $rv - return $rv - fi - - eend 0 - return 0 -} - -stop() -{ - ebegin "Unmounting ZFS filesystems" - $ZFS umount -a - rv=$? - if [ $rv -ne 0 ]; then - einfo "Some ZFS filesystems not unmounted" - fi - - # Don't fail if we couldn't umount everything. 
/usr might be in use. -	eend 0 -	return 0 -} - -status() -{ -	# show pool status and list -	$ZPOOL status && echo && $ZPOOL list -} diff --git a/etc/init.d/zfs.in b/etc/init.d/zfs.in new file mode 100644 index 0000000000..d81abfef56 --- /dev/null +++ b/etc/init.d/zfs.in @@ -0,0 +1,82 @@ +# ZoL userland configuration. + +# Run `zfs mount -a` during system start? +ZFS_MOUNT='yes' + +# Run `zfs unmount -a` during system stop? +ZFS_UNMOUNT='yes' + +# Run `zfs share -a` during system start? +# nb: The shareiscsi, sharenfs, and sharesmb dataset properties. +ZFS_SHARE='yes' + +# Run `zfs unshare -a` during system stop? +ZFS_UNSHARE='yes' + +# Specify specific path(s) to look for device nodes and/or links for the +# pool import(s). See zpool(8) for more information about this variable. +# It supersedes the old USE_DISK_BY_ID which indicated that it would only +# try '/dev/disk/by-id'. +# The old variable will still work in the code, but is deprecated. +#ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id" + +# Should the datasets be mounted verbosely? +# A mount counter will be used when mounting if set to 'yes'. +VERBOSE_MOUNT='no' + +# Should we allow overlay mounts? +# This is standard in Linux, but not ZFS (which comes from Solaris where this +# is not allowed). +DO_OVERLAY_MOUNTS='no' + +# Any additional option to the 'zfs mount' command line? +# Include '-o' for each option wanted. +MOUNT_EXTRA_OPTIONS="" + +# Build kernel modules with the --enable-debug switch? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_ENABLE_DEBUG='no' + +# Build kernel modules with the --enable-debug-dmu-tx switch? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_ENABLE_DEBUG_DMU_TX='no' + +# Keep debugging symbols in kernel modules? +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_DKMS_DISABLE_STRIP='no' + +# Wait for this many seconds in the initrd pre_mountroot? +# This delays startup and should be '0' on most systems. 
+# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_INITRD_PRE_MOUNTROOT_SLEEP='0' + +# Wait for this many seconds in the initrd mountroot? +# This delays startup and should be '0' on most systems. This might help on +# systems which have their ZFS root on a USB disk that takes just a little +# longer to be available +# Only applicable for Debian GNU/Linux {dkms,initramfs}. +ZFS_INITRD_POST_MODPROBE_SLEEP='0' + +# List of additional datasets to mount after the root dataset is mounted? +# +# The init script will use the mountpoint specified in the 'mountpoint' +# property value in the dataset to determine where it should be mounted. +# +# This is a space separated list, and will be mounted in the order specified, +# so if one filesystem depends on a previous mountpoint, make sure to put +# them in the right order. +# +# It is not necessary to add filesystems below the root fs here. It is +# taken care of by the initrd script automatically. These are only for +# additional filesystems needed. Such as /opt, /usr/local which is not +# located under the root fs. +# Example: If root FS is 'rpool/ROOT/rootfs', this would make sense. +#ZFS_INITRD_ADDITIONAL_DATASETS="rpool/ROOT/usr rpool/ROOT/var" + +# List of pools that should NOT be imported at boot? +# This is a space separated list. +#ZFS_POOL_EXCEPTIONS="test2" + +# Optional arguments for the ZFS Event Daemon (ZED). +# See zed(8) for more information on available options. +#ZED_ARGS="-M" diff --git a/etc/init.d/zfs.lsb.in b/etc/init.d/zfs.lsb.in deleted file mode 100644 index 05e815ede8..0000000000 --- a/etc/init.d/zfs.lsb.in +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. 
See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /lib/lsb/init-functions - -LOCKFILE=/var/lock/zfs -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/default/zfs' ] && . /etc/default/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. - awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ "$USE_DISK_BY_ID" -eq 1 ]; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? - log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - log_begin_msg "Importing ZFS pools" - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? 
- log_end_msg $ret - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - log_begin_msg "Mounting ZFS filesystems" - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - log_end_msg $? - - log_begin_msg "Exporting ZFS filesystems" - "$ZFS" share -a - log_end_msg $? - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - log_begin_msg "Unsharing ZFS filesystems" - "$ZFS" unshare -a - log_end_msg $? - - log_begin_msg "Unmounting ZFS filesystems" - "$ZFS" umount -a - log_end_msg $? - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? - ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL diff --git a/etc/init.d/zfs.lunar.in b/etc/init.d/zfs.lunar.in deleted file mode 100644 index 7a51104c26..0000000000 --- a/etc/init.d/zfs.lunar.in +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# -# zfs This shell script takes care of starting (mount) and -# stopping (umount) zfs shares. -# -# chkconfig: 35 60 40 -# description: ZFS is a filesystem developed by Sun, ZFS is a -# combined file system and logical volume manager -# designed by Sun Microsystems. Made available to Linux -# using SPL (Solaris Porting Layer) by zfsonlinux.org. -# probe: true - -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -case $1 in - start) echo "$1ing ZFS filesystems" - - # Delay until all required block devices are present. - udevadm settle - - if ! 
grep "zfs" /proc/modules > /dev/null; then - echo "ZFS kernel module not loaded yet; loading..."; - if ! modprobe zfs; then - echo "Failed to load ZFS kernel module..."; - exit 0; - fi - fi - - if ! [ `uname -m` == "x86_64" ]; then - echo "Warning: You're not running 64bit. Currently native zfs in"; - echo " linux is only supported and tested on 64bit."; - # should we break here? People doing this should know what they - # do, thus i'm not breaking here. - fi - - # mount the filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1; }') - echo -n "mounting $mdev..." - if $ZFS mount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - done < <($ZFS list -H); - - # export the filesystems - echo -n "exporting ZFS filesystems..." - if $ZFS share -a; then - echo -e "done"; - else - echo -e "failed"; - fi - - - ;; - - stop) echo "$1ping ZFS filesystems" - - if grep "zfs" /proc/modules > /dev/null; then - # module is loaded, so we can try to umount filesystems - while IFS= read -r -d $'\n' dev; do - mdev=$(echo "$dev" | awk '{ print $1 }'); - echo -n "umounting $mdev..."; - if $ZFS umount $mdev; then - echo -e "done"; - else - echo -e "failed"; - fi - # the next line is, because i have to reverse the - # output, otherwise it wouldn't work as it should - done < <($ZFS list -H | tac); - - # and finally let's rmmod the module - rmmod zfs - - - else - # module not loaded, no need to umount anything - exit 0 - fi - - ;; - - restart) echo "$1ing ZFS filesystems" - $0 stop - $0 start - ;; - - *) echo "Usage: $0 {start|stop|restart}" - ;; - -esac diff --git a/etc/init.d/zfs.redhat.in b/etc/init.d/zfs.redhat.in deleted file mode 100644 index 30b9f0bf62..0000000000 --- a/etc/init.d/zfs.redhat.in +++ /dev/null @@ -1,150 +0,0 @@ -#!/bin/bash -# -# zfs This script will mount/umount the zfs filesystems. -# -# chkconfig: 2345 01 99 -# description: This script will mount/umount the zfs filesystems during -# system boot/shutdown. 
Configuration of which filesystems -# should be mounted is handled by the zfs 'mountpoint' and -# 'canmount' properties. See the zfs(8) man page for details. -# It is also responsible for all userspace zfs services. -# -### BEGIN INIT INFO -# Provides: zfs -# Required-Start: $local_fs -# Required-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Should-Stop: -# Short-Description: Mount/umount the zfs filesystems -# Description: ZFS is an advanced filesystem designed to simplify managing -# and protecting your data. This service mounts the ZFS -# filesystems and starts all related zfs services. -### END INIT INFO - -# Source function library. -. /etc/rc.d/init.d/functions - -LOCKFILE=/var/lock/zfs -ZED="@sbindir@/zed" -ZED_PIDFILE="@runstatedir@/zed.pid" -ZFS="@sbindir@/zfs" -ZPOOL="@sbindir@/zpool" -ZPOOL_CACHE="/etc/zfs/zpool.cache" -USE_DISK_BY_ID=0 -VERBOSE_MOUNT=0 -DO_OVERLAY_MOUNTS=0 -MOUNT_EXTRA_OPTIONS="" - -# Source zfs configuration. -[ -r '/etc/sysconfig/zfs' ] && . /etc/sysconfig/zfs - -[ -x "$ZPOOL" ] || exit 1 -[ -x "$ZFS" ] || exit 2 - -if [ -z "$init" ]; then - # Not interactive - grep -qE '(^|[^\\](\\\\)* )zfs=(off|no)( |$)' /proc/cmdline && exit 3 -fi - -start() -{ - [ -f "$LOCKFILE" ] && return 3 - - # Delay until all required block devices are present. - udevadm settle - - # Load the zfs module stack - /sbin/modprobe zfs - - # Start the ZED for event handling - action $"Starting ZFS Event Daemon" daemon --pidfile="$ZED_PIDFILE" "$ZED" - - # Ensure / exists in /etc/mtab, if not update mtab accordingly. - # This should be handled by rc.sysinit but lets be paranoid. - awk '$2 == "/" { exit 1 }' /etc/mtab - RETVAL=$? - if [ "$RETVAL" -eq 0 ]; then - /bin/mount -f / - fi - - # Import all pools described by the cache file, and then mount - # all filesystem based on their properties. - if [ "$USE_DISK_BY_ID" -eq 1 ]; then - action $"Importing ZFS pools" \ - "$ZPOOL" import -d /dev/disk/by-id -aN 2>/dev/null - ret=$? 
- [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - elif [ -f "$ZPOOL_CACHE" ] ; then - action $"Importing ZFS pools" \ - "$ZPOOL" import -c "$ZPOOL_CACHE" -aN 2>/dev/null - ret=$? - [ "$ret" -eq 0 ] && POOL_IMPORTED=1 - fi - - if [ -n "$POOL_IMPORTED" ]; then - if [ "$VERBOSE_MOUNT" -eq 1 ]; then - verbose=v - fi - - if [ "$DO_OVERLAY_MOUNTS" -eq 1 ]; then - overlay=O - fi - - action $"Mounting ZFS filesystems" \ - "$ZFS" mount -a$verbose$overlay$MOUNT_EXTRA_OPTIONS - - action $"Sharing ZFS filesystems" \ - "$ZFS" share -a - fi - - touch "$LOCKFILE" -} - -stop() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - action $"Unsharing ZFS filesystems" "$ZFS" unshare -a - action $"Unmounting ZFS filesystems" "$ZFS" umount -a - action $"Shutting down ZFS Event Daemon" killproc -p "$ZED_PIDFILE" "$ZED" - - rm -f "$LOCKFILE" -} - -status() -{ - [ ! -f "$LOCKFILE" ] && return 3 - - "$ZPOOL" status && echo "" && "$ZPOOL" list -} - -case "$1" in - start) - start - RETVAL=$? - ;; - stop) - stop - RETVAL=$? - ;; - status) - status - RETVAL=$? 
- ;; - restart) - stop - start - ;; - condrestart) - if [ -f "$LOCKFILE" ]; then - stop - start - fi - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - ;; -esac - -exit $RETVAL diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in index c9bf36e623..04b0033f6d 100644 --- a/rpm/generic/zfs.spec.in +++ b/rpm/generic/zfs.spec.in @@ -229,7 +229,12 @@ find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \; %if 0%{?_systemd} %systemd_post zfs.target %else -[ -x /sbin/chkconfig ] && /sbin/chkconfig --add zfs +if [ -x /sbin/chkconfig ]; then + /sbin/chkconfig --add zfs-import + /sbin/chkconfig --add zfs-mount + /sbin/chkconfig --add zfs-share + /sbin/chkconfig --add zfs-zed +fi %endif exit 0 @@ -237,8 +242,11 @@ exit 0 %if 0%{?_systemd} %systemd_preun zfs.target %else -if [ $1 -eq 0 ] ; then - [ -x /sbin/chkconfig ] && /sbin/chkconfig --del zfs +if [ $1 -eq 0 ] && [ -x /sbin/chkconfig ]; then + /sbin/chkconfig --del zfs-import + /sbin/chkconfig --del zfs-mount + /sbin/chkconfig --del zfs-share + /sbin/chkconfig --del zfs-zed fi %endif exit 0 @@ -258,14 +266,15 @@ exit 0 %{_udevdir}/vdev_id %{_udevdir}/zvol_id %{_udevdir}/rules.d/* -%config(noreplace) %{_sysconfdir}/%{name} %if 0%{?_systemd} /usr/lib/modules-load.d/* %{_unitdir}/* %{_presetdir}/* %else -%{_sysconfdir}/init.d/* +%config(noreplace) %{_sysconfdir}/init.d/* +%config(noreplace) %{_initconfdir}/zfs %endif +%config(noreplace) %{_sysconfdir}/%{name} %files -n libzpool2 %{_libdir}/libzpool.so.*