diff --git a/config/zfs-build.m4 b/config/zfs-build.m4
index 3ea77abd65..d281b4dcf9 100644
--- a/config/zfs-build.m4
+++ b/config/zfs-build.m4
@@ -188,6 +188,8 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
         VENDOR=slackware ;
     elif test -f /etc/gentoo-release ; then
         VENDOR=gentoo ;
+    elif test -f /etc/lunar.release ; then
+        VENDOR=lunar ;
     else
         VENDOR= ;
     fi
@@ -196,12 +198,14 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
     AC_MSG_CHECKING([default package type])
     case "$VENDOR" in
-        fedora)     DEFAULT_PACKAGE=rpm ;;
         redhat)     DEFAULT_PACKAGE=rpm ;;
-        sles)       DEFAULT_PACKAGE=rpm ;;
+        fedora)     DEFAULT_PACKAGE=rpm ;;
         ubuntu)     DEFAULT_PACKAGE=deb ;;
         debian)     DEFAULT_PACKAGE=deb ;;
+        sles)       DEFAULT_PACKAGE=rpm ;;
         slackware)  DEFAULT_PACKAGE=tgz ;;
+        gentoo)     DEFAULT_PACKAGE=tgz ;;
+        lunar)      DEFAULT_PACKAGE=tgz ;;
         *)          DEFAULT_PACKAGE=rpm ;;
     esac
@@ -210,12 +214,14 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
     AC_MSG_CHECKING([default init script type])
     case "$VENDOR" in
+        redhat)     DEFAULT_INIT_SCRIPT=redhat ;;
         fedora)     DEFAULT_INIT_SCRIPT=fedora ;;
-        redhat)     DEFAULT_INIT_SCRIPT=fedora ;;
-        sles)       DEFAULT_INIT_SCRIPT=lsb ;;
         ubuntu)     DEFAULT_INIT_SCRIPT=lsb ;;
         debian)     DEFAULT_INIT_SCRIPT=lsb ;;
+        sles)       DEFAULT_INIT_SCRIPT=lsb ;;
         slackware)  DEFAULT_INIT_SCRIPT=lsb ;;
+        gentoo)     DEFAULT_INIT_SCRIPT=gentoo ;;
+        lunar)      DEFAULT_INIT_SCRIPT=lunar ;;
         *)          DEFAULT_INIT_SCRIPT=lsb ;;
     esac
diff --git a/configure b/configure
index b0d1b8a1ec..fee3c743df 100755
--- a/configure
+++ b/configure
@@ -11795,6 +11795,8 @@ $as_echo_n "checking linux distribution... " >&6; }
         VENDOR=slackware ;
     elif test -f /etc/gentoo-release ; then
         VENDOR=gentoo ;
+    elif test -f /etc/lunar.release ; then
+        VENDOR=lunar ;
     else
         VENDOR= ;
     fi
@@ -11805,12 +11807,14 @@ $as_echo "$VENDOR" >&6; }
 { $as_echo "$as_me:$LINENO: checking default package type" >&5
 $as_echo_n "checking default package type... " >&6; }
     case "$VENDOR" in
-        fedora)     DEFAULT_PACKAGE=rpm ;;
         redhat)     DEFAULT_PACKAGE=rpm ;;
-        sles)       DEFAULT_PACKAGE=rpm ;;
+        fedora)     DEFAULT_PACKAGE=rpm ;;
         ubuntu)     DEFAULT_PACKAGE=deb ;;
         debian)     DEFAULT_PACKAGE=deb ;;
+        sles)       DEFAULT_PACKAGE=rpm ;;
         slackware)  DEFAULT_PACKAGE=tgz ;;
+        gentoo)     DEFAULT_PACKAGE=tgz ;;
+        lunar)      DEFAULT_PACKAGE=tgz ;;
         *)          DEFAULT_PACKAGE=rpm ;;
     esac
@@ -11821,12 +11825,14 @@ $as_echo "$DEFAULT_PACKAGE" >&6; }
 { $as_echo "$as_me:$LINENO: checking default init script type" >&5
 $as_echo_n "checking default init script type... " >&6; }
" >&6; } case "$VENDOR" in + redhat) DEFAULT_INIT_SCRIPT=redhat ;; fedora) DEFAULT_INIT_SCRIPT=fedora ;; - redhat) DEFAULT_INIT_SCRIPT=fedora ;; - sles) DEFAULT_INIT_SCRIPT=lsb ;; ubuntu) DEFAULT_INIT_SCRIPT=lsb ;; debian) DEFAULT_INIT_SCRIPT=lsb ;; + sles) DEFAULT_INIT_SCRIPT=lsb ;; slackware) DEFAULT_INIT_SCRIPT=lsb ;; + gentoo) DEFAULT_INIT_SCRIPT=gentoo ;; + lunar) DEFAULT_INIT_SCRIPT=lunar ;; *) DEFAULT_INIT_SCRIPT=lsb ;; esac diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am index 094681edd9..dd11946e77 100644 --- a/etc/init.d/Makefile.am +++ b/etc/init.d/Makefile.am @@ -1,4 +1,4 @@ -EXTRA_DIST = zfs.fedora zfs.lsb +EXTRA_DIST = zfs.fedora zfs.gentoo zfs.lsb zfs.lunar zfs.redhat install-data-local: @instdest=$(DESTDIR)/$(sysconfdir)/init.d/zfs; \ diff --git a/etc/init.d/Makefile.in b/etc/init.d/Makefile.in index f221818f90..63012dc991 100644 --- a/etc/init.d/Makefile.in +++ b/etc/init.d/Makefile.in @@ -256,7 +256,7 @@ target_vendor = @target_vendor@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -EXTRA_DIST = zfs.fedora zfs.lsb +EXTRA_DIST = zfs.fedora zfs.gentoo zfs.lsb zfs.lunar zfs.redhat all: all-am .SUFFIXES: diff --git a/etc/init.d/zfs.gentoo b/etc/init.d/zfs.gentoo new file mode 100644 index 0000000000..d2ea90267f --- /dev/null +++ b/etc/init.d/zfs.gentoo @@ -0,0 +1,151 @@ +#!/sbin/runscript +# Copyright 1999-2011 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $ + +depend() +{ + before net + after udev +} + +CACHEFILE=/etc/zfs/zpool.cache +ZPOOL=/usr/sbin/zpool +ZFS=/usr/sbin/zfs +ZFS_MODULE=zfs +LOCKFILE=/var/lock/zfs/zfs_lockfile + +checksystem() +{ + /sbin/modinfo $ZFS_MODULE &>/dev/null + if [[ $? -ne 0 ]] + then + eerror "$ZFS_MODULE not found. Is the ZFS package installed?" + return 1 + fi + if [[ ! -x $ZPOOL ]] + then + eerror "$ZPOOL binary not found." + return 1 + fi + if [[ ! -x $ZFS ]] + then + eerror "$ZFS binary not found." + return 1 + fi + + # create the lockdir if not there + lockdir=$(dirname ${LOCKFILE}) + if [[ ! -d ${lockdir} ]] + then + mkdir -p ${lockdir} &>/dev/null + fi + return 0 +} + +start() +{ + if [[ -f $LOCKFILE ]] + then + einfo "ZFS already running, please stop it first. Delete $LOCKFILE if its not so." + eend 3 + return 3 + fi + ebegin "Starting ZFS" + checksystem || return 1 + if ! grep -q $ZFS_MODULE /proc/modules + then + /sbin/modprobe $ZFS_MODULE &>/dev/null + rv=$? + if [[ $rv -ne 0 ]] + then + eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'." + eend $rv + return $rv + fi + fi + + # Ensure / exists in /etc/mtab, if not update mtab accordingly. + # This should be handled by rc.sysinit but lets be paranoid. + awk '$2 == "/" { exit 1 }' /etc/mtab + RETVAL=$? + if [[ $RETVAL -eq 0 ]] + then + /bin/mount -f / + fi + + # Import all pools described by the cache file, and then mount + # all filesystem based on their properties. + if [[ -f $CACHEFILE ]] + then + einfo "Importing ZFS pools" + + # as per fedora script, import can fail if all pools are already imported + # The check for $rv makes no sense...but someday, it will work right. + $ZPOOL import -c $CACHEFILE -aN 2>/dev/null || true + rv=$? + if [[ $rv -ne 0 ]] + then + eerror "Failed to import not-yet imported pools." + eend $rv + return $rv + fi + fi + + einfo "Mounting ZFS filesystems" + $ZFS mount -a + rv=$? + if [[ $rv -ne 0 ]] + then + eerror "Failed to mount ZFS filesystems." 
+        eend $rv
+        return $rv
+    fi
+
+    # Hack: visit each mounted ZFS filesystem once as root, because otherwise
+    # zfs returns EPERM when a non-root user reads a mount before root did.
+    savepwd="$PWD"
+    mount | grep " type zfs " | sed 's/.*on //' | sed 's/ type zfs.*$//' | \
+    while read line
+    do
+        cd "$line" &> /dev/null
+        ls &> /dev/null
+    done
+    cd "$savepwd"
+
+    touch $LOCKFILE
+    eend 0
+    return 0
+}
+
+stop()
+{
+    if [[ ! -f $LOCKFILE ]]
+    then
+        einfo "ZFS is not started; remove $LOCKFILE if that is not the case."
+        eend 3
+        return 3
+    fi
+    ebegin "Unmounting ZFS filesystems"
+    sync
+    $ZFS umount -a
+    rv=$?
+    if [[ $rv -ne 0 ]]
+    then
+        eerror "Failed to umount ZFS filesystems."
+    fi
+    rm -f $LOCKFILE
+    eend $rv
+}
+
+status()
+{
+    if [[ ! -f $LOCKFILE ]]
+    then
+        einfo "ZFS is not started; remove $LOCKFILE if that is not the case."
+        eend 3
+        return 3
+    fi
+
+    # Show pool status and list.
+    $ZPOOL status && echo && $ZPOOL list
+}
diff --git a/etc/init.d/zfs.lunar b/etc/init.d/zfs.lunar
new file mode 100644
index 0000000000..c7aa1edb46
--- /dev/null
+++ b/etc/init.d/zfs.lunar
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# zfs           This shell script takes care of starting (mounting) and
+#               stopping (unmounting) zfs shares.
+#
+# chkconfig:    35 60 40
+# description:  ZFS is a combined file system and logical volume manager
+#               designed by Sun Microsystems.  It is made available on
+#               Linux using the SPL (Solaris Porting Layer) by
+#               zfsonlinux.org.
+# probe: true
+
+case $1 in
+  start)  echo "$1ing ZFS filesystems"
+
+    if ! grep "zfs" /proc/modules > /dev/null; then
+      echo "ZFS kernel module not loaded yet; loading...";
+      if ! modprobe zfs; then
+        echo "Failed to load ZFS kernel module...";
+        exit 0;
+      fi
+    fi
+
+    if ! [ `uname -m` == "x86_64" ]; then
+      echo "Warning: you are not running 64-bit.  Native ZFS on Linux is";
+      echo "         currently only supported and tested on 64-bit.";
+      # Should we abort here?  Anyone doing this should know what they
+      # are doing, so continue anyway.
+    fi
+
+    # Mount the filesystems.
+    while IFS= read -r -d $'\n' dev; do
+      mdev=$(echo "$dev" | awk '{ print $1; }')
+      echo -n "mounting $mdev..."
+      if zfs mount "$mdev"; then
+        echo "done";
+      else
+        echo "failed";
+      fi
+    done < <(zfs list -H);
+
+
+    ;;
+
+  stop)  echo "$1ping ZFS filesystems"
+
+    if grep "zfs" /proc/modules > /dev/null; then
+      # The module is loaded, so try to unmount the filesystems.
+      while IFS= read -r -d $'\n' dev; do
+        mdev=$(echo "$dev" | awk '{ print $1 }');
+        echo -n "umounting $mdev...";
+        if zfs umount "$mdev"; then
+          echo "done";
+        else
+          echo "failed";
+        fi
+      # The list is read in reverse order (tac) so that child
+      # filesystems are unmounted before their parents.
+      done < <(zfs list -H | tac);
+
+      # Finally, remove the module.
+      rmmod zfs
+
+
+    else
+      # Module not loaded; nothing to unmount.
+      exit 0
+    fi
+
+    ;;
+
+  restart)  echo "$1ing ZFS filesystems"
+    $0 stop
+    $0 start
+    ;;
+
+  *)  echo "Usage: $0 {start|stop|restart}"
+    ;;
+
+esac
diff --git a/etc/init.d/zfs.redhat b/etc/init.d/zfs.redhat
new file mode 100644
index 0000000000..99ff80e929
--- /dev/null
+++ b/etc/init.d/zfs.redhat
@@ -0,0 +1,166 @@
+#!/bin/bash
+#
+# zfs           This script will mount/umount the zfs filesystems.
+#
+# chkconfig:    2345 01 99
+# description:  This script will mount/umount the zfs filesystems during
+#               system boot/shutdown.  Configuration of which filesystems
+#               should be mounted is handled by the zfs 'mountpoint' and
+#               'canmount' properties.  See the zfs(8) man page for details.
+#               It is also responsible for all userspace zfs services.
+#
+### BEGIN INIT INFO
+# Provides: zfs
+# Required-Start:
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 1
+# Short-Description: Mount/umount the zfs filesystems
+# Description: ZFS is an advanced filesystem designed to simplify managing
+#              and protecting your data.  This service mounts the ZFS
+#              filesystems and starts all related zfs services.
+### END INIT INFO
+
+export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin
+
+# Source function library & LSB routines
+. /etc/rc.d/init.d/functions
+
+# script variables
+RETVAL=0
+ZPOOL=zpool
+ZFS=zfs
+servicename=zfs
+LOCKFILE=/var/lock/subsys/$servicename
+
+# functions
+zfs_installed() {
+    modinfo zfs > /dev/null 2>&1 || return 5
+    $ZPOOL > /dev/null 2>&1
+    [ $? -eq 127 ] && return 5
+    $ZFS > /dev/null 2>&1
+    [ $? -eq 127 ] && return 5
+    return 0
+}
+
+# This helper is duplicated between the init scripts; it could probably be
+# simplified with indirect variable references, but is kept straightforward.
+
+# The first parameter is a regular expression that filters fstab.
+read_fstab() {
+    unset FSTAB
+    n=0
+    while read -r fs mntpnt fstype opts blah ; do
+        fs=`printf '%b\n' "$fs"`
+        FSTAB[$n]=$fs
+        let n++
+    done < <(egrep "$1" /etc/fstab)
+}
+
+start()
+{
+    # Disable lockfile check
+    # if [ -f "$LOCKFILE" ] ; then return 0 ; fi
+
+    # Check if ZFS is installed.  If not, comply with FC standards and bail.
+    zfs_installed || {
+        action $"Checking if ZFS is installed: not installed" /bin/false
+        return 5
+    }
+
+    # Requires a selinux policy which has not been written yet.
+    if [ -r "/selinux/enforce" ] &&
+       [ "$(cat /selinux/enforce)" = "1" ]; then
+        action $"SELinux ZFS policy required: " /bin/false || return 6
+    fi
+
+    # Load the kernel ZFS module infrastructure.
+    if ! grep -q zfs /proc/modules ; then
+        action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5
+    fi
+    sleep 1
+
+    action $"Mounting automounted ZFS filesystems: " $ZFS mount -a || return 152
+
+    # Read fstab and try to mount zvols, ignoring errors.
+    read_fstab "^/dev/(zd|zvol)"
+    template=$"Mounting volume %s registered in fstab: "
+    for volume in "${FSTAB[@]}" ; do
+        string=`printf "$template" "$volume"`
+        action "$string" mount "$volume" 2>/dev/null || /bin/true
+    done
+
+    # touch "$LOCKFILE"
+}
+
+stop()
+{
+    # Disable lockfile check
+    # if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi
+
+    # Check if ZFS is installed.  If not, comply with FC standards and bail.
+    zfs_installed || {
+        action $"Checking if ZFS is installed: not installed" /bin/false
+        return 5
+    }
+
+    # System poweroff takes care of unmounting the filesystems,
+    # but unfortunately it never unmounts the root filesystem
+    # itself.
+
+    action $"Syncing ZFS filesystems: " sync
+    # Syncing is about the only thing we can do here; after that we
+    # hope that the umount process will succeed.  Unfortunately the
+    # umount process does not dismount the root file system.  There
+    # ought to be some way we can tell zfs to simply flush anything
+    # held in memory when a request to remount read-only comes in,
+    # but for now we just sync.
+
+    #echo -n $"Unmounting ZFS filesystems: "
+    #$ZFS umount -a
+    #RETVAL=$?
+    #if [ $RETVAL -ne 0 ]; then
+    #    failure
+
+    #    return 8
+    #fi
+    #success
+
+    rm -f "$LOCKFILE"
+}
+
+# See how we are called.
+case "$1" in
+    start)
+        start
+        RETVAL=$?
+        ;;
+    stop)
+        stop
+        RETVAL=$?
+        ;;
+    status)
+        lsmod | grep -q zfs || RETVAL=3
+        $ZPOOL status && echo && $ZFS list || {
+            [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4
+        }
+        ;;
+    restart)
+        stop
+        start
+        ;;
+    condrestart)
+        if [ -f "$LOCKFILE" ] ; then
+            stop
+            start
+        fi
+        ;;
+    *)
+        echo $"Usage: $0 {start|stop|status|restart|condrestart}"
+        RETVAL=3
+        ;;
+esac
+
+exit $RETVAL
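
A quick way to sanity-check the new vendor detection and its defaults, without rerunning the whole configure script, is to mirror the added logic in a small standalone script. This is only an illustrative sketch of the mapping introduced by this patch; the detection branches for the other distributions are elided, and the script itself is not part of the change:

#!/bin/sh
# Hypothetical helper mirroring the vendor -> default mapping added above.
if [ -f /etc/gentoo-release ]; then
    VENDOR=gentoo
elif [ -f /etc/lunar.release ]; then
    VENDOR=lunar
else
    VENDOR=
fi

case "$VENDOR" in
    gentoo) DEFAULT_PACKAGE=tgz ; DEFAULT_INIT_SCRIPT=gentoo ;;
    lunar)  DEFAULT_PACKAGE=tgz ; DEFAULT_INIT_SCRIPT=lunar  ;;
    *)      DEFAULT_PACKAGE=rpm ; DEFAULT_INIT_SCRIPT=lsb    ;;
esac

echo "vendor=${VENDOR:-unknown} package=$DEFAULT_PACKAGE init=$DEFAULT_INIT_SCRIPT"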