Merge pull request #214 from truenas/truenas/zfs-2.3-staging

Early zfs-2.3 for SCALE 24.10
This commit is contained in:
Ameer Hamza 2024-03-01 00:17:59 +05:00 committed by GitHub
commit 34be70e146
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
80 changed files with 6189 additions and 143 deletions

44
.github/workflows/ci.yml vendored Normal file
View File

@ -0,0 +1,44 @@
# CI workflow: builds the native ZFS Debian packages on every push.
name: CI
on: [push]
jobs:
  build-native-deb:
    runs-on: ubuntu-22.04
    # Build inside a Debian testing container so the packaging toolchain
    # matches the target distribution rather than the runner image.
    container:
      image: debian:testing
    steps:
      - name: Installing Dependencies
        # NOTE(review): all apt output is discarded, which makes failures
        # hard to diagnose from the log; the step still fails on a non-zero
        # exit status.
        run: |
          apt update > /dev/null 2>&1
          apt install -y linux-image-amd64 linux-headers-amd64 debhelper-compat devscripts > /dev/null 2>&1
      - name: Checkout
        # actions/checkout@v2 runs on the retired node12 runtime; v4 is the
        # supported release and takes the same inputs.
        uses: actions/checkout@v4
      - name: Build deb package
        run: |
          mk-build-deps --build-dep contrib/debian/control
          apt install -y ./*.deb
          sh autogen.sh
          ./configure
          cp -a contrib/debian debian
          sed 's/@CFGOPTS@/--enable-debuginfo/g' debian/rules.in > debian/rules
          chmod +x debian/rules
          dch -b -M --force-distribution --distribution bullseye-truenas-unstable 'Tagged from zfs CI'
          debuild -us -uc -b
          debian/rules override_dh_binary-modules
      # The redundant `if: success()` conditions were dropped: running only
      # when previous steps succeeded is already the default behavior.
      - name: Create artifacts dir
        run: mkdir artifacts
      - name: Move artifacts
        run: mv ../*.deb artifacts
      - name: Upload artifacts
        # upload-artifact@v1 has been deprecated and disabled by GitHub;
        # v4 is the supported release with the same name/path inputs.
        uses: actions/upload-artifact@v4
        with:
          name: zfs-native
          path: artifacts

28
.github/workflows/docker_image.yml vendored Normal file
View File

@ -0,0 +1,28 @@
# Builds the ixsystems/zfs Docker image and pushes it to Docker Hub
# whenever the release branch is updated.
name: build_image
on:
  push:
    branches:
      - 'truenas/zfs-2.3-release'
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Set up QEMU
        # The @v1 docker actions run on the retired node12 runtime; the
        # v3/v5 releases below accept the same inputs.
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        # No checkout step is needed: with no explicit context input,
        # build-push-action builds from the Git context (this repository).
        uses: docker/build-push-action@v5
        with:
          push: true
          tags: ixsystems/zfs:latest
      - name: Image digest
        run: echo ${{ steps.docker_build.outputs.digest }}

View File

@ -103,9 +103,7 @@ ERRLOGS=0
if [ ! -f Summary/Summary.md ]; then
# first call, we do the default summary (~500k)
echo -n > Summary.md
summarize_s "Sanity Tests Ubuntu 20.04" Logs-20.04-sanity
summarize_s "Sanity Tests Ubuntu 22.04" Logs-22.04-sanity
summarize_f "Functional Tests Ubuntu 20.04" Logs-20.04-functional
summarize_f "Functional Tests Ubuntu 22.04" Logs-22.04-functional
cat Summary.md >> $GITHUB_STEP_SUMMARY

View File

@ -11,7 +11,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [20.04, 22.04]
os: [22.04]
runs-on: ubuntu-${{ matrix.os }}
steps:
- uses: actions/checkout@v4
@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [20.04, 22.04]
os: [22.04]
needs: build
uses: ./.github/workflows/zfs-linux-tests.yml
with:

3
.gitignore vendored
View File

@ -52,6 +52,7 @@
!configure.ac
!copy-builtin
!COPYRIGHT
!Dockerfile
!LICENSE
!Makefile.am
!META
@ -84,6 +85,8 @@ modules.order
Makefile
Makefile.in
changelog
nfs41acl.h
nfs41acl_xdr.c
*.patch
*.orig
*.tmp

27
Dockerfile Normal file
View File

@ -0,0 +1,27 @@
# Build image: compiles the ZFS Debian packages from this source tree and
# installs the result (minus the dracut/initramfs packages) into the image.
FROM debian:bullseye-slim

# Use the key=value form; the legacy "ENV key value" syntax is deprecated.
ENV DEBIAN_FRONTEND=noninteractive

# Run "update" and "install" in the same layer so a cached "update" layer can
# never be paired with a newer package list (apt cache-busting).  The lists
# are kept because later steps (mk-build-deps / apt install) still resolve
# dependencies from them.
RUN apt-get update && apt-get install -y \
    debhelper-compat \
    devscripts

ENV WORK_DIR=/zfs_app/zfs
WORKDIR ${WORK_DIR}

# COPY is preferred over ADD for plain directory content (no tar/URL magic).
COPY . ${WORK_DIR}/

# Generate and install the build dependencies declared in debian/control.
RUN mk-build-deps --build-dep contrib/debian/control
RUN apt install -y ./*.deb

# Configure and build the packages exactly as the CI workflow does.
RUN sh autogen.sh
RUN ./configure
RUN cp -a contrib/debian debian
RUN sed 's/@CFGOPTS@/--enable-debuginfo/g' debian/rules.in > debian/rules
RUN chmod +x debian/rules
RUN dch -b -M --force-distribution --distribution bullseye-truenas-unstable "Tagged from ixsystems/zfs CI"
RUN debuild -us -uc -b

# Drop the boot-environment packages, then install everything else built.
RUN rm ../openzfs-zfs-dracut_*.deb
RUN rm ../openzfs-zfs-initramfs_*.deb
RUN apt-get install -y ../*.deb

View File

@ -119,9 +119,10 @@ cstyle_line = -exec ${top_srcdir}/scripts/cstyle.pl -cpP {} +
endif
CHECKS += cstyle
cstyle:
$(AM_V_at)find $(top_srcdir) -name build -prune \
$(AM_V_at)find $(top_srcdir) -name build -prune -o -name zfsd -prune \
-o -type f -name '*.[hc]' \
! -name 'zfs_config.*' ! -name '*.mod.c' \
! -name 'nfs41acl_xdr.c' ! -name 'nfs41acl.h' \
! -name 'opt_global.h' ! -name '*_if*.h' \
! -name 'zstd_compat_wrapper.h' \
! -path './module/zstd/lib/*' \

View File

@ -64,6 +64,9 @@ include $(srcdir)/%D%/zpool/Makefile.am
include $(srcdir)/%D%/zpool_influxdb/Makefile.am
include $(srcdir)/%D%/zstream/Makefile.am
if BUILD_FREEBSD
include $(srcdir)/%D%/zfsd/Makefile.am
endif
if BUILD_LINUX
mounthelper_PROGRAMS += mount.zfs

View File

@ -238,8 +238,9 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&enc_sysfs_path);
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
#endif
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
@ -1040,7 +1041,9 @@ vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
if (!nvl)
return (0);
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
#endif
return (wholedisk);
}
@ -1088,10 +1091,10 @@ zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
zpool_close(zhp);
return (0);
}
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
#endif
if (wholedisk) {
char *tmp;
path = strrchr(path, '/');

1
cmd/zfsd/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/zfsd

25
cmd/zfsd/Makefile.am Normal file
View File

@ -0,0 +1,25 @@
# Automake fragment for zfsd, the ZFS fault-management daemon.
# Pulled into cmd/Makefile.am inside an "if BUILD_FREEBSD" conditional,
# so it only applies to FreeBSD builds.

# Inherit the project-wide compiler/preprocessor flag sets.
zfsd_CFLAGS = $(AM_CFLAGS)
zfsd_CXXFLAGS = $(AM_CXXFLAGS)
zfsd_CPPFLAGS = $(AM_CPPFLAGS)

sbin_PROGRAMS += zfsd

# %D% expands to the directory of this fragment (cmd/zfsd).
zfsd_SOURCES = \
	%D%/callout.cc \
	%D%/case_file.cc \
	%D%/vdev.cc \
	%D%/vdev_iterator.cc \
	%D%/zfsd.cc \
	%D%/zfsd_event.cc \
	%D%/zfsd_exception.cc \
	%D%/zfsd_main.cc \
	%D%/zpool_list.cc

# In-tree convenience libraries.
zfsd_LDADD = \
	libnvpair.la \
	libuutil.la \
	libzfs_core.la \
	libzfs.la

# System libraries: librt plus the FreeBSD devd/geom/xml/sbuf support libs.
zfsd_LDADD += -lrt -lprivatedevdctl -lgeom -lbsdxml -lsbuf
zfsd_LDFLAGS = -pthread

220
cmd/zfsd/callout.cc Normal file
View File

@ -0,0 +1,220 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file callout.cc
*
* \brief Implementation of the Callout class - multi-client
* timer services built on top of the POSIX interval timer.
*/
#include <sys/byteorder.h>
#include <sys/time.h>
#include <signal.h>
#include <syslog.h>
#include <climits>
#include <list>
#include <map>
#include <string>
#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/consumer.h>
#include <devdctl/exception.h>
#include "callout.h"
#include "vdev_iterator.h"
#include "zfsd.h"
#include "zfsd_exception.h"
/*
 * All pending callouts, ordered by expiration time.  Each entry's
 * m_interval is stored relative to the entry before it (see Reset/Stop,
 * which maintain that invariant).
 */
std::list<Callout *> Callout::s_activeCallouts;
/* Set from the SIGALRM handler; consumed by ExpireCallouts(). */
bool Callout::s_alarmFired(false);

/*
 * Install the SIGALRM handler that drives callout expiration.
 */
void
Callout::Init()
{
	signal(SIGALRM, Callout::AlarmSignalHandler);
}
/*
 * Cancel a pending callout.
 *
 * Removes this callout from s_activeCallouts.  Because each entry's
 * m_interval is relative to its predecessor, the removed entry's interval
 * is added onto the entry that followed it, leaving the absolute
 * expiration times of the remaining callouts unchanged.
 *
 * \return  true if the callout was pending and is now cancelled;
 *          false if it was not pending.
 *
 * NOTE(review): the interval timer is not re-armed here even when the
 * head entry is removed; the already-armed timer still fires and
 * ExpireCallouts() copes with an empty list.
 */
bool
Callout::Stop()
{
	if (!IsPending())
		return (false);

	for (std::list<Callout *>::iterator it(s_activeCallouts.begin());
	     it != s_activeCallouts.end(); it++) {
		if (*it != this)
			continue;

		it = s_activeCallouts.erase(it);
		if (it != s_activeCallouts.end()) {
			/*
			 * Maintain correct interval for the
			 * callouts that follow the just removed
			 * entry.
			 */
			timeradd(&(*it)->m_interval, &m_interval,
			    &(*it)->m_interval);
		}
		break;
	}

	m_pending = false;
	return (true);
}
/*
 * Schedule (or reschedule) this callout.
 *
 * \param interval  Time that must elapse before the callout fires.
 *                  Must be non-zero or ZfsdException is thrown.
 * \param func      Callback invoked on expiration.
 * \param arg       Opaque argument passed to the callback.
 *
 * \return  true if a previously pending instance of this callout was
 *          cancelled by this call, false otherwise.
 *
 * The callout is inserted into s_activeCallouts in expiration order,
 * with m_interval rewritten to be relative to the preceding entry (and
 * the following entry's interval reduced to compensate).  If the new
 * entry becomes the head of the list, the POSIX interval timer is
 * re-armed with its delay.
 */
bool
Callout::Reset(const timeval &interval, CalloutFunc_t *func, void *arg)
{
	bool cancelled(false);

	if (!timerisset(&interval))
		throw ZfsdException("Callout::Reset: interval of 0");

	/* Cancel any pending instance before re-inserting. */
	cancelled = Stop();

	m_interval = interval;
	m_func = func;
	m_arg = arg;
	m_pending = true;

	std::list<Callout *>::iterator it(s_activeCallouts.begin());
	for (; it != s_activeCallouts.end(); it++) {
		if (timercmp(&(*it)->m_interval, &m_interval, <=)) {
			/*
			 * Decrease our interval by those that come
			 * before us.
			 */
			timersub(&m_interval, &(*it)->m_interval, &m_interval);
		} else {
			/*
			 * Account for the time between the newly
			 * inserted event and those that follow.
			 */
			timersub(&(*it)->m_interval, &m_interval,
			    &(*it)->m_interval);
			break;
		}
	}

	s_activeCallouts.insert(it, this);
	if (s_activeCallouts.front() == this) {
		/* We expire first: arm the timer for our delay. */
		itimerval timerval = { {0, 0}, m_interval };

		setitimer(ITIMER_REAL, &timerval, NULL);
	}
	return (cancelled);
}
/*
 * SIGALRM handler.  Records that the interval timer fired and wakes the
 * daemon's event loop; the expired callbacks themselves run later, from
 * ExpireCallouts() in normal (non-signal) context.
 */
void
Callout::AlarmSignalHandler(int)
{
	s_alarmFired = true;
	ZfsDaemon::WakeEventLoop();
}
/*
 * Invoke the callbacks of all callouts that have expired.
 *
 * No-op unless the SIGALRM handler has set s_alarmFired.  Pops the head
 * of s_activeCallouts (the entry the interval timer was armed for) plus
 * any following entries whose relative interval is zero (i.e. that
 * expire at the same instant), invoking each callback, then re-arms the
 * interval timer for the new head, if any.  Per callout.h, this runs
 * from Zfsd's event-loop context.
 */
void
Callout::ExpireCallouts()
{
	if (!s_alarmFired)
		return;

	s_alarmFired = false;
	if (s_activeCallouts.empty()) {
		/* Callout removal/SIGALRM race was lost. */
		return;
	}

	/*
	 * Expire the first callout (the one we used to set the
	 * interval timer) as well as any callouts following that
	 * expire at the same time (have a zero interval from
	 * the callout before it).
	 */
	do {
		Callout *cur(s_activeCallouts.front());

		s_activeCallouts.pop_front();
		cur->m_pending = false;
		cur->m_func(cur->m_arg);
	} while (!s_activeCallouts.empty()
	    && timerisset(&s_activeCallouts.front()->m_interval) == 0);

	if (!s_activeCallouts.empty()) {
		/* Re-arm the timer for the next pending expiration. */
		Callout *next(s_activeCallouts.front());
		itimerval timerval = { { 0, 0 }, next->m_interval };

		setitimer(ITIMER_REAL, &timerval, NULL);
	}
}
/*
 * Compute the time until this callout fires (a slight over-estimate;
 * see callout.h).
 *
 * Adds the residual time reported by getitimer() for the head callout
 * to the relative m_interval of every entry after the head, up to and
 * including this callout.  If the callout is not pending, the maximum
 * normalized timeval (INT_MAX seconds, 999999 usec) is returned.
 */
timeval
Callout::TimeRemaining() const
{
	/*
	 * Outline: Add the m_interval for each callout in s_activeCallouts
	 * ahead of this, except for the first callout. Add to that the result
	 * of getitimer (That's because the first callout stores its original
	 * interval setting while the timer is ticking).
	 */
	itimerval timervalToAlarm;
	timeval timeToExpiry;
	std::list<Callout *>::iterator it;

	if (!IsPending()) {
		timeToExpiry.tv_sec = INT_MAX;
		timeToExpiry.tv_usec = 999999; /*maximum normalized value*/
		return (timeToExpiry);
	}

	timerclear(&timeToExpiry);
	getitimer(ITIMER_REAL, &timervalToAlarm);
	timeval& timeToAlarm = timervalToAlarm.it_value;
	timeradd(&timeToExpiry, &timeToAlarm, &timeToExpiry);

	it =s_activeCallouts.begin();
	it++; /*skip the first callout in the list*/
	for (; it != s_activeCallouts.end(); it++) {
		timeradd(&timeToExpiry, &(*it)->m_interval, &timeToExpiry);
		if ((*it) == this)
			break;
	}
	return (timeToExpiry);
}

185
cmd/zfsd/callout.h Normal file
View File

@ -0,0 +1,185 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file callout.h
*
* \brief Interface for timer based callback services.
*
* Header requirements:
*
* #include <sys/time.h>
*
* #include <list>
*/
#ifndef _CALLOUT_H_
#define _CALLOUT_H_
/**
* \brief Type of the function callback from a Callout.
*/
typedef void CalloutFunc_t(void *);
/**
* \brief Interface to a schedulable one-shot timer with the granularity
* of the system clock (see setitimer(2)).
*
* Determination of callback expiration is triggered by the SIGALRM
* signal. Callout callbacks are always delivered from Zfsd's event
* processing loop.
*
* Periodic actions can be triggered via the Callout mechanisms by
* resetting the Callout from within its callback.
*/
class Callout
{
public:
/**
* Initialize the Callout subsystem.
*/
static void Init();
/**
* Function called (via SIGALRM) when our interval
* timer expires.
*/
static void AlarmSignalHandler(int);
/**
* Execute callbacks for all callouts that have the same
* expiration time as the first callout in the list.
*/
static void ExpireCallouts();
/** Constructor. */
Callout();
/**
* Returns true if callout has not been stopped,
* or deactivated since the last time the callout was
* reset.
*/
bool IsActive() const;
/**
* Returns true if callout is still waiting to expire.
*/
bool IsPending() const;
/**
* Disestablish a callout.
*/
bool Stop();
/**
* \brief Establish or change a timeout.
*
* \param interval Timeval indicating the time which must elapse
* before this callout fires.
* \param func Pointer to the callback function
* \param arg Argument pointer to pass to callback function
*
* \return Cancellation status.
* true: The previous callback was pending and therefore
* was cancelled.
* false: The callout was not pending at the time of this
* reset request.
* In all cases, a new callout is established.
*/
bool Reset(const timeval &interval, CalloutFunc_t *func, void *arg);
/**
* \brief Calculate the remaining time until this Callout's timer
* expires.
*
* The return value will be slightly greater than the actual time to
* expiry.
*
* If the callout is not pending, returns INT_MAX.
*/
timeval TimeRemaining() const;
private:
/**
* All active callouts sorted by expiration time. The callout
* with the nearest expiration time is at the head of the list.
*/
static std::list<Callout *> s_activeCallouts;
/**
* The interval timer has expired. This variable is set from
* signal handler context and tested from Zfsd::EventLoop()
* context via ExpireCallouts().
*/
static bool s_alarmFired;
/**
* Time, relative to others in the active list, until
* this callout is fired.
*/
timeval m_interval;
/** Callback function argument. */
void *m_arg;
/**
* The callback function associated with this timer
* entry.
*/
CalloutFunc_t *m_func;
/** State of this callout. */
bool m_pending;
};
//- Callout public const methods ----------------------------------------------
/** \return true iff this callout is scheduled but has not yet fired. */
inline bool
Callout::IsPending() const
{
	return (m_pending);
}

//- Callout public methods ----------------------------------------------------
/** Construct an inactive callout with a cleared interval. */
inline
Callout::Callout()
 : m_arg(0),
   m_func(NULL),
   m_pending(false)
{
	timerclear(&m_interval);
}

#endif /* _CALLOUT_H_ */

1195
cmd/zfsd/case_file.cc Normal file

File diff suppressed because it is too large Load Diff

452
cmd/zfsd/case_file.h Normal file
View File

@ -0,0 +1,452 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file case_file.h
*
* CaseFile objects aggregate vdev faults that may require ZFSD action
* in order to maintain the health of a ZFS pool.
*
* Header requirements:
*
* #include <list>
*
* #include "callout.h"
* #include "zfsd_event.h"
*/
#ifndef _CASE_FILE_H_
#define _CASE_FILE_H_
/*=========================== Forward Declarations ===========================*/
class CaseFile;
class Vdev;
/*============================= Class Definitions ============================*/
/*------------------------------- CaseFileList -------------------------------*/
/**
* CaseFileList is a specialization of the standard list STL container.
*/
typedef std::list< CaseFile *> CaseFileList;
/*--------------------------------- CaseFile ---------------------------------*/
/**
* A CaseFile object is instantiated anytime a vdev for an active pool
* experiences an I/O error, is faulted by ZFS, or is determined to be
* missing/removed.
*
* A vdev may have at most one CaseFile.
*
* CaseFiles are retired when a vdev leaves an active pool configuration
* or an action is taken to resolve the issues recorded in the CaseFile.
*
* Logging a case against a vdev does not imply that an immediate action
* to resolve a fault is required or even desired. For example, a CaseFile
* must accumulate a number of I/O errors in order to flag a device as
* degraded.
*
* Vdev I/O errors are not recorded in ZFS label information. For this
* reason, CaseFile%%s with accumulated I/O error events are serialized
* to the file system so that they survive across boots. Currently all
* other fault types can be reconstructed from ZFS label information, so
* CaseFile%%s for missing, faulted, or degraded members are just recreated
* at ZFSD startup instead of being deserialized from the file system.
*/
class CaseFile
{
public:
/**
* \brief Find a CaseFile object by a vdev's pool/vdev GUID tuple.
*
* \param poolGUID Pool GUID for the vdev of the CaseFile to find.
* If InvalidGuid, then only match the vdev GUID
* instead of both pool and vdev GUIDs.
* \param vdevGUID Vdev GUID for the vdev of the CaseFile to find.
*
* \return If found, a pointer to a valid CaseFile object.
* Otherwise NULL.
*/
static CaseFile *Find(DevdCtl::Guid poolGUID, DevdCtl::Guid vdevGUID);
/**
* \brief Find multiple CaseFile objects by a vdev's pool/vdev
* GUID tuple (special case for spare vdevs)
*
* \param poolGUID Pool GUID for the vdev of the CaseFile to find.
* If InvalidGuid, then only match the vdev GUID
* instead of both pool and vdev GUIDs.
* \param vdevGUID Vdev GUID for the vdev of the CaseFile to find.
* \param caseList List of cases associated with the vdev.
*/
static void Find(DevdCtl::Guid poolGUID, DevdCtl::Guid vdevGUID,
CaseFileList &caseList);
/**
* \brief Find a CaseFile object by a vdev's current/last known
* physical path.
*
* \param physPath Physical path of the vdev of the CaseFile to find.
*
* \return If found, a pointer to a valid CaseFile object.
* Otherwise NULL.
*/
static CaseFile *Find(const string &physPath);
/**
* \brief ReEvaluate all open cases whose pool guid matches the argument
*
* \param poolGUID Only reevaluate cases for this pool
* \param event Try to consume this event with the casefile
*/
static void ReEvaluateByGuid(DevdCtl::Guid poolGUID,
const ZfsEvent &event);
/**
* \brief Create or return an existing active CaseFile for the
* specified vdev.
*
* \param vdev The vdev object for which to find/create a CaseFile.
*
* \return A reference to a valid CaseFile object.
*/
static CaseFile &Create(Vdev &vdev);
/**
* \brief Deserialize all serialized CaseFile objects found in
* the file system.
*/
static void DeSerialize();
/**
* \brief returns true if there are no CaseFiles
*/
static bool Empty();
/**
* \brief Emit syslog data on all active CaseFile%%s in the system.
*/
static void LogAll();
/**
* \brief Destroy the in-core cache of CaseFile data.
*
* This routine does not disturb the on disk, serialized, CaseFile
* data.
*/
static void PurgeAll();
DevdCtl::Guid PoolGUID() const;
DevdCtl::Guid VdevGUID() const;
vdev_state VdevState() const;
const string &PoolGUIDString() const;
const string &VdevGUIDString() const;
const string &PhysicalPath() const;
/**
* \brief Attempt to resolve this CaseFile using the disk
* resource at the given device/physical path/vdev object
* tuple.
*
* \param devPath The devfs path for the disk resource.
* \param physPath The physical path information reported by
* the disk resource.
* \param vdev If the disk contains ZFS label information,
* a pointer to the disk label's vdev object
* data. Otherwise NULL.
*
* \return True if this event was consumed by this CaseFile.
*/
bool ReEvaluate(const string &devPath, const string &physPath,
Vdev *vdev);
/**
* \brief Update this CaseFile in light of the provided ZfsEvent.
*
* Must be virtual so it can be overridden in the unit tests
*
* \param event The ZfsEvent to evaluate.
*
* \return True if this event was consumed by this CaseFile.
*/
virtual bool ReEvaluate(const ZfsEvent &event);
/**
* \brief Register an itimer callout for the given event, if necessary
*/
virtual void RegisterCallout(const DevdCtl::Event &event);
/**
* \brief Close a case if it is no longer relevant.
*
* This method deals with cases tracking soft errors. Soft errors
* will be discarded should a remove event occur within a short period
* of the soft errors being reported. We also discard the events
* if the vdev is marked degraded or failed.
*
* \return True if the case is closed. False otherwise.
*/
bool CloseIfSolved();
/**
* \brief Emit data about this CaseFile via syslog(3).
*/
void Log();
/**
* \brief Whether we should degrade this vdev
*/
bool ShouldDegrade() const;
/**
* \brief Whether we should fault this vdev
*/
bool ShouldFault() const;
/**
* \brief If this vdev is spare
*/
int IsSpare();
protected:
enum {
/**
* The number of soft errors on a vdev required
* to transition a vdev from healthy to degraded
* status.
*/
ZFS_DEGRADE_IO_COUNT = 50
};
static CalloutFunc_t OnGracePeriodEnded;
/**
* \brief scandir(3) filter function used to find files containing
* serialized CaseFile data.
*
* \param dirEntry Directory entry for the file to filter.
*
* \return Non-zero for a file to include in the selection,
* otherwise 0.
*/
static int DeSerializeSelector(const struct dirent *dirEntry);
/**
* \brief Given the name of a file containing serialized events from a
* CaseFile object, create/update an in-core CaseFile object
* representing the serialized data.
*
* \param fileName The name of a file containing serialized events
* from a CaseFile object.
*/
static void DeSerializeFile(const char *fileName);
/** Constructor. */
CaseFile(const Vdev &vdev);
/**
* Destructor.
* Must be virtual so it can be subclassed in the unit tests
*/
virtual ~CaseFile();
/**
* \brief Reload state for the vdev associated with this CaseFile.
*
* \return True if the refresh was successful. False if the system
* has no record of the pool or vdev for this CaseFile.
*/
virtual bool RefreshVdevState();
/**
* \brief Free all events in the m_events list.
*/
void PurgeEvents();
/**
* \brief Free all events in the m_tentativeEvents list.
*/
void PurgeTentativeEvents();
/**
* \brief Commit to file system storage.
*/
void Serialize();
/**
* \brief Retrieve event data from a serialization stream.
*
* \param caseStream The serialization stream to parse.
*/
void DeSerialize(std::ifstream &caseStream);
/**
* \brief Serializes the supplied event list and writes it to fd
*
* \param prefix If not NULL, this prefix will be prepended to
* every event in the file.
*/
void SerializeEvList(const DevdCtl::EventList events, int fd,
const char* prefix=NULL) const;
/**
* \brief Unconditionally close a CaseFile.
*/
virtual void Close();
/**
* \brief Callout callback invoked when the remove timer grace
* period expires.
*
* If no remove events are received prior to the grace period
* firing, then any tentative events are promoted and counted
* against the health of the vdev.
*/
void OnGracePeriodEnded();
/**
* \brief Attempt to activate a spare on this case's pool.
*
* Call this whenever a pool becomes degraded. It will look for any
* spare devices and activate one to replace the casefile's vdev. It
* will _not_ close the casefile; that should only happen when the
* missing drive is replaced or the user promotes the spare.
*
* \return True if a spare was activated
*/
bool ActivateSpare();
/**
* \brief replace a pool's vdev with another
*
* \param vdev_type The type of the new vdev. Usually either
* VDEV_TYPE_DISK or VDEV_TYPE_FILE
* \param path The file system path to the new vdev
* \param isspare Whether the new vdev is a spare
*
* \return true iff the replacement was successful
*/
bool Replace(const char* vdev_type, const char* path, bool isspare);
/**
* \brief Which vdev, if any, is replacing ours.
*
* \param zhp Pool handle state from the caller context
*
* \return the vdev that is currently replacing ours,
* or NonexistentVdev if there isn't one.
*/
Vdev BeingReplacedBy(zpool_handle_t *zhp);
/**
* \brief All CaseFiles being tracked by ZFSD.
*/
static CaseFileList s_activeCases;
/**
* \brief The file system path to serialized CaseFile data.
*/
static const string s_caseFilePath;
/**
* \brief The time ZFSD waits before promoting a tentative event
* into a permanent event.
*/
static const timeval s_removeGracePeriod;
/**
* \brief A list of soft error events counted against the health of
* a vdev.
*/
DevdCtl::EventList m_events;
/**
* \brief A list of soft error events waiting for a grace period
* expiration before being counted against the health of
* a vdev.
*/
DevdCtl::EventList m_tentativeEvents;
DevdCtl::Guid m_poolGUID;
DevdCtl::Guid m_vdevGUID;
vdev_state m_vdevState;
string m_poolGUIDString;
string m_vdevGUIDString;
string m_vdevPhysPath;
int m_is_spare;
/**
* \brief Callout that times out the grace period for tentative events.
*/
Callout m_tentativeTimer;
private:
nvlist_t *CaseVdev(zpool_handle_t *zhp) const;
};
//- CaseFile inline public accessors ------------------------------------------

/** \return GUID of the pool this case is filed against. */
inline DevdCtl::Guid
CaseFile::PoolGUID() const
{
	return (m_poolGUID);
}

/** \return GUID of the vdev this case is filed against. */
inline DevdCtl::Guid
CaseFile::VdevGUID() const
{
	return (m_vdevGUID);
}

/** \return Last recorded state of this case's vdev. */
inline vdev_state
CaseFile::VdevState() const
{
	return (m_vdevState);
}

/** \return The pool GUID rendered as a string. */
inline const string &
CaseFile::PoolGUIDString() const
{
	return (m_poolGUIDString);
}

/** \return The vdev GUID rendered as a string. */
inline const string &
CaseFile::VdevGUIDString() const
{
	return (m_vdevGUIDString);
}

/** \return Last known physical path of this case's vdev. */
inline const string &
CaseFile::PhysicalPath() const
{
	return (m_vdevPhysPath);
}

#endif /* _CASE_FILE_H_ */

358
cmd/zfsd/vdev.cc Normal file
View File

@ -0,0 +1,358 @@
/*-
* Copyright (c) 2011, 2012, 2013, 2014 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file vdev.cc
*
* Implementation of the Vdev class.
*/
#include <syslog.h>
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/fs/zfs.h>
#include <libzfs.h>
/*
* Undefine flush, defined by cpufunc.h on sparc64, because it conflicts with
* C++ flush methods
*/
#undef flush
#include <list>
#include <map>
#include <string>
#include <sstream>
#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>
#include "vdev.h"
#include "vdev_iterator.h"
#include "zfsd.h"
#include "zfsd_exception.h"
#include "zpool_list.h"
__FBSDID("$FreeBSD$");
/*============================ Namespace Control =============================*/
using std::string;
using std::stringstream;
//- Special objects -----------------------------------------------------------
Vdev NonexistentVdev;
//- Vdev Inline Public Methods ------------------------------------------------
/*=========================== Class Implementations ==========================*/
/*----------------------------------- Vdev -----------------------------------*/
/*
 * Special constructor used only for the NonexistentVdev sentinel object.
 * Both configuration pointers are left NULL, so DoesNotExist() reports
 * true and GUID lookups are never attempted.
 */
Vdev::Vdev()
	: m_poolConfig(NULL),
	  m_config(NULL)
{}
bool
Vdev::VdevLookupPoolGuid()
{
uint64_t guid;
if (nvlist_lookup_uint64(m_poolConfig, ZPOOL_CONFIG_POOL_GUID, &guid))
return (false);
m_poolGUID = guid;
return (true);
}
/*
 * Read ZPOOL_CONFIG_GUID from this vdev's configuration into m_vdevGUID.
 * Unlike the pool GUID, every vdev config must carry its own GUID, so a
 * failed lookup is treated as fatal and raises ZfsdException.
 */
void
Vdev::VdevLookupGuid()
{
	uint64_t guid;

	if (nvlist_lookup_uint64(m_config, ZPOOL_CONFIG_GUID, &guid) != 0)
		throw ZfsdException("Unable to extract vdev GUID "
		    "from vdev config data.");
	m_vdevGUID = guid;
}
/*
 * Construct a Vdev for a member of an imported pool, deriving the pool
 * configuration from the open pool handle.  Both GUID lookups must
 * succeed here; failure raises ZfsdException.
 */
Vdev::Vdev(zpool_handle_t *pool, nvlist_t *config)
	: m_poolConfig(zpool_get_config(pool, NULL)),
	  m_config(config)
{
	if (!VdevLookupPoolGuid())
		throw ZfsdException("Can't extract pool GUID from handle.");
	VdevLookupGuid();
}

/*
 * Construct a Vdev from an already-extracted pool configuration plus the
 * vdev's own configuration nvlist.  As above, both GUIDs are mandatory.
 */
Vdev::Vdev(nvlist_t *poolConfig, nvlist_t *config)
	: m_poolConfig(poolConfig),
	  m_config(config)
{
	if (!VdevLookupPoolGuid())
		throw ZfsdException("Can't extract pool GUID from config.");
	VdevLookupGuid();
}
/*
 * Construct a Vdev from an on-disk label's nvlist.  The label doubles as
 * both the "pool config" and the vdev config until the real per-vdev
 * config can be located within the label's vdev tree.  Note that
 * m_config may end up NULL (see the catch block below) for available
 * spares.
 */
Vdev::Vdev(nvlist_t *labelConfig)
	: m_poolConfig(labelConfig),
	  m_config(labelConfig)
{
	/*
	 * Spares do not have a Pool GUID.  Tolerate its absence.
	 * Code accessing this Vdev in a context where the Pool GUID is
	 * required will find it invalid (as it is upon Vdev construction)
	 * and act accordingly.
	 */
	(void) VdevLookupPoolGuid();
	VdevLookupGuid();

	try {
		/* Locate this vdev's own config within the label's tree. */
		m_config = VdevIterator(labelConfig).Find(m_vdevGUID);
	} catch (const ZfsdException &exp) {
		/*
		 * When reading a spare's label, it is normal not to find
		 * a list of vdevs
		 */
		m_config = NULL;
	}
}
bool
Vdev::IsSpare() const
{
uint64_t is_spare(0);
if (m_config == NULL)
return (false);
(void)nvlist_lookup_uint64(m_config, ZPOOL_CONFIG_IS_SPARE, &is_spare);
return (bool(is_spare));
}
/*
 * Report this vdev's state.  Three sources are consulted in order:
 * 1) a missing config implies an available hotspare (healthy);
 * 2) in-core vdev stats, when present, are authoritative;
 * 3) otherwise the state is synthesized from label fault/degrade flags.
 */
vdev_state
Vdev::State() const
{
	uint64_t *nvlist_array;
	vdev_stat_t *vs;
	uint_t vsc;

	if (m_config == NULL) {
		/*
		 * If we couldn't find the list of vdevs, that normally means
		 * that this is an available hotspare. In that case, we will
		 * presume it to be healthy. Even if this spare had formerly
		 * been in use, been degraded, and been replaced, the act of
		 * replacement wipes the degraded bit from the label. So we
		 * have no choice but to presume that it is healthy.
		 */
		return (VDEV_STATE_HEALTHY);
	}

	/* Prefer live stats when the config carries them. */
	if (nvlist_lookup_uint64_array(m_config, ZPOOL_CONFIG_VDEV_STATS,
	    &nvlist_array, &vsc) == 0) {
		vs = reinterpret_cast<vdev_stat_t *>(nvlist_array);
		return (static_cast<vdev_state>(vs->vs_state));
	}

	/*
	 * Stats are not available.  This vdev was created from a label.
	 * Synthesize a state based on available data.
	 */
	uint64_t faulted(0);
	uint64_t degraded(0);

	(void)nvlist_lookup_uint64(m_config, ZPOOL_CONFIG_FAULTED, &faulted);
	(void)nvlist_lookup_uint64(m_config, ZPOOL_CONFIG_DEGRADED, &degraded);
	if (faulted)
		return (VDEV_STATE_FAULTED);
	if (degraded)
		return (VDEV_STATE_DEGRADED);
	return (VDEV_STATE_HEALTHY);
}
std::list<Vdev>
Vdev::Children()
{
nvlist_t **vdevChildren;
int result;
u_int numChildren;
std::list<Vdev> children;
if (m_poolConfig == NULL || m_config == NULL)
return (children);
result = nvlist_lookup_nvlist_array(m_config,
ZPOOL_CONFIG_CHILDREN, &vdevChildren, &numChildren);
if (result != 0)
return (children);
for (u_int c = 0;c < numChildren; c++)
children.push_back(Vdev(m_poolConfig, vdevChildren[c]));
return (children);
}
/*
 * Return the root vdev of the pool this vdev belongs to, or
 * NonexistentVdev when no pool configuration (or no vdev tree within
 * it) is available.
 */
Vdev
Vdev::RootVdev()
{
	nvlist_t *rootConfig;

	if (m_poolConfig != NULL &&
	    nvlist_lookup_nvlist(m_poolConfig, ZPOOL_CONFIG_VDEV_TREE,
	    &rootConfig) == 0)
		return (Vdev(m_poolConfig, rootConfig));

	return (NonexistentVdev);
}
/*
 * Find our parent.  This requires doing a traversal of the config; we can't
 * cache it as leaf vdevs may change their pool config location (spare,
 * replacing, mirror, etc).
 */
Vdev
Vdev::Parent()
{
	std::list<Vdev> to_examine;
	std::list<Vdev> children;
	std::list<Vdev>::iterator children_it;

	/* Start the search at the root of the vdev tree. */
	to_examine.push_back(RootVdev());
	for (;;) {
		if (to_examine.empty())
			return (NonexistentVdev);
		Vdev vd = to_examine.front();
		if (vd.DoesNotExist())
			return (NonexistentVdev);
		to_examine.pop_front();
		children = vd.Children();
		children_it = children.begin();
		for (;children_it != children.end(); children_it++) {
			Vdev child = *children_it;

			/* vd is our parent if it has us as a child. */
			if (child.GUID() == GUID())
				return (vd);

			/*
			 * push_front makes this a depth-first traversal of
			 * the vdev tree.
			 */
			to_examine.push_front(child);
		}
	}
}
/*
 * An available (unassigned) spare is identified by the absence of a
 * pool GUID: vdevs that belong to a pool always carry one.
 */
bool
Vdev::IsAvailableSpare() const
{
	return (PoolGUID() ? false : true);
}
bool
Vdev::IsSpare()
{
uint64_t spare;
if (nvlist_lookup_uint64(m_config, ZPOOL_CONFIG_IS_SPARE, &spare) != 0)
return (false);
return (spare != 0);
}
/*
 * Report whether this vdev is a hot spare that is currently spared into
 * an active pool (vs_aux == VDEV_AUX_SPARED in its stats).
 *
 * Fix: initialize vs to NULL.  nvlist_lookup_uint64_array() does not
 * write to the output pointer on failure, so the subsequent
 * "vs == NULL" test read an uninitialized pointer whenever the
 * ZPOOL_CONFIG_VDEV_STATS lookup failed (undefined behavior).  The
 * sibling IsResilvering() already initializes its stats pointer the
 * same way.
 */
bool
Vdev::IsActiveSpare() const
{
	vdev_stat_t *vs = NULL;
	uint_t c;

	if (m_poolConfig == NULL)
		return (false);
	(void) nvlist_lookup_uint64_array(m_config, ZPOOL_CONFIG_VDEV_STATS,
	    reinterpret_cast<uint64_t **>(&vs), &c);
	if (vs == NULL || vs->vs_aux != VDEV_AUX_SPARED)
		return (false);
	return (true);
}
/*
 * Report whether a resilver is in progress on this vdev.  Only healthy
 * vdevs are checked; the scan stats, when present, must indicate an
 * active POOL_SCAN_RESILVER.  ps is initialized to NULL so a failed
 * stats lookup is safely detected.
 */
bool
Vdev::IsResilvering() const
{
	pool_scan_stat_t *ps = NULL;
	uint_t c;

	if (State() != VDEV_STATE_HEALTHY)
		return (false);
	(void) nvlist_lookup_uint64_array(m_config, ZPOOL_CONFIG_SCAN_STATS,
	    reinterpret_cast<uint64_t **>(&ps), &c);
	if (ps == NULL || ps->pss_func != POOL_SCAN_RESILVER)
		return (false);
	return (true);
}
/*
 * Render this vdev's GUID as a decimal string via the Guid stream
 * inserter.
 */
string
Vdev::GUIDString() const
{
	std::ostringstream guidStream;

	guidStream << GUID();
	return (guidStream.str());
}
/*
 * Return the display name for this vdev as formatted by
 * zpool_vdev_name(); verbose requests the more detailed form.
 * NOTE(review): if the libzfs in use returns allocated storage from
 * zpool_vdev_name(), copying it into a std::string here leaks that
 * buffer -- confirm against the linked libzfs version.
 */
string
Vdev::Name(zpool_handle_t *zhp, bool verbose) const
{
	return (zpool_vdev_name(g_zfsHandle, zhp, m_config,
	    verbose ? B_TRUE : B_FALSE));
}
/*
 * Return the vdev's device path (ZPOOL_CONFIG_PATH), or the empty
 * string when no configuration or no path entry is present.
 */
string
Vdev::Path() const
{
	const char *devPath(NULL);

	if (m_config != NULL &&
	    nvlist_lookup_string(m_config, ZPOOL_CONFIG_PATH, &devPath) == 0)
		return (devPath);

	return ("");
}
/*
 * Return the vdev's physical path (ZPOOL_CONFIG_PHYS_PATH), or the
 * empty string when no configuration or no physical-path entry is
 * present.
 */
string
Vdev::PhysicalPath() const
{
	const char *physPath(NULL);

	if (m_config != NULL &&
	    nvlist_lookup_string(m_config, ZPOOL_CONFIG_PHYS_PATH,
	    &physPath) == 0)
		return (physPath);

	return ("");
}

188
cmd/zfsd/vdev.h Normal file
View File

@ -0,0 +1,188 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file vdev.h
*
* Definition of the Vdev class.
*
* Header requirements:
*
* #include <string>
* #include <list>
*
* #include <devdctl/guid.h>
*/
#ifndef _VDEV_H_
#define _VDEV_H_
/*=========================== Forward Declarations ===========================*/
struct zpool_handle;
typedef struct zpool_handle zpool_handle_t;
struct nvlist;
typedef struct nvlist nvlist_t;
/*============================= Class Definitions ============================*/
/*----------------------------------- Vdev -----------------------------------*/
/**
 * \brief Wrapper class for a vdev's name/value configuration list
 *        simplifying access to commonly used vdev attributes.
 */
class Vdev
{
public:
	/**
	 * \brief Instantiate a vdev object for a vdev that is a member
	 *        of an imported pool.
	 *
	 * \param pool        The pool object containing the vdev with
	 *                    configuration data provided in vdevConfig.
	 * \param vdevConfig  Vdev configuration data.
	 *
	 * This method should be used whenever dealing with vdev's
	 * enumerated via the ZpoolList class.  The in-core configuration
	 * data for a vdev does not contain all of the items found in
	 * the on-disk label.  This requires the vdev class to augment
	 * the data in vdevConfig with data found in the pool object.
	 */
	Vdev(zpool_handle_t *pool, nvlist_t *vdevConfig);

	/**
	 * \brief Instantiate a vdev object for a vdev that is a member
	 *        of a pool configuration.
	 *
	 * \param poolConfig  The pool configuration containing the vdev
	 *                    configuration data provided in vdevConfig.
	 * \param vdevConfig  Vdev configuration data.
	 *
	 * This method should be used whenever dealing with vdev's
	 * enumerated via the ZpoolList class.  The in-core configuration
	 * data for a vdev does not contain all of the items found in
	 * the on-disk label.  This requires the vdev class to augment
	 * the data in vdevConfig with data found in the pool object.
	 */
	Vdev(nvlist_t *poolConfig, nvlist_t *vdevConfig);

	/**
	 * \brief Instantiate a vdev object from a ZFS label stored on
	 *        the device.
	 *
	 * \param vdevConfig  The name/value list retrieved by reading
	 *                    the label information on a leaf vdev.
	 */
	Vdev(nvlist_t *vdevConfig);

	/**
	 * \brief No-op copy constructor for nonexistent vdevs.
	 */
	Vdev();

	/**
	 * \brief No-op virtual destructor, since this class has virtual
	 *        functions.
	 */
	virtual ~Vdev();

	/** \brief True for the NonexistentVdev sentinel (NULL config). */
	bool			DoesNotExist()	const;

	/**
	 * \brief Return a list of the vdev's children.
	 */
	std::list<Vdev>		 Children();

	/** \brief This vdev's own GUID. */
	virtual DevdCtl::Guid	 GUID()		const;

	/** \brief True if the config marks this vdev as a hot spare. */
	bool			 IsSpare()	const;

	/** \brief GUID of the containing pool (invalid for bare spares). */
	virtual DevdCtl::Guid	 PoolGUID()	const;

	/** \brief Current state, from stats or synthesized from the label. */
	virtual vdev_state	 State()	const;

	/** \brief Device path (ZPOOL_CONFIG_PATH) or "". */
	std::string		 Path()		const;

	/** \brief Physical path (ZPOOL_CONFIG_PHYS_PATH) or "". */
	virtual std::string	 PhysicalPath()	const;

	/** \brief The vdev GUID rendered as a decimal string. */
	std::string		 GUIDString()	const;

	/** \brief Raw pool configuration nvlist (may be NULL). */
	nvlist_t		*PoolConfig()	const;

	/** \brief Raw vdev configuration nvlist (may be NULL). */
	nvlist_t		*Config()	const;

	/** \brief Parent vdev found by tree traversal, or NonexistentVdev. */
	Vdev			 Parent();

	/** \brief Root vdev of the owning pool, or NonexistentVdev. */
	Vdev			 RootVdev();

	/** \brief Display name as produced by zpool_vdev_name(). */
	std::string		 Name(zpool_handle_t *, bool verbose)	const;

	/** \brief Non-const variant of the spare predicate. */
	bool			 IsSpare();

	/** \brief True when this spare is not assigned to any pool. */
	bool			 IsAvailableSpare()	const;

	/** \brief True when this spare is actively spared into a pool. */
	bool			 IsActiveSpare()	const;

	/** \brief True when a resilver is in progress on this vdev. */
	bool			 IsResilvering()	const;

private:
	/** \brief Extract m_vdevGUID from m_config; throws on failure. */
	void			 VdevLookupGuid();

	/** \brief Extract m_poolGUID from m_poolConfig; false on failure. */
	bool			 VdevLookupPoolGuid();

	DevdCtl::Guid		 m_poolGUID;	/* GUID of owning pool */
	DevdCtl::Guid		 m_vdevGUID;	/* GUID of this vdev */
	nvlist_t		*m_poolConfig;	/* pool config (may be NULL) */
	nvlist_t		*m_config;	/* vdev config (may be NULL) */
};
//- Special objects -----------------------------------------------------------
/* Sentinel returned where no real vdev exists; DoesNotExist() is true. */
extern Vdev NonexistentVdev;

//- Vdev Inline Public Methods ------------------------------------------------
inline Vdev::~Vdev()
{
}

/* Accessor: GUID of the owning pool (invalid for available spares). */
inline DevdCtl::Guid
Vdev::PoolGUID() const
{
	return (m_poolGUID);
}

/* Accessor: this vdev's own GUID. */
inline DevdCtl::Guid
Vdev::GUID() const
{
	return (m_vdevGUID);
}

/* Accessor: raw pool configuration nvlist; may be NULL. */
inline nvlist_t *
Vdev::PoolConfig() const
{
	return (m_poolConfig);
}

/* Accessor: raw vdev configuration nvlist; may be NULL. */
inline nvlist_t *
Vdev::Config() const
{
	return (m_config);
}

/* A Vdev with no configuration is the NonexistentVdev sentinel. */
inline bool
Vdev::DoesNotExist() const
{
	return (m_config == NULL);
}
#endif /* _VDEV_H_ */

172
cmd/zfsd/vdev_iterator.cc Normal file
View File

@ -0,0 +1,172 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file vdev_iterator.cc
*
* Implementation of the VdevIterator class.
*/
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/fs/zfs.h>
#include <stdint.h>
#include <syslog.h>
#include <libzfs.h>
#include <list>
#include <string>
#include <devdctl/exception.h>
#include <devdctl/guid.h>
#include "vdev.h"
#include "vdev_iterator.h"
#include "zfsd_exception.h"
/*============================ Namespace Control =============================*/
using DevdCtl::Guid;
/*=========================== Class Implementations ==========================*/
/*------------------------------- VdevIterator -------------------------------*/
/*
 * Construct an iterator over an open pool handle; the pool's current
 * configuration is captured and the cursor is primed via Reset().
 */
VdevIterator::VdevIterator(zpool_handle_t *pool)
	: m_poolConfig(zpool_get_config(pool, NULL))
{
	Reset();
}

/*
 * Construct an iterator over an already-extracted pool configuration
 * nvlist; the cursor is primed via Reset().
 */
VdevIterator::VdevIterator(nvlist_t *poolConfig)
	: m_poolConfig(poolConfig)
{
	Reset();
}
void
VdevIterator::Reset()
{
nvlist_t *rootVdev;
nvlist **cache_child;
nvlist **spare_child;
int result;
uint_t cache_children;
uint_t spare_children;
result = nvlist_lookup_nvlist(m_poolConfig,
ZPOOL_CONFIG_VDEV_TREE,
&rootVdev);
if (result != 0)
throw ZfsdException(m_poolConfig, "Unable to extract "
"ZPOOL_CONFIG_VDEV_TREE from pool.");
m_vdevQueue.assign(1, rootVdev);
result = nvlist_lookup_nvlist_array(rootVdev,
ZPOOL_CONFIG_L2CACHE,
&cache_child,
&cache_children);
if (result == 0)
for (uint_t c = 0; c < cache_children; c++)
m_vdevQueue.push_back(cache_child[c]);
result = nvlist_lookup_nvlist_array(rootVdev,
ZPOOL_CONFIG_SPARES,
&spare_child,
&spare_children);
if (result == 0)
for (uint_t c = 0; c < spare_children; c++)
m_vdevQueue.push_back(spare_child[c]);
}
/*
 * Return the configuration of the next leaf vdev and advance the
 * cursor, or NULL when the traversal is exhausted.  Interior vdevs are
 * expanded in place: their children are pushed onto the head of the
 * queue, yielding a depth-first traversal.
 */
nvlist_t *
VdevIterator::Next()
{
	nvlist_t *vdevConfig;

	if (m_vdevQueue.empty())
		return (NULL);

	for (;;) {
		nvlist_t **vdevChildren;
		int        result;
		u_int      numChildren;

		vdevConfig = m_vdevQueue.front();
		m_vdevQueue.pop_front();

		/* Expand non-leaf vdevs. */
		result = nvlist_lookup_nvlist_array(vdevConfig,
		    ZPOOL_CONFIG_CHILDREN, &vdevChildren, &numChildren);
		if (result != 0) {
			/* leaf vdev */
			break;
		}

		/*
		 * Insert children at the head of the queue to effect a
		 * depth first traversal of the tree.
		 */
		m_vdevQueue.insert(m_vdevQueue.begin(), vdevChildren,
		    vdevChildren + numChildren);
	}

	return (vdevConfig);
}
/*
 * Invoke callBack once per leaf vdev in the pool, passing callBackArg
 * through unchanged.  The traversal restarts from the beginning and
 * stops early if the callback returns true.
 */
void
VdevIterator::Each(VdevCallback_t *callBack, void *callBackArg)
{
	nvlist_t *config;

	Reset();
	for (config = Next(); config != NULL; config = Next()) {
		Vdev vdev(m_poolConfig, config);

		if (callBack(vdev, callBackArg))
			break;
	}
}
/*
 * Search the pool for a leaf vdev with the given GUID, restarting the
 * traversal from the beginning.  Returns the matching vdev's
 * configuration nvlist, or NULL when no leaf carries that GUID.
 */
nvlist_t *
VdevIterator::Find(Guid vdevGUID)
{
	nvlist_t *config;

	Reset();
	for (config = Next(); config != NULL; config = Next()) {
		Vdev vdev(m_poolConfig, config);

		if (vdev.GUID() == vdevGUID)
			return (config);
	}
	return (NULL);
}

123
cmd/zfsd/vdev_iterator.h Normal file
View File

@ -0,0 +1,123 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file vdev_iterator.h
*
* VdevIterator class definition.
*
* Header requirements:
*
* #include <list>
*/
#ifndef _VDEV_ITERATOR_H_
#define _VDEV_ITERATOR_H_
/*=========================== Forward Declarations ===========================*/
struct zpool_handle;
typedef struct zpool_handle zpool_handle_t;
struct nvlist;
typedef struct nvlist nvlist_t;
class Vdev;
/*============================= Class Definitions ============================*/
/*------------------------------- VdevIterator -------------------------------*/
/* Per-vdev callback signature used by VdevIterator::Each(). */
typedef bool VdevCallback_t(Vdev &vdev, void *cbArg);

/**
 * \brief VdevIterator provides mechanisms for traversing and searching
 *        the leaf vdevs contained in a ZFS pool configuration.
 */
class VdevIterator
{
public:
	/**
	 * \brief Instantiate a VdevIterator for the given ZFS pool.
	 *
	 * \param pool  The ZFS pool to traverse/search.
	 */
	VdevIterator(zpool_handle_t *pool);

	/**
	 * \brief Instantiate a VdevIterator for the given ZFS pool.
	 *
	 * \param poolConfig  The configuration data for the ZFS pool
	 *                    to traverse/search.
	 */
	VdevIterator(nvlist_t *poolConfig);

	/**
	 * \brief Reset this iterator's cursor so that Next() will
	 *        report the first member of the pool.
	 */
	void		 Reset();

	/**
	 * \brief Report the leaf vdev at this iterator's cursor and increment
	 *        the cursor to the next leaf pool member.
	 */
	nvlist_t	*Next();

	/**
	 * \brief Traverse the entire pool configuration starting its
	 *        first member, returning the configuration data for the
	 *        vdev with the given vdev GUID if found.
	 *
	 * \param vdevGUID  The vdev GUID of the vdev object to find.
	 *
	 * \return  The configuration nvlist for the matching vdev if
	 *          found.  Otherwise NULL.
	 *
	 * Upon return, the VdevIterator's cursor points to the vdev just
	 * past the returned vdev or end() if no matching vdev is found.
	 */
	nvlist_t	*Find(DevdCtl::Guid vdevGUID);

	/**
	 * \brief Perform the specified operation on each leaf member of
	 *        a pool's vdev membership.
	 *
	 * \param cb     Callback function to execute for each member.
	 * \param cbArg  Argument to pass to cb.
	 */
	void		 Each(VdevCallback_t *cb, void *cbArg);

private:
	nvlist_t			*m_poolConfig;	/* pool config nvlist */
	std::list<nvlist_t *>		 m_vdevQueue;	/* traversal cursor */
};
#endif /* _VDEV_ITERATOR_H_ */

154
cmd/zfsd/zfsd.8 Normal file
View File

@ -0,0 +1,154 @@
.\"-
.\" Copyright (c) 2016 Allan Jude
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd April 18, 2020
.Dt ZFSD 8
.Os
.Sh NAME
.Nm zfsd
.Nd ZFS fault management daemon
.Sh SYNOPSIS
.Nm
.Op Fl d
.Sh DESCRIPTION
.Nm
attempts to resolve ZFS faults that the kernel can't resolve by itself.
It listens to
.Xr devctl 4
events, which are how the kernel notifies userland of events such as I/O
errors and disk removals.
.Nm
attempts to resolve these faults by activating or deactivating hot spares
and onlining offline vdevs.
.Pp
The following options are available:
.Bl -tag -width indent
.It Fl d
Run in the foreground instead of daemonizing.
.El
.Pp
System administrators never interact with
.Nm
directly.
Instead, they control its behavior indirectly through zpool configuration.
There are two ways to influence
.Nm :
assigning hotspares and setting pool properties.
Currently, only the
.Em autoreplace
property has any effect.
See
.Xr zpool 8
for details.
.Pp
.Nm
will attempt to resolve the following types of fault:
.Bl -tag -width a
.It device removal
When a leaf vdev disappears,
.Nm
will activate any available hotspare.
.It device arrival
When a new GEOM device appears,
.Nm
will attempt to read its ZFS label, if any.
If it matches a previously removed vdev on an active pool,
.Nm
will online it.
Once resilvering completes, any active hotspare will detach automatically.
.Pp
If the new device has no ZFS label but its physical path matches the
physical path of a previously removed vdev on an active pool, and that
pool has the autoreplace property set, then
.Nm
will replace the missing vdev with the newly arrived device.
Once resilvering completes, any active hotspare will detach automatically.
.It vdev degrade or fault events
If a vdev becomes degraded or faulted,
.Nm
will activate any available hotspare.
.It I/O errors
If a leaf vdev generates more than 50 I/O errors in a 60 second period, then
.Nm
will mark that vdev as
.Em FAULTED .
ZFS will no longer issue any I/Os to it.
.Nm
will activate a hotspare if one is available.
.It Checksum errors
If a leaf vdev generates more than 50 checksum errors in a 60 second
period, then
.Nm
will mark that vdev as
.Em DEGRADED .
ZFS will still use it, but zfsd will activate a spare anyway.
.It Spare addition
If the system administrator adds a hotspare to a pool that is already degraded,
.Nm
will activate the spare.
.It Resilver complete
.Nm
will detach any hotspare once a permanent replacement finishes resilvering.
.It Physical path change
If the physical path of an existing disk changes,
.Nm
will attempt to replace any missing disk with the same physical path,
if its pool's autoreplace property is set.
.El
.Pp
.Nm
will log interesting events and its actions to syslog with facility
.Em daemon
and identity
.Em zfsd .
.Sh FILES
.Bl -tag -width a -compact
.It Pa /var/db/zfsd/cases
When
.Nm
exits, it serializes any unresolved casefiles here,
then reads them back in when next it starts up.
.El
.Sh SEE ALSO
.Xr devctl 4 ,
.Xr zpool 8
.Sh HISTORY
.Nm
first appeared in
.Fx 11.0 .
.Sh AUTHORS
.Nm
was originally written by
.An Justin Gibbs Aq Mt gibbs@FreeBSD.org
and
.An Alan Somers Aq Mt asomers@FreeBSD.org
.Sh TODO
In the future,
.Nm
should be able to resume a pool that became suspended due to device
removals, if enough missing devices have returned.

449
cmd/zfsd/zfsd.cc Normal file
View File

@ -0,0 +1,449 @@
/*-
* Copyright (c) 2011, 2012, 2013, 2014, 2015, 2016 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*/
/**
* \file zfsd.cc
*
* The ZFS daemon consumes kernel devdctl(4) event data via devd(8)'s
* unix domain socket in order to react to system changes that impact
* the function of ZFS storage pools. The goal of this daemon is to
* provide similar functionality to the Solaris ZFS Diagnostic Engine
* (zfs-diagnosis), the Solaris ZFS fault handler (zfs-retire), and
* the Solaris ZFS vdev insertion agent (zfs-mod sysevent handler).
*/
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/param.h>
#include <sys/fs/zfs.h>
#include <err.h>
#include <fcntl.h>
#include <libgeom.h>
#include <libutil.h>
#include <poll.h>
#include <syslog.h>
#include <libzfs.h>
#include <list>
#include <map>
#include <string>
#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>
#include "callout.h"
#include "vdev_iterator.h"
#include "zfsd_event.h"
#include "case_file.h"
#include "vdev.h"
#include "vdev_iterator.h"
#include "zfsd.h"
#include "zfsd_exception.h"
#include "zpool_list.h"
__FBSDID("$FreeBSD$");
/*================================== Macros ==================================*/
#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
/*============================ Namespace Control =============================*/
using DevdCtl::Event;
using DevdCtl::EventFactory;
using DevdCtl::EventList;
/*================================ Global Data ===============================*/
/* Nonzero when zfsd runs in the foreground (-d); gates daemon(3). */
int g_debug = 0;
/* Shared libzfs handle, initialized in the ZfsDaemon constructor. */
libzfs_handle_t *g_zfsHandle;
/*--------------------------------- ZfsDaemon --------------------------------*/
//- ZfsDaemon Static Private Data ----------------------------------------------
/* Singleton instance pointer; set by the constructor. */
ZfsDaemon	 *ZfsDaemon::s_theZfsDaemon;
/* Set by SIGINFO to request a casefile/event dump on next loop pass. */
bool		  ZfsDaemon::s_logCaseFiles;
/* Set by SIGINT/SIGTERM to terminate the event loop. */
bool		  ZfsDaemon::s_terminateEventLoop;
char		  ZfsDaemon::s_pidFilePath[] = "/var/run/zfsd.pid";
pidfh		 *ZfsDaemon::s_pidFH;
/* Self-pipe used to wake poll(2) from signal handlers. */
int		  ZfsDaemon::s_signalPipeFD[2];
bool		  ZfsDaemon::s_systemRescanRequested(false);
/* Maps devdctl event (type, system) pairs to event-object builders. */
EventFactory::Record ZfsDaemon::s_registryEntries[] =
{
	{ Event::NOTIFY, "GEOM", &GeomEvent::Builder },
	{ Event::NOTIFY, "ZFS", &ZfsEvent::Builder }
};
//- ZfsDaemon Static Public Methods --------------------------------------------
/* Return the singleton ZfsDaemon instance (created by Run()). */
ZfsDaemon &
ZfsDaemon::Get()
{
	return (*s_theZfsDaemon);
}

/*
 * Wake the poll(2)-based event loop by writing a byte into the signal
 * pipe.  Async-signal-safe; the byte's value is irrelevant and the
 * write result is deliberately ignored (the pipe is non-blocking and a
 * full pipe already guarantees a wakeup).
 */
void
ZfsDaemon::WakeEventLoop()
{
	write(s_signalPipeFD[1], "+", 1);
}

/*
 * Request a full system rescan on the next pass of the event loop and
 * wake the loop so the request is serviced promptly.
 */
void
ZfsDaemon::RequestSystemRescan()
{
	s_systemRescanRequested = true;
	ZfsDaemon::WakeEventLoop();
}
/*
 * Main entry point: construct the daemon and loop, (re)connecting to
 * devd and processing events until a quit signal sets
 * s_terminateEventLoop.  A failed devd connection is retried every 30
 * seconds; DevdCtl exceptions are logged and the loop continues.
 */
void
ZfsDaemon::Run()
{
	ZfsDaemon daemon;

	while (s_terminateEventLoop == false) {
		try {
			/* Drop any stale connection before reconnecting. */
			daemon.DisconnectFromDevd();
			if (daemon.ConnectToDevd() == false) {
				sleep(30);
				continue;
			}

			/* Catch up on state changed while disconnected. */
			daemon.DetectMissedEvents();

			daemon.EventLoop();
		} catch (const DevdCtl::Exception &exp) {
			exp.Log();
		}
	}
	daemon.DisconnectFromDevd();
}
//- ZfsDaemon Private Methods --------------------------------------------------
/*
 * Construct the singleton daemon: set up the self-pipe for signal-safe
 * event-loop wakeups, install signal handlers, initialize libzfs,
 * callouts, syslog and the PID file, then daemonize (unless -d was
 * given) and record the final PID.  Ordering matters: the PID file is
 * opened before daemon(3) (so startup errors reach the invoking tty)
 * and updated after (so it holds the child's PID).  Any failure is
 * fatal via errx(3).
 */
ZfsDaemon::ZfsDaemon()
 : Consumer(/*defBuilder*/NULL, s_registryEntries,
	    NUM_ELEMENTS(s_registryEntries))
{
	if (s_theZfsDaemon != NULL)
		errx(1, "Multiple ZfsDaemon instances created. Exiting");

	s_theZfsDaemon = this;

	if (pipe(s_signalPipeFD) != 0)
		errx(1, "Unable to allocate signal pipe. Exiting");

	/* Both pipe ends non-blocking: handlers must never block. */
	if (fcntl(s_signalPipeFD[0], F_SETFL, O_NONBLOCK) == -1)
		errx(1, "Unable to set pipe as non-blocking. Exiting");

	if (fcntl(s_signalPipeFD[1], F_SETFL, O_NONBLOCK) == -1)
		errx(1, "Unable to set pipe as non-blocking. Exiting");

	signal(SIGHUP,  ZfsDaemon::RescanSignalHandler);
	signal(SIGINFO, ZfsDaemon::InfoSignalHandler);
	signal(SIGINT,  ZfsDaemon::QuitSignalHandler);
	signal(SIGTERM, ZfsDaemon::QuitSignalHandler);
	signal(SIGUSR1, ZfsDaemon::RescanSignalHandler);

	g_zfsHandle = libzfs_init();
	if (g_zfsHandle == NULL)
		errx(1, "Unable to initialize ZFS library. Exiting");

	Callout::Init();
	InitializeSyslog();
	OpenPIDFile();

	if (g_debug == 0)
		daemon(0, 0);

	UpdatePIDFile();
}
/*
 * Tear down the daemon: serialize/purge in-core casefiles and release
 * the PID file.
 */
ZfsDaemon::~ZfsDaemon()
{
	PurgeCaseFiles();
	ClosePIDFile();
}

/* Discard all in-core casefile state (delegates to CaseFile). */
void
ZfsDaemon::PurgeCaseFiles()
{
	CaseFile::PurgeAll();
}
/*
 * VdevIterator::Each() callback: open a casefile for any vdev that is
 * not healthy.  Always returns false so the traversal visits every
 * vdev.
 */
bool
ZfsDaemon::VdevAddCaseFile(Vdev &vdev, void *cbArg)
{
	if (vdev.State() == VDEV_STATE_HEALTHY)
		return (/*break early*/false);

	CaseFile::Create(vdev);
	return (/*break early*/false);
}
/*
 * Rebuild casefile state from scratch: create casefiles for every
 * unhealthy vdev in every imported pool, restore any casefiles
 * serialized by a previous zfsd instance, then synthesize a
 * config_sync event per pool to force immediate reevaluation of each
 * casefile against current pool state.
 *
 * Fix: pass sizeof(evString) to snprintf instead of repeating the
 * literal 160, so the bound cannot drift from the buffer declaration.
 */
void
ZfsDaemon::BuildCaseFiles()
{
	ZpoolList zpl;
	ZpoolList::iterator pool;

	/* Add CaseFiles for vdevs with issues. */
	for (pool = zpl.begin(); pool != zpl.end(); pool++)
		VdevIterator(*pool).Each(VdevAddCaseFile, NULL);

	/* De-serialize any saved cases. */
	CaseFile::DeSerialize();

	/* Simulate config_sync events to force CaseFile reevaluation */
	for (pool = zpl.begin(); pool != zpl.end(); pool++) {
		char evString[160];
		Event *event;
		nvlist_t *config;
		uint64_t poolGUID;
		const char *poolname;

		poolname = zpool_get_name(*pool);
		config = zpool_get_config(*pool, NULL);
		if (config == NULL) {
			syslog(LOG_ERR, "ZFSDaemon::BuildCaseFiles: Could not "
			    "find pool config for pool %s", poolname);
			continue;
		}
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &poolGUID) != 0) {
			syslog(LOG_ERR, "ZFSDaemon::BuildCaseFiles: Could not "
			    "find pool guid for pool %s", poolname);
			continue;
		}

		snprintf(evString, sizeof(evString),
		    "!system=ZFS subsystem=ZFS "
		    "type=misc.fs.zfs.config_sync sub_type=synthesized "
		    "pool_name=%s pool_guid=%" PRIu64 "\n", poolname, poolGUID);
		event = Event::CreateEvent(GetFactory(), string(evString));
		if (event != NULL) {
			event->Process();
			delete event;
		}
	}
}
/*
 * Walk the GEOM tree and synthesize a DEVFS CREATE event for every
 * provider, so devices that appeared while zfsd was not listening are
 * (re)considered.  Events that are not fully processed are saved for
 * later replay.
 */
void
ZfsDaemon::RescanSystem()
{
	struct gmesh	  mesh;
	struct gclass	 *mp;
	struct ggeom	 *gp;
	struct gprovider *pp;
	int		  result;

	/*
	 * The devdctl system doesn't replay events for new consumers
	 * of the interface.  Emit manufactured DEVFS arrival events
	 * for any devices that already existed before we started or
	 * during periods where we've lost our connection to devd.
	 */
	result = geom_gettree(&mesh);
	if (result != 0) {
		syslog(LOG_ERR, "ZfsDaemon::RescanSystem: "
		    "geom_gettree failed with error %d\n", result);
		return;
	}

	const string evStart("!system=DEVFS subsystem=CDEV type=CREATE "
	    "sub_type=synthesized cdev=");
	LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
		LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
			LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
				Event *event;

				string evString(evStart + pp->lg_name + "\n");
				event = Event::CreateEvent(GetFactory(),
				    evString);
				if (event != NULL) {
					/* Keep unhandled events for replay. */
					if (event->Process())
						SaveEvent(*event);
					delete event;
				}
			}
		}
	}
	geom_deletetree(&mesh);
}
/*
 * Re-derive daemon state after a (re)connection to devd: discard stale
 * casefiles and queued events, rebuild casefiles from current pool
 * state, and repeat if new events arrived mid-scan.  Finish with a
 * GEOM rescan to pick up devices that appeared while disconnected.
 */
void
ZfsDaemon::DetectMissedEvents()
{
	do {
		PurgeCaseFiles();

		/*
		 * Discard any events waiting for us.  We don't know
		 * if they still apply to the current state of the
		 * system.
		 */
		FlushEvents();

		BuildCaseFiles();

		/*
		 * If the system state has changed during our
		 * interrogation, start over.
		 */
	} while (s_terminateEventLoop == false && EventsPending());

	RescanSystem();
}
/*
 * Core event loop: poll the devd socket and the signal self-pipe,
 * dispatching devd events, expiring callouts, and servicing signal
 * requests (SIGINFO dump, rescan) until termination is requested or
 * the devd connection drops (POLLERR/POLLHUP), after which Run()
 * reconnects.
 */
void
ZfsDaemon::EventLoop()
{
	while (s_terminateEventLoop == false) {
		struct pollfd fds[2];
		int	      result;

		/* SIGINFO was received: dump current state to syslog. */
		if (s_logCaseFiles == true) {
			EventList::iterator event(m_unconsumedEvents.begin());
			s_logCaseFiles = false;
			CaseFile::LogAll();
			while (event != m_unconsumedEvents.end())
				(*event++)->Log(LOG_INFO);
		}

		Callout::ExpireCallouts();

		/* Wait for data. */
		fds[0].fd      = m_devdSockFD;
		fds[0].events  = POLLIN;
		fds[0].revents = 0;
		fds[1].fd      = s_signalPipeFD[0];
		fds[1].events  = POLLIN;
		fds[1].revents = 0;
		result = poll(fds, NUM_ELEMENTS(fds), /*timeout*/INFTIM);
		if (result == -1) {
			if (errno == EINTR)
				continue;
			else
				err(1, "Polling for devd events failed");
		} else if (result == 0) {
			errx(1, "Unexpected result of 0 from poll. Exiting");
		}

		if ((fds[0].revents & POLLIN) != 0)
			ProcessEvents();

		if ((fds[1].revents & POLLIN) != 0) {
			static char discardBuf[128];

			/*
			 * This pipe exists just to close the signal
			 * race.  Its contents are of no interest to
			 * us, but we must ensure that future signals
			 * have space in the pipe to write.
			 */
			while (read(s_signalPipeFD[0], discardBuf,
			    sizeof(discardBuf)) > 0)
				;
		}

		if (s_systemRescanRequested == true) {
			s_systemRescanRequested = false;
			syslog(LOG_INFO, "System Rescan request processed.");
			RescanSystem();
		}

		if ((fds[0].revents & POLLERR) != 0) {
			/* Give up on this connection; Run() will retry. */
			syslog(LOG_INFO, "POLLERROR detected on devd socket.");
			break;
		}

		if ((fds[0].revents & POLLHUP) != 0) {
			syslog(LOG_INFO, "POLLHUP detected on devd socket.");
			break;
		}
	}
}
//- ZfsDaemon static Private Methods --------------------------------------------
/*
 * Signal handler: request a dump of the daemon's current state.
 * Only sets a flag here; the actual logging is performed later by
 * EventLoop().
 */
void
ZfsDaemon::InfoSignalHandler(int)
{
	s_logCaseFiles = true;
	ZfsDaemon::WakeEventLoop();
}
/*
 * Signal handler: schedule a rescan of the system for replacement
 * device candidates during the next event loop iteration.
 */
void
ZfsDaemon::RescanSignalHandler(int)
{
	RequestSystemRescan();
}
/*
 * Signal handler: request a graceful shutdown.  Sets the termination
 * flag and wakes the event loop so it notices promptly.
 */
void
ZfsDaemon::QuitSignalHandler(int)
{
	s_terminateEventLoop = true;
	ZfsDaemon::WakeEventLoop();
}
/*
 * Open and lock the daemon's PID file.  If another instance already
 * holds it (EEXIST), exit; any other open failure is merely warned
 * about, and the daemon runs without a PID file.
 */
void
ZfsDaemon::OpenPIDFile()
{
	pid_t otherPID;

	s_pidFH = pidfile_open(s_pidFilePath, 0600, &otherPID);
	if (s_pidFH == NULL) {
		if (errno == EEXIST)
			errx(1, "already running as PID %d. Exiting", otherPID);
		warn("cannot open PID file");
	}
}
/*
 * Record our PID in the PID file, if one was successfully opened.
 */
void
ZfsDaemon::UpdatePIDFile()
{
	if (s_pidFH != NULL)
		pidfile_write(s_pidFH);
}
/*
 * Remove the PID file and release its lock, if one was opened.
 */
void
ZfsDaemon::ClosePIDFile()
{
	if (s_pidFH != NULL)
		pidfile_remove(s_pidFH);
}
/*
 * Configure syslog(3): log as "zfsd" to the daemon facility, opening
 * the connection immediately (LOG_NDELAY).
 */
void
ZfsDaemon::InitializeSyslog()
{
	openlog("zfsd", LOG_NDELAY, LOG_DAEMON);
}

228
cmd/zfsd/zfsd.h Normal file
View File

@ -0,0 +1,228 @@
/*-
* Copyright (c) 2011, 2012, 2013, 2014 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file zfsd.h
*
 * Class definitions and supporting data structures for the ZFS fault
* management daemon.
*
* Header requirements:
*
* #include <sys/fs/zfs.h>
*
* #include <libzfs.h>
*
* #include <list>
* #include <map>
* #include <string>
*
* #include <devdctl/guid.h>
* #include <devdctl/event.h>
* #include <devdctl/event_factory.h>
* #include <devdctl/consumer.h>
*
* #include "vdev_iterator.h"
*/
#ifndef _ZFSD_H_
#define _ZFSD_H_
/*=========================== Forward Declarations ===========================*/
struct pidfh;
struct zpool_handle;
typedef struct zpool_handle zpool_handle_t;
struct zfs_handle;
typedef struct libzfs_handle libzfs_handle_t;
struct nvlist;
typedef struct nvlist nvlist_t;
typedef int LeafIterFunc(zpool_handle_t *, nvlist_t *, void *);
/*================================ Global Data ===============================*/
extern int g_debug;
extern libzfs_handle_t *g_zfsHandle;
/*============================= Class Definitions ============================*/
/*--------------------------------- ZfsDaemon --------------------------------*/
/**
* Static singleton orchestrating the operations of the ZFS daemon program.
*/
class ZfsDaemon : public DevdCtl::Consumer
{
public:
	/** Return the ZfsDaemon singleton. */
	static ZfsDaemon &Get();

	/**
	 * Used by signal handlers to ensure, in a race free way, that
	 * the event loop will perform at least one more full loop
	 * before sleeping again.
	 */
	static void WakeEventLoop();

	/**
	 * Schedules a rescan of devices in the system for potential
	 * candidates to replace a missing vdev.  The scan is performed
	 * during the next run of the event loop.
	 */
	static void RequestSystemRescan();

	/** Daemonize and perform all functions of the ZFS daemon. */
	static void Run();

private:
	ZfsDaemon();
	~ZfsDaemon();

	static VdevCallback_t VdevAddCaseFile;

	/** Purge our cache of outstanding ZFS issues in the system. */
	void PurgeCaseFiles();

	/** Build a cache of outstanding ZFS issues in the system. */
	void BuildCaseFiles();

	/**
	 * Iterate over all known issues and attempt to solve them
	 * given resources currently available in the system.
	 */
	void RescanSystem();

	/**
	 * Interrogate the system looking for previously unknown
	 * faults that occurred either before ZFSD was started,
	 * or during a period of lost communication with Devd.
	 */
	void DetectMissedEvents();

	/**
	 * Wait for and process event source activity.
	 */
	void EventLoop();

	/**
	 * Signal handler for which our response is to
	 * log the current state of the daemon.
	 *
	 * \param sigNum  The signal caught.
	 */
	static void InfoSignalHandler(int sigNum);

	/**
	 * Signal handler for which our response is to
	 * request a case rescan.
	 *
	 * \param sigNum  The signal caught.
	 */
	static void RescanSignalHandler(int sigNum);

	/**
	 * Signal handler for which our response is to
	 * gracefully terminate.
	 *
	 * \param sigNum  The signal caught.
	 */
	static void QuitSignalHandler(int sigNum);

	/**
	 * Open and lock our PID file.
	 */
	static void OpenPIDFile();

	/**
	 * Update our PID file with our PID.
	 */
	static void UpdatePIDFile();

	/**
	 * Close and release the lock on our PID file.
	 */
	static void ClosePIDFile();

	/**
	 * Perform syslog configuration.
	 */
	static void InitializeSyslog();

	/** The single instance of the ZfsDaemon returned by Get(). */
	static ZfsDaemon *s_theZfsDaemon;

	/**
	 * Set to true when the daemon has been signaled to log its
	 * current state (open case files and unconsumed events).
	 */
	static bool s_logCaseFiles;

	/**
	 * Set to true when our program is signaled to
	 * gracefully exit.
	 */
	static bool s_terminateEventLoop;

	/**
	 * The canonical path and file name of zfsd's PID file.
	 */
	static char s_pidFilePath[];

	/**
	 * Control structure for PIDFILE(3) API.
	 */
	static pidfh *s_pidFH;

	/**
	 * Pipe file descriptors used to close races with our
	 * signal handlers.
	 */
	static int s_signalPipeFD[2];

	/**
	 * Flag controlling a rescan from ZFSD's event loop of all
	 * GEOM providers in the system to find candidates for solving
	 * cases.
	 */
	static bool s_systemRescanRequested;

	/**
	 * Flag controlling whether events can be queued.  This boolean
	 * is set during event replay to ensure that events for pools or
	 * devices no longer in the system are not retained forever.
	 */
	static bool s_consumingEvents;

	/** Entries registered with the DevdCtl event factory. */
	static DevdCtl::EventFactory::Record s_registryEntries[];
};
#endif /* _ZFSD_H_ */

488
cmd/zfsd/zfsd_event.cc Normal file
View File

@ -0,0 +1,488 @@
/*-
* Copyright (c) 2011, 2012, 2013, 2014, 2016 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*/
/**
* \file zfsd_event.cc
*/
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/time.h>
#include <sys/fs/zfs.h>
#include <sys/vdev_impl.h>
#include <syslog.h>
#include <libzfs.h>
#include <libzutil.h>
/*
* Undefine flush, defined by cpufunc.h on sparc64, because it conflicts with
* C++ flush methods
*/
#undef flush
#undef __init
#include <list>
#include <map>
#include <sstream>
#include <string>
#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>
#include "callout.h"
#include "vdev_iterator.h"
#include "zfsd_event.h"
#include "case_file.h"
#include "vdev.h"
#include "zfsd.h"
#include "zfsd_exception.h"
#include "zpool_list.h"
__FBSDID("$FreeBSD$");
/*============================ Namespace Control =============================*/
using DevdCtl::Event;
using DevdCtl::Guid;
using DevdCtl::NVPairMap;
using std::stringstream;
/*=========================== Class Implementations ==========================*/
/*-------------------------------- GeomEvent --------------------------------*/
//- GeomEvent Static Public Methods -------------------------------------------
/*
 * Factory method creating a GeomEvent from parsed devd event data;
 * registered with the DevdCtl event factory.
 */
Event *
GeomEvent::Builder(Event::Type type,
		   NVPairMap &nvPairs,
		   const string &eventString)
{
	return (new GeomEvent(type, nvPairs, eventString));
}
//- GeomEvent Virtual Public Methods ------------------------------------------
/* Return a heap-allocated copy of this event. */
Event *
GeomEvent::DeepCopy() const
{
	return (new GeomEvent(*this));
}
/*
 * Consume a GEOM event: for device arrivals and physical-path
 * changes, attempt to use the device to resolve an open case file.
 * Always returns false (GEOM events are never queued for replay).
 */
bool
GeomEvent::Process() const
{
	/*
	 * We only use GEOM events to repair damaged pools. So return early if
	 * there are no damaged pools
	 */
	if (CaseFile::Empty())
		return (false);

	/*
	 * We are only concerned with arrivals and physical path changes,
	 * because those can be used to satisfy online and autoreplace
	 * operations
	 */
	if (Value("type") != "GEOM::physpath" && Value("type") != "CREATE")
		return (false);

	/* Log the event since it is of interest. */
	Log(LOG_INFO);

	string devPath;
	if (!DevPath(devPath))
		return (false);

	int devFd(open(devPath.c_str(), O_RDONLY));
	if (devFd == -1)
		return (false);

	bool inUse;
	bool degraded;
	nvlist_t *devLabel(ReadLabel(devFd, inUse, degraded));

	string physPath;
	bool havePhysPath(PhysicalPath(physPath));

	string devName;
	DevName(devName);
	close(devFd);

	/*
	 * NOTE(review): devLabel from ReadLabel() is not nvlist_free()d on
	 * any path below — confirm whether OnlineByLabel()/Vdev take
	 * ownership, or whether this leaks the label nvlist.
	 */
	if (inUse && devLabel != NULL) {
		OnlineByLabel(devPath, physPath, devLabel);
	} else if (degraded) {
		syslog(LOG_INFO, "%s is marked degraded. Ignoring "
		    "as a replace by physical path candidate.\n",
		    devName.c_str());
	} else if (havePhysPath) {
		/*
		 * TODO: attempt to resolve events using every casefile
		 * that matches this physpath
		 */
		CaseFile *caseFile(CaseFile::Find(physPath));
		if (caseFile != NULL) {
			syslog(LOG_INFO,
			    "Found CaseFile(%s:%s:%s) - ReEvaluating\n",
			    caseFile->PoolGUIDString().c_str(),
			    caseFile->VdevGUIDString().c_str(),
			    zpool_state_to_name(caseFile->VdevState(),
			    VDEV_AUX_NONE));
			caseFile->ReEvaluate(devPath, physPath, /*vdev*/NULL);
		}
	}
	return (false);
}
//- GeomEvent Protected Methods -----------------------------------------------
/* Constructor: forwards all event data to the DevdCtl base class. */
GeomEvent::GeomEvent(Event::Type type, NVPairMap &nvpairs,
		     const string &eventString)
 : DevdCtl::GeomEvent(type, nvpairs, eventString)
{
}
/* Copy constructor used by DeepCopy(). */
GeomEvent::GeomEvent(const GeomEvent &src)
 : DevdCtl::GeomEvent::GeomEvent(src)
{
}
/*
 * Read the ZFS label from the device open on devFd.
 *
 * Sets inUse when zpool_in_use() reports the device as part of an
 * active or potentially active pool, and degraded when the label's
 * vdev state is not VDEV_STATE_HEALTHY.  Returns the label nvlist
 * only when zpool_in_use() succeeds, all VDEV_LABELS copies of the
 * label are present, and the label parses as a Vdev; otherwise
 * returns NULL.
 */
nvlist_t *
GeomEvent::ReadLabel(int devFd, bool &inUse, bool &degraded)
{
	pool_state_t poolState;
	char	    *poolName;
	boolean_t    b_inuse;
	int	     nlabels;

	inUse    = false;
	degraded = false;
	poolName = NULL;
	if (zpool_in_use(g_zfsHandle, devFd, &poolState,
	    &poolName, &b_inuse) == 0) {
		nvlist_t *devLabel = NULL;

		inUse = b_inuse == B_TRUE;
		/* zpool_in_use() allocates the pool name; we don't need it. */
		if (poolName != NULL)
			free(poolName);

		if (zpool_read_label(devFd, &devLabel, &nlabels) != 0)
			return (NULL);
		/*
		 * If we find a disk with fewer than the maximum number of
		 * labels, it might be the whole disk of a partitioned disk
		 * where ZFS resides on a partition.  In that case, we should
		 * do nothing and wait for the partition to appear.  Or, the
		 * disk might be damaged.  In that case, zfsd should do
		 * nothing and wait for the sysadmin to decide.
		 */
		if (nlabels != VDEV_LABELS || devLabel == NULL) {
			nvlist_free(devLabel);
			return (NULL);
		}

		try {
			Vdev vdev(devLabel);
			degraded = vdev.State() != VDEV_STATE_HEALTHY;
			return (devLabel);
		} catch (ZfsdException &exp) {
			/* Label didn't parse; log with device context. */
			string devName = fdevname(devFd);
			string devPath = _PATH_DEV + devName;
			string context("GeomEvent::ReadLabel: "
			    + devPath + ": ");

			exp.GetString().insert(0, context);
			exp.Log();
			nvlist_free(devLabel);
		}
	}
	return (NULL);
}
bool
GeomEvent::OnlineByLabel(const string &devPath, const string& physPath,
nvlist_t *devConfig)
{
bool ret = false;
try {
CaseFileList case_list;
/*
* A device with ZFS label information has been
* inserted. If it matches a device for which we
* have a case, see if we can solve that case.
*/
syslog(LOG_INFO, "Interrogating VDEV label for %s\n",
devPath.c_str());
Vdev vdev(devConfig);
CaseFile::Find(vdev.PoolGUID(),vdev.GUID(), case_list);
for (CaseFileList::iterator curr = case_list.begin();
curr != case_list.end(); curr++) {
ret |= (*curr)->ReEvaluate(devPath, physPath, &vdev);
}
return (ret);
} catch (ZfsdException &exp) {
string context("GeomEvent::OnlineByLabel: " + devPath + ": ");
exp.GetString().insert(0, context);
exp.Log();
}
return (ret);
}
/*--------------------------------- ZfsEvent ---------------------------------*/
//- ZfsEvent Static Public Methods ---------------------------------------------
/*
 * Factory method creating a ZfsEvent from parsed devd event data;
 * registered with the DevdCtl event factory.
 */
DevdCtl::Event *
ZfsEvent::Builder(Event::Type type, NVPairMap &nvpairs,
		  const string &eventString)
{
	return (new ZfsEvent(type, nvpairs, eventString));
}
//- ZfsEvent Virtual Public Methods --------------------------------------------
/* Return a heap-allocated copy of this event. */
Event *
ZfsEvent::DeepCopy() const
{
	return (new ZfsEvent(*this));
}
bool
ZfsEvent::Process() const
{
string logstr("");
if (!Contains("class") && !Contains("type")) {
syslog(LOG_ERR,
"ZfsEvent::Process: Missing class or type data.");
return (false);
}
/* On config syncs, replay any queued events first. */
if (Value("type").find("misc.fs.zfs.config_sync") == 0) {
/*
* Even if saved events are unconsumed the second time
* around, drop them. Any events that still can't be
* consumed are probably referring to vdevs or pools that
* no longer exist.
*/
ZfsDaemon::Get().ReplayUnconsumedEvents(/*discard*/true);
CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
}
if (Value("type").find("misc.fs.zfs.") == 0) {
/* Configuration changes, resilver events, etc. */
ProcessPoolEvent();
return (false);
}
if (!Contains("pool_guid") || !Contains("vdev_guid")) {
/* Only currently interested in Vdev related events. */
return (false);
}
CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
if (caseFile != NULL) {
Log(LOG_INFO);
syslog(LOG_INFO, "Evaluating existing case file\n");
caseFile->ReEvaluate(*this);
return (false);
}
/* Skip events that can't be handled. */
Guid poolGUID(PoolGUID());
/* If there are no replicas for a pool, then it's not manageable. */
if (Value("class").find("fs.zfs.vdev.no_replicas") == 0) {
stringstream msg;
msg << "No replicas available for pool " << poolGUID;
msg << ", ignoring";
Log(LOG_INFO);
syslog(LOG_INFO, "%s", msg.str().c_str());
return (false);
}
/*
* Create a case file for this vdev, and have it
* evaluate the event.
*/
ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
if (zpl.empty()) {
stringstream msg;
int priority = LOG_INFO;
msg << "ZfsEvent::Process: Event for unknown pool ";
msg << poolGUID << " ";
msg << "queued";
Log(LOG_INFO);
syslog(priority, "%s", msg.str().c_str());
return (true);
}
nvlist_t *vdevConfig = VdevIterator(zpl.front()).Find(VdevGUID());
if (vdevConfig == NULL) {
stringstream msg;
int priority = LOG_INFO;
msg << "ZfsEvent::Process: Event for unknown vdev ";
msg << VdevGUID() << " ";
msg << "queued";
Log(LOG_INFO);
syslog(priority, "%s", msg.str().c_str());
return (true);
}
Vdev vdev(zpl.front(), vdevConfig);
caseFile = &CaseFile::Create(vdev);
if (caseFile->ReEvaluate(*this) == false) {
stringstream msg;
int priority = LOG_INFO;
msg << "ZfsEvent::Process: Unconsumed event for vdev(";
msg << zpool_get_name(zpl.front()) << ",";
msg << vdev.GUID() << ") ";
msg << "queued";
Log(LOG_INFO);
syslog(priority, "%s", msg.str().c_str());
return (true);
}
return (false);
}
//- ZfsEvent Protected Methods -------------------------------------------------
/* Constructor: forwards all event data to the DevdCtl base class. */
ZfsEvent::ZfsEvent(Event::Type type, NVPairMap &nvpairs,
		   const string &eventString)
 : DevdCtl::ZfsEvent(type, nvpairs, eventString)
{
}
/* Copy constructor used by DeepCopy(). */
ZfsEvent::ZfsEvent(const ZfsEvent &src)
 : DevdCtl::ZfsEvent(src)
{
}
/*
 * Sometimes the kernel won't detach a spare when it is no longer needed.  This
 * can happen for example if a drive is removed, then either the pool is
 * exported or the machine is powered off, then the drive is reinserted, then
 * the machine is powered on or the pool is imported.  ZFSD must detach these
 * spares itself.
 */
void
ZfsEvent::CleanupSpares() const
{
	Guid poolGUID(PoolGUID());
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);

	/* Walk every vdev of this event's pool, detaching stale spares. */
	if (!zpl.empty()) {
		zpool_handle_t* hdl;

		hdl = zpl.front();
		VdevIterator(hdl).Each(TryDetach, (void*)hdl);
	}
}
/*
 * Handle a pool-level ("misc.fs.zfs.*") event: discard cases on pool
 * destruction, reevaluate any matching case, clean up spares after a
 * resilver completes, and request a system rescan when a non-degraded
 * vdev is removed.
 */
void
ZfsEvent::ProcessPoolEvent() const
{
	bool degradedDevice(false);

	/* The pool is destroyed.  Discard any open cases */
	if (Value("type") == "misc.fs.zfs.pool_destroy") {
		Log(LOG_INFO);
		CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
		return;
	}

	CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
	if (caseFile != NULL) {
		/*
		 * Remember whether the case's vdev was unhealthy before
		 * reevaluation, for the vdev_remove check below.
		 */
		if (caseFile->VdevState() != VDEV_STATE_UNKNOWN
		 && caseFile->VdevState() < VDEV_STATE_HEALTHY)
			degradedDevice = true;

		Log(LOG_INFO);
		caseFile->ReEvaluate(*this);
	}
	else if (Value("type") == "misc.fs.zfs.resilver_finish")
	{
		/*
		 * It's possible to get a resilver_finish event with no
		 * corresponding casefile.  For example, if a damaged pool were
		 * exported, repaired, then reimported.
		 */
		Log(LOG_INFO);
		CleanupSpares();
	}

	if (Value("type") == "misc.fs.zfs.vdev_remove"
	 && degradedDevice == false) {
		/* See if any other cases can make use of this device. */
		Log(LOG_INFO);
		ZfsDaemon::RequestSystemRescan();
	}
}
/*
 * VdevIterator callback: if vdev is a spare whose parent has at least
 * one healthy, non-spare child, detach the spare from the pool given
 * by cbArg.  Always returns false so iteration continues over any
 * remaining spares.
 */
bool
ZfsEvent::TryDetach(Vdev &vdev, void *cbArg)
{
	/*
	 * Outline:
	 * if this device is a spare, and its parent includes one healthy,
	 * non-spare child, then detach this device.
	 */
	if (!vdev.IsSpare())
		return (false);

	zpool_handle_t *poolHdl(static_cast<zpool_handle_t*>(cbArg));
	Vdev parent = vdev.Parent();
	std::list<Vdev> children = parent.Children();
	boolean_t haveHealthySibling = B_FALSE;

	/* Determine whether the parent should be cleaned up */
	for (std::list<Vdev>::iterator child = children.begin();
	     child != children.end(); ++child) {
		Vdev sibling = *child;

		if (!sibling.IsSpare() &&
		    sibling.State() == VDEV_STATE_HEALTHY) {
			haveHealthySibling = B_TRUE;
			break;
		}
	}

	if (haveHealthySibling) {
		syslog(LOG_INFO, "Detaching spare vdev %s from pool %s",
		    vdev.Path().c_str(), zpool_get_name(poolHdl));
		zpool_vdev_detach(poolHdl, vdev.Path().c_str());
	}

	/* Always return false, because there may be other spares to detach */
	return (false);
}

144
cmd/zfsd/zfsd_event.h Normal file
View File

@ -0,0 +1,144 @@
/*-
* Copyright (c) 2011, 2012, 2013, 2014, 2016 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
 * \file zfsd_event.h
*
* \brief Class hierarchy used to express events received via
* the devdctl API.
*
* Header requirements:
* #include <string>
* #include <list>
* #include <map>
*
* #include <devdctl/guid.h>
* #include <devdctl/event.h>
*/
#ifndef _ZFSD_EVENT_H_
#define _ZFSD_EVENT_H_
/*============================ Namespace Control =============================*/
using std::string;
/*=========================== Forward Declarations ===========================*/
struct zpool_handle;
typedef struct zpool_handle zpool_handle_t;
struct nvlist;
typedef struct nvlist nvlist_t;
/*--------------------------------- ZfsEvent ---------------------------------*/
class ZfsEvent : public DevdCtl::ZfsEvent
{
public:
	/** Specialized DevdCtlEvent object factory for ZFS events. */
	static BuildMethod Builder;

	/** Return a heap-allocated copy of this event. */
	virtual DevdCtl::Event *DeepCopy() const;

	/**
	 * Interpret and perform any actions necessary to
	 * consume the event.
	 * \return True if this event should be queued for later reevaluation
	 */
	virtual bool Process() const;

protected:
	/** DeepCopy Constructor. */
	ZfsEvent(const ZfsEvent &src);

	/** Constructor */
	ZfsEvent(Type, DevdCtl::NVPairMap &, const string &);

	/**
	 * Detach any spares that are no longer needed, but were not
	 * automatically detached by the kernel
	 */
	virtual void CleanupSpares() const;

	/** Handle pool-level ("misc.fs.zfs.*") events. */
	virtual void ProcessPoolEvent() const;

	/** Per-vdev callback used by CleanupSpares(). */
	static VdevCallback_t TryDetach;
};
class GeomEvent : public DevdCtl::GeomEvent
{
public:
	/** Specialized DevdCtlEvent object factory for GEOM events. */
	static BuildMethod Builder;

	/** Return a heap-allocated copy of this event. */
	virtual DevdCtl::Event *DeepCopy() const;

	/**
	 * Interpret and perform any actions necessary to
	 * consume the event.
	 * \return True if this event should be queued for later reevaluation
	 */
	virtual bool Process() const;

protected:
	/** DeepCopy Constructor. */
	GeomEvent(const GeomEvent &src);

	/** Constructor */
	GeomEvent(Type, DevdCtl::NVPairMap &, const string &);

	/**
	 * Attempt to match the ZFS labeled device at devPath with an active
	 * CaseFile for a missing vdev.  If a CaseFile is found, attempt
	 * to re-integrate the device with its pool.
	 *
	 * \param devPath    The devfs path to the potential leaf vdev.
	 * \param physPath   The physical path string reported by the device
	 *                   at devPath.
	 * \param devConfig  The ZFS label information found on the device
	 *                   at devPath.
	 *
	 * \return  true if the event that caused the online action can
	 *          be considered consumed.
	 */
	static bool OnlineByLabel(const string &devPath,
				  const string& physPath,
				  nvlist_t *devConfig);

	/**
	 * \brief Read and return label information for a device.
	 *
	 * \param devFd     The device from which to read ZFS label
	 *                  information.
	 * \param inUse     The device is part of an active or potentially
	 *                  active configuration.
	 * \param degraded  The device label indicates the vdev is not
	 *                  healthy.
	 *
	 * \return  If label information is available, an nvlist describing
	 *          the vdev configuration found on the device specified by
	 *          devFd.  Otherwise NULL.
	 */
	static nvlist_t *ReadLabel(int devFd, bool &inUse, bool &degraded);
};
#endif /*_ZFSD_EVENT_H_ */

135
cmd/zfsd/zfsd_exception.cc Normal file
View File

@ -0,0 +1,135 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*/
/**
 * \file zfsd_exception.cc
*
* Implementation of the ZfsdException class.
*/
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/fs/zfs.h>
#include <syslog.h>
#include <string>
#include <list>
#include <sstream>
#include <devdctl/exception.h>
#include <devdctl/guid.h>
#include <libzfs.h>
#include "vdev.h"
#include "zfsd_exception.h"
__FBSDID("$FreeBSD$");
/*============================ Namespace Control =============================*/
using std::endl;
using std::string;
using std::stringstream;
/*=========================== Class Implementations ==========================*/
/*------------------------------- ZfsdException ------------------------------*/
/* Construct an exception with no associated pool or vdev config. */
ZfsdException::ZfsdException(const char *fmt, ...)
 : DevdCtl::Exception(),
   m_poolConfig(NULL),
   m_vdevConfig(NULL)
{
	va_list ap;

	va_start(ap, fmt);
	FormatLog(fmt, ap);
	va_end(ap);
}
/*
 * Construct an exception associated with a pool handle; the pool's
 * configuration is captured via zpool_get_config() so Log() can
 * identify the pool.
 */
ZfsdException::ZfsdException(zpool_handle_t *pool, const char *fmt, ...)
 : DevdCtl::Exception(),
   m_poolConfig(zpool_get_config(pool, NULL)),
   m_vdevConfig(NULL)
{
	va_list ap;

	va_start(ap, fmt);
	FormatLog(fmt, ap);
	va_end(ap);
}
/*
 * Construct an exception associated with an already-obtained pool
 * configuration nvlist.
 */
ZfsdException::ZfsdException(nvlist_t *poolConfig, const char *fmt, ...)
 : DevdCtl::Exception(),
   m_poolConfig(poolConfig),
   m_vdevConfig(NULL)
{
	va_list ap;

	va_start(ap, fmt);
	FormatLog(fmt, ap);
	va_end(ap);
}
void
ZfsdException::Log() const
{
stringstream output;
if (m_poolConfig != NULL) {
output << "Pool ";
const char *poolName;
if (nvlist_lookup_string(m_poolConfig, ZPOOL_CONFIG_POOL_NAME,
&poolName) == 0)
output << poolName;
else
output << "Unknown";
output << ": ";
}
if (m_vdevConfig != NULL) {
if (m_poolConfig != NULL) {
Vdev vdev(m_poolConfig, m_vdevConfig);
output << "Vdev " << vdev.GUID() << ": ";
} else {
Vdev vdev(m_vdevConfig);
output << "Pool " << vdev.PoolGUID() << ": ";
output << "Vdev " << vdev.GUID() << ": ";
}
}
output << m_log << endl;
syslog(LOG_ERR, "%s", output.str().c_str());
}

109
cmd/zfsd/zfsd_exception.h Normal file
View File

@ -0,0 +1,109 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file zfsd_exception.h
*
* Definition of the ZfsdException class hierarchy. All exceptions
* explicitly thrown by Zfsd are defined here.
*
* Header requirements:
* #include <string>
*
* #include <devdctl/exception.h>
*/
#ifndef _ZFSD_EXCEPTION_H_
#define _ZFSD_EXCEPTION_H_
/*=========================== Forward Declarations ===========================*/
struct zpool_handle;
typedef struct zpool_handle zpool_handle_t;
struct nvlist;
typedef struct nvlist nvlist_t;
/*============================= Class Definitions ============================*/
/*------------------------------- ZfsdException ------------------------------*/
/**
* \brief Class allowing unified reporting/logging of exceptional events.
*/
class ZfsdException : public DevdCtl::Exception
{
public:
	/**
	 * \brief ZfsdException constructor allowing arbitrary string
	 *        data to be reported.
	 *
	 * \param fmt  Printf-like string format specifier.
	 */
	ZfsdException(const char *fmt, ...);

	/**
	 * \brief ZfsdException constructor allowing arbitrary string
	 *        data to be reported and associated with the configuration
	 *        data for a ZFS pool.
	 *
	 * \param pool  Pool handle describing the pool to which this
	 *              exception is associated.
	 * \param fmt   Printf-like string format specifier.
	 *
	 * Instantiation with this method is used to report global
	 * pool errors.
	 */
	ZfsdException(zpool_handle_t *pool, const char *, ...);

	/**
	 * \brief ZfsdException constructor allowing arbitrary string
	 *        data to be reported and associated with the configuration
	 *        data for a ZFS pool.
	 *
	 * \param poolConfig  Pool configuration describing the pool to
	 *                    which this exception is associated.
	 * \param fmt         Printf-like string format specifier.
	 *
	 * Instantiation with this method is used to report global
	 * pool errors.
	 */
	ZfsdException(nvlist_t *poolConfig, const char *, ...);

	/**
	 * \brief Emit exception data to syslog(3).
	 */
	virtual void Log() const;

private:
	/* Pool configuration associated with this exception, if any. */
	nvlist_t	*m_poolConfig;

	/* Vdev configuration associated with this exception, if any. */
	nvlist_t	*m_vdevConfig;
};
#endif /* _ZFSD_EXCEPTION_H_ */

90
cmd/zfsd/zfsd_main.cc Normal file
View File

@ -0,0 +1,90 @@
/*-
* Copyright (c) 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Alan Somers (Spectra Logic Corporation)
*/
/**
* \file zfsd_main.cc
*
* main function for the ZFS Daemon. Separated to facilitate testing.
*
*/
#include <sys/cdefs.h>
#include <cstdlib>
#include <cstdio>
#include <unistd.h>
#include <list>
#include <map>
#include <string>
#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>
#include "vdev_iterator.h"
#include "zfsd.h"
__FBSDID("$FreeBSD$");
/*=============================== Program Main ===============================*/
static void
usage()
{
fprintf(stderr, "usage: %s [-d]\n", getprogname());
exit(1);
}
/**
 * Program entry point.
 *
 * Parses the command line (each -d raises the debug level; any other
 * flag prints usage and exits) and then hands control to the daemon.
 */
int
main(int argc, char **argv)
{
	int option;

	while ((option = getopt(argc, argv, "d")) != -1) {
		if (option == 'd') {
			g_debug++;
		} else {
			/* Unknown flag: report usage and terminate. */
			usage();
		}
	}

	ZfsDaemon::Run();
	return (0);
}

122
cmd/zfsd/zpool_list.cc Normal file
View File

@ -0,0 +1,122 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file zpool_list.cc
*
* Implementation of the ZpoolList class.
*/
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/fs/zfs.h>
#include <stdint.h>
#include <libzfs.h>
#include <list>
#include <map>
#include <string>
#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>
#include "vdev.h"
#include "vdev_iterator.h"
#include "zpool_list.h"
#include "zfsd.h"
/*============================ Namespace Control =============================*/
using DevdCtl::Guid;
/*=========================== Class Implementations ==========================*/
/*--------------------------------- ZpoolList --------------------------------*/
/*
 * ZpoolList construction filter that unconditionally accepts every
 * candidate pool.  All arguments are ignored.
 */
bool
ZpoolList::ZpoolAll(zpool_handle_t *pool, nvlist_t *poolConfig, void *cbArg)
{
	return (true);
}
/*
 * ZpoolList construction filter accepting only the pool whose config
 * GUID matches the Guid passed through cbArg.
 */
bool
ZpoolList::ZpoolByGUID(zpool_handle_t *pool, nvlist_t *poolConfig,
		       void *cbArg)
{
	Guid *wantedGUID(static_cast<Guid *>(cbArg));
	uint64_t candidateGUID;

	/* Pools whose config lacks a GUID entry are never matched. */
	if (nvlist_lookup_uint64(poolConfig, ZPOOL_CONFIG_POOL_GUID,
	    &candidateGUID) != 0)
		return (false);

	return (candidateGUID == (uint64_t)*wantedGUID);
}
/*
 * ZpoolList construction filter accepting only the pool whose name
 * matches the std::string passed through cbArg.
 */
bool
ZpoolList::ZpoolByName(zpool_handle_t *pool, nvlist_t *poolConfig, void *cbArg)
{
	const string &desiredPoolName(*static_cast<const string *>(cbArg));
	/* We are only interested in the pool that matches our pool name. */
	return (desiredPoolName == zpool_get_name(pool));
}
/*
 * zpool_iter() callback used to populate the list under construction.
 * Pools accepted by the list's filter are stored; rejected pools are
 * closed immediately.  Always returns 0 so iteration continues over
 * every imported pool.
 */
int
ZpoolList::LoadIterator(zpool_handle_t *pool, void *data)
{
	ZpoolList *list(static_cast<ZpoolList *>(data));
	nvlist_t *config(zpool_get_config(pool, NULL));
	bool wanted(list->m_filter(pool, config, list->m_filterArg));

	if (wanted) {
		list->push_back(pool);
	} else {
		zpool_close(pool);
	}
	return (0);
}
/*
 * Construct a list of the imported pools accepted by the given filter.
 * Each pool handle enumerated by zpool_iter() is either stored in this
 * list or closed (see LoadIterator).
 */
ZpoolList::ZpoolList(PoolFilter_t *filter, void * filterArg)
 : m_filter(filter),
   m_filterArg(filterArg)
{
	zpool_iter(g_zfsHandle, LoadIterator, this);
}
/*
 * Destructor.  Every pool handle still held by the list is closed
 * before the container itself is destroyed.
 */
ZpoolList::~ZpoolList()
{
	while (!empty()) {
		zpool_close(front());
		pop_front();
	}
}

133
cmd/zfsd/zpool_list.h Normal file
View File

@ -0,0 +1,133 @@
/*-
* Copyright (c) 2011, 2012, 2013 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* Authors: Justin T. Gibbs (Spectra Logic Corporation)
*
* $FreeBSD$
*/
/**
* \file zpool_list.h
*
* ZpoolList class definition. ZpoolList is a standard container
* allowing filtering and iteration of imported ZFS pool information.
*
* Header requirements:
*
* #include <list>
* #include <string>
*/
#ifndef _ZPOOL_LIST_H_
#define _ZPOOL_LIST_H_
/*============================ Namespace Control =============================*/
using std::string;
/*=========================== Forward Declarations ===========================*/
struct zpool_handle;
typedef struct zpool_handle zpool_handle_t;
struct nvlist;
typedef struct nvlist nvlist_t;
class Vdev;
/*============================= Class Definitions ============================*/
/*--------------------------------- ZpoolList --------------------------------*/
class ZpoolList;

/**
 * \brief Signature for ZpoolList construction filters.
 *
 * A filter returns true when the candidate pool should be included in
 * the list being built, and false otherwise.
 */
typedef bool PoolFilter_t(zpool_handle_t *pool, nvlist_t *poolConfig,
			  void *filterArg);

/**
 * \brief Container of imported ZFS pool data.
 *
 * ZpoolList is a convenience class that converts libzfs's ZFS
 * pool methods into a standard list container.
 */
class ZpoolList : public std::list<zpool_handle_t *>
{
public:
	/**
	 * \brief Utility ZpoolList construction filter that causes all
	 *        pools known to the system to be included in the
	 *        instantiated ZpoolList.
	 */
	static PoolFilter_t ZpoolAll;

	/**
	 * \brief Utility ZpoolList construction filter that causes only
	 *        a pool known to the system and having the specified
	 *        GUID to be included in the instantiated ZpoolList.
	 */
	static PoolFilter_t ZpoolByGUID;

	/**
	 * \brief Utility ZpoolList construction filter that causes only
	 *        pools known to the system and having the specified name
	 *        to be included in the instantiated ZpoolList.
	 */
	static PoolFilter_t ZpoolByName;

	/**
	 * \brief ZpoolList constructor
	 *
	 * \param filter     The filter function to use when constructing
	 *                   the ZpoolList.  This may be one of the
	 *                   static utility filters defined for ZpoolList
	 *                   or a user defined function.
	 * \param filterArg  A single argument to pass into the filter
	 *                   function when it is invoked on each
	 *                   candidate pool.
	 */
	ZpoolList(PoolFilter_t *filter = ZpoolAll, void *filterArg = NULL);

	/**
	 * \brief Destructor.  Closes every pool handle held by the list.
	 */
	~ZpoolList();

private:
	/**
	 * \brief Helper routine used to populate the internal
	 *        data store of ZFS pool objects using libzfs's
	 *        zpool_iter() function.
	 *
	 * \param pool  The ZFS pool object to filter.
	 * \param data  User argument passed through zpool_iter().
	 */
	static int LoadIterator(zpool_handle_t *pool, void *data);

	/**
	 * \brief The filter with which this ZpoolList was constructed.
	 */
	PoolFilter_t *m_filter;

	/**
	 * \brief The filter argument with which this ZpoolList was
	 *        constructed.
	 */
	void *m_filterArg;
};

#endif /* _ZPOOL_LIST_H_ */

View File

@ -989,8 +989,12 @@ make_disks(zpool_handle_t *zhp, nvlist_t *nv, boolean_t replacing)
* magic value left by the previous filesystem.
*/
verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
#ifdef TRUENAS_SCALE_NEVER_WHOLEDISK
wholedisk = B_FALSE;
#else
verify(!nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk));
#endif
if (!wholedisk) {
/*
@ -1155,9 +1159,11 @@ is_device_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
&child, &children) != 0) {
verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
if (strcmp(type, VDEV_TYPE_DISK) == 0)
verify(!nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_WHOLE_DISK, &wholedisk));
#endif
/*
* As a generic check, we look to see if this is a replace of a

View File

@ -28,6 +28,15 @@ if BUILD_FREEBSD
AM_CFLAGS += -fPIC -Werror -Wno-unknown-pragmas -Wno-enum-conversion
AM_CFLAGS += -include $(top_srcdir)/include/os/freebsd/spl/sys/ccompile.h
AM_CFLAGS += -I/usr/include -I/usr/local/include
AM_CXXFLAGS = -std=c++17 -Wall -Wstrict-prototypes -fno-strict-aliasing
AM_CXXFLAGS += $(NO_OMIT_FRAME_POINTER)
AM_CXXFLAGS += $(DEBUG_CFLAGS)
AM_CXXFLAGS += $(ASAN_CFLAGS)
AM_CXXFLAGS += $(CODE_COVERAGE_FLAGS) $(NO_FORMAT_ZERO_LENGTH)
AM_CXXFLAGS += -fPIC -Werror -Wno-unknown-pragmas -Wno-enum-conversion
AM_CXXFLAGS += -include $(top_srcdir)/include/os/freebsd/spl/sys/ccompile.h
AM_CXXFLAGS += -I/usr/include -I/usr/local/include
endif
AM_CPPFLAGS += -D_GNU_SOURCE
@ -43,6 +52,12 @@ AM_CPPFLAGS += -DPKGDATADIR=\"$(pkgdatadir)\"
AM_CPPFLAGS += $(DEBUG_CPPFLAGS)
AM_CPPFLAGS += $(CODE_COVERAGE_CPPFLAGS)
AM_CPPFLAGS += -DTEXT_DOMAIN=\"zfs-@ac_system_l@-user\"
if BUILD_LINUX
# NAS-118236: Vdevs from CORE are labeled with whole_disk=1, but we did not
# create the partition table in ZFS and must not repartition them. We know
# SCALE never creates wholedisk vdevs, so just ignore the config.
AM_CPPFLAGS += -DTRUENAS_SCALE_NEVER_WHOLEDISK
endif
if ASAN_ENABLED
AM_CPPFLAGS += -DZFS_ASAN_ENABLED

View File

@ -62,7 +62,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_FPU], [
], [
kernel_fpu_begin();
kernel_fpu_end();
], [], [ZFS_META_LICENSE])
], [], [])
ZFS_LINUX_TEST_SRC([__kernel_fpu], [
#include <linux/types.h>
@ -77,7 +77,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_FPU], [
], [
__kernel_fpu_begin();
__kernel_fpu_end();
], [], [ZFS_META_LICENSE])
], [], [])
ZFS_LINUX_TEST_SRC([kernel_neon], [
#include <asm/neon.h>
@ -92,7 +92,7 @@ AC_DEFUN([ZFS_AC_KERNEL_FPU], [
dnl # Legacy kernel
dnl #
AC_MSG_CHECKING([whether kernel fpu is available])
ZFS_LINUX_TEST_RESULT([kernel_fpu_license], [
ZFS_LINUX_TEST_RESULT([kernel_fpu], [
AC_MSG_RESULT(kernel_fpu_*)
AC_DEFINE(HAVE_KERNEL_FPU, 1,
[kernel has kernel_fpu_* functions])
@ -102,7 +102,7 @@ AC_DEFUN([ZFS_AC_KERNEL_FPU], [
dnl #
dnl # Linux 4.2 kernel
dnl #
ZFS_LINUX_TEST_RESULT_SYMBOL([__kernel_fpu_license],
ZFS_LINUX_TEST_RESULT_SYMBOL([__kernel_fpu],
[__kernel_fpu_begin],
[arch/x86/kernel/fpu/core.c arch/x86/kernel/i387.c], [
AC_MSG_RESULT(__kernel_fpu_*)

View File

@ -49,6 +49,8 @@ AC_CONFIG_HEADERS([zfs_config.h], [
LT_INIT
AC_PROG_INSTALL
AC_PROG_CC
AC_PROG_CXX
AC_PROG_LIBTOOL
AC_PROG_LN_S
PKG_PROG_PKG_CONFIG
AM_PROG_AS

View File

@ -17,3 +17,152 @@ openzfs-linux (2.1.99-1) unstable; urgency=low
* This packaging is a fork of Debian zfs-linux 2.1.6-2 release.
-- Umer Saleem <usaleem@ixsystems.com> Fri, 11 Oct 2022 15:00:00 -0400
openzfs-linux (2.1.12-0) unstable; urgency=medium
* Merge tag zfs-2.1.12
-- Ameer Hamza <ahamza@ixsystems.com> Wed, 14 Jun 2023 09:00:00 -0500
openzfs-linux (2.1.11-0) unstable; urgency=medium
* Merge tag zfs-2.1.11
-- Ameer Hamza <ahamza@ixsystems.com> Thu, 20 Apr 2023 13:00:00 -0500
openzfs-linux (2.1.10-0) unstable; urgency=medium
* Merge tag zfs-2.1.10
-- Ameer Hamza <ahamza@ixsystems.com> Mon, 17 Apr 2023 12:00:00 -0500
openzfs-linux (2.1.9-0) unstable; urgency=medium
* Merge tag zfs-2.1.9
-- Ryan Moeller <ryan@ixsystems.com> Wed, 25 Jan 2023 14:00:00 -0500
openzfs-linux (2.1.8-0) unstable; urgency=medium
* Merge tag zfs-2.1.8
-- Ryan Moeller <ryan@ixsystems.com> Fri, 20 Jan 2023 12:30:00 -0500
openzfs-linux (2.1.7-1) unstable; urgency=medium
* Integrate native Debian packaging with TrueNAS ZFS.
* This packaging is a fork of Debian zfs-linux 2.1.6-2 release.
-- Umer Saleem <usaleem@ixsystems.com> Wed, 14 Dec 2022 15:00:00 -0400
openzfs (2.1.7-0) unstable; urgency=medium
* Merge tag zfs-2.1.7
* Expose libzutil error info in libpc_handle_t
* Build packages with debug symbols
* Add support for overlayfs for docker
* Optimize microzaps
* zed: Avoid core dump if wholedisk property does not exist
* zed: post a udev change event from spa_vdev_attach()
* SCALE: ignore wholedisk
* Skip trivial permission checks on xattrs for POSIX ACL type
-- Ryan Moeller <ryan@ixsystems.com> Wed, 07 Dec 2022 16:00:00 -0500
openzfs (2.1.6-0) unstable; urgency=medium
* Merge tag zfs-2.1.6
* zed: mark disks as REMOVED when they are removed
* Provide kfpu_begin/end from spl
* Add snapshots_changed as property
* Add createtxg sort support for simple snapshot iterator
* Expose ZFS dataset case sensitivity setting via sb_opts
-- Ryan Moeller <ryan@ixsystems.com> Wed, 22 Jun 2022 16:00:00 -0500
openzfs (2.1.5-0) unstable; urgency=medium
* Merged tag zfs-2.1.5
-- Ryan Moeller <ryan@ixsystems.com> Wed, 22 Jun 2022 16:00:00 -0500
openzfs (2.1.4-1) unstable; urgency=medium
* Merged from zfs-2.1.5-staging
* Remove wrong assertion in log spacemap
* Add debug symbols to the truenas package
-- Ryan Moeller <ryan@ixsystems.com> Thu, 02 Jun 2022 08:00:00 -0500
openzfs (2.1.4-0) unstable; urgency=medium
* Merged OpenZFS zfs-2.1.4
* Also merged from zfs-2.1.5-staging
* Fix zfs send -V
* Expose zpool guids through kstats
-- Ryan Moeller <ryan@ixsystems.com> Wed, 18 May 2022 14:00:00 -0500
openzfs (2.1.3-0) unstable; urgency=medium
* Merged OpenZFS zfs-2.1.3
-- Ryan Moeller <ryan@ixsystems.com> Thu, 10 Mar 2021 16:00:00 -0400
openzfs (2.1.2-1) unstable; urgency=medium
* Merged OpenZFS zfs-2.1.2-release
* Implement FS_IOC_GETVERSION
* Improve log spacemap load time after unclean export
-- Ryan Moeller <ryan@ixsystems.com> Mon, 20 Dec 2021 10:00:00 -0400
openzfs (2.1.2-0) unstable; urgency=medium
* Merged OpenZFS zfs-2.1.2-staging
* Updated Python build dependencies
* Linux 5.14 build fixes
-- Ryan Moeller <ryan@ixsystems.com> Wed, 03 Nov 2021 10:00:00 -0400
openzfs (2.1.1-0) unstable; urgency=medium
* Merged OpenZFS 2.1.1
* Removed feature@xattr_compat and xattr_fallback property
-- Ryan Moeller <ryan@ixsystems.com> Thu, 07 Oct 2021 14:09:30 -0400
openzfs (2.1.0-0) unstable; urgency=medium
* Rebased to OpenZFS 2.1.0
-- Ryan Moeller <ryan@ixsystems.com> Thu, 01 Apr 2021 13:00:00 -0500
openzfs (2.0.4-0) unstable; urgency=medium
* Rebased to OpenZFS 2.0.4
-- Ryan Moeller <ryan@ixsystems.com> Tue, 09 Mar 2021 14:00:00 -0500
openzfs (2.0.3-0) unstable; urgency=medium
* Rebased to OpenZFS 2.0.3
-- Ryan Moeller <ryan@ixsystems.com> Wed, 24 Feb 2021 15:00:00 -0500
openzfs (2.0.2-0) unstable; urgency=medium
* Rebased to OpenZFS 2.0.2
-- Ryan Moeller <ryan@ixsystems.com> Mon, 01 Feb 2021 15:00:00 -0500
openzfs (2.0.1-0) unstable; urgency=medium
* Rebased to OpenZFS 2.0.1
-- Ryan Moeller <ryan@ixsystems.com> Thu, 07 Jan 2021 17:34:28 -0500
openzfs (2.0.0-0) unstable; urgency=medium
* Initial package for TrueNAS SCALE based on OpenZFS 2.0.0
-- Ryan Moeller <ryan@ixsystems.com> Mon, 14 Sep 2020 22:01:55 -0400

View File

@ -4,10 +4,9 @@ Priority: optional
Maintainer: ZFS on Linux specific mailing list <zfs-discuss@list.zfsonlinux.org>
Build-Depends: debhelper-compat (= 12),
dh-python,
dh-sequence-dkms | dkms (>> 2.1.1.2-5),
dh-sequence-dkms,
libaio-dev,
libblkid-dev,
libcurl4-openssl-dev,
libelf-dev,
libpam0g-dev,
libssl-dev | libssl1.0-dev,
@ -35,6 +34,7 @@ Depends: ${misc:Depends}, ${shlibs:Depends}
Breaks: libnvpair1, libnvpair3
Replaces: libnvpair1, libnvpair3, libnvpair3linux
Conflicts: libnvpair3linux
Provides: libnvpair3
Description: Solaris name-value library for Linux
This library provides routines for packing and unpacking nv pairs for
transporting data across process boundaries, transporting between
@ -46,6 +46,7 @@ Architecture: linux-any
Depends: libpam-runtime, ${misc:Depends}, ${shlibs:Depends}
Replaces: libpam-zfs
Conflicts: libpam-zfs
Provides: pam-zfs-key
Description: PAM module for managing encryption keys for ZFS
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,
@ -61,6 +62,7 @@ Depends: ${misc:Depends}, ${shlibs:Depends}
Breaks: libuutil1, libuutil3
Replaces: libuutil1, libuutil3, libuutil3linux
Conflicts: libuutil3linux
Provides: libuutil3
Description: Solaris userland utility library for Linux
This library provides a variety of glue functions for ZFS on Linux:
* libspl: The Solaris Porting Layer userland library, which provides APIs
@ -84,7 +86,7 @@ Depends: libssl-dev | libssl1.0-dev,
${misc:Depends}
Replaces: libzfslinux-dev
Conflicts: libzfslinux-dev
Provides: libnvpair-dev, libuutil-dev
Provides: libnvpair-dev, libuutil-dev, libzfs5-devel
Description: OpenZFS filesystem development files for Linux
Header files and static libraries for compiling software against
libraries of OpenZFS filesystem.
@ -102,6 +104,7 @@ Recommends: libcurl4
Breaks: libzfs2, libzfs4
Replaces: libzfs2, libzfs4, libzfs4linux
Conflicts: libzfs4linux
Provides: libzfs5
Description: OpenZFS filesystem library for Linux - general support
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,
@ -130,6 +133,7 @@ Depends: ${misc:Depends}, ${shlibs:Depends}
Breaks: libzpool2, libzpool5
Replaces: libzpool2, libzpool5, libzpool5linux
Conflicts: libzpool5linux
Provides: libzpool5
Description: OpenZFS pool library for Linux
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,
@ -146,6 +150,7 @@ Depends: python3-cffi,
${python3:Depends}
Replaces: python3-pyzfs
Conflicts: python3-pyzfs
Provides: python3-pyzfs
Description: wrapper for libzfs_core C library
libzfs_core is intended to be a stable interface for programmatic
administration of ZFS. This wrapper provides one-to-one wrappers for
@ -197,6 +202,7 @@ Recommends: openzfs-zfs-zed, openzfs-zfsutils (>= ${source:Version}), ${linux:Re
Suggests: debhelper
Breaks: spl-dkms (<< 0.8.0~rc1)
Replaces: spl-dkms, zfs-dkms
Conflicts: zfs-dkms
Provides: openzfs-zfs-modules
Description: OpenZFS filesystem kernel modules for Linux
OpenZFS is a storage platform that encompasses the functionality of
@ -216,6 +222,7 @@ Depends: busybox-initramfs | busybox-static | busybox,
Breaks: zfsutils-linux (<= 0.7.11-2)
Replaces: zfsutils-linux (<= 0.7.11-2), zfs-initramfs
Conflicts: zfs-initramfs
Provides: zfs-initramfs
Description: OpenZFS root filesystem capabilities for Linux - initramfs
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,
@ -232,6 +239,7 @@ Depends: dracut,
${misc:Depends}
Conflicts: zfs-dracut
Replaces: zfs-dracut
Provides: zfs-dracut
Description: OpenZFS root filesystem capabilities for Linux - dracut
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,
@ -248,10 +256,11 @@ Depends: openzfs-libnvpair3 (= ${binary:Version}),
openzfs-libuutil3 (= ${binary:Version}),
openzfs-libzfs4 (= ${binary:Version}),
openzfs-libzpool5 (= ${binary:Version}),
openzfs-zfs-modules | openzfs-zfs-dkms,
python3,
${misc:Depends},
${shlibs:Depends}
Recommends: lsb-base, openzfs-zfs-modules | openzfs-zfs-dkms, openzfs-zfs-zed
Recommends: lsb-base, openzfs-zfs-zed
Breaks: openrc,
spl (<< 0.7.9-2),
spl-dkms (<< 0.8.0~rc1),
@ -262,7 +271,7 @@ Conflicts: zfs, zfs-fuse, zfsutils-linux
Suggests: nfs-kernel-server,
samba-common-bin (>= 3.0.23),
openzfs-zfs-initramfs | openzfs-zfs-dracut
Provides: openzfsutils
Provides: openzfs
Description: command-line tools to manage OpenZFS filesystems
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,
@ -316,6 +325,7 @@ Recommends: nfs-kernel-server
Breaks: zfsutils-linux (<= 0.7.9-2)
Replaces: zfsutils-linux (<= 0.7.9-2), zfs-test
Conflicts: zutils, zfs-test
Provides: zfs-test
Description: OpenZFS test infrastructure and support scripts
OpenZFS is a storage platform that encompasses the functionality of
traditional filesystems and volume managers. It supports data checksums,

View File

@ -5,7 +5,7 @@ Maintainer: ZFS on Linux specific mailing list <zfs-discuss@list.zfsonlinux.org>
Build-Depends: debhelper-compat (= 10),
dkms (>> 2.1.1.2-5),
libtool,
linux-headers-_KVERS_ | raspberrypi-kernel-headers
linux-headers-_KVERS_
Standards-Version: 4.3.0
Homepage: http://www.openzfs.org/
Vcs-Git: https://github.com/openzfs/zfs.git
@ -14,7 +14,7 @@ Vcs-Browser: https://github.com/openzfs/zfs
Package: openzfs-zfs-modules-_KVERS_
Architecture: _ARCH_
Provides: openzfs-zfs-modules
Depends: linux-image-_KVERS_ | raspberrypi-kernel
Depends: linux-image-_KVERS_ | linux-image-amd64
Recommends: openzfsutils
Replaces: zfs-modules-_KVERS_
Conflicts: zfs-modules-_KVERS_

View File

@ -25,7 +25,7 @@ PARALLEL = $(subst parallel=,,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
NJOBS = -j$(or $(PARALLEL),$(NUM_CPUS),1)
%:
dh $@ --with autoreconf,dkms,python3,sphinxdoc
dh $@ --with autoreconf,python3,sphinxdoc
override_dh_autoreconf:
@# Embed the downstream version in the module.
@ -163,7 +163,6 @@ override_dh_installinit:
dh_installinit -r --no-restart-after-upgrade --name zfs-import
dh_installinit -r --no-restart-after-upgrade --name zfs-mount
dh_installinit -r --no-restart-after-upgrade --name zfs-load-key
dh_installinit -R --name zfs-share
dh_installinit -R --name zfs-zed
override_dh_installsystemd:

View File

@ -48,6 +48,8 @@ activate_vg()
}
udev_settle
activate_vg
# TrueNAS SCALE doesn't boot from pools on top of LVM, and the scan can take a
# significant amount of time on systems with a large number of disks. Skip it.
#activate_vg
exit 0

View File

@ -283,6 +283,7 @@ load_module_initrd()
while true; do
# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
echo "1" > /sys/kernel/wait_for_device_probe
if command -v wait_for_udev > /dev/null 2>&1 ; then
wait_for_udev 10
elif command -v wait_for_dev > /dev/null 2>&1 ; then

View File

@ -21,6 +21,14 @@ SUBSTFILES += $(sysconf_zfs_DATA)
SHELLCHECKSCRIPTS += $(sysconf_zfs_DATA)
$(call SHELLCHECK_OPTS,$(sysconf_zfs_DATA)): SHELLCHECK_SHELL = sh
if BUILD_FREEBSD
rcdir = $(sysconfdir)/rc.d
rc_SCRIPTS = %D%/rc.d/zfsd
SUBSTFILES += $(rc_SCRIPTS)
SHELLCHECKSCRIPTS += $(rc_SCRIPTS)
$(call SHELLCHECK_OPTS,$(rc_SCRIPTS)): SHELLCHECK_SHELL = sh
endif
if BUILD_LINUX
initconf_DATA = \

1
etc/rc.d/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/zfsd

14
etc/rc.d/zfsd.in Normal file
View File

@ -0,0 +1,14 @@
#!/bin/sh

# FreeBSD rc(8) script for zfsd.  The lines below are rcorder(8)
# metadata: start after devd and zfs, never run in jails, and stop the
# daemon on shutdown.
# PROVIDE: zfsd
# REQUIRE: devd zfs
# KEYWORD: nojail shutdown

. /etc/rc.subr

# Service name, the rc.conf(5) knob that enables it, and the daemon
# binary path (@sbindir@ is substituted at build time).
name="zfsd"
rcvar="zfsd_enable"
command="@sbindir@/${name}"

load_rc_config ${name}
run_rc_command "${1}"

View File

@ -3,7 +3,7 @@ enable zfs-import-cache.service
disable zfs-import-scan.service
enable zfs-import.target
enable zfs-mount.service
enable zfs-share.service
disable zfs-share.service
enable zfs-zed.service
enable zfs-volume-wait.service
enable zfs.target

View File

@ -17,4 +17,4 @@ EnvironmentFile=-@initconfdir@/zfs
ExecStart=@sbindir@/zfs share -a
[Install]
WantedBy=zfs.target
WantedBy=multi-user.target

View File

@ -106,29 +106,8 @@
#define kfpu_init() 0
#define kfpu_fini() ((void) 0)
#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define kfpu_begin() \
{ \
preempt_disable(); \
__kernel_fpu_begin(); \
}
#define kfpu_end() \
{ \
__kernel_fpu_end(); \
preempt_enable(); \
}
#elif defined(HAVE_KERNEL_FPU)
#define kfpu_begin() kernel_fpu_begin()
#define kfpu_end() kernel_fpu_end()
#else
/*
* This case is unreachable. When KERNEL_EXPORTS_X86_FPU is defined then
* either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
*/
#error "Unreachable kernel configuration"
#endif
extern void kfpu_begin(void);
extern void kfpu_end(void);
#else /* defined(KERNEL_EXPORTS_X86_FPU) */

View File

@ -22,6 +22,7 @@
#define _SPL_RPC_XDR_H
#include <sys/types.h>
#include <sys/sysmacros.h>
typedef int bool_t;

View File

@ -83,6 +83,8 @@ typedef struct ace_object {
#define ACL_PROTECTED 0x0002
#define ACL_DEFAULTED 0x0004
#define ACL_FLAGS_ALL (ACL_AUTO_INHERIT|ACL_PROTECTED|ACL_DEFAULTED)
#define ACL_IS_TRIVIAL 0x10000
#define ACL_IS_DIR 0x20000
#define ACE_ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
#define ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05

View File

@ -106,6 +106,15 @@ zpl_chmod_acl(struct inode *ip)
}
#endif /* CONFIG_FS_POSIX_ACL */
#if defined(HAVE_IOPS_PERMISSION_USERNS)
extern int zpl_permission(struct user_namespace *userns, struct inode *ip,
int mask);
#elif defined(HAVE_IOPS_PERMISSION_IDMAP)
extern int zpl_permission(struct mnt_idmap *idmap, struct inode *ip, int mask);
#else
extern int zpl_permission(struct inode *ip, int mask);
#endif
extern xattr_handler_t *zpl_xattr_handlers[];
/* zpl_ctldir.c */

View File

@ -211,6 +211,8 @@ void zfs_acl_ids_free(zfs_acl_ids_t *);
boolean_t zfs_acl_ids_overquota(struct zfsvfs *, zfs_acl_ids_t *, uint64_t);
int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
int zfs_setacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
int zfs_stripacl(struct znode *, cred_t *);
void zfs_acl_rele(void *);
void zfs_oldace_byteswap(ace_t *, int);
void zfs_ace_byteswap(void *, size_t, boolean_t);

View File

@ -3158,8 +3158,10 @@ zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
uint64_t wholedisk = 0;
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
#endif
/*
* XXX - L2ARC 1.0 devices can't support expansion.
@ -4314,7 +4316,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
!(name_flags & VDEV_NAME_PATH)) {
path = zfs_strip_path(path);
}
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
/*
* Remove the partition from the path if this is a whole disk.
*/
@ -4323,6 +4325,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
#endif
} else {
path = type;

View File

@ -87,6 +87,7 @@ zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
return (0);
}
#ifndef TRUENAS_SCALE_NEVER_WHOLEDISK
/*
* Read the EFI label from the config, if a label does not exist then
* pass back the error to the caller. If the caller has passed a non-NULL
@ -118,6 +119,7 @@ read_efi_label(nvlist_t *config, diskaddr_t *sb)
}
return (err);
}
#endif
/*
* determine where a partition starts on a disk in the current
@ -126,6 +128,9 @@ read_efi_label(nvlist_t *config, diskaddr_t *sb)
static diskaddr_t
find_start_block(nvlist_t *config)
{
#ifdef TRUENAS_SCALE_NEVER_WHOLEDISK
(void) config;
#else
nvlist_t **child;
uint_t c, children;
diskaddr_t sb = MAXOFFSET_T;
@ -149,6 +154,7 @@ find_start_block(nvlist_t *config)
return (sb);
}
}
#endif
return (MAXOFFSET_T);
}

View File

@ -660,23 +660,18 @@ platform, the behavior is the same as if it were set to
.Sy off .
.Bl -tag -compact -offset 4n -width "posixacl"
.It Sy off
default on Linux, when a file system has the
when a file system has the
.Sy acltype
property set to off then ACLs are disabled.
.It Sy noacl
an alias for
.Sy off
.It Sy nfsv4
default on
.Fx ,
indicates that NFSv4-style ZFS ACLs should be used.
default, indicates that NFSv4-style ZFS ACLs should be used.
These ACLs can be managed with the
.Xr getfacl 1
and
.Xr setfacl 1 .
The
.Sy nfsv4
ZFS ACL type is not yet supported on Linux.
.It Sy posix
indicates POSIX ACLs should be used.
POSIX ACLs are specific to Linux and are not functional on other platforms.

View File

@ -74,6 +74,7 @@ SPL_OBJS := \
spl-cred.o \
spl-err.o \
spl-generic.o \
spl-kfpu.o \
spl-kmem-cache.o \
spl-kmem.o \
spl-kstat.o \

View File

@ -53,6 +53,7 @@ FMAKE = env -u MAKEFLAGS make $(FMAKEFLAGS)
modules-Linux:
mkdir -p $(sort $(dir $(spl-objs) $(spl-)))
mkdir -p $(sort $(dir $(zfs-objs) $(zfs-)))
@# Build the kernel modules.
$(MAKE) -C @LINUX_OBJ@ $(if @KERNEL_CC@,CC=@KERNEL_CC@) \
$(if @KERNEL_LD@,LD=@KERNEL_LD@) $(if @KERNEL_LLVM@,LLVM=@KERNEL_LLVM@) \
M="$$PWD" @KERNEL_MAKE@ CONFIG_ZFS=m modules
@ -161,7 +162,7 @@ cppcheck-FreeBSD:
cppcheck: cppcheck-@ac_system@
distdir:
cd @srcdir@ && find . -name '*.[chS]' -exec sh -c 'for f; do mkdir -p $$distdir/$${f%/*}; cp @srcdir@/$$f $$distdir/$$f; done' _ {} +
cd @srcdir@ && find . -name '*.[chxS]' -exec sh -c 'for f; do mkdir -p $$distdir/$${f%/*}; cp @srcdir@/$$f $$distdir/$$f; done' _ {} +
cp @srcdir@/Makefile.bsd $$distdir/Makefile.bsd
gen-zstd-symbols:

View File

@ -2029,6 +2029,12 @@ top:
return (error);
}
/*
 * Remove all ACL entries from a znode.
 *
 * Not supported in this implementation: both arguments are ignored and
 * EOPNOTSUPP (wrapped in SET_ERROR for error tracing) is always
 * returned to the caller.
 */
int
zfs_stripacl(znode_t *zp, cred_t *cr)
{
	return (SET_ERROR(EOPNOTSUPP));
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflict with dataset

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2022 iXsystems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
 * Out-of-line FPU context begin/end wrappers, compiled only when the
 * kernel exports an x86 FPU save/restore API (KERNEL_EXPORTS_X86_FPU).
 * Exactly one of the two variants below is built, depending on which
 * symbol the running kernel provides.
 */
#ifdef KERNEL_EXPORTS_X86_FPU
#include <sys/simd.h>
#if defined(HAVE_KERNEL_FPU)
/*
 * Kernel exports kernel_fpu_begin()/kernel_fpu_end() directly.  Note
 * that, unlike the __kernel_fpu_* branch below, this branch adds no
 * explicit preempt_disable()/preempt_enable() of its own.
 */
void
// cppcheck-suppress syntaxError
kfpu_begin(void)
{
	kernel_fpu_begin();
}

void
// cppcheck-suppress syntaxError
kfpu_end(void)
{
	kernel_fpu_end();
}
#elif defined(HAVE_UNDERSCORE_KERNEL_FPU)
/*
 * Kernel exports only the low-level __kernel_fpu_begin()/__kernel_fpu_end();
 * this branch brackets them with preempt_disable()/preempt_enable() for
 * the duration of FPU use.
 */
void
// cppcheck-suppress syntaxError
kfpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}

void
// cppcheck-suppress syntaxError
kfpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
#else
/*
 * This case should be unreachable. When KERNEL_EXPORTS_X86_FPU is defined
 * then either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
 */
#error "Unreachable kernel configuration"
#endif

EXPORT_SYMBOL(kfpu_begin);
EXPORT_SYMBOL(kfpu_end);
#endif /* KERNEL_EXPORTS_X86_FPU */

View File

@ -33,6 +33,7 @@
#include <sys/policy.h>
#include <linux/security.h>
#include <linux/vfs_compat.h>
#include <sys/zfs_znode.h>
/*
* The passed credentials cannot be directly verified because Linux only
@ -103,13 +104,56 @@ secpolicy_sys_config(const cred_t *cr, boolean_t checkonly)
* Like secpolicy_vnode_access() but we get the actual wanted mode and the
* current mode of the file, not the missing bits.
*
* Enforced in the Linux VFS.
* If filesystem is using NFSv4 ACLs, validate the current mode
* and the wanted mode are the same, otherwise access fails.
*
* If using POSIX ACLs or no ACLs, enforced in the Linux VFS.
*/
int
secpolicy_vnode_access2(const cred_t *cr, struct inode *ip, uid_t owner,
mode_t curmode, mode_t wantmode)
{
return (0);
mode_t remainder = ~curmode & wantmode;
uid_t uid = crgetuid(cr);
if ((ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_NFSV4) ||
(remainder == 0)) {
return (0);
}
if (uid == 0)
return (0);
#if defined(CONFIG_USER_NS)
if (!kuid_has_mapping(cr->user_ns, SUID_TO_KUID(owner)))
return (EPERM);
#endif
/*
* There are some situations in which capabilities
* may allow overriding the DACL.
*/
if (S_ISDIR(ip->i_mode)) {
if (!(wantmode & S_IWUSR) &&
(priv_policy_user(cr, CAP_DAC_READ_SEARCH, EPERM) == 0)) {
return (0);
}
if (priv_policy_user(cr, CAP_DAC_OVERRIDE, EPERM) == 0) {
return (0);
}
return (EACCES);
}
if ((wantmode == S_IRUSR) &&
(priv_policy_user(cr, CAP_DAC_READ_SEARCH, EPERM) == 0)) {
return (0);
}
if (!(remainder & S_IXUSR) &&
(priv_policy_user(cr, CAP_DAC_OVERRIDE, EPERM) == 0)) {
return (0);
}
return (EACCES);
}
/*

View File

@ -46,6 +46,7 @@
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
@ -1963,8 +1964,8 @@ zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
static int
zfs_getacl_impl(znode_t *zp, vsecattr_t *vsecp, boolean_t stripped, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
@ -1975,21 +1976,21 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr,
zfs_init_idmap)))
return (error);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
if (stripped) {
mode_t mode = ZTOI(zp)->i_mode;
aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
(aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(S_ISDIR(mode), mode, B_TRUE,
(ZTOZSB(zp)->z_acl_mode == ZFS_ACL_GROUPMASK), aclp);
} else {
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0)
return (error);
}
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
/*
* Scan ACL to determine number of ACEs
*/
@ -2054,11 +2055,37 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
if (zp->z_pflags & ZFS_ACL_TRIVIAL)
vsecp->vsa_aclflags |= ACL_IS_TRIVIAL;
if (S_ISDIR(ZTOI(zp)->i_mode))
vsecp->vsa_aclflags |= ACL_IS_DIR;
}
return (0);
}
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
int error;
ulong_t mask;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr,
zfs_init_idmap))) {
return (error);
}
mutex_enter(&zp->z_acl_lock);
error = zfs_getacl_impl(zp, vsecp, B_FALSE, cr);
mutex_exit(&zp->z_acl_lock);
return (0);
return (error);
}
int
@ -2119,12 +2146,11 @@ zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode,
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
static int
zfs_setacl_impl(znode_t *zp, vsecattr_t *vsecp, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
@ -2132,16 +2158,6 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
zfs_init_idmap)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
@ -2156,9 +2172,6 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
@ -2189,12 +2202,15 @@ top:
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
if (error == ERESTART) {
mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
dmu_tx_wait(tx);
dmu_tx_abort(tx);
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
goto top;
}
dmu_tx_abort(tx);
@ -2216,9 +2232,90 @@ top:
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
return (error);
}
/*
 * Set a file's ACL from a caller-supplied vsecattr.
 *
 *	zp		- znode whose ACL is being replaced
 *	vsecp		- new ACL; vsa_mask must include VSA_ACE or VSA_ACECNT
 *	skipaclchk	- skip the ACE_WRITE_ACL access check when B_TRUE
 *	cr		- caller's credentials
 *
 * Returns 0 on success, or:
 *	ENOSYS	- vsa_mask requests nothing this function can set
 *	EPERM	- file is immutable
 *	other	- error from zfs_zaccess() or zfs_setacl_impl()
 *
 * Performs the validation and permission checks, then takes z_acl_lock
 * and z_lock (in that order) around zfs_setacl_impl(), which performs
 * the actual transactional update.
 */
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
	ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
	int error;

	if (mask == 0)
		return (SET_ERROR(ENOSYS));

	if (zp->z_pflags & ZFS_IMMUTABLE)
		return (SET_ERROR(EPERM));

	/* Verify the caller may write the ACL (unless explicitly skipped). */
	if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
	    zfs_init_idmap))) {
		return (error);
	}

	mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);
	error = zfs_setacl_impl(zp, vsecp, cr);
	mutex_exit(&zp->z_lock);
	mutex_exit(&zp->z_acl_lock);

	return (error);
}
/*
 * Replace a file's ACL with the trivial ACL implied by its current
 * mode bits, i.e. "strip" any extended NFSv4 ACL entries.
 *
 *	zp	- znode whose ACL is being stripped
 *	cr	- caller's credentials
 *
 * Returns 0 on success (including the no-op case where the ACL is
 * already trivial), EPERM for an immutable file, or an error from
 * zfs_enter()/zfs_verify_zp()/zfs_zaccess()/zfs_getacl_impl()/
 * zfs_setacl_impl().
 */
int
zfs_stripacl(znode_t *zp, cred_t *cr)
{
	int error;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	/* Request everything zfs_getacl_impl() can fill in. */
	vsecattr_t vsec = {
		.vsa_mask = VSA_ACE_ALLTYPES | VSA_ACECNT | VSA_ACE |
		    VSA_ACE_ACLFLAGS
	};

	if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
		return (error);

	if ((error = zfs_verify_zp(zp)) != 0)
		goto done;

	if (zp->z_pflags & ZFS_IMMUTABLE) {
		error = SET_ERROR(EPERM);
		goto done;
	}

	/* Stripping rewrites the ACL, so require ACE_WRITE_ACL. */
	if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, B_FALSE, cr,
	    zfs_init_idmap)))
		goto done;

	if (zp->z_pflags & ZFS_ACL_TRIVIAL) {
		// ACL is already stripped. Nothing to do.
		error = 0;
		goto done;
	}

	mutex_enter(&zp->z_acl_lock);

	/*
	 * stripped == B_TRUE makes zfs_getacl_impl() synthesize a trivial
	 * ACL from the current mode instead of reading the stored one.
	 */
	error = zfs_getacl_impl(zp, &vsec, B_TRUE, cr);
	if (error) {
		mutex_exit(&zp->z_acl_lock);
		goto done;
	}
	mutex_enter(&zp->z_lock);
	error = zfs_setacl_impl(zp, &vsec, cr);
	mutex_exit(&zp->z_lock);
	mutex_exit(&zp->z_acl_lock);

	/* zfs_getacl_impl() allocated the ACE buffer; release it. */
	kmem_free(vsec.vsa_aclentp, vsec.vsa_aclentsz);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zfsvfs->z_log, 0);

done:
	zfs_exit(zfsvfs, FTAG);
	return (error);
}
@ -2523,7 +2620,7 @@ zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
S_ISDIR(ZTOI(zp)->i_mode) &&
!S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}

View File

@ -336,9 +336,15 @@ xattr_changed_cb(void *arg, uint64_t newval)
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
#ifdef SB_LARGEXATTR
zfsvfs->z_sb->s_flags &= ~SB_LARGEXATTR;
#endif
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
#ifdef SB_LARGEXATTR
zfsvfs->z_sb->s_flags |= SB_LARGEXATTR;
#endif
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
@ -353,12 +359,17 @@ acltype_changed_cb(void *arg, uint64_t newval)
zfsvfs_t *zfsvfs = arg;
switch (newval) {
case ZFS_ACLTYPE_NFSV4:
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#ifdef SB_NFSV4ACL
zfsvfs->z_sb->s_flags &= ~SB_NFSV4ACL;
#endif
break;
case ZFS_ACLTYPE_POSIX:
#ifdef SB_NFSV4ACL
zfsvfs->z_sb->s_flags &= ~SB_NFSV4ACL;
#endif
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
@ -367,6 +378,13 @@ acltype_changed_cb(void *arg, uint64_t newval)
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
case ZFS_ACLTYPE_NFSV4:
zfsvfs->z_acl_type = ZFS_ACLTYPE_NFSV4;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#ifdef SB_NFSV4ACL
zfsvfs->z_sb->s_flags |= SB_NFSV4ACL;
#endif
break;
default:
break;
}

View File

@ -594,6 +594,22 @@ zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
/*
* For compatibility purposes with data migrated from FreeBSD
* (which will have NFSv4 ACL type), BSD file creation semantics
* are forced rather than System V. Hence on new file creation
* if NFSV4ACL we inherit GID from parent rather than take current
* process GID. This makes S_ISGID on directories a de-facto
* no-op, but we still honor setting / removing it and normal
* inheritance of the bit on new directories in case user changes
* the underlying ACL type.
*/
if ((vap->va_mask & ATTR_MODE) &&
S_ISDIR(ZTOI(dzp)->i_mode) &&
(zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4)) {
vap->va_gid = KGID_TO_SGID(ZTOI(dzp)->i_gid);
}
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
zfs_exit(zfsvfs, FTAG);
@ -1220,6 +1236,22 @@ zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
return (error);
zilog = zfsvfs->z_log;
/*
* For compatibility purposes with data migrated from FreeBSD
* (which will have NFSv4 ACL type), BSD file creation semantics
* are forced rather than System V. Hence on new file creation
* if NFSV4ACL we inherit GID from parent rather than take current
* process GID. This makes S_ISGID on directories a de-facto
* no-op, but we still honor setting / removing it and normal
* inheritance of the bit on new directories in case user changes
* the underlying ACL type.
*/
if ((vap->va_mask & ATTR_MODE) &&
S_ISDIR(ZTOI(dzp)->i_mode) &&
(zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4)) {
vap->va_gid = KGID_TO_SGID(ZTOI(dzp)->i_gid);
}
if (dzp->z_pflags & ZFS_XATTR) {
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(EINVAL));
@ -1966,10 +1998,7 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
goto out3;
}
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
err = SET_ERROR(EPERM);
goto out3;
}
/* ZFS_READONLY will be handled in zfs_zaccess() */
/*
* Verify timestamps doesn't overflow 32 bits.

View File

@ -816,6 +816,7 @@ const struct inode_operations zpl_inode_operations = {
.get_acl = zpl_get_acl,
#endif /* HAVE_GET_INODE_ACL */
#endif /* CONFIG_FS_POSIX_ACL */
.permission = zpl_permission,
};
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
@ -862,6 +863,7 @@ const struct inode_operations zpl_dir_inode_operations = {
.get_acl = zpl_get_acl,
#endif /* HAVE_GET_INODE_ACL */
#endif /* CONFIG_FS_POSIX_ACL */
.permission = zpl_permission,
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
},
.rename2 = zpl_rename2,
@ -909,4 +911,5 @@ const struct inode_operations zpl_special_inode_operations = {
.get_acl = zpl_get_acl,
#endif /* HAVE_GET_INODE_ACL */
#endif /* CONFIG_FS_POSIX_ACL */
.permission = zpl_permission,
};

View File

@ -230,6 +230,9 @@ __zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
case ZFS_ACLTYPE_POSIX:
seq_puts(seq, ",posixacl");
break;
case ZFS_ACLTYPE_NFSV4:
seq_puts(seq, ",nfs4acl");
break;
default:
seq_puts(seq, ",noacl");
break;

View File

@ -80,11 +80,34 @@
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/xvattr.h>
#include <sys/zap.h>
#include <sys/vfs.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#define NFS41ACL_XATTR "system.nfs4_acl_xdr"
static const struct {
int kmask;
int zfsperm;
} mask2zfs[] = {
{ MAY_READ, ACE_READ_DATA },
{ MAY_WRITE, ACE_WRITE_DATA },
{ MAY_EXEC, ACE_EXECUTE },
#ifdef SB_NFSV4ACL
{ MAY_DELETE, ACE_DELETE },
{ MAY_DELETE_CHILD, ACE_DELETE_CHILD },
{ MAY_WRITE_ATTRS, ACE_WRITE_ATTRIBUTES },
{ MAY_WRITE_NAMED_ATTRS, ACE_WRITE_NAMED_ATTRS },
{ MAY_WRITE_ACL, ACE_WRITE_ACL },
{ MAY_WRITE_OWNER, ACE_WRITE_OWNER },
#endif
};
#define POSIX_MASKS (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_OPEN)
#define GENERIC_MASK(mask) ((mask & ~POSIX_MASKS) == 0)
enum xattr_permission {
XAPERM_DENY,
XAPERM_ALLOW,
@ -250,6 +273,14 @@ zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
goto out1;
rw_enter(&zp->z_xattr_lock, RW_READER);
if ((zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4) &&
((zp->z_pflags & ZFS_ACL_TRIVIAL) == 0)) {
error = zpl_xattr_filldir(&xf, NFS41ACL_XATTR,
sizeof (NFS41ACL_XATTR) - 1);
if (error)
goto out;
}
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_list_sa(&xf);
if (error)
@ -1457,6 +1488,449 @@ static xattr_handler_t zpl_xattr_acl_default_handler = {
#endif /* CONFIG_FS_POSIX_ACL */
/*
* zpl_permission() gets called by linux kernel whenever it checks
* inode_permission via inode->i_op->permission. The general preference
* is to defer to the standard in-kernel permission check (generic_permission)
* wherever possible.
*
* https://www.kernel.org/doc/Documentation/filesystems/vfs.txt
*/
/*
 * Returns 0 when access is granted, a negative errno when denied
 * (via -zfs_access()), or -ECHILD to ask the VFS to retry in
 * ref-walk mode.  The signature varies with the kernel's
 * inode_operations->permission prototype.
 */
int
#if defined(HAVE_IOPS_PERMISSION_USERNS)
zpl_permission(struct user_namespace *userns, struct inode *ip, int mask)
#elif defined(HAVE_IOPS_PERMISSION_IDMAP)
zpl_permission(struct mnt_idmap *idmap, struct inode *ip, int mask)
#else
zpl_permission(struct inode *ip, int mask)
#endif
{
	int to_check = 0, i, ret;
	cred_t *cr = NULL;

	/*
	 * If NFSv4 ACLs are not being used, go back to
	 * generic_permission(). If ACL is trivial and the
	 * mask is representable by POSIX permissions, then
	 * also go back to generic_permission().
	 */
	if ((ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_NFSV4) ||
	    ((ITOZ(ip)->z_pflags & ZFS_ACL_TRIVIAL && GENERIC_MASK(mask)))) {
#if (defined(HAVE_IOPS_PERMISSION_USERNS) || \
	defined(HAVE_IOPS_PERMISSION_IDMAP))
		return (generic_permission(zfs_init_idmap, ip, mask));
#else
		return (generic_permission(ip, mask));
#endif
	}

	/* Translate kernel MAY_* bits into NFSv4 ACE_* permission bits. */
	for (i = 0; i < ARRAY_SIZE(mask2zfs); i++) {
		if (mask & mask2zfs[i].kmask) {
			to_check |= mask2zfs[i].zfsperm;
		}
	}

	/*
	 * We're being asked to check something that doesn't contain an
	 * NFSv4 ACE. Pass back to default kernel permissions check.
	 */
	if (to_check == 0) {
#if (defined(HAVE_IOPS_PERMISSION_USERNS) || \
	defined(HAVE_IOPS_PERMISSION_IDMAP))
		return (generic_permission(zfs_init_idmap, ip, mask));
#else
		return (generic_permission(ip, mask));
#endif
	}

	/*
	 * Fast path for execute checks. Do not use zfs_fastaccesschk_execute
	 * since it may end up granting execute access in presence of explicit
	 * deny entry for user / group, and it also read the ZFS ACL
	 * (non-cached) which we wish to avoid in RCU.
	 */
	if ((to_check == ACE_EXECUTE) &&
	    (ITOZ(ip)->z_pflags & ZFS_NO_EXECS_DENIED))
		return (0);

	/*
	 * inode permission operation may be called in rcu-walk mode
	 * (mask & MAY_NOT_BLOCK). If in rcu-walk mode, the filesystem must
	 * check the permission without blocking or storing to the inode.
	 *
	 * If a situation is encountered that rcu-walk cannot handle,
	 * return -ECHILD and it will be called again in ref-walk mode.
	 */
	cr = CRED();
	crhold(cr);

	/*
	 * There are some situations in which capabilities
	 * may allow overriding the DACL.  Skip reading ACL if requested
	 * permissions are fully satisfied by capabilities.
	 */

	/*
	 * CAP_DAC_OVERRIDE may override RWX on directories, and RW on other
	 * files. Execute may also be overriden if at least one execute bit is
	 * set. This behavior is not formally documented, but is covered in
	 * commit messages and code comments in namei.c.
	 *
	 * CAP_DAC_READ_SEARCH may bypass file read permission checks and
	 * directory read and execute permission checks.
	 */
	if (S_ISDIR(ip->i_mode)) {
#ifdef SB_NFSV4ACL
		if (!(mask & (MAY_WRITE | NFS41ACL_WRITE_ALL))) {
#else
		if (!(mask & MAY_WRITE)) {
#endif
			if (capable(CAP_DAC_READ_SEARCH)) {
				crfree(cr);
				return (0);
			}
		}
		if (capable(CAP_DAC_OVERRIDE)) {
			crfree(cr);
			return (0);
		}
	}

	/* CAP_DAC_OVERRIDE on non-exec checks, or when some exec bit is set. */
	if (!(mask & MAY_EXEC) || (ip->i_mode & S_IXUGO)) {
		if (capable(CAP_DAC_OVERRIDE)) {
			crfree(cr);
			return (0);
		}
	}

	if ((to_check == ACE_READ_DATA) &&
	    capable(CAP_DAC_READ_SEARCH)) {
		crfree(cr);
		return (0);
	}

	/* Reading the on-disk ACL may block; defer to ref-walk mode. */
	if (mask & MAY_NOT_BLOCK) {
		crfree(cr);
		return (-ECHILD);
	}

	/* zfs_access() returns positive errno; xlate to kernel convention. */
	ret = -zfs_access(ITOZ(ip), to_check, V_ACE_MASK, cr);
	crfree(cr);

	return (ret);
}
#define ACEI4_SPECIAL_WHO 1
#define ACE4_SPECIAL_OWNER 1
#define ACE4_SPECIAL_GROUP 2
#define ACE4_SPECIAL_EVERYONE 3
#define NFS41ACL_MAX_ACES 128
#define NFS41_FLAGS (ACE_DIRECTORY_INHERIT_ACE| \
ACE_FILE_INHERIT_ACE| \
ACE_NO_PROPAGATE_INHERIT_ACE| \
ACE_INHERIT_ONLY_ACE| \
ACE_INHERITED_ACE| \
ACE_IDENTIFIER_GROUP)
/*
* Macros for sanity checks related to XDR and ACL buffer sizes
*/
#define ACE4ELEM 5
#define ACE4SIZE (ACE4ELEM * sizeof (u32))
#define XDRBASE (2 * sizeof (u32))
#define ACES_TO_XDRSIZE(x) (XDRBASE + (x * ACE4SIZE))
#define XDRSIZE_TO_ACES(x) ((x - XDRBASE) / ACE4SIZE)
#define XDRSIZE_IS_VALID(x) ((x >= XDRBASE) && \
(((x - XDRBASE) % ACE4SIZE) == 0))
/*
 * xattr list callback: advertise "system.nfs4_acl_xdr" in listxattr()
 * output, but only on datasets whose acltype is nfsv4.  Returns the
 * number of bytes the name occupies (including the NUL terminator),
 * copying it into `list' when a large enough buffer is supplied, or 0
 * when the xattr should not be listed.
 */
static int
__zpl_xattr_nfs41acl_list(struct inode *ip, char *list, size_t list_size,
    const char *name, size_t name_len)
{
	if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_NFSV4)
		return (0);

	/* sizeof () includes the trailing NUL, matching the copy below. */
	if (list != NULL && sizeof (NFS41ACL_XATTR) <= list_size)
		memcpy(list, NFS41ACL_XATTR, sizeof (NFS41ACL_XATTR));

	return (sizeof (NFS41ACL_XATTR));
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_nfs41acl_list);
/*
 * Encode one ace_t into five XDR u32 words: type, flags, special-who
 * indicator, access mask, who-id.  owner@/group@/everyone@ entries are
 * encoded as "special who" ids; ordinary user/group entries carry the
 * numeric id from a_who.  Returns 0, or -EINVAL for an unrecognized
 * ACE_TYPE_FLAGS combination.
 */
static int
acep_to_nfsace4i(const ace_t *acep, u32 *xattrbuf)
{
	const int entry_type = acep->a_flags & ACE_TYPE_FLAGS;
	u32 who_id = 0, special = 0;

	if (entry_type == ACE_OWNER) {
		special = ACEI4_SPECIAL_WHO;
		who_id = ACE4_SPECIAL_OWNER;
	} else if (entry_type == (ACE_GROUP | ACE_IDENTIFIER_GROUP)) {
		special = ACEI4_SPECIAL_WHO;
		who_id = ACE4_SPECIAL_GROUP;
	} else if (entry_type == ACE_EVERYONE) {
		special = ACEI4_SPECIAL_WHO;
		who_id = ACE4_SPECIAL_EVERYONE;
	} else if (entry_type == ACE_IDENTIFIER_GROUP || entry_type == 0) {
		who_id = acep->a_who;
	} else {
		dprintf("Unknown ACE_TYPE_FLAG 0x%08x\n",
		    acep->a_flags & ACE_TYPE_FLAGS);
		return (-EINVAL);
	}

	/* Wire layout: type, inheritance flags, special flag, mask, who. */
	xattrbuf[0] = htonl(acep->a_type);
	xattrbuf[1] = htonl(acep->a_flags & NFS41_FLAGS);
	xattrbuf[2] = htonl(special);
	xattrbuf[3] = htonl(acep->a_access_mask);
	xattrbuf[4] = htonl(who_id);

	return (0);
}
/*
 * Serialize a vsecattr ACL into the NFSv4.1 xattr XDR layout: two u32
 * header words (acl flags, ACE count) followed by ACE4ELEM u32 words
 * per ACE.  The caller must size xattrbuf with ACES_TO_XDRSIZE().
 * Returns 0, or a negative errno from acep_to_nfsace4i().
 */
static int
zfsacl_to_nfsacl41i(const vsecattr_t vsecp, u32 *xattrbuf)
{
	int i, error = 0;
	ace_t *acep = NULL;

	*xattrbuf++ = htonl(vsecp.vsa_aclflags);
	*xattrbuf++ = htonl(vsecp.vsa_aclcnt);

	for (i = 0; i < vsecp.vsa_aclcnt; i++, xattrbuf += ACE4ELEM) {
		/* vsa_aclentp is a packed array of ace_t entries. */
		acep = vsecp.vsa_aclentp + (i * sizeof (ace_t));
		error = acep_to_nfsace4i(acep, xattrbuf);
		if (error)
			break;
	}

	return (error);
}
/*
 * Decode one XDR-encoded NFSv4.1 ACE (ACE4ELEM u32 words) from
 * xattrbuf into *acep.
 *
 * A "special who" entry (owner@/group@/everyone@) maps onto the
 * corresponding ACE_* flag with a_who set to -1; otherwise a_who
 * takes the numeric id from the buffer.  Returns 0, or -EINVAL for
 * an unknown special-who id.
 */
static int
nfsace4i_to_acep(const u32 *xattrbuf, ace_t *acep)
{
	u32 iflag, id;

	acep->a_type = ntohl(*(xattrbuf++));
	/* Only the inheritance/group flags survive the wire format. */
	acep->a_flags = ntohl(*(xattrbuf++)) & NFS41_FLAGS;
	iflag = ntohl(*(xattrbuf++));
	acep->a_access_mask = ntohl(*(xattrbuf++));
	id = ntohl(*(xattrbuf++));

	if (iflag & ACEI4_SPECIAL_WHO) {
		switch (id) {
		case ACE4_SPECIAL_OWNER:
			acep->a_flags |= ACE_OWNER;
			acep->a_who = -1;
			break;
		case ACE4_SPECIAL_GROUP:
			acep->a_flags |= (ACE_GROUP | ACE_IDENTIFIER_GROUP);
			acep->a_who = -1;
			break;
		case ACE4_SPECIAL_EVERYONE:
			acep->a_flags |= ACE_EVERYONE;
			acep->a_who = -1;
			break;
		default:
			dprintf("Unknown id 0x%08x\n", id);
			return (-EINVAL);
		}
	} else {
		acep->a_who = id;
	}

	return (0);
}
/*
 * Decode an XDR NFSv4.1 ACL buffer (flags word, count word, then ACEs)
 * into *vsecp.  On success vsa_aclentp is a kmem_alloc'd array of
 * vsa_aclcnt ace_t entries (vsa_aclentsz bytes) the caller must
 * kmem_free(); on error nothing is left allocated.
 *
 * Returns 0, -ERANGE when the embedded ACE count does not match bufsz,
 * or a negative errno from nfsace4i_to_acep().
 */
static int
nfsacl41i_to_zfsacl(const u32 *xattrbuf, size_t bufsz, vsecattr_t *vsecp)
{
	int error;
	int i;

	vsecp->vsa_aclflags = ntohl(*(xattrbuf++));
	vsecp->vsa_aclcnt = ntohl(*(xattrbuf++));
	bufsz -= (2 * sizeof (u32));
	vsecp->vsa_aclentsz = vsecp->vsa_aclcnt * sizeof (ace_t);

	/*
	 * The remaining buffer must hold exactly vsa_aclcnt encoded ACEs.
	 * NOTE(review): the message says "larger than", but this fires on
	 * any count/size mismatch, including a smaller count.
	 */
	if (bufsz != (vsecp->vsa_aclcnt * ACE4SIZE)) {
		dprintf("Embedded ACL count [%d] is larger than "
		    "can fit in provided buffer size: %zu\n",
		    vsecp->vsa_aclcnt, bufsz);
		return (-ERANGE);
	}

	vsecp->vsa_aclentp = kmem_alloc(vsecp->vsa_aclentsz, KM_SLEEP);

	for (i = 0; i < vsecp->vsa_aclcnt; i++, xattrbuf += ACE4ELEM) {
		ace_t *acep = vsecp->vsa_aclentp + (i * sizeof (ace_t));

		error = nfsace4i_to_acep(xattrbuf, acep);
		if (error) {
			/* Release the partially-filled array on failure. */
			kmem_free(vsecp->vsa_aclentp, vsecp->vsa_aclentsz);
			return (error);
		}
	}

	return (0);
}
/*
 * Read a file's NFSv4 ACL and encode it into the XDR wire format used
 * by the "system.nfs4_acl_xdr" xattr.
 *
 *	ip	- inode being queried
 *	name	- xattr name suffix (must be "" without HAVE_XATTR_HANDLER_NAME)
 *	buffer	- destination; size 0 queries the required buffer size
 *	size	- destination capacity in bytes
 *
 * Returns the number of bytes (that would be) written, or a negative
 * errno: -EOPNOTSUPP when acltype is not nfsv4, -EINVAL for an
 * undersized buffer, -ERANGE when the ACL does not fit, -ENODATA for
 * an empty ACL, or an error from zfs_getsecattr().
 *
 * Fix: the size == 0 path previously returned on zfs_getsecattr()
 * failure before dropping the credential hold taken just above it,
 * leaking a cred_t reference; crfree() is now unconditional.
 */
static int
__zpl_xattr_nfs41acl_get(struct inode *ip, const char *name,
    void *buffer, size_t size)
{
	vsecattr_t vsecp = {0};
	cred_t *cr = CRED();
	int ret, fl;
	size_t xdr_size;

	/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
	if (strcmp(name, "") != 0)
		return (-EINVAL);
#endif
	if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_NFSV4)
		return (-EOPNOTSUPP);

	if (size == 0) {
		/*
		 * API user may send 0 size so that we
		 * return size of buffer needed for ACL.
		 */
		crhold(cr);
		vsecp.vsa_mask = VSA_ACECNT;
		ret = -zfs_getsecattr(ITOZ(ip), &vsecp, ATTR_NOACLCHECK, cr);
		crfree(cr);	/* drop the hold on error paths too */
		if (ret) {
			return (ret);
		}
		ret = ACES_TO_XDRSIZE(vsecp.vsa_aclcnt);
		return (ret);
	}

	/* Buffer must at least fit the XDR header plus one ACE. */
	if (size < ACES_TO_XDRSIZE(1)) {
		return (-EINVAL);
	}

	vsecp.vsa_mask = VSA_ACE_ALLTYPES | VSA_ACECNT | VSA_ACE |
	    VSA_ACE_ACLFLAGS;

	crhold(cr);
	/* DAC override capability bypasses the ACL read-permission check. */
	fl = capable(CAP_DAC_OVERRIDE) ? ATTR_NOACLCHECK : 0;
	ret = -zfs_getsecattr(ITOZ(ip), &vsecp, fl, cr);
	crfree(cr);
	if (ret) {
		return (ret);
	}

	if (vsecp.vsa_aclcnt == 0) {
		ret = -ENODATA;
		goto nfs4acl_get_out;
	}

	xdr_size = ACES_TO_XDRSIZE(vsecp.vsa_aclcnt);
	if (xdr_size > size) {
		ret = -ERANGE;
		goto nfs4acl_get_out;
	}

	ret = zfsacl_to_nfsacl41i(vsecp, (u32 *)buffer);
	if (ret == 0)
		ret = xdr_size;

nfs4acl_get_out:
	/* ACE buffer was allocated by zfs_getsecattr(); release it. */
	kmem_free(vsecp.vsa_aclentp, vsecp.vsa_aclentsz);

	return (ret);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_nfs41acl_get);
/*
 * Write (or strip) a file's NFSv4 ACL via the "system.nfs4_acl_xdr"
 * xattr.
 *
 *	mnt_ns	- idmap from the VFS, unused
 *	ip	- inode whose ACL is being set
 *	name	- xattr name suffix (checked by xattr_resolve_name)
 *	value	- XDR-encoded ACL; NULL with size 0 means "remove",
 *		  which strips the ACL back to trivial
 *	size	- length of value in bytes
 *	flags	- xattr set flags, unused here
 *
 * Returns 0 or a negative errno.
 *
 * Fixes:
 * - Validate XDRSIZE_IS_VALID(size) before XDRSIZE_TO_ACES(size):
 *   for size < XDRBASE the subtraction underflowed (size_t), so a
 *   malformed buffer was reported as -E2BIG instead of -EINVAL.
 * - Negate the zfs_stripacl() result: it returns a positive errno
 *   (SET_ERROR), while this handler's convention — as with the
 *   -zfs_setsecattr() call below — is a negative errno.
 */
static int
__zpl_xattr_nfs41acl_set(zidmap_t *mnt_ns,
    struct inode *ip, const char *name,
    const void *value, size_t size, int flags)
{
	(void) mnt_ns;
	cred_t *cr = CRED();
	int error, fl, naces;
	vsecattr_t vsecp = { .vsa_mask = (VSA_ACE | VSA_ACE_ACLFLAGS) };

	if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_NFSV4)
		return (-EOPNOTSUPP);

	/* xattr removal strips the ACL down to the trivial one. */
	if (value == NULL && size == 0) {
		crhold(cr);
		error = -zfs_stripacl(ITOZ(ip), cr);
		crfree(cr);
		return (error);
	}

	/* xdr data is 4-byte aligned */
	if (((ulong_t)value % 4) != 0) {
		return (-EINVAL);
	}

	/*
	 * Check the size is well-formed before deriving the ACE count
	 * from it; XDRSIZE_TO_ACES() underflows for size < XDRBASE.
	 */
	if (!XDRSIZE_IS_VALID(size)) {
		return (-EINVAL);
	}

	naces = XDRSIZE_TO_ACES(size);
	if (naces > NFS41ACL_MAX_ACES) {
		return (-E2BIG);
	}

	error = nfsacl41i_to_zfsacl((u32 *)value, size, &vsecp);
	if (error)
		return (error);

	crhold(cr);
	/* DAC override capability bypasses the ACL write-permission check. */
	fl = capable(CAP_DAC_OVERRIDE) ? ATTR_NOACLCHECK : 0;
	error = -zfs_setsecattr(ITOZ(ip), &vsecp, fl, cr);
	crfree(cr);

	/* nfsacl41i_to_zfsacl() allocated the ACE array on success. */
	kmem_free(vsecp.vsa_aclentp, vsecp.vsa_aclentsz);

	return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_nfs41acl_set);
/*
* ACL access xattr namespace handlers.
*
* Use .name instead of .prefix when available. xattr_resolve_name will match
* whole name and reject anything that has .name only as prefix.
*/
/*
 * Handler wiring for the NFSv4 ACL xattr: exact-match .name is used
 * when the kernel supports it, otherwise .prefix (matched as a prefix
 * by older kernels).
 */
xattr_handler_t zpl_xattr_nfs41acl_handler =
{
#ifdef HAVE_XATTR_HANDLER_NAME
	.name = NFS41ACL_XATTR,
#else
	.prefix = NFS41ACL_XATTR,
#endif
	.list = zpl_xattr_nfs41acl_list,
	.get = zpl_xattr_nfs41acl_get,
	.set = zpl_xattr_nfs41acl_set,
};
xattr_handler_t *zpl_xattr_handlers[] = {
&zpl_xattr_security_handler,
&zpl_xattr_trusted_handler,
@ -1465,6 +1939,7 @@ xattr_handler_t *zpl_xattr_handlers[] = {
&zpl_xattr_acl_access_handler,
&zpl_xattr_acl_default_handler,
#endif /* CONFIG_FS_POSIX_ACL */
&zpl_xattr_nfs41acl_handler,
NULL
};
@ -1493,6 +1968,10 @@ zpl_xattr_handler(const char *name)
return (&zpl_xattr_acl_default_handler);
#endif /* CONFIG_FS_POSIX_ACL */
if (strncmp(name, NFS41ACL_XATTR,
sizeof (NFS41ACL_XATTR)) == 0)
return (&zpl_xattr_nfs41acl_handler);
return (NULL);
}

View File

@ -436,13 +436,7 @@ zfs_prop_init(void)
PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"discard | groupmask | passthrough | restricted", "ACLMODE",
acl_mode_table, sfeatures);
zprop_register_index(ZFS_PROP_ACLTYPE, "acltype",
#ifdef __linux__
/* Linux doesn't natively support ZFS's NFSv4-style ACLs. */
ZFS_ACLTYPE_OFF,
#else
ZFS_ACLTYPE_NFSV4,
#endif
zprop_register_index(ZFS_PROP_ACLTYPE, "acltype", ZFS_ACLTYPE_NFSV4,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"off | nfsv4 | posix", "ACLTYPE", acltype_table, sfeatures);
zprop_register_index(ZFS_PROP_ACLINHERIT, "aclinherit",

View File

@ -27,11 +27,8 @@ props['checksum'] = {{'on', 'default'}, {nil, nil}}
props['dedup'] = {{'off', 'default'}, {nil, nil}}
props['compression'] = {{'off', 'default'}, {nil, nil}}
props['snapdir'] = {{'hidden', 'default'}, {nil, nil}}
if os == "Linux" then
props['acltype'] = {{'off', 'default'}, {'off', 'default'}}
elseif os == "FreeBSD" then
props['aclmode'] = {{'discard', 'default'}, {'discard', 'default'}}
end
props['acltype'] = {{'nfsv4', 'default'}, {'nfsv4', 'default'}}
props['aclmode'] = {{'discard', 'default'}, {'discard', 'default'}}
props['aclinherit'] = {{'restricted','default'}, {nil, nil}}
props['copies'] = {{'1', 'default'}, {nil, nil}}
props['primarycache'] = {{'all', 'default'}, {'all', 'default'}}

View File

@ -62,13 +62,12 @@ typeset -r uint64_max="18446744073709551615"
typeset zfs_props=("type" used available creation volsize referenced \
compressratio mounted origin recordsize quota reservation mountpoint \
sharenfs checksum compression atime devices exec readonly setuid \
snapdir aclinherit canmount primarycache secondarycache version \
usedbychildren usedbydataset usedbyrefreservation usedbysnapshots \
filesystem_limit snapshot_limit filesystem_count snapshot_count)
snapdir aclinherit aclmode acltype canmount primarycache secondarycache \
version usedbychildren usedbydataset usedbyrefreservation usedbysnapshots)
if is_freebsd; then
typeset zfs_props_os=(jailed aclmode)
typeset zfs_props_os=(jailed)
else
typeset zfs_props_os=(zoned acltype)
typeset zfs_props_os=(zoned)
fi
typeset userquota_props=(userquota@root groupquota@root userused@root \
groupused@root)

View File

@ -50,12 +50,12 @@ typeset options=(" " p r H)
typeset zfs_props=("type" used available creation volsize referenced \
compressratio mounted origin recordsize quota reservation mountpoint \
sharenfs checksum compression atime devices exec readonly setuid \
snapdir aclinherit canmount primarycache secondarycache version \
usedbychildren usedbydataset usedbyrefreservation usedbysnapshots)
snapdir aclinherit aclmode acltype canmount primarycache secondarycache \
version usedbychildren usedbydataset usedbyrefreservation usedbysnapshots)
if is_freebsd; then
typeset zfs_props_os=(jailed aclmode)
typeset zfs_props_os=(jailed)
else
typeset zfs_props_os=(zoned acltype)
typeset zfs_props_os=(zoned)
fi
typeset userquota_props=(userquota@root groupquota@root userused@root \
groupused@root)

View File

@ -48,12 +48,12 @@ typeset val_opts=(p r H)
typeset v_props=(type used available creation volsize referenced compressratio \
mounted origin recordsize quota reservation mountpoint sharenfs checksum \
compression atime devices exec readonly setuid snapdir version \
aclinherit canmount primarycache secondarycache \
aclinherit aclmode acltype canmount primarycache secondarycache \
usedbychildren usedbydataset usedbyrefreservation usedbysnapshots)
if is_freebsd; then
typeset v_props_os=(jailed aclmode)
typeset v_props_os=(jailed)
else
typeset v_props_os=(zoned acltype)
typeset v_props_os=(zoned)
fi
typeset userquota_props=(userquota@root groupquota@root userused@root \
groupused@root)

View File

@ -53,13 +53,13 @@ set -A options " " "-r" "-H" "-p" "-rHp" "-o name" \
set -A props type used available creation volsize referenced compressratio \
mounted origin recordsize quota reservation mountpoint sharenfs \
checksum compression atime devices exec readonly setuid snapdir \
aclinherit canmount primarycache secondarycache version \
aclinherit aclmode acltype canmount primarycache secondarycache \
usedbychildren usedbydataset usedbyrefreservation usedbysnapshots \
userquota@root groupquota@root userused@root groupused@root
if is_freebsd; then
set -A props ${props[*]} jailed aclmode
set -A props ${props[*]} jailed
else
set -A props ${props[*]} zoned acltype
set -A props ${props[*]} zoned
fi
set -A dataset $TESTPOOL/$TESTCTR $TESTPOOL/$TESTFS $TESTPOOL/$TESTVOL \

View File

@ -52,16 +52,16 @@ fi
log_assert "'zfs get -d <n>' should get expected output."
log_onexit depth_fs_cleanup
set -A all_props type used available creation volsize referenced \
compressratio mounted origin recordsize quota reservation mountpoint \
sharenfs checksum compression atime devices exec readonly setuid \
snapdir aclinherit canmount primarycache secondarycache version \
set -A all_props type used available creation volsize referenced compressratio \
mounted origin recordsize quota reservation mountpoint sharenfs \
checksum compression atime devices exec readonly setuid snapdir \
aclinherit aclmode acltype canmount primarycache secondarycache \
usedbychildren usedbydataset usedbyrefreservation usedbysnapshots \
userquota@root groupquota@root userused@root groupused@root
if is_freebsd; then
set -A all_props ${all_props[*]} jailed aclmode
set -A all_props ${all_props[*]} jailed
else
set -A all_props ${all_props[*]} zoned acltype
set -A all_props ${all_props[*]} zoned
fi
depth_fs_setup

View File

@ -45,6 +45,8 @@
verify_runnable "global"
log_unsupported "SKIP because auto partitioning removed for SCALE"
function cleanup
{
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1

View File

@ -38,6 +38,8 @@
verify_runnable "global"
log_unsupported "SKIP because auto partitioning removed for SCALE"
function cleanup
{
log_must zinject -c all

View File

@ -50,6 +50,7 @@
verify_runnable "both"
if is_linux; then
log_unsupported "SKIP because auto partitioning removed for SCALE"
# Add one 512b scsi_debug device (4Kn would generate IO errors)
# NOTE: must be larger than other "file" vdevs and minimum SPA devsize:
# add 32m of fudge

View File

@ -49,6 +49,8 @@
verify_runnable "both"
log_unsupported "SKIP because auto partitioning removed for SCALE"
if ! is_physical_device $DISKS; then
log_unsupported "Unsupported disks for this test."
fi

View File

@ -36,6 +36,8 @@
verify_runnable "global"
log_unsupported "SKIP: nfsv4 acls incompatible with id-mapped mounts for now."
export WORKDIR=$TESTDIR/idmap_test
export IDMAPDIR=$TESTDIR/idmap_dest

View File

@ -37,6 +37,8 @@
verify_runnable "global"
log_unsupported "SKIP: nfsv4 acls incompatible with id-mapped mounts for now."
export WORKDIR=$TESTDIR/idmap_test
export IDMAPDIR=$TESTDIR/idmap_dest

View File

@ -39,6 +39,8 @@
verify_runnable "global"
log_unsupported "SKIP: nfsv4 acls incompatible with id-mapped mounts for now."
export WORKDIR=$TESTDIR/idmap_test
export IDMAPDIR=$TESTDIR/idmap_dest

View File

@ -38,6 +38,8 @@
verify_runnable "global"
log_unsupported "SKIP: nfsv4 acls incompatible with id-mapped mounts for now."
export WORKDIR=$TESTDIR/idmap_test
export IDMAPDIR=$TESTDIR/idmap_dest

View File

@ -41,6 +41,8 @@
verify_runnable "global"
log_unsupported "SKIP: nfsv4 acls incompatible with id-mapped mounts for now."
export WORKDIR=$TESTDIR/idmap_test
export IDMAPDIR=$TESTDIR/idmap_dest

View File

@ -373,6 +373,8 @@ function scan_state { #state-file
#
set -A prop "checksum" "" \
"compression" "" \
"aclmode" "" \
"acltype" "" \
"atime" "" \
"sharenfs" "" \
"recordsize" "recsize" \
@ -386,13 +388,21 @@ set -A prop "checksum" "" \
# above must have a corresponding entry in the two arrays below.
#
set -A def_val "on" "on" "on" \
set -A def_val "on" \
"on" \
"discard" \
"nfsv4" \
"on" \
"off" "" \
"hidden" \
"off" \
"all"
set -A local_val "off" "off" "off" \
set -A local_val "off" \
"off" \
"groupmask" \
"off" \
"off" \
"on" "" \
"visible" \
"off" \
@ -401,15 +411,6 @@ set -A local_val "off" "off" "off" \
#
# Add system specific values
#
if is_linux; then
prop+=("acltype" "")
def_val+=("off")
local_val+=("off")
else
prop+=("aclmode" "")
def_val+=("discard")
local_val+=("groupmask")
fi
if is_illumos; then
prop+=("mountpoint" "")
def_val+=("")