/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 */

/*
 * This file contains the functions which analyze the status of a pool. This
 * includes both the status of an active pool, as well as the status of
 * exported pools. Returns one of the ZPOOL_STATUS_* defines describing the
 * status of the pool. This status is independent (to a certain degree) from
 * the state of the pool. A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data. The status
 * describes the overall status of devices. A pool that is online can still
 * have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID. The explanation
 * is left up to the caller, depending on whether it is a live pool or an
 * import.
 */
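
/*
 * Example (an illustrative sketch, not used by the library itself): a caller
 * holding an open pool handle 'zhp' (obtained elsewhere) would typically
 * fetch the status and, when one exists, the FMA message ID for further
 * explanation:
 *
 *	const char *msgid;
 *	zpool_errata_t errata;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid, &errata);
 *	if (status != ZPOOL_STATUS_OK && msgid != NULL)
 *		(void) printf("status %d, see message ID %s\n", status, msgid);
 */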
#include <libzfs.h>
#include <libzutil.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/systeminfo.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
 * in include/libzfs.h. Note that there are some status results which go past
 * the end of this table, and hence have no associated message ID.
 */
static const char *const zfs_msgid_table[] = {
	"ZFS-8000-14",	/* ZPOOL_STATUS_CORRUPT_CACHE */
	"ZFS-8000-2Q",	/* ZPOOL_STATUS_MISSING_DEV_R */
	"ZFS-8000-3C",	/* ZPOOL_STATUS_MISSING_DEV_NR */
	"ZFS-8000-4J",	/* ZPOOL_STATUS_CORRUPT_LABEL_R */
	"ZFS-8000-5E",	/* ZPOOL_STATUS_CORRUPT_LABEL_NR */
	"ZFS-8000-6X",	/* ZPOOL_STATUS_BAD_GUID_SUM */
	"ZFS-8000-72",	/* ZPOOL_STATUS_CORRUPT_POOL */
	"ZFS-8000-8A",	/* ZPOOL_STATUS_CORRUPT_DATA */
	"ZFS-8000-9P",	/* ZPOOL_STATUS_FAILING_DEV */
	"ZFS-8000-A5",	/* ZPOOL_STATUS_VERSION_NEWER */
	"ZFS-8000-EY",	/* ZPOOL_STATUS_HOSTID_MISMATCH */
	"ZFS-8000-EY",	/* ZPOOL_STATUS_HOSTID_ACTIVE */
	"ZFS-8000-EY",	/* ZPOOL_STATUS_HOSTID_REQUIRED */
	"ZFS-8000-HC",	/* ZPOOL_STATUS_IO_FAILURE_WAIT */
	"ZFS-8000-JQ",	/* ZPOOL_STATUS_IO_FAILURE_CONTINUE */
	"ZFS-8000-MM",	/* ZPOOL_STATUS_IO_FAILURE_MMP */
	"ZFS-8000-K4",	/* ZPOOL_STATUS_BAD_LOG */
	"ZFS-8000-ER",	/* ZPOOL_STATUS_ERRATA */
	/*
	 * The following results have no message ID.
	 *	ZPOOL_STATUS_UNSUP_FEAT_READ
	 *	ZPOOL_STATUS_UNSUP_FEAT_WRITE
	 *	ZPOOL_STATUS_FAULTED_DEV_R
	 *	ZPOOL_STATUS_FAULTED_DEV_NR
	 *	ZPOOL_STATUS_VERSION_OLDER
	 *	ZPOOL_STATUS_FEAT_DISABLED
	 *	ZPOOL_STATUS_RESILVERING
	 *	ZPOOL_STATUS_OFFLINE_DEV
	 *	ZPOOL_STATUS_REMOVED_DEV
	 *	ZPOOL_STATUS_REBUILDING
	 *	ZPOOL_STATUS_REBUILD_SCRUB
	 *	ZPOOL_STATUS_COMPATIBILITY_ERR
	 *	ZPOOL_STATUS_INCOMPATIBLE_FEAT
	 *	ZPOOL_STATUS_OK
	 */
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
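
/*
 * Illustrative note: zpool_status_t values index this table directly (see
 * zpool_get_status() below), so zfs_msgid_table[ZPOOL_STATUS_FAILING_DEV]
 * is "ZFS-8000-9P"; any status value >= NMSGID has no associated message ID.
 */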

/*
 * Vdev status predicates. Each returns nonzero if the leaf vdev's stats match
 * the condition of interest; they are passed as 'func' to find_vdev_problem()
 * below.
 */
static int
vdev_missing(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;
	return (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_OPEN_FAILED);
}

static int
vdev_faulted(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;
	return (vs->vs_state == VDEV_STATE_FAULTED);
}

static int
vdev_errors(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;
	return (vs->vs_state == VDEV_STATE_DEGRADED ||
	    vs->vs_read_errors != 0 || vs->vs_write_errors != 0 ||
	    vs->vs_checksum_errors != 0);
}

static int
vdev_broken(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;
	return (vs->vs_state == VDEV_STATE_CANT_OPEN);
}

static int
vdev_offlined(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;
	return (vs->vs_state == VDEV_STATE_OFFLINE);
}

static int
vdev_removed(vdev_stat_t *vs, uint_t vsc)
{
	(void) vsc;
	return (vs->vs_state == VDEV_STATE_REMOVED);
}

static int
vdev_non_native_ashift(vdev_stat_t *vs, uint_t vsc)
{
	if (getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") != NULL)
		return (0);

	return (VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift);
}

/*
 * Detect if any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(vdev_stat_t *, uint_t),
    boolean_t ignore_replacing)
{
	nvlist_t **child;
	uint_t c, children;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again. We'll pick up the fact that a resilver is happening
	 * later.
	 */
	if (ignore_replacing == B_TRUE) {
		char *type = fnvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE);
		if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
			return (B_FALSE);
	}

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	} else {
		uint_t vsc;
		vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(
		    vdev, ZPOOL_CONFIG_VDEV_STATS, &vsc);
		if (func(vs, vsc) != 0)
			return (B_TRUE);
	}

	/*
	 * Check any L2 cache devs
	 */
	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find. In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering or rebuilding devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport,
    zpool_errata_t *erratap, const char *compat)
{
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t suspended;
	uint64_t hostid = 0;
	uint64_t errata = 0;
	unsigned long system_hostid = get_system_hostid();

	uint64_t version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
	    ZPOOL_CONFIG_VDEV_TREE);
	vdev_stat_t *vs = (vdev_stat_t *)fnvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_VDEV_STATS, &vsc);
	uint64_t stateval = fnvlist_lookup_uint64(config,
	    ZPOOL_CONFIG_POOL_STATE);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps != NULL && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Currently rebuilding a vdev, check top-level vdevs.
	 */
	vdev_rebuild_stat_t *vrs = NULL;
	nvlist_t **child;
	uint_t c, i, children;
	uint64_t rebuild_end_time = 0;
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS,
			    (uint64_t **)&vrs, &i) == 0) && (vrs != NULL)) {
				uint64_t state = vrs->vrs_state;

				if (state == VDEV_REBUILD_ACTIVE) {
					return (ZPOOL_STATUS_REBUILDING);
				} else if (state == VDEV_REBUILD_COMPLETE &&
				    vrs->vrs_end_time > rebuild_end_time) {
					rebuild_end_time = vrs->vrs_end_time;
				}
			}
		}

		/*
		 * If we can determine when the last scrub was run, and it
		 * was before the last rebuild completed, then recommend
		 * that the pool be scrubbed to verify all checksums. When
		 * ps is NULL we can infer the pool has never been scrubbed.
		 */
		if (rebuild_end_time > 0) {
			if (ps != NULL) {
				if ((ps->pss_state == DSS_FINISHED &&
				    ps->pss_func == POOL_SCAN_SCRUB &&
				    rebuild_end_time > ps->pss_end_time) ||
				    ps->pss_state == DSS_NONE)
					return (ZPOOL_STATUS_REBUILD_SCRUB);
			} else {
				return (ZPOOL_STATUS_REBUILD_SCRUB);
			}
		}
	}

	/*
	 * The multihost property is set and the pool may be active.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_ACTIVE) {
		mmp_state_t mmp_state;
		nvlist_t *nvinfo;

		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		mmp_state = fnvlist_lookup_uint64(nvinfo,
		    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state == MMP_STATE_ACTIVE)
			return (ZPOOL_STATUS_HOSTID_ACTIVE);
		else if (mmp_state == MMP_STATE_NO_HOSTID)
			return (ZPOOL_STATUS_HOSTID_REQUIRED);
		else
			return (ZPOOL_STATUS_HOSTID_MISMATCH);
	}

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_LOAD_INFO);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		uint64_t reason;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED_REASON,
		    &reason) == 0 && reason == ZIO_SUSPEND_MMP)
			return (ZPOOL_STATUS_IO_FAILURE_MMP);

		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors, B_TRUE))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined, B_TRUE))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed, B_TRUE))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Suboptimal, but usable, ashift configuration.
	 */
	if (find_vdev_problem(nvroot, vdev_non_native_ashift, B_FALSE))
		return (ZPOOL_STATUS_NON_NATIVE_ASHIFT);

	/*
	 * Informational errata available.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
	if (errata) {
		*erratap = errata;
		return (ZPOOL_STATUS_ERRATA);
	}

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION) {
		/* "legacy" compatibility disables old version reporting */
		if (compat != NULL && strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0)
			return (ZPOOL_STATUS_OK);
		else
			return (ZPOOL_STATUS_VERSION_OLDER);
	}

	/*
	 * Usable pool with disabled or superfluous features
	 * (superfluous = beyond what's requested by 'compatibility')
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			if (nvlist_exists(feat, ZPOOL_CONFIG_ENABLED_FEAT))
				feat = fnvlist_lookup_nvlist(feat,
				    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		/* check against all features, or limited set? */
		boolean_t c_features[SPA_FEATURES];

		switch (zpool_load_compat(compat, c_features, NULL, 0)) {
		case ZPOOL_COMPATIBILITY_OK:
		case ZPOOL_COMPATIBILITY_WARNTOKEN:
			break;
		default:
			return (ZPOOL_STATUS_COMPATIBILITY_ERR);
		}
		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!fi->fi_zfs_mod_supported)
				continue;
			if (c_features[i] && !nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
			if (!c_features[i] && nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_INCOMPATIBLE_FEAT);
		}
	}

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, const char **msgid,
    zpool_errata_t *errata)
{
	/*
	 * Pass in the desired feature set, as it affects the check for
	 * disabled features.
	 */
	char compatibility[ZFS_MAXPROPLEN];
	if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compatibility,
	    ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
		compatibility[0] = '\0';

	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata,
	    compatibility);

	if (msgid != NULL) {
		if (ret >= NMSGID)
			*msgid = NULL;
		else
			*msgid = zfs_msgid_table[ret];
	}
	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, const char **msgid,
    zpool_errata_t *errata)
{
	zpool_status_t ret = check_status(config, B_TRUE, errata, NULL);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}