Add -gLp to zpool subcommands for alt vdev names

The following options have been added to the zpool add, iostat,
list, status, and split subcommands.  The default behavior was
not modified.  From zfs(8):

  -g    Display vdev GUIDs instead of the normal short
        device names.  These GUIDs can be used in place of
        device names for the zpool detach/offline/remove/
        replace commands.

  -L    Display real paths for vdevs, resolving all symbolic
        links.  This can be used to look up the current block
        device name regardless of the /dev/disk/ path used
        to open it.

  -p    Display  full  paths  for vdevs instead of only the
        last component of the path.  This can  be  used  in
        conjunction with the -L flag.

This behavior may also be enabled using the following environment
variables.

  ZPOOL_VDEV_NAME_GUID
  ZPOOL_VDEV_NAME_FOLLOW_LINKS
  ZPOOL_VDEV_NAME_PATH

This change is based on work originally started by Richard Yao
to add a -g option.  It was then extended by @ilovezfs to add a
-L option for openzfsonosx.  Those changes have been merged,
refactored, a -p option added, and extended to all relevant zpool
subcommands.

Original-patch-by: Richard Yao <ryao@gentoo.org>
Extended-by: ilovezfs <ilovezfs@icloud.com>
Extended-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: ilovezfs <ilovezfs@icloud.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2011
Closes #4341
This commit is contained in:
Richard Yao 2013-12-29 13:40:46 -05:00 committed by Brian Behlendorf
parent e79a6bacc6
commit d2f3e292dc
4 changed files with 407 additions and 104 deletions

View File

@ -207,7 +207,7 @@ static const char *
get_usage(zpool_help_t idx) { get_usage(zpool_help_t idx) {
switch (idx) { switch (idx) {
case HELP_ADD: case HELP_ADD:
return (gettext("\tadd [-fn] [-o property=value] " return (gettext("\tadd [-fgLnp] [-o property=value] "
"<pool> <vdev> ...\n")); "<pool> <vdev> ...\n"));
case HELP_ATTACH: case HELP_ATTACH:
return (gettext("\tattach [-f] [-o property=value] " return (gettext("\tattach [-f] [-o property=value] "
@ -237,12 +237,12 @@ get_usage(zpool_help_t idx) {
"[-R root] [-F [-n]]\n" "[-R root] [-F [-n]]\n"
"\t <pool | id> [newpool]\n")); "\t <pool | id> [newpool]\n"));
case HELP_IOSTAT: case HELP_IOSTAT:
return (gettext("\tiostat [-v] [-T d|u] [-y] [pool] ... " return (gettext("\tiostat [-gLpvy] [-T d|u] [pool] ... "
"[interval [count]]\n")); "[interval [count]]\n"));
case HELP_LABELCLEAR: case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n")); return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST: case HELP_LIST:
return (gettext("\tlist [-Hv] [-o property[,...]] " return (gettext("\tlist [-gHLpv] [-o property[,...]] "
"[-T d|u] [pool] ... [interval [count]]\n")); "[-T d|u] [pool] ... [interval [count]]\n"));
case HELP_OFFLINE: case HELP_OFFLINE:
return (gettext("\toffline [-t] <pool> <device> ...\n")); return (gettext("\toffline [-t] <pool> <device> ...\n"));
@ -258,8 +258,8 @@ get_usage(zpool_help_t idx) {
case HELP_SCRUB: case HELP_SCRUB:
return (gettext("\tscrub [-s] <pool> ...\n")); return (gettext("\tscrub [-s] <pool> ...\n"));
case HELP_STATUS: case HELP_STATUS:
return (gettext("\tstatus [-vxD] [-T d|u] [pool] ... [interval " return (gettext("\tstatus [-gLpvxD] [-T d|u] [pool] ... "
"[count]]\n")); "[interval [count]]\n"));
case HELP_UPGRADE: case HELP_UPGRADE:
return (gettext("\tupgrade\n" return (gettext("\tupgrade\n"
"\tupgrade -v\n" "\tupgrade -v\n"
@ -272,7 +272,7 @@ get_usage(zpool_help_t idx) {
case HELP_SET: case HELP_SET:
return (gettext("\tset <property=value> <pool> \n")); return (gettext("\tset <property=value> <pool> \n"));
case HELP_SPLIT: case HELP_SPLIT:
return (gettext("\tsplit [-n] [-R altroot] [-o mntopts]\n" return (gettext("\tsplit [-gLnp] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> " "\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n")); "[<device> ...]\n"));
case HELP_REGUID: case HELP_REGUID:
@ -371,7 +371,7 @@ usage(boolean_t requested)
void void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent, print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
boolean_t print_logs) boolean_t print_logs, int name_flags)
{ {
nvlist_t **child; nvlist_t **child;
uint_t c, children; uint_t c, children;
@ -392,9 +392,9 @@ print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
if ((is_log && !print_logs) || (!is_log && print_logs)) if ((is_log && !print_logs) || (!is_log && print_logs))
continue; continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, print_vdev_tree(zhp, vname, child[c], indent + 2,
B_FALSE); B_FALSE, name_flags);
free(vname); free(vname);
} }
} }
@ -502,12 +502,15 @@ add_prop_list_default(const char *propname, char *propval, nvlist_t **props,
} }
/* /*
* zpool add [-fn] [-o property=value] <pool> <vdev> ... * zpool add [-fgLnp] [-o property=value] <pool> <vdev> ...
* *
* -f Force addition of devices, even if they appear in use * -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if * -n Do not add the devices, but display the resulting layout if
* they were to be added. * they were to be added.
* -o Set property=value. * -o Set property=value.
* -p Display full path for vdev name.
* *
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is * Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by get_vdev_spec(), which constructs the nvlist needed to pass to * handled by get_vdev_spec(), which constructs the nvlist needed to pass to
@ -518,6 +521,7 @@ zpool_do_add(int argc, char **argv)
{ {
boolean_t force = B_FALSE; boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE; boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c; int c;
nvlist_t *nvroot; nvlist_t *nvroot;
char *poolname; char *poolname;
@ -528,11 +532,17 @@ zpool_do_add(int argc, char **argv)
char *propval; char *propval;
/* check options */ /* check options */
while ((c = getopt(argc, argv, "fno:")) != -1) { while ((c = getopt(argc, argv, "fgLno:p")) != -1) {
switch (c) { switch (c) {
case 'f': case 'f':
force = B_TRUE; force = B_TRUE;
break; break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n': case 'n':
dryrun = B_TRUE; dryrun = B_TRUE;
break; break;
@ -549,6 +559,9 @@ zpool_do_add(int argc, char **argv)
(add_prop_list(optarg, propval, &props, B_TRUE))) (add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE); usage(B_FALSE);
break; break;
case 'p':
name_flags |= VDEV_NAME_PATH;
break;
case '?': case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"), (void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt); optopt);
@ -606,15 +619,19 @@ zpool_do_add(int argc, char **argv)
"configuration:\n"), zpool_get_name(zhp)); "configuration:\n"), zpool_get_name(zhp));
/* print original main pool and new tree */ /* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE); print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE,
print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE); name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE, name_flags);
/* Do the same for the logs */ /* Do the same for the logs */
if (num_logs(poolnvroot) > 0) { if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE); print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE,
print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE); name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE,
name_flags);
} else if (num_logs(nvroot) > 0) { } else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE); print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE,
name_flags);
} }
/* Do the same for the caches */ /* Do the same for the caches */
@ -624,7 +641,7 @@ zpool_do_add(int argc, char **argv)
(void) printf(gettext("\tcache\n")); (void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) { for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], B_FALSE); l2child[c], name_flags);
(void) printf("\t %s\n", vname); (void) printf("\t %s\n", vname);
free(vname); free(vname);
} }
@ -635,7 +652,7 @@ zpool_do_add(int argc, char **argv)
(void) printf(gettext("\tcache\n")); (void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) { for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], B_FALSE); l2child[c], name_flags);
(void) printf("\t %s\n", vname); (void) printf("\t %s\n", vname);
free(vname); free(vname);
} }
@ -1082,9 +1099,9 @@ zpool_do_create(int argc, char **argv)
(void) printf(gettext("would create '%s' with the " (void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname); "following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE); print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE, 0);
if (num_logs(nvroot) > 0) if (num_logs(nvroot) > 0)
print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE); print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE, 0);
ret = 0; ret = 0;
} else { } else {
@ -1311,13 +1328,15 @@ zpool_do_export(int argc, char **argv)
* name column. * name column.
*/ */
static int static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max) max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{ {
char *name = zpool_vdev_name(g_zfs, zhp, nv, B_TRUE); char *name;
nvlist_t **child; nvlist_t **child;
uint_t c, children; uint_t c, children;
int ret; int ret;
name = zpool_vdev_name(g_zfs, zhp, nv, name_flags | VDEV_NAME_TYPE_ID);
if (strlen(name) + depth > max) if (strlen(name) + depth > max)
max = strlen(name) + depth; max = strlen(name) + depth;
@ -1327,7 +1346,7 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max)
&child, &children) == 0) { &child, &children) == 0) {
for (c = 0; c < children; c++) for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2, if ((ret = max_width(zhp, child[c], depth + 2,
max)) > max) max, name_flags)) > max)
max = ret; max = ret;
} }
@ -1335,7 +1354,7 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max)
&child, &children) == 0) { &child, &children) == 0) {
for (c = 0; c < children; c++) for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2, if ((ret = max_width(zhp, child[c], depth + 2,
max)) > max) max, name_flags)) > max)
max = ret; max = ret;
} }
@ -1343,11 +1362,10 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max)
&child, &children) == 0) { &child, &children) == 0) {
for (c = 0; c < children; c++) for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2, if ((ret = max_width(zhp, child[c], depth + 2,
max)) > max) max, name_flags)) > max)
max = ret; max = ret;
} }
return (max); return (max);
} }
@ -1399,9 +1417,9 @@ find_spare(zpool_handle_t *zhp, void *data)
/* /*
* Print out configuration state as requested by status_callback. * Print out configuration state as requested by status_callback.
*/ */
void static void
print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv, print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
int namewidth, int depth, boolean_t isspare) int namewidth, int depth, boolean_t isspare, int name_flags)
{ {
nvlist_t **child; nvlist_t **child;
uint_t c, children; uint_t c, children;
@ -1537,20 +1555,21 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
&ishole); &ishole);
if (islog || ishole) if (islog || ishole)
continue; continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE); vname = zpool_vdev_name(g_zfs, zhp, child[c],
name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, vname, child[c], print_status_config(zhp, vname, child[c],
namewidth, depth + 2, isspare); namewidth, depth + 2, isspare, name_flags);
free(vname); free(vname);
} }
} }
/* /*
* Print the configuration of an exported pool. Iterate over all vdevs in the * Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one. * pool, printing out the name and status for each one.
*/ */
void static void
print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth) print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth,
int name_flags)
{ {
nvlist_t **child; nvlist_t **child;
uint_t c, children; uint_t c, children;
@ -1615,8 +1634,10 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth)
if (is_log) if (is_log)
continue; continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c], B_TRUE); vname = zpool_vdev_name(g_zfs, NULL, child[c],
print_import_config(vname, child[c], namewidth, depth + 2); name_flags | VDEV_NAME_TYPE_ID);
print_import_config(vname, child[c], namewidth, depth + 2,
name_flags);
free(vname); free(vname);
} }
@ -1624,7 +1645,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth)
&child, &children) == 0) { &child, &children) == 0) {
(void) printf(gettext("\tcache\n")); (void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, NULL, child[c],
name_flags);
(void) printf("\t %s\n", vname); (void) printf("\t %s\n", vname);
free(vname); free(vname);
} }
@ -1634,7 +1656,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth)
&child, &children) == 0) { &child, &children) == 0) {
(void) printf(gettext("\tspares\n")); (void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, NULL, child[c],
name_flags);
(void) printf("\t %s\n", vname); (void) printf("\t %s\n", vname);
free(vname); free(vname);
} }
@ -1650,7 +1673,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth)
* works because only the top level vdev is marked "is_log" * works because only the top level vdev is marked "is_log"
*/ */
static void static void
print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose) print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose,
int name_flags)
{ {
uint_t c, children; uint_t c, children;
nvlist_t **child; nvlist_t **child;
@ -1669,12 +1693,14 @@ print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose)
&is_log); &is_log);
if (!is_log) if (!is_log)
continue; continue;
name = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE); name = zpool_vdev_name(g_zfs, zhp, child[c],
name_flags | VDEV_NAME_TYPE_ID);
if (verbose) if (verbose)
print_status_config(zhp, name, child[c], namewidth, print_status_config(zhp, name, child[c], namewidth,
2, B_FALSE); 2, B_FALSE, name_flags);
else else
print_import_config(name, child[c], namewidth, 2); print_import_config(name, child[c], namewidth, 2,
name_flags);
free(name); free(name);
} }
} }
@ -1923,13 +1949,13 @@ show_import(nvlist_t *config)
(void) printf(gettext(" config:\n\n")); (void) printf(gettext(" config:\n\n"));
namewidth = max_width(NULL, nvroot, 0, 0); namewidth = max_width(NULL, nvroot, 0, 0, 0);
if (namewidth < 10) if (namewidth < 10)
namewidth = 10; namewidth = 10;
print_import_config(name, nvroot, namewidth, 0); print_import_config(name, nvroot, namewidth, 0, 0);
if (num_logs(nvroot) > 0) if (num_logs(nvroot) > 0)
print_logs(NULL, nvroot, namewidth, B_FALSE); print_logs(NULL, nvroot, namewidth, B_FALSE, 0);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to " (void) printf(gettext("\n\tAdditional devices are known to "
@ -2438,6 +2464,7 @@ error:
typedef struct iostat_cbdata { typedef struct iostat_cbdata {
boolean_t cb_verbose; boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth; int cb_namewidth;
int cb_iteration; int cb_iteration;
zpool_list_t *cb_list; zpool_list_t *cb_list;
@ -2560,7 +2587,8 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
if (ishole || islog) if (ishole || islog)
continue; continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c], B_FALSE); vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2); newchild[c], cb, depth + 2);
free(vname); free(vname);
@ -2581,7 +2609,7 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
if (islog) { if (islog) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c], vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
B_FALSE); cb->cb_name_flags);
print_vdev_stats(zhp, vname, oldnv ? print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], oldchild[c] : NULL, newchild[c],
cb, depth + 2); cb, depth + 2);
@ -2607,7 +2635,7 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
"-\n", cb->cb_namewidth, "cache"); "-\n", cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c], vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
B_FALSE); cb->cb_name_flags);
print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2); newchild[c], cb, depth + 2);
free(vname); free(vname);
@ -2700,7 +2728,7 @@ get_namewidth(zpool_handle_t *zhp, void *data)
cb->cb_namewidth = strlen(zpool_get_name(zhp)); cb->cb_namewidth = strlen(zpool_get_name(zhp));
else else
cb->cb_namewidth = max_width(zhp, nvroot, 0, cb->cb_namewidth = max_width(zhp, nvroot, 0,
cb->cb_namewidth); cb->cb_namewidth, cb->cb_name_flags);
} }
/* /*
@ -2800,8 +2828,11 @@ get_timestamp_arg(char c)
} }
/* /*
* zpool iostat [-v] [-T d|u] [pool] ... [interval [count]] * zpool iostat [-gLpv] [-T d|u] [pool] ... [interval [count]]
* *
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display full path for vdev name.
* -v Display statistics for individual vdevs * -v Display statistics for individual vdevs
* -T Display a timestamp in date(1) or Unix format * -T Display a timestamp in date(1) or Unix format
* *
@ -2821,11 +2852,23 @@ zpool_do_iostat(int argc, char **argv)
zpool_list_t *list; zpool_list_t *list;
boolean_t verbose = B_FALSE; boolean_t verbose = B_FALSE;
boolean_t omit_since_boot = B_FALSE; boolean_t omit_since_boot = B_FALSE;
iostat_cbdata_t cb; boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
iostat_cbdata_t cb = { 0 };
/* check options */ /* check options */
while ((c = getopt(argc, argv, "T:vy")) != -1) { while ((c = getopt(argc, argv, "gLpT:vy")) != -1) {
switch (c) { switch (c) {
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'p':
full_name = B_TRUE;
break;
case 'T': case 'T':
get_timestamp_arg(*optarg); get_timestamp_arg(*optarg);
break; break;
@ -2870,6 +2913,12 @@ zpool_do_iostat(int argc, char **argv)
*/ */
cb.cb_list = list; cb.cb_list = list;
cb.cb_verbose = verbose; cb.cb_verbose = verbose;
if (guid)
cb.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0; cb.cb_iteration = 0;
cb.cb_namewidth = 0; cb.cb_namewidth = 0;
@ -2953,6 +3002,7 @@ zpool_do_iostat(int argc, char **argv)
typedef struct list_cbdata { typedef struct list_cbdata {
boolean_t cb_verbose; boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth; int cb_namewidth;
boolean_t cb_scripted; boolean_t cb_scripted;
zprop_list_t *cb_proplist; zprop_list_t *cb_proplist;
@ -3187,7 +3237,8 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
continue; continue;
} }
vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2); print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname); free(vname);
} }
@ -3199,7 +3250,8 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) != 0 || !islog) &islog) != 0 || !islog)
continue; continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2); print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname); free(vname);
} }
@ -3210,7 +3262,8 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
/* LINTED E_SEC_PRINTF_VAR_FMT */ /* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache"); (void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2); print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname); free(vname);
} }
@ -3221,7 +3274,8 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
/* LINTED E_SEC_PRINTF_VAR_FMT */ /* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare"); (void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2); print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname); free(vname);
} }
@ -3253,13 +3307,16 @@ list_callback(zpool_handle_t *zhp, void *data)
} }
/* /*
* zpool list [-H] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]] * zpool list [-gHLp] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
* *
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties * -H Scripted mode. Don't display headers, and separate properties
* by a single tab. * by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to * -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity," * "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot" * "dedupratio,health,altroot"
* -p Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format * -T Display a timestamp in date(1) or Unix format
* *
* List all pools in the system, whether or not they're healthy. Output space * List all pools in the system, whether or not they're healthy. Output space
@ -3280,14 +3337,23 @@ zpool_do_list(int argc, char **argv)
boolean_t first = B_TRUE; boolean_t first = B_TRUE;
/* check options */ /* check options */
while ((c = getopt(argc, argv, ":Ho:T:v")) != -1) { while ((c = getopt(argc, argv, ":gHLo:pT:v")) != -1) {
switch (c) { switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H': case 'H':
cb.cb_scripted = B_TRUE; cb.cb_scripted = B_TRUE;
break; break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o': case 'o':
props = optarg; props = optarg;
break; break;
case 'p':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'T': case 'T':
get_timestamp_arg(*optarg); get_timestamp_arg(*optarg);
break; break;
@ -3543,13 +3609,16 @@ zpool_do_detach(int argc, char **argv)
} }
/* /*
* zpool split [-n] [-o prop=val] ... * zpool split [-gLnp] [-o prop=val] ...
* [-o mntopt] ... * [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...] * [-R altroot] <pool> <newpool> [<device> ...]
* *
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if * -n Do not split the pool, but display the resulting layout if
* it were to be split. * it were to be split.
* -o Set property=value, or set mount options. * -o Set property=value, or set mount options.
* -p Display full path for vdev name.
* -R Mount the split-off pool under an alternate root. * -R Mount the split-off pool under an alternate root.
* *
* Splits the named pool and gives it the new pool name. Devices to be split * Splits the named pool and gives it the new pool name. Devices to be split
@ -3573,10 +3642,17 @@ zpool_do_split(int argc, char **argv)
flags.dryrun = B_FALSE; flags.dryrun = B_FALSE;
flags.import = B_FALSE; flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */ /* check options */
while ((c = getopt(argc, argv, ":R:no:")) != -1) { while ((c = getopt(argc, argv, ":gLR:no:p")) != -1) {
switch (c) { switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R': case 'R':
flags.import = B_TRUE; flags.import = B_TRUE;
if (add_prop_list( if (add_prop_list(
@ -3604,6 +3680,9 @@ zpool_do_split(int argc, char **argv)
mntopts = optarg; mntopts = optarg;
} }
break; break;
case 'p':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':': case ':':
(void) fprintf(stderr, gettext("missing argument for " (void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt); "'%c' option\n"), optopt);
@ -3651,7 +3730,8 @@ zpool_do_split(int argc, char **argv)
if (flags.dryrun) { if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the " (void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool); "following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, B_FALSE); print_vdev_tree(NULL, newpool, config, 0, B_FALSE,
flags.name_flags);
} }
nvlist_free(config); nvlist_free(config);
} }
@ -4057,6 +4137,7 @@ zpool_do_scrub(int argc, char **argv)
typedef struct status_cbdata { typedef struct status_cbdata {
int cb_count; int cb_count;
int cb_name_flags;
boolean_t cb_allpools; boolean_t cb_allpools;
boolean_t cb_verbose; boolean_t cb_verbose;
boolean_t cb_explain; boolean_t cb_explain;
@ -4213,7 +4294,7 @@ print_error_log(zpool_handle_t *zhp)
static void static void
print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares, print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares,
int namewidth) int namewidth, int name_flags)
{ {
uint_t i; uint_t i;
char *name; char *name;
@ -4224,16 +4305,16 @@ print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares,
(void) printf(gettext("\tspares\n")); (void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) { for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i], B_FALSE); name = zpool_vdev_name(g_zfs, zhp, spares[i], name_flags);
print_status_config(zhp, name, spares[i], print_status_config(zhp, name, spares[i],
namewidth, 2, B_TRUE); namewidth, 2, B_TRUE, name_flags);
free(name); free(name);
} }
} }
static void static void
print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache, print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache,
int namewidth) int namewidth, int name_flags)
{ {
uint_t i; uint_t i;
char *name; char *name;
@ -4244,9 +4325,9 @@ print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache,
(void) printf(gettext("\tcache\n")); (void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) { for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i], B_FALSE); name = zpool_vdev_name(g_zfs, zhp, l2cache[i], name_flags);
print_status_config(zhp, name, l2cache[i], print_status_config(zhp, name, l2cache[i],
namewidth, 2, B_FALSE); namewidth, 2, B_FALSE, name_flags);
free(name); free(name);
} }
} }
@ -4588,7 +4669,7 @@ status_callback(zpool_handle_t *zhp, void *data)
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &c); ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &c);
print_scan_status(ps); print_scan_status(ps);
namewidth = max_width(zhp, nvroot, 0, 0); namewidth = max_width(zhp, nvroot, 0, 0, cbp->cb_name_flags);
if (namewidth < 10) if (namewidth < 10)
namewidth = 10; namewidth = 10;
@ -4596,17 +4677,20 @@ status_callback(zpool_handle_t *zhp, void *data)
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s\n"), namewidth, (void) printf(gettext("\t%-*s %-8s %5s %5s %5s\n"), namewidth,
"NAME", "STATE", "READ", "WRITE", "CKSUM"); "NAME", "STATE", "READ", "WRITE", "CKSUM");
print_status_config(zhp, zpool_get_name(zhp), nvroot, print_status_config(zhp, zpool_get_name(zhp), nvroot,
namewidth, 0, B_FALSE); namewidth, 0, B_FALSE, cbp->cb_name_flags);
if (num_logs(nvroot) > 0) if (num_logs(nvroot) > 0)
print_logs(zhp, nvroot, namewidth, B_TRUE); print_logs(zhp, nvroot, namewidth, B_TRUE,
cbp->cb_name_flags);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) &l2cache, &nl2cache) == 0)
print_l2cache(zhp, l2cache, nl2cache, namewidth); print_l2cache(zhp, l2cache, nl2cache, namewidth,
cbp->cb_name_flags);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) &spares, &nspares) == 0)
print_spares(zhp, spares, nspares, namewidth); print_spares(zhp, spares, nspares, namewidth,
cbp->cb_name_flags);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) { &nerr) == 0) {
@ -4654,8 +4738,11 @@ status_callback(zpool_handle_t *zhp, void *data)
} }
/* /*
* zpool status [-vx] [-T d|u] [pool] ... [interval [count]] * zpool status [-gLpvx] [-T d|u] [pool] ... [interval [count]]
* *
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display full path for vdev name.
* -v Display complete error logs * -v Display complete error logs
* -x Display only pools with potential problems * -x Display only pools with potential problems
* -D Display dedup status (undocumented) * -D Display dedup status (undocumented)
@ -4672,8 +4759,17 @@ zpool_do_status(int argc, char **argv)
status_cbdata_t cb = { 0 }; status_cbdata_t cb = { 0 };
/* check options */ /* check options */
while ((c = getopt(argc, argv, "vxDT:")) != -1) { while ((c = getopt(argc, argv, "gLpvxDT:")) != -1) {
switch (c) { switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'v': case 'v':
cb.cb_verbose = B_TRUE; cb.cb_verbose = B_TRUE;
break; break;

View File

@ -248,6 +248,7 @@ typedef struct splitflags {
/* after splitting, import the pool */ /* after splitting, import the pool */
int import : 1; int import : 1;
int name_flags;
} splitflags_t; } splitflags_t;
/* /*
@ -406,8 +407,15 @@ struct zfs_cmd;
extern const char *zfs_history_event_names[]; extern const char *zfs_history_event_names[];
typedef enum {
VDEV_NAME_PATH = 1 << 0,
VDEV_NAME_GUID = 1 << 1,
VDEV_NAME_FOLLOW_LINKS = 1 << 2,
VDEV_NAME_TYPE_ID = 1 << 3,
} vdev_name_t;
extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *, extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
boolean_t verbose); int name_flags);
extern int zpool_upgrade(zpool_handle_t *, uint64_t); extern int zpool_upgrade(zpool_handle_t *, uint64_t);
extern int zpool_get_history(zpool_handle_t *, nvlist_t **); extern int zpool_get_history(zpool_handle_t *, nvlist_t **);
extern int zpool_history_unpack(char *, uint64_t, uint64_t *, extern int zpool_history_unpack(char *, uint64_t, uint64_t *,

View File

@ -1393,8 +1393,7 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device '%s' contains an EFI label and " "device '%s' contains an EFI label and "
"cannot be used on root pools."), "cannot be used on root pools."),
zpool_vdev_name(hdl, NULL, spares[s], zpool_vdev_name(hdl, NULL, spares[s], 0));
B_FALSE));
return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
} }
} }
@ -1715,7 +1714,7 @@ print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
return; return;
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
print_vdev_tree(hdl, vname, child[c], indent + 2); print_vdev_tree(hdl, vname, child[c], indent + 2);
free(vname); free(vname);
} }
@ -2704,7 +2703,7 @@ zpool_vdev_attach(zpool_handle_t *zhp,
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1); return (-1);
/* /*
@ -2895,11 +2894,11 @@ find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
for (mc = 0; mc < mchildren; mc++) { for (mc = 0; mc < mchildren; mc++) {
uint_t sc; uint_t sc;
char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
mchild[mc], B_FALSE); mchild[mc], 0);
for (sc = 0; sc < schildren; sc++) { for (sc = 0; sc < schildren; sc++) {
char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
schild[sc], B_FALSE); schild[sc], 0);
boolean_t result = (strcmp(mpath, spath) == 0); boolean_t result = (strcmp(mpath, spath) == 0);
free(spath); free(spath);
@ -3442,21 +3441,34 @@ strip_partition(libzfs_handle_t *hdl, char *path)
*/ */
char * char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
boolean_t verbose) int name_flags)
{ {
char *path, *devid, *type; char *path, *devid, *type, *env;
uint64_t value; uint64_t value;
char buf[PATH_BUF_LEN]; char buf[PATH_BUF_LEN];
char tmpbuf[PATH_BUF_LEN]; char tmpbuf[PATH_BUF_LEN];
vdev_stat_t *vs; vdev_stat_t *vs;
uint_t vsc; uint_t vsc;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, env = getenv("ZPOOL_VDEV_NAME_PATH");
&value) == 0) { if (env && (strtoul(env, NULL, 0) > 0 ||
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
&value) == 0); name_flags |= VDEV_NAME_PATH;
(void) snprintf(buf, sizeof (buf), "%llu",
(u_longlong_t)value); env = getenv("ZPOOL_VDEV_NAME_GUID");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_GUID;
env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
name_flags & VDEV_NAME_GUID) {
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf; path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
/* /*
@ -3497,11 +3509,21 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
devid_str_free(newdevid); devid_str_free(newdevid);
} }
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
if (rp) {
strlcpy(buf, rp, sizeof (buf));
path = buf;
free(rp);
}
}
/* /*
* For a block device only use the name. * For a block device only use the name.
*/ */
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_DISK) == 0) { if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
path = strrchr(path, '/'); path = strrchr(path, '/');
path++; path++;
} }
@ -3509,8 +3531,8 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
/* /*
* Remove the partition from the path it this is a whole disk. * Remove the partition from the path it this is a whole disk.
*/ */
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
&value) == 0 && value) { == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (strip_partition(hdl, path)); return (strip_partition(hdl, path));
} }
} else { } else {
@ -3532,7 +3554,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
* We identify each top-level vdev by using a <type-id> * We identify each top-level vdev by using a <type-id>
* naming convention. * naming convention.
*/ */
if (verbose) { if (name_flags & VDEV_NAME_TYPE_ID) {
uint64_t id; uint64_t id;
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,

View File

@ -26,7 +26,7 @@ zpool \- configures ZFS storage pools
.LP .LP
.nf .nf
\fBzpool add\fR [\fB-fn\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIvdev\fR ... \fBzpool add\fR [\fB-fgLnp\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIvdev\fR ...
.fi .fi
.LP .LP
@ -94,7 +94,7 @@ zpool \- configures ZFS storage pools
.LP .LP
.nf .nf
\fBzpool iostat\fR [\fB-T\fR d | u ] [\fB-v\fR] [\fB-y\fR] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]] \fBzpool iostat\fR [\fB-T\fR d | u ] [\fB-gLpvy\fR] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]
.fi .fi
.LP .LP
@ -104,7 +104,7 @@ zpool \- configures ZFS storage pools
.LP .LP
.nf .nf
\fBzpool list\fR [\fB-T\fR d | u ] [\fB-Hv\fR] [\fB-o\fR \fIproperty\fR[,...]] [\fIpool\fR] ... \fBzpool list\fR [\fB-T\fR d | u ] [\fB-HgLpv\fR] [\fB-o\fR \fIproperty\fR[,...]] [\fIpool\fR] ...
[\fIinterval\fR[\fIcount\fR]] [\fIinterval\fR[\fIcount\fR]]
.fi .fi
@ -150,12 +150,12 @@ zpool \- configures ZFS storage pools
.LP .LP
.nf .nf
\fBzpool split\fR [\fB-n\fR] [\fB-R\fR \fIaltroot\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fInewpool\fR [\fIdevice\fR ...] \fBzpool split\fR [\fB-gLnp\fR] [\fB-R\fR \fIaltroot\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fInewpool\fR [\fIdevice\fR ...]
.fi .fi
.LP .LP
.nf .nf
\fBzpool status\fR [\fB-xvD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] \fBzpool status\fR [\fB-gLpvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
.fi .fi
.LP .LP
@ -836,7 +836,7 @@ Displays a help message.
.ne 2 .ne 2
.mk .mk
.na .na
\fB\fBzpool add\fR [\fB-fn\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIvdev\fR ...\fR \fB\fBzpool add\fR [\fB-fgLnp\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fIvdev\fR ...\fR
.ad .ad
.sp .6 .sp .6
.RS 4n .RS 4n
@ -852,6 +852,28 @@ Adds the specified virtual devices to the given pool. The \fIvdev\fR specificati
Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting replication level. Not all devices can be overridden in this manner. Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting replication level. Not all devices can be overridden in this manner.
.RE .RE
.sp
.ne 2
.mk
.na
\fB\fB-g\fR\fR
.ad
.RS 6n
.rt
Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-L\fR\fR
.ad
.RS 6n
.rt
Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
.RE
.sp .sp
.ne 2 .ne 2
.mk .mk
@ -863,6 +885,17 @@ Forces use of \fBvdev\fRs, even if they appear in use or specify a conflicting r
Displays the configuration that would be used without actually adding the \fBvdev\fRs. The actual pool creation can still fail due to insufficient privileges or device sharing. Displays the configuration that would be used without actually adding the \fBvdev\fRs. The actual pool creation can still fail due to insufficient privileges or device sharing.
.RE .RE
.sp
.ne 2
.mk
.na
\fB\fB-p\fR\fR
.ad
.RS 6n
.rt
Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
.RE
.sp .sp
.ne 2 .ne 2
.mk .mk
@ -1608,7 +1641,7 @@ Allows a pool to import when there is a missing log device.
.ne 2 .ne 2
.mk .mk
.na .na
\fB\fBzpool iostat\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-v\fR] [\fB-y\fR] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]\fR \fB\fBzpool iostat\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-gLpvy\fR] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]\fR
.ad .ad
.sp .6 .sp .6
.RS 4n .RS 4n
@ -1626,6 +1659,39 @@ Display a time stamp.
Specify \fBu\fR for a printed representation of the internal representation of time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See \fBdate\fR(1). Specify \fBu\fR for a printed representation of the internal representation of time. See \fBtime\fR(2). Specify \fBd\fR for standard date format. See \fBdate\fR(1).
.RE .RE
.sp
.ne 2
.mk
.na
\fB\fB-g\fR\fR
.ad
.RS 12n
.rt
Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-L\fR\fR
.ad
.RS 12n
.rt
Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-p\fR\fR
.ad
.RS 12n
.rt
Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
.RE
.sp .sp
.ne 2 .ne 2
.mk .mk
@ -1676,7 +1742,7 @@ Treat exported or foreign devices as inactive.
.ne 2 .ne 2
.mk .mk
.na .na
\fB\fBzpool list\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-Hv\fR] [\fB-o\fR \fIprops\fR[,...]] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]\fR \fB\fBzpool list\fR [\fB-T\fR \fBd\fR | \fBu\fR] [\fB-HgLpv\fR] [\fB-o\fR \fIprops\fR[,...]] [\fIpool\fR] ... [\fIinterval\fR[\fIcount\fR]]\fR
.ad .ad
.sp .6 .sp .6
.RS 4n .RS 4n
@ -1692,6 +1758,39 @@ Lists the given pools along with a health status and space usage. If no \fIpools
Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space. Scripted mode. Do not display headers, and separate fields by a single tab instead of arbitrary space.
.RE .RE
.sp
.ne 2
.mk
.na
\fB\fB-g\fR\fR
.ad
.RS 12n
.rt
Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-L\fR\fR
.ad
.RS 12n
.rt
Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-p\fR\fR
.ad
.RS 12n
.rt
Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
.RE
.ne 2 .ne 2
.mk .mk
.na .na
@ -1886,7 +1985,7 @@ Sets the given property on the specified pool. See the "Properties" section for
.ne 2 .ne 2
.mk .mk
.na .na
\fBzpool split\fR [\fB-n\fR] [\fB-R\fR \fIaltroot\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fInewpool\fR [\fIdevice\fR ...] \fBzpool split\fR [\fB-gLnp\fR] [\fB-R\fR \fIaltroot\fR] [\fB-o\fR \fIproperty=value\fR] \fIpool\fR \fInewpool\fR [\fIdevice\fR ...]
.ad .ad
.sp .6 .sp .6
.RS 4n .RS 4n
@ -1894,6 +1993,28 @@ Split devices off \fIpool\fR creating \fInewpool\fR. All \fBvdev\fRs in \fIpool\
The optional \fIdevice\fR specification causes the specified device(s) to be included in the new pool and, should any devices remain unspecified, the last device in each mirror is used as would be by default. The optional \fIdevice\fR specification causes the specified device(s) to be included in the new pool and, should any devices remain unspecified, the last device in each mirror is used as would be by default.
.sp
.ne 2
.mk
.na
\fB\fB-g\fR\fR
.ad
.RS 6n
.rt
Display vdev GUIDs instead of the normal device names. These GUIDs can be used in place of device names for the zpool detach/offline/remove/replace commands.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-L\fR\fR
.ad
.RS 6n
.rt
Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
.RE
.sp .sp
.ne 2 .ne 2
.mk .mk
@ -1905,6 +2026,17 @@ The optional \fIdevice\fR specification causes the specified device(s) to be inc
Do dry run, do not actually perform the split. Print out the expected configuration of \fInewpool\fR. Do dry run, do not actually perform the split. Print out the expected configuration of \fInewpool\fR.
.RE .RE
.sp
.ne 2
.mk
.na
\fB\fB-p\fR\fR
.ad
.RS 6n
.rt
Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
.RE
.sp .sp
.ne 2 .ne 2
.mk .mk
@ -1933,22 +2065,45 @@ Sets the specified property for \fInewpool\fR. See the “Properties” section
.ne 2 .ne 2
.mk .mk
.na .na
\fBzpool status\fR [\fB-xvD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] \fBzpool status\fR [\fB-gLpvxD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
.ad .ad
.sp .6 .sp .6
.RS 4n .RS 4n
Displays the detailed health status for the given pools. If no \fIpool\fR is specified, then the status of each pool in the system is displayed. For more information on pool and device health, see the "Device Failure and Recovery" section. Displays the detailed health status for the given pools. If no \fIpool\fR is specified, then the status of each pool in the system is displayed. For more information on pool and device health, see the "Device Failure and Recovery" section.
.sp .sp
If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change. If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change.
.sp .sp
.ne 2 .ne 2
.mk .mk
.na .na
\fB\fB-x\fR\fR \fB\fB-g\fR\fR
.ad .ad
.RS 12n .RS 12n
.rt .rt
Only display status for pools that are exhibiting errors or are otherwise unavailable. Warnings about pools not using the latest on-disk format will not be included. Display vdev GUIDs instead of the normal device names. These GUIDs can be used innplace of device names for the zpool detach/offline/remove/replace commands.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-L\fR\fR
.ad
.RS 12n
.rt
Display real paths for vdevs resolving all symbolic links. This can be used to look up the current block device name regardless of the /dev/disk/ path used to open it.
.RE
.sp
.ne 2
.mk
.na
\fB\fB-p\fR\fR
.ad
.RS 12n
.rt
Display full paths for vdevs instead of only the last component of the path. This can be used in conjunction with the \fB-L\fR flag.
.RE .RE
.sp .sp
@ -1962,6 +2117,17 @@ Only display status for pools that are exhibiting errors or are otherwise unavai
Displays verbose data error information, printing out a complete list of all data errors since the last complete pool scrub. Displays verbose data error information, printing out a complete list of all data errors since the last complete pool scrub.
.RE .RE
.sp
.ne 2
.mk
.na
\fB\fB-x\fR\fR
.ad
.RS 12n
.rt
Only display status for pools that are exhibiting errors or are otherwise unavailable. Warnings about pools not using the latest on-disk format will not be included.
.RE
.sp .sp
.ne 2 .ne 2
.mk .mk
@ -2403,6 +2569,17 @@ Cause \fBzpool\fR to dump core on exit for the purposes of running \fB::findleak
.B "ZPOOL_IMPORT_PATH" .B "ZPOOL_IMPORT_PATH"
The search path for devices or files to use with the pool. This is a colon-separated list of directories in which \fBzpool\fR looks for device nodes and files. The search path for devices or files to use with the pool. This is a colon-separated list of directories in which \fBzpool\fR looks for device nodes and files.
Similar to the \fB-d\fR option in \fIzpool import\fR. Similar to the \fB-d\fR option in \fIzpool import\fR.
.TP
.B "ZPOOL_VDEV_NAME_GUID"
Cause \fBzpool\fR subcommands to output vdev guids by default. This behavior
is identical to the \fBzpool status -g\fR command line option.
.TP
.B "ZPOOL_VDEV_NAME_FOLLOW_LINKS"
Cause \fBzpool\fR subcommands to follow links for vdev names by default. This behavior is identical to the \fBzpool status -L\fR command line option.
.TP
.B "ZPOOL_VDEV_NAME_PATH"
Cause \fBzpool\fR subcommands to output full vdev path names by default. This
behavior is identical to the \fBzpool status -p\fR command line option.
.SH SEE ALSO .SH SEE ALSO
.sp .sp