zfs/lib/libzpool/util.c

/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
* Copyright 2017 Jason King
*/
#include <assert.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/spa.h>
#include <sys/fs/zfs.h>
#include <sys/refcount.h>
#include <dlfcn.h>
/*
* Routines needed by more than one client of libzpool.
*/
/* The largest suffix that can fit, aka an exabyte (2^60 / 10^18) */
#define INDEX_MAX (6)
/* Verify INDEX_MAX fits */
CTASSERT_GLOBAL(INDEX_MAX * 10 < sizeof (uint64_t) * 8);
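/*
 * With INDEX_MAX == 6 the assertion checks that 60 < 64, i.e. the largest
 * divisor used below, 2^60, still fits in a uint64_t.
 */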
void
nicenum_scale(uint64_t n, size_t units, char *buf, size_t buflen,
uint32_t flags)
{
uint64_t divamt = 1024;
uint64_t divisor = 1;
int index = 0;
int rc = 0;
char u;
if (units == 0)
units = 1;
if (n > 0) {
n *= units;
if (n < units)
goto overflow;
}
if (flags & NN_DIVISOR_1000)
divamt = 1000;
	/*
	 * This tries to find the suffix S(n) such that
	 * S(n) <= n < S(n+1), where S(n) = 2^(n*10) | 10^(3*n)
	 * (i.e. 1024/1000, 1,048,576/1,000,000, etc).  Stop once S(n)
	 * is the largest suffix supported (i.e. don't bother computing
	 * and checking S(n+1)).  Since INDEX_MAX should be the largest
	 * suffix that fits (currently an exabyte), S(INDEX_MAX + 1) is
	 * never checked as it would overflow.
	 */
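	/*
	 * For example, with divamt == 1024 and n == 1500000, the loop
	 * below stops at divisor == 1048576 (index == 2, suffix 'M'),
	 * and the value is later formatted as "1.43M".
	 */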
while (index < INDEX_MAX) {
uint64_t newdiv = divisor * divamt;
		/* The CTASSERT() above guarantees these never trip */
VERIFY3U(newdiv, >=, divamt);
VERIFY3U(newdiv, >=, divisor);
if (n < newdiv)
break;
divisor = newdiv;
index++;
}
u = " KMGTPE"[index];
if (index == 0) {
rc = snprintf(buf, buflen, "%llu", (u_longlong_t)n);
} else if (n % divisor == 0) {
/*
* If this is an even multiple of the base, always display
* without any decimal precision.
*/
rc = snprintf(buf, buflen, "%llu%c",
(u_longlong_t)(n / divisor), u);
} else {
/*
* We want to choose a precision that reflects the best choice
* for fitting in 5 characters. This can get rather tricky
* when we have numbers that are very close to an order of
* magnitude. For example, when displaying 10239 (which is
* really 9.999K), we want only a single place of precision
* for 10.0K. We could develop some complex heuristics for
* this, but it's much easier just to try each combination
* in turn.
*/
int i;
for (i = 2; i >= 0; i--) {
if ((rc = snprintf(buf, buflen, "%.*f%c", i,
(double)n / divisor, u)) <= 5)
break;
}
}
if (rc + 1 > buflen || rc < 0)
goto overflow;
return;
overflow:
/* prefer a more verbose message if possible */
if (buflen > 10)
(void) strlcpy(buf, "<overflow>", buflen);
else
(void) strlcpy(buf, "??", buflen);
}
void
nicenum(uint64_t num, char *buf, size_t buflen)
{
nicenum_scale(num, 1, buf, buflen, 0);
}
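
/*
 * Example usage (illustrative): formatting a raw byte count for display.
 *
 *	char buf[32];
 *
 *	nicenum(1536, buf, sizeof (buf));		buf now holds "1.50K"
 *	nicenum_scale(1536, 1, buf, sizeof (buf),
 *	    NN_DIVISOR_1000);				buf now holds "1.54K"
 */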
static void
show_vdev_stats(const char *desc, const char *ctype, nvlist_t *nv, int indent)
{
vdev_stat_t *vs;
vdev_stat_t *v0 = { 0 };
uint64_t sec;
uint64_t is_log = 0;
nvlist_t **child;
uint_t c, children;
char used[6], avail[6];
char rops[6], wops[6], rbytes[6], wbytes[6], rerr[6], werr[6], cerr[6];
char *prefix = "";
v0 = umem_zalloc(sizeof (*v0), UMEM_NOFAIL);
if (indent == 0 && desc != NULL) {
(void) printf(" "
" capacity operations bandwidth ---- errors ----\n");
(void) printf("description "
"used avail read write read write read write cksum\n");
}
if (desc != NULL) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
if (is_log)
prefix = "log ";
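		/*
		 * If the config carries no per-vdev stats, fall back to
		 * the zeroed vdev_stat_t so the row prints as all zeros.
		 */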
if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) != 0)
vs = v0;
sec = MAX(1, vs->vs_timestamp / NANOSEC);
nicenum(vs->vs_alloc, used, sizeof (used));
nicenum(vs->vs_space - vs->vs_alloc, avail, sizeof (avail));
nicenum(vs->vs_ops[ZIO_TYPE_READ] / sec, rops, sizeof (rops));
nicenum(vs->vs_ops[ZIO_TYPE_WRITE] / sec, wops, sizeof (wops));
nicenum(vs->vs_bytes[ZIO_TYPE_READ] / sec, rbytes,
sizeof (rbytes));
nicenum(vs->vs_bytes[ZIO_TYPE_WRITE] / sec, wbytes,
sizeof (wbytes));
nicenum(vs->vs_read_errors, rerr, sizeof (rerr));
nicenum(vs->vs_write_errors, werr, sizeof (werr));
nicenum(vs->vs_checksum_errors, cerr, sizeof (cerr));
(void) printf("%*s%s%*s%*s%*s %5s %5s %5s %5s %5s %5s %5s\n",
indent, "",
prefix,
	    (int)(indent + strlen(prefix) - 25 - (vs->vs_space ? 0 : 12)),
desc,
vs->vs_space ? 6 : 0, vs->vs_space ? used : "",
vs->vs_space ? 6 : 0, vs->vs_space ? avail : "",
rops, wops, rbytes, wbytes, rerr, werr, cerr);
}
free(v0);
if (nvlist_lookup_nvlist_array(nv, ctype, &child, &children) != 0)
return;
for (c = 0; c < children; c++) {
nvlist_t *cnv = child[c];
char *cname = NULL, *tname;
uint64_t np;
int len;
if (nvlist_lookup_string(cnv, ZPOOL_CONFIG_PATH, &cname) &&
nvlist_lookup_string(cnv, ZPOOL_CONFIG_TYPE, &cname))
cname = "<unknown>";
len = strlen(cname) + 2;
tname = umem_zalloc(len, UMEM_NOFAIL);
(void) strlcpy(tname, cname, len);
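		/*
		 * For raidz vdevs, append the parity level to the name
		 * (e.g. "raidz" becomes "raidz1"); the extra byte was
		 * reserved in the allocation above.
		 */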
if (nvlist_lookup_uint64(cnv, ZPOOL_CONFIG_NPARITY, &np) == 0)
tname[strlen(tname)] = '0' + np;
show_vdev_stats(tname, ctype, cnv, indent + 2);
free(tname);
}
}
void
show_pool_stats(spa_t *spa)
{
nvlist_t *config, *nvroot;
char *name;
VERIFY(spa_get_stats(spa_name(spa), &config, NULL, 0) == 0);
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
show_vdev_stats(name, ZPOOL_CONFIG_CHILDREN, nvroot, 0);
show_vdev_stats(NULL, ZPOOL_CONFIG_L2CACHE, nvroot, 0);
show_vdev_stats(NULL, ZPOOL_CONFIG_SPARES, nvroot, 0);
nvlist_free(config);
}
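
/*
 * Example usage (illustrative; the pool name "tank" and the open/close
 * around the call are only an assumption about the caller):
 *
 *	spa_t *spa;
 *
 *	VERIFY0(spa_open("tank", &spa, FTAG));
 *	show_pool_stats(spa);
 *	spa_close(spa, FTAG);
 */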
/*
 * Set the given libzpool global variable to the given unsigned 32-bit value.
* arg: "<variable>=<value>"
*/
int
set_global_var(char *arg)
{
void *zpoolhdl;
char *varname = arg, *varval;
u_longlong_t val;
#ifndef _LITTLE_ENDIAN
/*
* On big endian systems changing a 64-bit variable would set the high
* 32 bits instead of the low 32 bits, which could cause unexpected
* results.
*/
fprintf(stderr, "Setting global variables is only supported on "
"little-endian systems\n");
return (ENOTSUP);
#endif
if (arg != NULL && (varval = strchr(arg, '=')) != NULL) {
*varval = '\0';
varval++;
val = strtoull(varval, NULL, 0);
if (val > UINT32_MAX) {
fprintf(stderr, "Value for global variable '%s' must "
"be a 32-bit unsigned integer\n", varname);
return (EOVERFLOW);
}
} else {
return (EINVAL);
}
zpoolhdl = dlopen("libzpool.so", RTLD_LAZY);
if (zpoolhdl != NULL) {
uint32_t *var;
var = dlsym(zpoolhdl, varname);
if (var == NULL) {
fprintf(stderr, "Global variable '%s' does not exist "
"in libzpool.so\n", varname);
return (EINVAL);
}
*var = (uint32_t)val;
dlclose(zpoolhdl);
} else {
fprintf(stderr, "Failed to open libzpool.so to set global "
"variable\n");
return (EIO);
}
return (0);
}
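
/*
 * Example usage (illustrative; the variable name is hypothetical and must
 * refer to a 32-bit unsigned global actually exported by libzpool.so):
 *
 *	if (set_global_var("some_zfs_tunable=1") != 0)
 *		(void) fprintf(stderr, "failed to set global variable\n");
 */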