RAID-Z expansion feature
This feature allows disks to be added one at a time to a RAID-Z group, expanding its capacity incrementally. It is especially useful for small pools (typically with only one RAID-Z group), where there isn't sufficient hardware to add capacity by adding a whole new RAID-Z group (typically doubling the number of disks).

== Initiating expansion ==

A new device (disk) can be attached to an existing RAIDZ vdev by running `zpool attach POOL raidzP-N NEW_DEVICE`, e.g. `zpool attach tank raidz2-0 sda`. The new device will become part of the RAIDZ group. A "raidz expansion" will be initiated, and the new device will contribute additional space to the RAIDZ group once the expansion completes.

The `feature@raidz_expansion` on-disk feature flag must be `enabled` to initiate an expansion, and it remains `active` for the life of the pool. In other words, pools with expanded RAIDZ vdevs cannot be imported by older releases of the ZFS software.

== During expansion ==

The expansion entails reading all allocated space from existing disks in the RAIDZ group and rewriting it to the new disks in the RAIDZ group (including the newly added device). The expansion progress can be monitored with `zpool status`.

Data redundancy is maintained during (and after) the expansion. If a disk fails while the expansion is in progress, the expansion pauses until the health of the RAIDZ vdev is restored (e.g. by replacing the failed disk and waiting for reconstruction to complete). The pool remains accessible during expansion. Following a reboot or export/import, the expansion resumes where it left off.

== After expansion ==

When the expansion completes, the additional space is available for use, and is reflected in the `available` zfs property (as seen in `zfs list`, `df`, etc).

Expansion does not change the number of failures that can be tolerated without data loss (e.g. a RAIDZ2 is still a RAIDZ2 even after expansion). A RAIDZ vdev can be expanded multiple times.

After the expansion completes, old blocks retain their old data-to-parity ratio (e.g. a 5-wide RAIDZ2 has 3 data to 2 parity), but are distributed among the larger set of disks. New blocks will be written with the new data-to-parity ratio (e.g. a 5-wide RAIDZ2 which has been expanded once to 6-wide has 4 data to 2 parity). However, the RAIDZ vdev's "assumed parity ratio" does not change, so slightly less space than expected may be reported for newly-written blocks, according to `zfs list`, `df`, `ls -s`, and similar tools.

Sponsored-by: The FreeBSD Foundation
Sponsored-by: iXsystems, Inc.
Sponsored-by: vStack
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Mark Maybee <mark.maybee@delphix.com>
Authored-by: Matthew Ahrens <mahrens@delphix.com>
Contributions-by: Fedor Uporov <fuporov.vstack@gmail.com>
Contributions-by: Stuart Maybee <stuart.maybee@comcast.net>
Contributions-by: Thorsten Behrens <tbehrens@outlook.com>
Contributions-by: Fmstrat <nospam@nowsci.com>
Contributions-by: Don Brady <dev.fs.zfs@gmail.com>
Signed-off-by: Don Brady <dev.fs.zfs@gmail.com>
Closes #15022
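The commands below are a usage sketch of the workflow described above; they are not part of the commit itself. The pool name `tank`, vdev name `raidz2-0`, and disk name `sda` are just the example values from the commit message, and the `zpool wait -t raidz_expand` form relies on the new ZPOOL_WAIT_RAIDZ_EXPAND activity introduced by this change.

    # Enable the feature flag if it is not already enabled on the pool
    # (example pool name "tank", matching the text above).
    zpool set feature@raidz_expansion=enabled tank

    # Attach a new disk to the existing RAIDZ vdev; this initiates the
    # expansion.  With -w, the command waits for the expansion to finish.
    zpool attach -w tank raidz2-0 sda

    # Or return immediately and follow progress separately.
    zpool attach tank raidz2-0 sda
    zpool status tank                # progress appears on the "expand:" line
    zpool wait -t raidz_expand tank  # block until the expansion completes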
This commit is contained in:
parent 9198de8f10
commit 5caeef02fa
@@ -84,10 +84,10 @@ run_gen_bench_impl(const char *impl)
 	if (rto_opts.rto_expand) {
 		rm_bench = vdev_raidz_map_alloc_expanded(
-		    zio_bench.io_abd,
-		    zio_bench.io_size, zio_bench.io_offset,
+		    &zio_bench,
 		    rto_opts.rto_ashift, ncols+1, ncols,
-		    fn+1, rto_opts.rto_expand_offset);
+		    fn+1, rto_opts.rto_expand_offset,
+		    0, B_FALSE);
 	} else {
 		rm_bench = vdev_raidz_map_alloc(&zio_bench,
 		    BENCH_ASHIFT, ncols, fn+1);
@@ -172,10 +172,10 @@ run_rec_bench_impl(const char *impl)
 	if (rto_opts.rto_expand) {
 		rm_bench = vdev_raidz_map_alloc_expanded(
-		    zio_bench.io_abd,
-		    zio_bench.io_size, zio_bench.io_offset,
+		    &zio_bench,
 		    BENCH_ASHIFT, ncols+1, ncols,
-		    PARITY_PQR, rto_opts.rto_expand_offset);
+		    PARITY_PQR,
+		    rto_opts.rto_expand_offset, 0, B_FALSE);
 	} else {
 		rm_bench = vdev_raidz_map_alloc(&zio_bench,
 		    BENCH_ASHIFT, ncols, PARITY_PQR);
@@ -327,14 +327,12 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
 	if (opts->rto_expand) {
 		opts->rm_golden =
-		    vdev_raidz_map_alloc_expanded(opts->zio_golden->io_abd,
-		    opts->zio_golden->io_size, opts->zio_golden->io_offset,
+		    vdev_raidz_map_alloc_expanded(opts->zio_golden,
 		    opts->rto_ashift, total_ncols+1, total_ncols,
-		    parity, opts->rto_expand_offset);
-		rm_test = vdev_raidz_map_alloc_expanded(zio_test->io_abd,
-		    zio_test->io_size, zio_test->io_offset,
+		    parity, opts->rto_expand_offset, 0, B_FALSE);
+		rm_test = vdev_raidz_map_alloc_expanded(zio_test,
 		    opts->rto_ashift, total_ncols+1, total_ncols,
-		    parity, opts->rto_expand_offset);
+		    parity, opts->rto_expand_offset, 0, B_FALSE);
 	} else {
 		opts->rm_golden = vdev_raidz_map_alloc(opts->zio_golden,
 		    opts->rto_ashift, total_ncols, parity);
@ -361,187 +359,6 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
|
|||
return (err);
|
||||
}
|
||||
|
||||
/*
|
||||
* If reflow is not in progress, reflow_offset should be UINT64_MAX.
|
||||
* For each row, if the row is entirely before reflow_offset, it will
|
||||
* come from the new location. Otherwise this row will come from the
|
||||
* old location. Therefore, rows that straddle the reflow_offset will
|
||||
* come from the old location.
|
||||
*
|
||||
* NOTE: Until raidz expansion is implemented this function is only
|
||||
* needed by raidz_test.c to the multi-row raid_map_t functionality.
|
||||
*/
|
||||
raidz_map_t *
|
||||
vdev_raidz_map_alloc_expanded(abd_t *abd, uint64_t size, uint64_t offset,
|
||||
uint64_t ashift, uint64_t physical_cols, uint64_t logical_cols,
|
||||
uint64_t nparity, uint64_t reflow_offset)
|
||||
{
|
||||
/* The zio's size in units of the vdev's minimum sector size. */
|
||||
uint64_t s = size >> ashift;
|
||||
uint64_t q, r, bc, devidx, asize = 0, tot;
|
||||
|
||||
/*
|
||||
* "Quotient": The number of data sectors for this stripe on all but
|
||||
* the "big column" child vdevs that also contain "remainder" data.
|
||||
* AKA "full rows"
|
||||
*/
|
||||
q = s / (logical_cols - nparity);
|
||||
|
||||
/*
|
||||
* "Remainder": The number of partial stripe data sectors in this I/O.
|
||||
* This will add a sector to some, but not all, child vdevs.
|
||||
*/
|
||||
r = s - q * (logical_cols - nparity);
|
||||
|
||||
/* The number of "big columns" - those which contain remainder data. */
|
||||
bc = (r == 0 ? 0 : r + nparity);
|
||||
|
||||
/*
|
||||
* The total number of data and parity sectors associated with
|
||||
* this I/O.
|
||||
*/
|
||||
tot = s + nparity * (q + (r == 0 ? 0 : 1));
|
||||
|
||||
/* How many rows contain data (not skip) */
|
||||
uint64_t rows = howmany(tot, logical_cols);
|
||||
int cols = MIN(tot, logical_cols);
|
||||
|
||||
raidz_map_t *rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[rows]),
|
||||
KM_SLEEP);
|
||||
rm->rm_nrows = rows;
|
||||
|
||||
for (uint64_t row = 0; row < rows; row++) {
|
||||
raidz_row_t *rr = kmem_alloc(offsetof(raidz_row_t,
|
||||
rr_col[cols]), KM_SLEEP);
|
||||
rm->rm_row[row] = rr;
|
||||
|
||||
/* The starting RAIDZ (parent) vdev sector of the row. */
|
||||
uint64_t b = (offset >> ashift) + row * logical_cols;
|
||||
|
||||
/*
|
||||
* If we are in the middle of a reflow, and any part of this
|
||||
* row has not been copied, then use the old location of
|
||||
* this row.
|
||||
*/
|
||||
int row_phys_cols = physical_cols;
|
||||
if (b + (logical_cols - nparity) > reflow_offset >> ashift)
|
||||
row_phys_cols--;
|
||||
|
||||
/* starting child of this row */
|
||||
uint64_t child_id = b % row_phys_cols;
|
||||
/* The starting byte offset on each child vdev. */
|
||||
uint64_t child_offset = (b / row_phys_cols) << ashift;
|
||||
|
||||
/*
|
||||
* We set cols to the entire width of the block, even
|
||||
* if this row is shorter. This is needed because parity
|
||||
* generation (for Q and R) needs to know the entire width,
|
||||
* because it treats the short row as though it was
|
||||
* full-width (and the "phantom" sectors were zero-filled).
|
||||
*
|
||||
* Another approach to this would be to set cols shorter
|
||||
* (to just the number of columns that we might do i/o to)
|
||||
* and have another mechanism to tell the parity generation
|
||||
* about the "entire width". Reconstruction (at least
|
||||
* vdev_raidz_reconstruct_general()) would also need to
|
||||
* know about the "entire width".
|
||||
*/
|
||||
rr->rr_cols = cols;
|
||||
rr->rr_bigcols = bc;
|
||||
rr->rr_missingdata = 0;
|
||||
rr->rr_missingparity = 0;
|
||||
rr->rr_firstdatacol = nparity;
|
||||
rr->rr_abd_empty = NULL;
|
||||
rr->rr_nempty = 0;
|
||||
|
||||
for (int c = 0; c < rr->rr_cols; c++, child_id++) {
|
||||
if (child_id >= row_phys_cols) {
|
||||
child_id -= row_phys_cols;
|
||||
child_offset += 1ULL << ashift;
|
||||
}
|
||||
rr->rr_col[c].rc_devidx = child_id;
|
||||
rr->rr_col[c].rc_offset = child_offset;
|
||||
rr->rr_col[c].rc_orig_data = NULL;
|
||||
rr->rr_col[c].rc_error = 0;
|
||||
rr->rr_col[c].rc_tried = 0;
|
||||
rr->rr_col[c].rc_skipped = 0;
|
||||
rr->rr_col[c].rc_need_orig_restore = B_FALSE;
|
||||
|
||||
uint64_t dc = c - rr->rr_firstdatacol;
|
||||
if (c < rr->rr_firstdatacol) {
|
||||
rr->rr_col[c].rc_size = 1ULL << ashift;
|
||||
rr->rr_col[c].rc_abd =
|
||||
abd_alloc_linear(rr->rr_col[c].rc_size,
|
||||
B_TRUE);
|
||||
} else if (row == rows - 1 && bc != 0 && c >= bc) {
|
||||
/*
|
||||
* Past the end, this for parity generation.
|
||||
*/
|
||||
rr->rr_col[c].rc_size = 0;
|
||||
rr->rr_col[c].rc_abd = NULL;
|
||||
} else {
|
||||
/*
|
||||
* "data column" (col excluding parity)
|
||||
* Add an ASCII art diagram here
|
||||
*/
|
||||
uint64_t off;
|
||||
|
||||
if (c < bc || r == 0) {
|
||||
off = dc * rows + row;
|
||||
} else {
|
||||
off = r * rows +
|
||||
(dc - r) * (rows - 1) + row;
|
||||
}
|
||||
rr->rr_col[c].rc_size = 1ULL << ashift;
|
||||
rr->rr_col[c].rc_abd = abd_get_offset_struct(
|
||||
&rr->rr_col[c].rc_abdstruct,
|
||||
abd, off << ashift, 1 << ashift);
|
||||
}
|
||||
|
||||
asize += rr->rr_col[c].rc_size;
|
||||
}
|
||||
/*
|
||||
* If all data stored spans all columns, there's a danger that
|
||||
* parity will always be on the same device and, since parity
|
||||
* isn't read during normal operation, that that device's I/O
|
||||
* bandwidth won't be used effectively. We therefore switch
|
||||
* the parity every 1MB.
|
||||
*
|
||||
* ...at least that was, ostensibly, the theory. As a practical
|
||||
* matter unless we juggle the parity between all devices
|
||||
* evenly, we won't see any benefit. Further, occasional writes
|
||||
* that aren't a multiple of the LCM of the number of children
|
||||
* and the minimum stripe width are sufficient to avoid pessimal
|
||||
* behavior. Unfortunately, this decision created an implicit
|
||||
* on-disk format requirement that we need to support for all
|
||||
* eternity, but only for single-parity RAID-Z.
|
||||
*
|
||||
* If we intend to skip a sector in the zeroth column for
|
||||
* padding we must make sure to note this swap. We will never
|
||||
* intend to skip the first column since at least one data and
|
||||
* one parity column must appear in each row.
|
||||
*/
|
||||
if (rr->rr_firstdatacol == 1 && rr->rr_cols > 1 &&
|
||||
(offset & (1ULL << 20))) {
|
||||
ASSERT(rr->rr_cols >= 2);
|
||||
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
|
||||
devidx = rr->rr_col[0].rc_devidx;
|
||||
uint64_t o = rr->rr_col[0].rc_offset;
|
||||
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
|
||||
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
|
||||
rr->rr_col[1].rc_devidx = devidx;
|
||||
rr->rr_col[1].rc_offset = o;
|
||||
}
|
||||
|
||||
}
|
||||
ASSERT3U(asize, ==, tot << ashift);
|
||||
|
||||
/* init RAIDZ parity ops */
|
||||
rm->rm_ops = vdev_raidz_math_get_ops();
|
||||
|
||||
return (rm);
|
||||
}
|
||||
|
||||
static raidz_map_t *
|
||||
init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
|
||||
{
|
||||
|
@@ -561,10 +378,9 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
 	init_zio_abd(*zio);
 
 	if (opts->rto_expand) {
-		rm = vdev_raidz_map_alloc_expanded((*zio)->io_abd,
-		    (*zio)->io_size, (*zio)->io_offset,
+		rm = vdev_raidz_map_alloc_expanded(*zio,
 		    opts->rto_ashift, total_ncols+1, total_ncols,
-		    parity, opts->rto_expand_offset);
+		    parity, opts->rto_expand_offset, 0, B_FALSE);
 	} else {
 		rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
 		    total_ncols, parity);
@@ -119,7 +119,4 @@ void init_zio_abd(zio_t *zio);
 void run_raidz_benchmark(void);
 
-struct raidz_map *vdev_raidz_map_alloc_expanded(abd_t *, uint64_t, uint64_t,
-    uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
-
 #endif /* RAIDZ_TEST_H */
@@ -4134,6 +4134,11 @@ dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
 	}
 	(void) printf("\tcheckpoint_txg = %llu\n",
 	    (u_longlong_t)ub->ub_checkpoint_txg);
+
+	(void) printf("\traidz_reflow state=%u off=%llu\n",
+	    (int)RRSS_GET_STATE(ub),
+	    (u_longlong_t)RRSS_GET_OFFSET(ub));
+
 	(void) printf("%s", footer ? footer : "");
 }
@@ -6650,9 +6650,17 @@ zpool_do_attach_or_replace(int argc, char **argv, int replacing)
 	ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
 	    rebuild);
 
-	if (ret == 0 && wait)
-		ret = zpool_wait(zhp,
-		    replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
+	if (ret == 0 && wait) {
+		zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER;
+		char raidz_prefix[] = "raidz";
+		if (replacing) {
+			activity = ZPOOL_WAIT_REPLACE;
+		} else if (strncmp(old_disk,
+		    raidz_prefix, strlen(raidz_prefix)) == 0) {
+			activity = ZPOOL_WAIT_RAIDZ_EXPAND;
+		}
+		ret = zpool_wait(zhp, activity);
+	}
 
 	nvlist_free(props);
 	nvlist_free(nvroot);
@@ -6678,17 +6686,21 @@ zpool_do_replace(int argc, char **argv)
 }
 
 /*
- * zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
+ * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
  *
  * -f	Force attach, even if <new_device> appears to be in use.
  * -s	Use sequential instead of healing reconstruction for resilver.
  * -o	Set property=value.
- * -w	Wait for resilvering to complete before returning
+ * -w	Wait for resilvering (mirror) or expansion (raidz) to complete
+ *	before returning.
  *
- * Attach <new_device> to the mirror containing <device>. If <device> is not
- * part of a mirror, then <device> will be transformed into a mirror of
- * <device> and <new_device>. In either case, <new_device> will begin life
- * with a DTL of [0, now], and will immediately begin to resilver itself.
+ * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
+ * mirror or raidz. If <device> is not part of a mirror, then <device> will
+ * be transformed into a mirror of <device> and <new_device>. When a mirror
+ * is involved, <new_device> will begin life with a DTL of [0, now], and will
+ * immediately begin to resilver itself. For the raidz case, a expansion will
+ * commence and reflow the raidz data across all the disks including the
+ * <new_device>.
  */
 int
 zpool_do_attach(int argc, char **argv)
@ -8195,6 +8207,97 @@ print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Print out detailed raidz expansion status.
|
||||
*/
|
||||
static void
|
||||
print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
|
||||
{
|
||||
char copied_buf[7];
|
||||
|
||||
if (pres == NULL || pres->pres_state == DSS_NONE)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Determine name of vdev.
|
||||
*/
|
||||
nvlist_t *config = zpool_get_config(zhp, NULL);
|
||||
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
|
||||
ZPOOL_CONFIG_VDEV_TREE);
|
||||
nvlist_t **child;
|
||||
uint_t children;
|
||||
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
|
||||
&child, &children) == 0);
|
||||
assert(pres->pres_expanding_vdev < children);
|
||||
|
||||
printf_color(ANSI_BOLD, gettext("expand: "));
|
||||
|
||||
time_t start = pres->pres_start_time;
|
||||
time_t end = pres->pres_end_time;
|
||||
char *vname =
|
||||
zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0);
|
||||
zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf));
|
||||
|
||||
/*
|
||||
* Expansion is finished or canceled.
|
||||
*/
|
||||
if (pres->pres_state == DSS_FINISHED) {
|
||||
char time_buf[32];
|
||||
secs_to_dhms(end - start, time_buf);
|
||||
|
||||
(void) printf(gettext("expanded %s-%u copied %s in %s, "
|
||||
"on %s"), vname, (int)pres->pres_expanding_vdev,
|
||||
copied_buf, time_buf, ctime((time_t *)&end));
|
||||
} else {
|
||||
char examined_buf[7], total_buf[7], rate_buf[7];
|
||||
uint64_t copied, total, elapsed, secs_left;
|
||||
double fraction_done;
|
||||
uint_t rate;
|
||||
|
||||
assert(pres->pres_state == DSS_SCANNING);
|
||||
|
||||
/*
|
||||
* Expansion is in progress.
|
||||
*/
|
||||
(void) printf(gettext(
|
||||
"expansion of %s-%u in progress since %s"),
|
||||
vname, (int)pres->pres_expanding_vdev, ctime(&start));
|
||||
|
||||
copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1;
|
||||
total = pres->pres_to_reflow;
|
||||
fraction_done = (double)copied / total;
|
||||
|
||||
/* elapsed time for this pass */
|
||||
elapsed = time(NULL) - pres->pres_start_time;
|
||||
elapsed = elapsed > 0 ? elapsed : 1;
|
||||
rate = copied / elapsed;
|
||||
rate = rate > 0 ? rate : 1;
|
||||
secs_left = (total - copied) / rate;
|
||||
|
||||
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
|
||||
zfs_nicenum(total, total_buf, sizeof (total_buf));
|
||||
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
|
||||
|
||||
/*
|
||||
* do not print estimated time if hours_left is more than
|
||||
* 30 days
|
||||
*/
|
||||
(void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"),
|
||||
examined_buf, total_buf, rate_buf, 100 * fraction_done);
|
||||
if (pres->pres_waiting_for_resilver) {
|
||||
(void) printf(gettext(", paused for resilver or "
|
||||
"clear\n"));
|
||||
} else if (secs_left < (30 * 24 * 3600)) {
|
||||
char time_buf[32];
|
||||
secs_to_dhms(secs_left, time_buf);
|
||||
(void) printf(gettext(", %s to go\n"), time_buf);
|
||||
} else {
|
||||
(void) printf(gettext(
|
||||
", (copy is slow, no estimated time)\n"));
|
||||
}
|
||||
}
|
||||
free(vname);
|
||||
}
|
||||
static void
|
||||
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
|
||||
{
|
||||
|
@ -8772,19 +8875,24 @@ status_callback(zpool_handle_t *zhp, void *data)
|
|||
uint64_t nerr;
|
||||
nvlist_t **spares, **l2cache;
|
||||
uint_t nspares, nl2cache;
|
||||
pool_checkpoint_stat_t *pcs = NULL;
|
||||
pool_removal_stat_t *prs = NULL;
|
||||
|
||||
print_scan_status(zhp, nvroot);
|
||||
|
||||
pool_removal_stat_t *prs = NULL;
|
||||
(void) nvlist_lookup_uint64_array(nvroot,
|
||||
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
|
||||
print_removal_status(zhp, prs);
|
||||
|
||||
pool_checkpoint_stat_t *pcs = NULL;
|
||||
(void) nvlist_lookup_uint64_array(nvroot,
|
||||
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
|
||||
print_checkpoint_status(pcs);
|
||||
|
||||
pool_raidz_expand_stat_t *pres = NULL;
|
||||
(void) nvlist_lookup_uint64_array(nvroot,
|
||||
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
|
||||
print_raidz_expand_status(zhp, pres);
|
||||
|
||||
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
|
||||
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
|
||||
if (cbp->cb_namewidth < 10)
|
||||
|
@ -10738,8 +10846,9 @@ print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
|
|||
pool_checkpoint_stat_t *pcs = NULL;
|
||||
pool_scan_stat_t *pss = NULL;
|
||||
pool_removal_stat_t *prs = NULL;
|
||||
pool_raidz_expand_stat_t *pres = NULL;
|
||||
const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE",
|
||||
"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM"};
|
||||
"REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"};
|
||||
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
|
||||
|
||||
/* Calculate the width of each column */
|
||||
|
@ -10798,6 +10907,13 @@ print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
|
|||
vdev_activity_top_remaining(nvroot);
|
||||
}
|
||||
|
||||
(void) nvlist_lookup_uint64_array(nvroot,
|
||||
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c);
|
||||
if (pres != NULL && pres->pres_state == DSS_SCANNING) {
|
||||
int64_t rem = pres->pres_to_reflow - pres->pres_reflowed;
|
||||
bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem;
|
||||
}
|
||||
|
||||
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
|
||||
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
|
||||
bytes_rem[ZPOOL_WAIT_TRIM] =
|
||||
|
@ -10827,11 +10943,12 @@ print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
|
|||
if (!wd->wd_enabled[i])
|
||||
continue;
|
||||
|
||||
if (wd->wd_exact)
|
||||
if (wd->wd_exact) {
|
||||
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
|
||||
bytes_rem[i]);
|
||||
else
|
||||
} else {
|
||||
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
|
||||
}
|
||||
|
||||
if (wd->wd_scripted)
|
||||
(void) printf(i == 0 ? "%s" : "\t%s", buf);
|
||||
|
@ -10937,7 +11054,8 @@ zpool_do_wait(int argc, char **argv)
|
|||
for (char *tok; (tok = strsep(&optarg, ",")); ) {
|
||||
static const char *const col_opts[] = {
|
||||
"discard", "free", "initialize", "replace",
|
||||
"remove", "resilver", "scrub", "trim" };
|
||||
"remove", "resilver", "scrub", "trim",
|
||||
"raidz_expand" };
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(col_opts); ++i)
|
||||
if (strcmp(tok, col_opts[i]) == 0) {
|
||||
|
|
cmd/ztest.c: 1048 changed lines (file diff suppressed because it is too large)
@@ -103,6 +103,7 @@ zfs_errno = enum_with_offset(1024, [
     'ZFS_ERR_NOT_USER_NAMESPACE',
     'ZFS_ERR_RESUME_EXISTS',
     'ZFS_ERR_CRYPTO_NOTSUP',
+    'ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS',
     ],
     {}
     )
@@ -115,5 +116,6 @@ ZFS_ERR_DEVRM_IN_PROGRESS = zfs_errno.ZFS_ERR_DEVRM_IN_PROGRESS
 ZFS_ERR_VDEV_TOO_BIG = zfs_errno.ZFS_ERR_VDEV_TOO_BIG
 ZFS_ERR_WRONG_PARENT = zfs_errno.ZFS_ERR_WRONG_PARENT
 ZFS_ERR_VDEV_NOTSUP = zfs_errno.ZFS_ERR_VDEV_NOTSUP
+ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS = zfs_errno.ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS
 
 # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
@@ -43,6 +43,7 @@ from ._constants import (
     ZFS_ERR_DEVRM_IN_PROGRESS,
     ZFS_ERR_VDEV_TOO_BIG,
     ZFS_ERR_WRONG_PARENT,
+    ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS,
     zfs_errno
 )
@@ -596,6 +597,8 @@ def lzc_pool_checkpoint_translate_error(ret, name, discard=False):
         raise lzc_exc.DeviceRemovalRunning()
     if ret == ZFS_ERR_VDEV_TOO_BIG:
         raise lzc_exc.DeviceTooBig()
+    if ret == ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS:
+        raise lzc_exc.RaidzExpansionRunning()
     if discard:
         raise _generic_exception(
             ret, name, "Failed to discard pool checkpoint")
@@ -30,6 +30,7 @@ from ._constants import (
     ZFS_ERR_DEVRM_IN_PROGRESS,
     ZFS_ERR_VDEV_TOO_BIG,
     ZFS_ERR_WRONG_PARENT,
+    ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS,
     zfs_errno
 )
@@ -598,4 +599,9 @@ class DeviceTooBig(ZFSError):
     message = "One or more top-level vdevs exceed the maximum vdev size"
 
 
+class RaidzExpansionRunning(ZFSError):
+    errno = ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS
+    message = "A raidz device is currently expanding"
+
+
 # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
@@ -157,6 +157,7 @@ typedef enum zfs_error {
 	EZFS_CKSUM,		/* insufficient replicas */
 	EZFS_RESUME_EXISTS,	/* Resume on existing dataset without force */
 	EZFS_SHAREFAILED,	/* filesystem share failed */
+	EZFS_RAIDZ_EXPAND_IN_PROGRESS,	/* a raidz is currently expanding */
 	EZFS_UNKNOWN
 } zfs_error_t;
@@ -365,6 +365,7 @@ typedef enum {
 	VDEV_PROP_CHECKSUM_T,
 	VDEV_PROP_IO_N,
 	VDEV_PROP_IO_T,
+	VDEV_PROP_RAIDZ_EXPANDING,
 	VDEV_NUM_PROPS
 } vdev_prop_t;
@@ -724,6 +725,7 @@ typedef struct zpool_load_policy {
 #define	ZPOOL_CONFIG_SCAN_STATS		"scan_stats"	/* not stored on disk */
 #define	ZPOOL_CONFIG_REMOVAL_STATS	"removal_stats"	/* not stored on disk */
 #define	ZPOOL_CONFIG_CHECKPOINT_STATS	"checkpoint_stats"	/* not on disk */
+#define	ZPOOL_CONFIG_RAIDZ_EXPAND_STATS	"raidz_expand_stats"	/* not on disk */
 #define	ZPOOL_CONFIG_VDEV_STATS		"vdev_stats"	/* not stored on disk */
 #define	ZPOOL_CONFIG_INDIRECT_SIZE	"indirect_size"	/* not stored on disk */
@@ -789,6 +791,8 @@ typedef struct zpool_load_policy {
 #define	ZPOOL_CONFIG_SPARES		"spares"
 #define	ZPOOL_CONFIG_IS_SPARE		"is_spare"
 #define	ZPOOL_CONFIG_NPARITY		"nparity"
+#define	ZPOOL_CONFIG_RAIDZ_EXPANDING	"raidz_expanding"
+#define	ZPOOL_CONFIG_RAIDZ_EXPAND_TXGS	"raidz_expand_txgs"
 #define	ZPOOL_CONFIG_HOSTID		"hostid"
 #define	ZPOOL_CONFIG_HOSTNAME		"hostname"
 #define	ZPOOL_CONFIG_LOADED_TIME	"initial_load_time"
@@ -907,6 +911,15 @@ typedef struct zpool_load_policy {
 #define	VDEV_TOP_ZAP_ALLOCATION_BIAS \
 	"org.zfsonlinux:allocation_bias"
 
+#define	VDEV_TOP_ZAP_RAIDZ_EXPAND_STATE \
+	"org.openzfs:raidz_expand_state"
+#define	VDEV_TOP_ZAP_RAIDZ_EXPAND_START_TIME \
+	"org.openzfs:raidz_expand_start_time"
+#define	VDEV_TOP_ZAP_RAIDZ_EXPAND_END_TIME \
+	"org.openzfs:raidz_expand_end_time"
+#define	VDEV_TOP_ZAP_RAIDZ_EXPAND_BYTES_COPIED \
+	"org.openzfs:raidz_expand_bytes_copied"
+
 /* vdev metaslab allocation bias */
 #define	VDEV_ALLOC_BIAS_LOG		"log"
 #define	VDEV_ALLOC_BIAS_SPECIAL		"special"
@@ -1138,6 +1151,16 @@ typedef struct pool_removal_stat {
 	uint64_t prs_mapping_memory;
 } pool_removal_stat_t;
 
+typedef struct pool_raidz_expand_stat {
+	uint64_t pres_state;	/* dsl_scan_state_t */
+	uint64_t pres_expanding_vdev;
+	uint64_t pres_start_time;
+	uint64_t pres_end_time;
+	uint64_t pres_to_reflow; /* bytes that need to be moved */
+	uint64_t pres_reflowed; /* bytes moved so far */
+	uint64_t pres_waiting_for_resilver;
+} pool_raidz_expand_stat_t;
+
 typedef enum dsl_scan_state {
 	DSS_NONE,
 	DSS_SCANNING,
@@ -1577,6 +1600,7 @@ typedef enum {
 	ZFS_ERR_NOT_USER_NAMESPACE,
 	ZFS_ERR_RESUME_EXISTS,
 	ZFS_ERR_CRYPTO_NOTSUP,
+	ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS,
 } zfs_errno_t;
 
 /*
@@ -1601,6 +1625,7 @@ typedef enum {
 	ZPOOL_WAIT_RESILVER,
 	ZPOOL_WAIT_SCRUB,
 	ZPOOL_WAIT_TRIM,
+	ZPOOL_WAIT_RAIDZ_EXPAND,
 	ZPOOL_WAIT_NUM_ACTIVITIES
 } zpool_wait_activity_t;
@ -38,6 +38,7 @@
|
|||
#include <sys/vdev.h>
|
||||
#include <sys/vdev_rebuild.h>
|
||||
#include <sys/vdev_removal.h>
|
||||
#include <sys/vdev_raidz.h>
|
||||
#include <sys/metaslab.h>
|
||||
#include <sys/dmu.h>
|
||||
#include <sys/dsl_pool.h>
|
||||
|
@ -333,6 +334,9 @@ struct spa {
|
|||
spa_condensing_indirect_t *spa_condensing_indirect;
|
||||
zthr_t *spa_condense_zthr; /* zthr doing condense. */
|
||||
|
||||
vdev_raidz_expand_t *spa_raidz_expand;
|
||||
zthr_t *spa_raidz_expand_zthr;
|
||||
|
||||
uint64_t spa_checkpoint_txg; /* the txg of the checkpoint */
|
||||
spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
|
||||
zthr_t *spa_checkpoint_discard_zthr;
|
||||
|
|
|
@ -75,6 +75,39 @@ extern "C" {
|
|||
#define MMP_FAIL_INT_SET(fail) \
|
||||
(((uint64_t)(fail & 0xFFFF) << 48) | MMP_FAIL_INT_VALID_BIT)
|
||||
|
||||
/*
|
||||
* RAIDZ expansion reflow information.
|
||||
*
|
||||
* 64 56 48 40 32 24 16 8 0
|
||||
* +-------+-------+-------+-------+-------+-------+-------+-------+
|
||||
* |Scratch | Reflow |
|
||||
* | State | Offset |
|
||||
* +-------+-------+-------+-------+-------+-------+-------+-------+
|
||||
*/
|
||||
typedef enum raidz_reflow_scratch_state {
|
||||
RRSS_SCRATCH_NOT_IN_USE = 0,
|
||||
RRSS_SCRATCH_VALID,
|
||||
RRSS_SCRATCH_INVALID_SYNCED,
|
||||
RRSS_SCRATCH_INVALID_SYNCED_ON_IMPORT,
|
||||
RRSS_SCRATCH_INVALID_SYNCED_REFLOW
|
||||
} raidz_reflow_scratch_state_t;
|
||||
|
||||
#define RRSS_GET_OFFSET(ub) \
|
||||
BF64_GET_SB((ub)->ub_raidz_reflow_info, 0, 55, SPA_MINBLOCKSHIFT, 0)
|
||||
#define RRSS_SET_OFFSET(ub, x) \
|
||||
BF64_SET_SB((ub)->ub_raidz_reflow_info, 0, 55, SPA_MINBLOCKSHIFT, 0, x)
|
||||
|
||||
#define RRSS_GET_STATE(ub) \
|
||||
BF64_GET((ub)->ub_raidz_reflow_info, 55, 9)
|
||||
#define RRSS_SET_STATE(ub, x) \
|
||||
BF64_SET((ub)->ub_raidz_reflow_info, 55, 9, x)
|
||||
|
||||
#define RAIDZ_REFLOW_SET(ub, state, offset) do { \
|
||||
(ub)->ub_raidz_reflow_info = 0; \
|
||||
RRSS_SET_OFFSET(ub, offset); \
|
||||
RRSS_SET_STATE(ub, state); \
|
||||
} while (0)
|
||||
|
||||
struct uberblock {
|
||||
uint64_t ub_magic; /* UBERBLOCK_MAGIC */
|
||||
uint64_t ub_version; /* SPA_VERSION */
|
||||
|
@ -136,6 +169,8 @@ struct uberblock {
|
|||
* the ZIL block is not allocated [see uses of spa_min_claim_txg()].
|
||||
*/
|
||||
uint64_t ub_checkpoint_txg;
|
||||
|
||||
uint64_t ub_raidz_reflow_info;
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -132,15 +132,19 @@ extern void vdev_space_update(vdev_t *vd,
|
|||
|
||||
extern int64_t vdev_deflated_space(vdev_t *vd, int64_t space);
|
||||
|
||||
extern uint64_t vdev_psize_to_asize_txg(vdev_t *vd, uint64_t psize,
|
||||
uint64_t txg);
|
||||
extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize);
|
||||
|
||||
/*
|
||||
* Return the amount of space allocated for a gang block header.
|
||||
* Return the amount of space allocated for a gang block header. Note that
|
||||
* since the physical birth txg is not provided, this must be constant for
|
||||
* a given vdev. (e.g. raidz expansion can't change this)
|
||||
*/
|
||||
static inline uint64_t
|
||||
vdev_gang_header_asize(vdev_t *vd)
|
||||
{
|
||||
return (vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE));
|
||||
return (vdev_psize_to_asize_txg(vd, SPA_GANGBLOCKSIZE, 0));
|
||||
}
|
||||
|
||||
extern int vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux);
|
||||
|
@ -204,6 +208,8 @@ extern void vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t
|
|||
offset, uint64_t size, zio_done_func_t *done, void *priv, int flags);
|
||||
extern int vdev_label_read_bootenv(vdev_t *, nvlist_t *);
|
||||
extern int vdev_label_write_bootenv(vdev_t *, nvlist_t *);
|
||||
extern int vdev_uberblock_sync_list(vdev_t **, int, struct uberblock *, int);
|
||||
extern int vdev_check_boot_reserve(spa_t *, vdev_t *);
|
||||
|
||||
typedef enum {
|
||||
VDEV_LABEL_CREATE, /* create/add a new device */
|
||||
|
|
|
@ -72,7 +72,7 @@ typedef void vdev_fini_func_t(vdev_t *vd);
|
|||
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
|
||||
uint64_t *ashift, uint64_t *pshift);
|
||||
typedef void vdev_close_func_t(vdev_t *vd);
|
||||
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
|
||||
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize, uint64_t txg);
|
||||
typedef uint64_t vdev_min_asize_func_t(vdev_t *vd);
|
||||
typedef uint64_t vdev_min_alloc_func_t(vdev_t *vd);
|
||||
typedef void vdev_io_start_func_t(zio_t *zio);
|
||||
|
@ -281,6 +281,7 @@ struct vdev {
|
|||
uint64_t vdev_noalloc; /* device is passivated? */
|
||||
uint64_t vdev_removing; /* device is being removed? */
|
||||
uint64_t vdev_failfast; /* device failfast setting */
|
||||
boolean_t vdev_rz_expanding; /* raidz is being expanded? */
|
||||
boolean_t vdev_ishole; /* is a hole in the namespace */
|
||||
uint64_t vdev_top_zap;
|
||||
vdev_alloc_bias_t vdev_alloc_bias; /* metaslab allocation bias */
|
||||
|
@ -536,6 +537,7 @@ typedef struct vdev_label {
|
|||
/*
|
||||
* Size of embedded boot loader region on each label.
|
||||
* The total size of the first two labels plus the boot area is 4MB.
|
||||
* On RAIDZ, this space is overwritten during RAIDZ expansion.
|
||||
*/
|
||||
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
|
||||
|
||||
|
@ -608,7 +610,7 @@ extern vdev_ops_t vdev_indirect_ops;
|
|||
*/
|
||||
extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
|
||||
range_seg64_t *physical_rs, range_seg64_t *remain_rs);
|
||||
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
|
||||
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg);
|
||||
extern uint64_t vdev_default_min_asize(vdev_t *vd);
|
||||
extern uint64_t vdev_get_min_asize(vdev_t *vd);
|
||||
extern void vdev_set_min_asize(vdev_t *vd);
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#define _SYS_VDEV_RAIDZ_H
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/zfs_rlock.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -35,6 +36,8 @@ struct zio;
|
|||
struct raidz_col;
|
||||
struct raidz_row;
|
||||
struct raidz_map;
|
||||
struct vdev_raidz;
|
||||
struct uberblock;
|
||||
#if !defined(_KERNEL)
|
||||
struct kernel_param {};
|
||||
#endif
|
||||
|
@ -44,13 +47,19 @@ struct kernel_param {};
|
|||
*/
|
||||
struct raidz_map *vdev_raidz_map_alloc(struct zio *, uint64_t, uint64_t,
|
||||
uint64_t);
|
||||
struct raidz_map *vdev_raidz_map_alloc_expanded(struct zio *,
|
||||
uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, boolean_t);
|
||||
void vdev_raidz_map_free(struct raidz_map *);
|
||||
void vdev_raidz_free(struct vdev_raidz *);
|
||||
void vdev_raidz_generate_parity_row(struct raidz_map *, struct raidz_row *);
|
||||
void vdev_raidz_generate_parity(struct raidz_map *);
|
||||
void vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
|
||||
void vdev_raidz_child_done(zio_t *);
|
||||
void vdev_raidz_io_done(zio_t *);
|
||||
void vdev_raidz_checksum_error(zio_t *, struct raidz_col *, abd_t *);
|
||||
struct raidz_row *vdev_raidz_row_alloc(int);
|
||||
void vdev_raidz_reflow_copy_scratch(spa_t *);
|
||||
void raidz_dtl_reassessed(vdev_t *);
|
||||
|
||||
extern const zio_vsd_ops_t vdev_raidz_vsd_ops;
|
||||
|
||||
|
@ -65,11 +74,101 @@ int vdev_raidz_math_reconstruct(struct raidz_map *, struct raidz_row *,
|
|||
const int *, const int *, const int);
|
||||
int vdev_raidz_impl_set(const char *);
|
||||
|
||||
typedef struct vdev_raidz_expand {
|
||||
uint64_t vre_vdev_id;
|
||||
|
||||
kmutex_t vre_lock;
|
||||
kcondvar_t vre_cv;
|
||||
|
||||
/*
|
||||
* How much i/o is outstanding (issued and not completed).
|
||||
*/
|
||||
uint64_t vre_outstanding_bytes;
|
||||
|
||||
/*
|
||||
* Next offset to issue i/o for.
|
||||
*/
|
||||
uint64_t vre_offset;
|
||||
|
||||
/*
|
||||
* Lowest offset of a failed expansion i/o. The expansion will retry
|
||||
* from here. Once the expansion thread notices the failure and exits,
|
||||
* vre_failed_offset is reset back to UINT64_MAX, and
|
||||
* vre_waiting_for_resilver will be set.
|
||||
*/
|
||||
uint64_t vre_failed_offset;
|
||||
boolean_t vre_waiting_for_resilver;
|
||||
|
||||
/*
|
||||
* Offset that is completing each txg
|
||||
*/
|
||||
uint64_t vre_offset_pertxg[TXG_SIZE];
|
||||
|
||||
/*
|
||||
* Bytes copied in each txg.
|
||||
*/
|
||||
uint64_t vre_bytes_copied_pertxg[TXG_SIZE];
|
||||
|
||||
/*
|
||||
* The rangelock prevents normal read/write zio's from happening while
|
||||
* there are expansion (reflow) i/os in progress to the same offsets.
|
||||
*/
|
||||
zfs_rangelock_t vre_rangelock;
|
||||
|
||||
/*
|
||||
* These fields are stored on-disk in the vdev_top_zap:
|
||||
*/
|
||||
dsl_scan_state_t vre_state;
|
||||
uint64_t vre_start_time;
|
||||
uint64_t vre_end_time;
|
||||
uint64_t vre_bytes_copied;
|
||||
} vdev_raidz_expand_t;
|
||||
|
||||
typedef struct vdev_raidz {
|
||||
int vd_logical_width;
|
||||
/*
|
||||
* Number of child vdevs when this raidz vdev was created (i.e. before
|
||||
* any raidz expansions).
|
||||
*/
|
||||
int vd_original_width;
|
||||
|
||||
/*
|
||||
* The current number of child vdevs, which may be more than the
|
||||
* original width if an expansion is in progress or has completed.
|
||||
*/
|
||||
int vd_physical_width;
|
||||
|
||||
int vd_nparity;
|
||||
|
||||
/*
|
||||
* Tree of reflow_node_t's. The lock protects the avl tree only.
|
||||
* The reflow_node_t's describe completed expansions, and are used
|
||||
* to determine the logical width given a block's birth time.
|
||||
*/
|
||||
avl_tree_t vd_expand_txgs;
|
||||
kmutex_t vd_expand_lock;
|
||||
|
||||
/*
|
||||
* If this vdev is being expanded, spa_raidz_expand is set to this
|
||||
*/
|
||||
vdev_raidz_expand_t vn_vre;
|
||||
} vdev_raidz_t;
|
||||
|
||||
extern int vdev_raidz_attach_check(vdev_t *);
|
||||
extern void vdev_raidz_attach_sync(void *, dmu_tx_t *);
|
||||
extern void spa_start_raidz_expansion_thread(spa_t *);
|
||||
extern int spa_raidz_expand_get_stats(spa_t *, pool_raidz_expand_stat_t *);
|
||||
extern int vdev_raidz_load(vdev_t *);
|
||||
|
||||
/* RAIDZ scratch area pause points (for testing) */
|
||||
#define RAIDZ_EXPAND_PAUSE_NONE 0
|
||||
#define RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_1 1
|
||||
#define RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_2 2
|
||||
#define RAIDZ_EXPAND_PAUSE_PRE_SCRATCH_3 3
|
||||
#define RAIDZ_EXPAND_PAUSE_SCRATCH_VALID 4
|
||||
#define RAIDZ_EXPAND_PAUSE_SCRATCH_REFLOWED 5
|
||||
#define RAIDZ_EXPAND_PAUSE_SCRATCH_POST_REFLOW_1 6
|
||||
#define RAIDZ_EXPAND_PAUSE_SCRATCH_POST_REFLOW_2 7
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -30,6 +30,8 @@
|
|||
#include <sys/kstat.h>
|
||||
#include <sys/abd.h>
|
||||
#include <sys/vdev_impl.h>
|
||||
#include <sys/abd_impl.h>
|
||||
#include <sys/zfs_rlock.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -102,28 +104,32 @@ typedef struct raidz_impl_ops {
|
|||
char name[RAIDZ_IMPL_NAME_MAX]; /* Name of the implementation */
|
||||
} raidz_impl_ops_t;
|
||||
|
||||
|
||||
typedef struct raidz_col {
|
||||
uint64_t rc_devidx; /* child device index for I/O */
|
||||
int rc_devidx; /* child device index for I/O */
|
||||
uint32_t rc_size; /* I/O size */
|
||||
uint64_t rc_offset; /* device offset */
|
||||
uint64_t rc_size; /* I/O size */
|
||||
abd_t rc_abdstruct; /* rc_abd probably points here */
|
||||
abd_t *rc_abd; /* I/O data */
|
||||
abd_t *rc_orig_data; /* pre-reconstruction */
|
||||
int rc_error; /* I/O error for this device */
|
||||
uint8_t rc_tried; /* Did we attempt this I/O column? */
|
||||
uint8_t rc_skipped; /* Did we skip this I/O column? */
|
||||
uint8_t rc_need_orig_restore; /* need to restore from orig_data? */
|
||||
uint8_t rc_force_repair; /* Write good data to this column */
|
||||
uint8_t rc_allow_repair; /* Allow repair I/O to this column */
|
||||
uint8_t rc_tried:1; /* Did we attempt this I/O column? */
|
||||
uint8_t rc_skipped:1; /* Did we skip this I/O column? */
|
||||
uint8_t rc_need_orig_restore:1; /* need to restore from orig_data? */
|
||||
uint8_t rc_force_repair:1; /* Write good data to this column */
|
||||
uint8_t rc_allow_repair:1; /* Allow repair I/O to this column */
|
||||
int rc_shadow_devidx; /* for double write during expansion */
|
||||
int rc_shadow_error; /* for double write during expansion */
|
||||
uint64_t rc_shadow_offset; /* for double write during expansion */
|
||||
} raidz_col_t;
|
||||
|
||||
typedef struct raidz_row {
|
||||
uint64_t rr_cols; /* Regular column count */
|
||||
uint64_t rr_scols; /* Count including skipped columns */
|
||||
uint64_t rr_bigcols; /* Remainder data column count */
|
||||
uint64_t rr_missingdata; /* Count of missing data devices */
|
||||
uint64_t rr_missingparity; /* Count of missing parity devices */
|
||||
uint64_t rr_firstdatacol; /* First data column/parity count */
|
||||
int rr_cols; /* Regular column count */
|
||||
int rr_scols; /* Count including skipped columns */
|
||||
int rr_bigcols; /* Remainder data column count */
|
||||
int rr_missingdata; /* Count of missing data devices */
|
||||
int rr_missingparity; /* Count of missing parity devices */
|
||||
int rr_firstdatacol; /* First data column/parity count */
|
||||
abd_t *rr_abd_empty; /* dRAID empty sector buffer */
|
||||
int rr_nempty; /* empty sectors included in parity */
|
||||
#ifdef ZFS_DEBUG
|
||||
|
@ -138,10 +144,25 @@ typedef struct raidz_map {
|
|||
int rm_nrows; /* Regular row count */
|
||||
int rm_nskip; /* RAIDZ sectors skipped for padding */
|
||||
int rm_skipstart; /* Column index of padding start */
|
||||
int rm_original_width; /* pre-expansion width of raidz vdev */
|
||||
int rm_nphys_cols; /* num entries in rm_phys_col[] */
|
||||
zfs_locked_range_t *rm_lr;
|
||||
const raidz_impl_ops_t *rm_ops; /* RAIDZ math operations */
|
||||
raidz_col_t *rm_phys_col; /* if non-NULL, read i/o aggregation */
|
||||
raidz_row_t *rm_row[0]; /* flexible array of rows */
|
||||
} raidz_map_t;
|
||||
|
||||
/*
|
||||
* Nodes in vdev_raidz_t:vd_expand_txgs.
|
||||
* Blocks with physical birth time of re_txg or later have the specified
|
||||
* logical width (until the next node).
|
||||
*/
|
||||
typedef struct reflow_node {
|
||||
uint64_t re_txg;
|
||||
uint64_t re_logical_width;
|
||||
avl_node_t re_link;
|
||||
} reflow_node_t;
|
||||
|
||||
|
||||
#define RAIDZ_ORIGINAL_IMPL (INT_MAX)
|
||||
|
||||
|
|
|
@@ -58,6 +58,7 @@ extern int zfs_dbgmsg_enable;
 #define	ZFS_DEBUG_LOG_SPACEMAP		(1 << 12)
 #define	ZFS_DEBUG_METASLAB_ALLOC	(1 << 13)
 #define	ZFS_DEBUG_BRT			(1 << 14)
+#define	ZFS_DEBUG_RAIDZ_RECONSTRUCT	(1 << 15)
 
 extern void __set_error(const char *file, const char *func, int line, int err);
 extern void __zfs_dbgmsg(char *buf);
@@ -81,6 +81,7 @@ typedef enum spa_feature {
 	SPA_FEATURE_BLOCK_CLONING,
 	SPA_FEATURE_AVZ_V2,
 	SPA_FEATURE_REDACTION_LIST_SPILL,
+	SPA_FEATURE_RAIDZ_EXPANSION,
 	SPA_FEATURES
 } spa_feature_t;
@ -602,7 +602,7 @@
|
|||
<elf-symbol name='fletcher_4_superscalar_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='spa_feature_table' size='2240' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='spa_feature_table' size='2296' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
|
||||
|
@ -1257,41 +1257,15 @@
|
|||
</function-decl>
|
||||
</abi-instr>
|
||||
<abi-instr address-size='64' path='lib/libtpool/thread_pool.c' language='LANG_C99'>
|
||||
<array-type-def dimensions='1' type-id='8901473c' size-in-bits='576' id='f5da478b'>
|
||||
<subrange length='1' type-id='7359adad' id='52f813b4'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='49ef3ffd' size-in-bits='1024' id='a14403f5'>
|
||||
<subrange length='16' type-id='7359adad' id='848d0938'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='384' id='36d7f119'>
|
||||
<subrange length='48' type-id='7359adad' id='8f6d2a81'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='bd54fe1a' size-in-bits='512' id='5d4efd44'>
|
||||
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='f0981eeb' size-in-bits='64' id='0d532ec1'>
|
||||
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='eaa32e2f' size-in-bits='256' id='209ef23f'>
|
||||
<subrange length='4' type-id='7359adad' id='16fe7105'/>
|
||||
</array-type-def>
|
||||
<class-decl name='__cancel_jmp_buf_tag' size-in-bits='576' is-struct='yes' visibility='default' id='8901473c'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='__cancel_jmp_buf' type-id='379a1ab7' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='512'>
|
||||
<var-decl name='__mask_was_saved' type-id='95e97e5e' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<class-decl name='__pthread_unwind_buf_t' size-in-bits='832' is-struct='yes' naming-typedef-id='4423cf7f' visibility='default' id='a0abc656'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='__cancel_jmp_buf' type-id='f5da478b' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='576'>
|
||||
<var-decl name='__pad' type-id='209ef23f' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<typedef-decl name='__pthread_unwind_buf_t' type-id='a0abc656' id='4423cf7f'/>
|
||||
<union-decl name='__atomic_wide_counter' size-in-bits='64' naming-typedef-id='f3b40860' visibility='default' id='613ce450'>
|
||||
<data-member access='public'>
|
||||
<var-decl name='__value64' type-id='3a47d82b' visibility='default'/>
|
||||
|
@ -1337,7 +1311,6 @@
|
|||
</data-member>
|
||||
</union-decl>
|
||||
<typedef-decl name='pthread_cond_t' type-id='cbb12c12' id='62fab762'/>
|
||||
<typedef-decl name='__jmp_buf' type-id='5d4efd44' id='379a1ab7'/>
|
||||
<class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='c987b47c'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='__wseq' type-id='f3b40860' visibility='default'/>
|
||||
|
@ -1387,13 +1360,6 @@
|
|||
<var-decl name='tpa_tid' type-id='4051f5e7' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<pointer-type-def type-id='8901473c' size-in-bits='64' id='eb91b7ea'/>
|
||||
<pointer-type-def type-id='4423cf7f' size-in-bits='64' id='ba7c727c'/>
|
||||
<pointer-type-def type-id='b9c97942' size-in-bits='64' id='bbf06c47'/>
|
||||
<qualified-type-def type-id='bbf06c47' restrict='yes' id='65e6ec45'/>
|
||||
<qualified-type-def type-id='b9c97942' const='yes' id='191f6b72'/>
|
||||
<pointer-type-def type-id='191f6b72' size-in-bits='64' id='e475fb88'/>
|
||||
<qualified-type-def type-id='e475fb88' restrict='yes' id='5a8729d0'/>
|
||||
<qualified-type-def type-id='8037c762' const='yes' id='f50ea9b2'/>
|
||||
<pointer-type-def type-id='f50ea9b2' size-in-bits='64' id='5e14fa48'/>
|
||||
<qualified-type-def type-id='836265dd' const='yes' id='7d24c58d'/>
|
||||
|
@ -1521,18 +1487,10 @@
|
|||
<parameter type-id='7292109c'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='__pthread_register_cancel' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='ba7c727c'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='__pthread_unregister_cancel' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='ba7c727c'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='__pthread_unwind_next' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='ba7c727c'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_cond_init' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='2a468b41'/>
|
||||
<parameter type-id='4c428e67'/>
|
||||
|
@ -1561,12 +1519,6 @@
|
|||
<parameter type-id='95e97e5e'/>
|
||||
<return type-id='bd54fe1a'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_sigmask' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='95e97e5e'/>
|
||||
<parameter type-id='5a8729d0'/>
|
||||
<parameter type-id='65e6ec45'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='tpool_abandon' mangled-name='tpool_abandon' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_abandon'>
|
||||
<parameter type-id='9cf59a50' name='tpool'/>
|
||||
<return type-id='48b5725f'/>
|
||||
|
@ -5671,7 +5623,8 @@
|
|||
<enumerator name='VDEV_PROP_CHECKSUM_T' value='43'/>
|
||||
<enumerator name='VDEV_PROP_IO_N' value='44'/>
|
||||
<enumerator name='VDEV_PROP_IO_T' value='45'/>
|
||||
<enumerator name='VDEV_NUM_PROPS' value='46'/>
|
||||
<enumerator name='VDEV_PROP_RAIDZ_EXPANDING' value='46'/>
|
||||
<enumerator name='VDEV_NUM_PROPS' value='47'/>
|
||||
</enum-decl>
|
||||
<typedef-decl name='vdev_prop_t' type-id='1573bec8' id='5aa5c90c'/>
|
||||
<class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
|
||||
|
@ -5768,6 +5721,112 @@
|
|||
<enumerator name='POOL_TRIM_FUNCS' value='3'/>
|
||||
</enum-decl>
|
||||
<typedef-decl name='pool_trim_func_t' type-id='54ed608a' id='b1146b8d'/>
|
||||
<enum-decl name='zfs_ioc' id='12033f13'>
|
||||
<underlying-type type-id='9cac1fee'/>
|
||||
<enumerator name='ZFS_IOC_FIRST' value='23040'/>
|
||||
<enumerator name='ZFS_IOC' value='23040'/>
|
||||
<enumerator name='ZFS_IOC_POOL_CREATE' value='23040'/>
|
||||
<enumerator name='ZFS_IOC_POOL_DESTROY' value='23041'/>
|
||||
<enumerator name='ZFS_IOC_POOL_IMPORT' value='23042'/>
|
||||
<enumerator name='ZFS_IOC_POOL_EXPORT' value='23043'/>
|
||||
<enumerator name='ZFS_IOC_POOL_CONFIGS' value='23044'/>
|
||||
<enumerator name='ZFS_IOC_POOL_STATS' value='23045'/>
|
||||
<enumerator name='ZFS_IOC_POOL_TRYIMPORT' value='23046'/>
|
||||
<enumerator name='ZFS_IOC_POOL_SCAN' value='23047'/>
|
||||
<enumerator name='ZFS_IOC_POOL_FREEZE' value='23048'/>
|
||||
<enumerator name='ZFS_IOC_POOL_UPGRADE' value='23049'/>
|
||||
<enumerator name='ZFS_IOC_POOL_GET_HISTORY' value='23050'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_ADD' value='23051'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_REMOVE' value='23052'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_SET_STATE' value='23053'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_ATTACH' value='23054'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_DETACH' value='23055'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_SETPATH' value='23056'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_SETFRU' value='23057'/>
|
||||
<enumerator name='ZFS_IOC_OBJSET_STATS' value='23058'/>
|
||||
<enumerator name='ZFS_IOC_OBJSET_ZPLPROPS' value='23059'/>
|
||||
<enumerator name='ZFS_IOC_DATASET_LIST_NEXT' value='23060'/>
|
||||
<enumerator name='ZFS_IOC_SNAPSHOT_LIST_NEXT' value='23061'/>
|
||||
<enumerator name='ZFS_IOC_SET_PROP' value='23062'/>
|
||||
<enumerator name='ZFS_IOC_CREATE' value='23063'/>
|
||||
<enumerator name='ZFS_IOC_DESTROY' value='23064'/>
|
||||
<enumerator name='ZFS_IOC_ROLLBACK' value='23065'/>
|
||||
<enumerator name='ZFS_IOC_RENAME' value='23066'/>
|
||||
<enumerator name='ZFS_IOC_RECV' value='23067'/>
|
||||
<enumerator name='ZFS_IOC_SEND' value='23068'/>
|
||||
<enumerator name='ZFS_IOC_INJECT_FAULT' value='23069'/>
|
||||
<enumerator name='ZFS_IOC_CLEAR_FAULT' value='23070'/>
|
||||
<enumerator name='ZFS_IOC_INJECT_LIST_NEXT' value='23071'/>
|
||||
<enumerator name='ZFS_IOC_ERROR_LOG' value='23072'/>
|
||||
<enumerator name='ZFS_IOC_CLEAR' value='23073'/>
|
||||
<enumerator name='ZFS_IOC_PROMOTE' value='23074'/>
|
||||
<enumerator name='ZFS_IOC_SNAPSHOT' value='23075'/>
|
||||
<enumerator name='ZFS_IOC_DSOBJ_TO_DSNAME' value='23076'/>
|
||||
<enumerator name='ZFS_IOC_OBJ_TO_PATH' value='23077'/>
|
||||
<enumerator name='ZFS_IOC_POOL_SET_PROPS' value='23078'/>
|
||||
<enumerator name='ZFS_IOC_POOL_GET_PROPS' value='23079'/>
|
||||
<enumerator name='ZFS_IOC_SET_FSACL' value='23080'/>
|
||||
<enumerator name='ZFS_IOC_GET_FSACL' value='23081'/>
|
||||
<enumerator name='ZFS_IOC_SHARE' value='23082'/>
|
||||
<enumerator name='ZFS_IOC_INHERIT_PROP' value='23083'/>
|
||||
<enumerator name='ZFS_IOC_SMB_ACL' value='23084'/>
|
||||
<enumerator name='ZFS_IOC_USERSPACE_ONE' value='23085'/>
|
||||
<enumerator name='ZFS_IOC_USERSPACE_MANY' value='23086'/>
|
||||
<enumerator name='ZFS_IOC_USERSPACE_UPGRADE' value='23087'/>
|
||||
<enumerator name='ZFS_IOC_HOLD' value='23088'/>
|
||||
<enumerator name='ZFS_IOC_RELEASE' value='23089'/>
|
||||
<enumerator name='ZFS_IOC_GET_HOLDS' value='23090'/>
|
||||
<enumerator name='ZFS_IOC_OBJSET_RECVD_PROPS' value='23091'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_SPLIT' value='23092'/>
|
||||
<enumerator name='ZFS_IOC_NEXT_OBJ' value='23093'/>
|
||||
<enumerator name='ZFS_IOC_DIFF' value='23094'/>
|
||||
<enumerator name='ZFS_IOC_TMP_SNAPSHOT' value='23095'/>
|
||||
<enumerator name='ZFS_IOC_OBJ_TO_STATS' value='23096'/>
|
||||
<enumerator name='ZFS_IOC_SPACE_WRITTEN' value='23097'/>
|
||||
<enumerator name='ZFS_IOC_SPACE_SNAPS' value='23098'/>
|
||||
<enumerator name='ZFS_IOC_DESTROY_SNAPS' value='23099'/>
|
||||
<enumerator name='ZFS_IOC_POOL_REGUID' value='23100'/>
|
||||
<enumerator name='ZFS_IOC_POOL_REOPEN' value='23101'/>
|
||||
<enumerator name='ZFS_IOC_SEND_PROGRESS' value='23102'/>
|
||||
<enumerator name='ZFS_IOC_LOG_HISTORY' value='23103'/>
|
||||
<enumerator name='ZFS_IOC_SEND_NEW' value='23104'/>
|
||||
<enumerator name='ZFS_IOC_SEND_SPACE' value='23105'/>
|
||||
<enumerator name='ZFS_IOC_CLONE' value='23106'/>
|
||||
<enumerator name='ZFS_IOC_BOOKMARK' value='23107'/>
|
||||
<enumerator name='ZFS_IOC_GET_BOOKMARKS' value='23108'/>
|
||||
<enumerator name='ZFS_IOC_DESTROY_BOOKMARKS' value='23109'/>
|
||||
<enumerator name='ZFS_IOC_RECV_NEW' value='23110'/>
|
||||
<enumerator name='ZFS_IOC_POOL_SYNC' value='23111'/>
|
||||
<enumerator name='ZFS_IOC_CHANNEL_PROGRAM' value='23112'/>
|
||||
<enumerator name='ZFS_IOC_LOAD_KEY' value='23113'/>
|
||||
<enumerator name='ZFS_IOC_UNLOAD_KEY' value='23114'/>
|
||||
<enumerator name='ZFS_IOC_CHANGE_KEY' value='23115'/>
|
||||
<enumerator name='ZFS_IOC_REMAP' value='23116'/>
|
||||
<enumerator name='ZFS_IOC_POOL_CHECKPOINT' value='23117'/>
|
||||
<enumerator name='ZFS_IOC_POOL_DISCARD_CHECKPOINT' value='23118'/>
|
||||
<enumerator name='ZFS_IOC_POOL_INITIALIZE' value='23119'/>
|
||||
<enumerator name='ZFS_IOC_POOL_TRIM' value='23120'/>
|
||||
<enumerator name='ZFS_IOC_REDACT' value='23121'/>
|
||||
<enumerator name='ZFS_IOC_GET_BOOKMARK_PROPS' value='23122'/>
|
||||
<enumerator name='ZFS_IOC_WAIT' value='23123'/>
|
||||
<enumerator name='ZFS_IOC_WAIT_FS' value='23124'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_GET_PROPS' value='23125'/>
|
||||
<enumerator name='ZFS_IOC_VDEV_SET_PROPS' value='23126'/>
|
||||
<enumerator name='ZFS_IOC_POOL_SCRUB' value='23127'/>
|
||||
<enumerator name='ZFS_IOC_PLATFORM' value='23168'/>
|
||||
<enumerator name='ZFS_IOC_EVENTS_NEXT' value='23169'/>
|
||||
<enumerator name='ZFS_IOC_EVENTS_CLEAR' value='23170'/>
|
||||
<enumerator name='ZFS_IOC_EVENTS_SEEK' value='23171'/>
|
||||
<enumerator name='ZFS_IOC_NEXTBOOT' value='23172'/>
|
||||
<enumerator name='ZFS_IOC_JAIL' value='23173'/>
|
||||
<enumerator name='ZFS_IOC_USERNS_ATTACH' value='23173'/>
|
||||
<enumerator name='ZFS_IOC_UNJAIL' value='23174'/>
|
||||
<enumerator name='ZFS_IOC_USERNS_DETACH' value='23174'/>
|
||||
<enumerator name='ZFS_IOC_SET_BOOTENV' value='23175'/>
|
||||
<enumerator name='ZFS_IOC_GET_BOOTENV' value='23176'/>
|
||||
<enumerator name='ZFS_IOC_LAST' value='23177'/>
|
||||
</enum-decl>
|
||||
<typedef-decl name='zfs_ioc_t' type-id='12033f13' id='5b35941c'/>
|
||||
<enum-decl name='zpool_wait_activity_t' naming-typedef-id='73446457' id='849338e3'>
|
||||
<underlying-type type-id='9cac1fee'/>
|
||||
<enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
|
||||
|
@ -5778,7 +5837,8 @@
|
|||
<enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
|
||||
<enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
|
||||
<enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
|
||||
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
|
||||
<enumerator name='ZPOOL_WAIT_RAIDZ_EXPAND' value='8'/>
|
||||
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='9'/>
|
||||
</enum-decl>
|
||||
<typedef-decl name='zpool_wait_activity_t' type-id='849338e3' id='73446457'/>
|
||||
<enum-decl name='spa_feature' id='33ecb627'>
|
||||
|
@ -5824,7 +5884,8 @@
|
|||
<enumerator name='SPA_FEATURE_BLOCK_CLONING' value='37'/>
|
||||
<enumerator name='SPA_FEATURE_AVZ_V2' value='38'/>
|
||||
<enumerator name='SPA_FEATURE_REDACTION_LIST_SPILL' value='39'/>
|
||||
<enumerator name='SPA_FEATURES' value='40'/>
|
||||
<enumerator name='SPA_FEATURE_RAIDZ_EXPANSION' value='40'/>
|
||||
<enumerator name='SPA_FEATURES' value='41'/>
|
||||
</enum-decl>
|
||||
<typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
|
||||
<qualified-type-def type-id='22cce67b' const='yes' id='d2816df0'/>
|
||||
|
@ -5941,6 +6002,13 @@
|
|||
<parameter type-id='857bb57e'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='lzc_scrub' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='5b35941c'/>
|
||||
<parameter type-id='80f4b756'/>
|
||||
<parameter type-id='5ce45b60'/>
|
||||
<parameter type-id='857bb57e'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
|
||||
<parameter type-id='80f4b756'/>
|
||||
<parameter type-id='26a90f95'/>
|
||||
|
@ -6556,6 +6624,15 @@
|
|||
</function-decl>
|
||||
</abi-instr>
|
||||
<abi-instr address-size='64' path='lib/libzfs/libzfs_sendrecv.c' language='LANG_C99'>
|
||||
<array-type-def dimensions='1' type-id='8901473c' size-in-bits='576' id='f5da478b'>
|
||||
<subrange length='1' type-id='7359adad' id='52f813b4'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='384' id='73b82f0f'>
|
||||
<subrange length='12' type-id='7359adad' id='84827bdc'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='bd54fe1a' size-in-bits='512' id='5d4efd44'>
|
||||
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='2176' id='8c2bcad1'>
|
||||
<subrange length='34' type-id='7359adad' id='6a6a7e00'/>
|
||||
</array-type-def>
|
||||
|
@ -6577,6 +6654,9 @@
|
|||
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='64' id='13339fda'>
|
||||
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='eaa32e2f' size-in-bits='256' id='209ef23f'>
|
||||
<subrange length='4' type-id='7359adad' id='16fe7105'/>
|
||||
</array-type-def>
|
||||
<class-decl name='sendflags' size-in-bits='576' is-struct='yes' visibility='default' id='f6aa15be'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='verbosity' type-id='95e97e5e' visibility='default'/>
|
||||
|
@ -7123,25 +7203,103 @@
|
|||
<var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<class-decl name='__cancel_jmp_buf_tag' size-in-bits='576' is-struct='yes' visibility='default' id='8901473c'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='__cancel_jmp_buf' type-id='379a1ab7' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='512'>
|
||||
<var-decl name='__mask_was_saved' type-id='95e97e5e' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<class-decl name='__pthread_unwind_buf_t' size-in-bits='832' is-struct='yes' naming-typedef-id='4423cf7f' visibility='default' id='a0abc656'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='__cancel_jmp_buf' type-id='f5da478b' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='576'>
|
||||
<var-decl name='__pad' type-id='209ef23f' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<typedef-decl name='__pthread_unwind_buf_t' type-id='a0abc656' id='4423cf7f'/>
|
||||
<typedef-decl name='__jmp_buf' type-id='5d4efd44' id='379a1ab7'/>
|
||||
<typedef-decl name='__clockid_t' type-id='95e97e5e' id='08f9a87a'/>
|
||||
<typedef-decl name='__timer_t' type-id='eaa32e2f' id='df209b60'/>
|
||||
<typedef-decl name='clockid_t' type-id='08f9a87a' id='a1c3b834'/>
|
||||
<class-decl name='sigevent' size-in-bits='512' is-struct='yes' visibility='default' id='519bc206'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='sigev_value' type-id='eabacd01' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='64'>
|
||||
<var-decl name='sigev_signo' type-id='95e97e5e' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='96'>
|
||||
<var-decl name='sigev_notify' type-id='95e97e5e' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='128'>
|
||||
<var-decl name='_sigev_un' type-id='ac5ab599' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<union-decl name='__anonymous_union__1' size-in-bits='384' is-anonymous='yes' visibility='default' id='ac5ab599'>
|
||||
<data-member access='public'>
|
||||
<var-decl name='_pad' type-id='73b82f0f' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public'>
|
||||
<var-decl name='_tid' type-id='3629bad8' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public'>
|
||||
<var-decl name='_sigev_thread' type-id='e7f43f7b' visibility='default'/>
|
||||
</data-member>
|
||||
</union-decl>
|
||||
<class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f7b'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='_function' type-id='5f147c28' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='64'>
|
||||
<var-decl name='_attribute' type-id='7347a39e' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<class-decl name='itimerspec' size-in-bits='256' is-struct='yes' visibility='default' id='acbdbcc6'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='it_interval' type-id='a9c79a1f' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='128'>
|
||||
<var-decl name='it_value' type-id='a9c79a1f' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<typedef-decl name='timer_t' type-id='df209b60' id='b07ae406'/>
|
||||
<typedef-decl name='Byte' type-id='002ac4a6' id='efb9ba06'/>
|
||||
<typedef-decl name='uLong' type-id='7359adad' id='5bbcce85'/>
|
||||
<typedef-decl name='Bytef' type-id='efb9ba06' id='c1606520'/>
|
||||
<typedef-decl name='uLongf' type-id='5bbcce85' id='4d39af59'/>
|
||||
<pointer-type-def type-id='c1606520' size-in-bits='64' id='4c667223'/>
|
||||
<pointer-type-def type-id='8901473c' size-in-bits='64' id='eb91b7ea'/>
|
||||
<pointer-type-def type-id='4423cf7f' size-in-bits='64' id='ba7c727c'/>
|
||||
<pointer-type-def type-id='b9c97942' size-in-bits='64' id='bbf06c47'/>
|
||||
<qualified-type-def type-id='bbf06c47' restrict='yes' id='65e6ec45'/>
|
||||
<qualified-type-def type-id='c1606520' const='yes' id='a6124a50'/>
|
||||
<pointer-type-def type-id='a6124a50' size-in-bits='64' id='e8cb3e0e'/>
|
||||
<qualified-type-def type-id='b9c97942' const='yes' id='191f6b72'/>
|
||||
<pointer-type-def type-id='191f6b72' size-in-bits='64' id='e475fb88'/>
|
||||
<qualified-type-def type-id='e475fb88' restrict='yes' id='5a8729d0'/>
|
||||
<qualified-type-def type-id='781a52d7' const='yes' id='413ab2b8'/>
|
||||
<pointer-type-def type-id='413ab2b8' size-in-bits='64' id='41671bd6'/>
|
||||
<qualified-type-def type-id='acbdbcc6' const='yes' id='4ba62af7'/>
|
||||
<pointer-type-def type-id='4ba62af7' size-in-bits='64' id='f39579e7'/>
|
||||
<qualified-type-def type-id='f39579e7' restrict='yes' id='9b23e165'/>
|
||||
<pointer-type-def type-id='c70fa2e8' size-in-bits='64' id='2e711a2a'/>
|
||||
<pointer-type-def type-id='3ff5601b' size-in-bits='64' id='4aafb922'/>
|
||||
<pointer-type-def type-id='acbdbcc6' size-in-bits='64' id='116842ac'/>
|
||||
<qualified-type-def type-id='116842ac' restrict='yes' id='3d3c4cf4'/>
|
||||
<pointer-type-def type-id='9e59d1d4' size-in-bits='64' id='4ea84b4f'/>
|
||||
<pointer-type-def type-id='945467e6' size-in-bits='64' id='8def7735'/>
|
||||
<pointer-type-def type-id='519bc206' size-in-bits='64' id='ef2f159c'/>
|
||||
<qualified-type-def type-id='ef2f159c' restrict='yes' id='de0eb5a4'/>
|
||||
<pointer-type-def type-id='3d3ffb69' size-in-bits='64' id='72a26210'/>
|
||||
<pointer-type-def type-id='c9d12d66' size-in-bits='64' id='b2eb2c3f'/>
|
||||
<pointer-type-def type-id='b07ae406' size-in-bits='64' id='36e89359'/>
|
||||
<qualified-type-def type-id='36e89359' restrict='yes' id='de98c2bb'/>
|
||||
<pointer-type-def type-id='a9c79a1f' size-in-bits='64' id='3d83ba87'/>
|
||||
<pointer-type-def type-id='4d39af59' size-in-bits='64' id='60db3356'/>
|
||||
<pointer-type-def type-id='f1abb096' size-in-bits='64' id='5f147c28'/>
|
||||
<pointer-type-def type-id='39730d0b' size-in-bits='64' id='c24fc2ee'/>
|
||||
<function-decl name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='822cd80b'/>
|
||||
|
@ -7302,6 +7460,23 @@
|
|||
<parameter type-id='eaa32e2f'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_exit' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='eaa32e2f'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='__pthread_register_cancel' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='ba7c727c'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='__pthread_unwind_next' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='ba7c727c'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='sigaddset' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='9e80f729'/>
|
||||
<parameter type-id='95e97e5e'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='perror' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='80f4b756'/>
|
||||
<return type-id='48b5725f'/>
|
||||
|
@ -7320,15 +7495,37 @@
|
|||
<parameter type-id='3d83ba87'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='timer_create' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='a1c3b834'/>
|
||||
<parameter type-id='de0eb5a4'/>
|
||||
<parameter type-id='de98c2bb'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='timer_delete' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='b07ae406'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='timer_settime' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='b07ae406'/>
|
||||
<parameter type-id='95e97e5e'/>
|
||||
<parameter type-id='9b23e165'/>
|
||||
<parameter type-id='3d3c4cf4'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='write' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='95e97e5e'/>
|
||||
<parameter type-id='eaa32e2f'/>
|
||||
<parameter type-id='b59d7dce'/>
|
||||
<return type-id='79a0948f'/>
|
||||
</function-decl>
|
||||
<function-decl name='sleep' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='f0981eeb'/>
|
||||
<return type-id='f0981eeb'/>
|
||||
<function-decl name='pause' visibility='default' binding='global' size-in-bits='64'>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_sigmask' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='95e97e5e'/>
|
||||
<parameter type-id='5a8729d0'/>
|
||||
<parameter type-id='65e6ec45'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='uncompress' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='4c667223'/>
|
||||
|
@ -7407,6 +7604,10 @@
|
|||
<parameter type-id='eaa32e2f'/>
|
||||
<return type-id='c19b74c3'/>
|
||||
</function-type>
|
||||
<function-type size-in-bits='64' id='f1abb096'>
|
||||
<parameter type-id='eabacd01'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-type>
|
||||
</abi-instr>
|
||||
<abi-instr address-size='64' path='lib/libzfs/libzfs_status.c' language='LANG_C99'>
|
||||
<function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
|
||||
|
@ -8081,14 +8282,6 @@
|
|||
</function-decl>
|
||||
</abi-instr>
|
||||
<abi-instr address-size='64' path='lib/libzfs/os/linux/libzfs_util_os.c' language='LANG_C99'>
|
||||
<class-decl name='itimerspec' size-in-bits='256' is-struct='yes' visibility='default' id='acbdbcc6'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='it_interval' type-id='a9c79a1f' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='128'>
|
||||
<var-decl name='it_value' type-id='a9c79a1f' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<typedef-decl name='nfds_t' type-id='7359adad' id='555eef66'/>
|
||||
<class-decl name='pollfd' size-in-bits='64' is-struct='yes' visibility='default' id='b440e872'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
|
@ -8101,9 +8294,6 @@
|
|||
<var-decl name='revents' type-id='a2185560' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<qualified-type-def type-id='acbdbcc6' const='yes' id='4ba62af7'/>
|
||||
<pointer-type-def type-id='4ba62af7' size-in-bits='64' id='f39579e7'/>
|
||||
<pointer-type-def type-id='acbdbcc6' size-in-bits='64' id='116842ac'/>
|
||||
<pointer-type-def type-id='b440e872' size-in-bits='64' id='3ac36db0'/>
|
||||
<function-decl name='access' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='80f4b756'/>
|
||||
|
@ -8385,9 +8575,6 @@
|
|||
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='256' id='16dc656a'>
|
||||
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
|
||||
</array-type-def>
|
||||
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='384' id='73b82f0f'>
|
||||
<subrange length='12' type-id='7359adad' id='84827bdc'/>
|
||||
</array-type-def>
|
||||
<class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='7ac83801'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='path' type-id='9b23c9ad' visibility='default'/>
|
||||
|
@ -8480,39 +8667,6 @@
|
|||
<var-decl name='__glibc_reserved' type-id='16dc656a' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<class-decl name='sigevent' size-in-bits='512' is-struct='yes' visibility='default' id='519bc206'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='sigev_value' type-id='eabacd01' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='64'>
|
||||
<var-decl name='sigev_signo' type-id='95e97e5e' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='96'>
|
||||
<var-decl name='sigev_notify' type-id='95e97e5e' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='128'>
|
||||
<var-decl name='_sigev_un' type-id='ac5ab599' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<union-decl name='__anonymous_union__' size-in-bits='384' is-anonymous='yes' visibility='default' id='ac5ab599'>
|
||||
<data-member access='public'>
|
||||
<var-decl name='_pad' type-id='73b82f0f' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public'>
|
||||
<var-decl name='_tid' type-id='3629bad8' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public'>
|
||||
<var-decl name='_sigev_thread' type-id='e7f43f7b' visibility='default'/>
|
||||
</data-member>
|
||||
</union-decl>
|
||||
<class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f7b'>
|
||||
<data-member access='public' layout-offset-in-bits='0'>
|
||||
<var-decl name='_function' type-id='5f147c28' visibility='default'/>
|
||||
</data-member>
|
||||
<data-member access='public' layout-offset-in-bits='64'>
|
||||
<var-decl name='_attribute' type-id='7347a39e' visibility='default'/>
|
||||
</data-member>
|
||||
</class-decl>
|
||||
<pointer-type-def type-id='e4957c49' size-in-bits='64' id='924bbc81'/>
|
||||
<qualified-type-def type-id='924bbc81' const='yes' id='5499dcde'/>
|
||||
<pointer-type-def type-id='5499dcde' size-in-bits='64' id='2236d41c'/>
|
||||
|
@ -8523,9 +8677,6 @@
|
|||
<pointer-type-def type-id='7a842a6b' size-in-bits='64' id='07ee4a58'/>
|
||||
<pointer-type-def type-id='8a70a786' size-in-bits='64' id='5507783b'/>
|
||||
<pointer-type-def type-id='b1e62775' size-in-bits='64' id='f095e320'/>
|
||||
<pointer-type-def type-id='519bc206' size-in-bits='64' id='ef2f159c'/>
|
||||
<qualified-type-def type-id='ef2f159c' restrict='yes' id='de0eb5a4'/>
|
||||
<pointer-type-def type-id='f1abb096' size-in-bits='64' id='5f147c28'/>
|
||||
<qualified-type-def type-id='48b5725f' volatile='yes' id='b0b3cbf9'/>
|
||||
<pointer-type-def type-id='b0b3cbf9' size-in-bits='64' id='fe09dd29'/>
|
||||
<function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
|
||||
|
@ -8579,10 +8730,6 @@
|
|||
<parameter type-id='eaa32e2f'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-type size-in-bits='64' id='f1abb096'>
|
||||
<parameter type-id='eabacd01'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-type>
|
||||
</abi-instr>
|
||||
<abi-instr address-size='64' path='lib/libzutil/zutil_nicenum.c' language='LANG_C99'>
|
||||
<type-decl name='long double' size-in-bits='128' id='e095c704'/>
|
||||
|
@ -8727,8 +8874,8 @@
|
|||
</function-decl>
|
||||
</abi-instr>
|
||||
<abi-instr address-size='64' path='module/zcommon/zfeature_common.c' language='LANG_C99'>
|
||||
<array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='17920' id='dd432c71'>
|
||||
<subrange length='40' type-id='7359adad' id='ae4a9561'/>
|
||||
<array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='18368' id='b93e4d14'>
|
||||
<subrange length='41' type-id='7359adad' id='cb834f44'/>
|
||||
</array-type-def>
|
||||
<enum-decl name='zfeature_flags' id='6db816a4'>
|
||||
<underlying-type type-id='9cac1fee'/>
|
||||
|
@ -8805,7 +8952,7 @@
|
|||
<pointer-type-def type-id='611586a1' size-in-bits='64' id='2e243169'/>
|
||||
<qualified-type-def type-id='eaa32e2f' const='yes' id='83be723c'/>
|
||||
<pointer-type-def type-id='83be723c' size-in-bits='64' id='7acd98a2'/>
|
||||
<var-decl name='spa_feature_table' type-id='dd432c71' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
|
||||
<var-decl name='spa_feature_table' type-id='b93e4d14' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
|
||||
<var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
|
||||
<function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='80f4b756'/>
|
||||
|
|
|
@ -3378,6 +3378,7 @@ zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
|
|||
boolean_t avail_spare, l2cache, islog;
|
||||
uint64_t val;
|
||||
char *newname;
|
||||
const char *type;
|
||||
nvlist_t **child;
|
||||
uint_t children;
|
||||
nvlist_t *config_root;
|
||||
|
@ -3412,6 +3413,14 @@ zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
	}

	type = fnvlist_lookup_string(tgt, ZPOOL_CONFIG_TYPE);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 &&
	    zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "the loaded zfs module doesn't support raidz expansion"));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
	}

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
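The guard above only asks the loaded module whether it recognizes the raidz_expansion feature GUID; it says nothing about the pool itself. A minimal userland sketch of the same probe, assuming the zfeature_common.h declarations are reachable from the caller (as they are inside libzfs):

#include <zfeature_common.h>	/* assumed include path */

/*
 * Sketch only: returns 0 when the running zfs module advertises the
 * org.openzfs:raidz_expansion feature GUID, non-zero otherwise, i.e.
 * the same probe zpool_vdev_attach() performs above.
 */
static int
module_supports_raidz_expansion(void)
{
	return (zfeature_lookup_guid("org.openzfs:raidz_expansion", NULL));
}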
@ -3479,6 +3488,10 @@ zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
|
|||
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
||||
"cannot replace a replacing device"));
|
||||
}
|
||||
} else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
|
||||
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
||||
"raidz_expansion feature must be enabled "
|
||||
"in order to attach a device to raidz"));
|
||||
} else {
|
||||
char status[64] = {0};
|
||||
zpool_prop_get_feature(zhp,
|
||||
|
@ -3508,8 +3521,7 @@ zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
		    "or device removal is in progress"),
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		break;

@ -3540,6 +3552,34 @@ zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		break;

	case ENXIO:
		/*
		 * The existing raidz vdev has offline children
		 */
		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "raidz vdev has devices that are offline or "
			    "being replaced"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
			break;
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;

	case EADDRINUSE:
		/*
		 * The boot reserved area is already being used (FreeBSD)
		 */
		if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "the reserved boot area needed for the expansion "
			    "is already being used by a boot loader"));
			(void) zfs_error(hdl, EZFS_BADDEV, errbuf);
		} else {
			(void) zpool_standard_error(hdl, errno, errbuf);
		}
		break;
	default:
		(void) zpool_standard_error(hdl, errno, errbuf);
	}
|
@ -5222,6 +5262,9 @@ zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
|
|||
} else {
|
||||
src = ZPROP_SRC_DEFAULT;
|
||||
intval = vdev_prop_default_numeric(prop);
|
||||
/* Only use if provided by the RAIDZ VDEV above */
|
||||
if (prop == VDEV_PROP_RAIDZ_EXPANDING)
|
||||
return (ENOENT);
|
||||
}
|
||||
if (vdev_prop_index_to_string(prop, intval,
|
||||
(const char **)&strval) != 0)
|
||||
|
|
|
@ -317,6 +317,8 @@ libzfs_error_description(libzfs_handle_t *hdl)
|
|||
case EZFS_RESUME_EXISTS:
|
||||
return (dgettext(TEXT_DOMAIN, "Resuming recv on existing "
|
||||
"dataset without force"));
|
||||
case EZFS_RAIDZ_EXPAND_IN_PROGRESS:
|
||||
return (dgettext(TEXT_DOMAIN, "raidz expansion in progress"));
|
||||
case EZFS_UNKNOWN:
|
||||
return (dgettext(TEXT_DOMAIN, "unknown error"));
|
||||
default:
|
||||
|
@ -763,6 +765,9 @@ zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
|
|||
case ZFS_ERR_IOC_ARG_BADTYPE:
|
||||
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
|
||||
break;
|
||||
case ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS:
|
||||
zfs_verror(hdl, EZFS_RAIDZ_EXPAND_IN_PROGRESS, fmt, ap);
|
||||
break;
|
||||
default:
|
||||
zfs_error_aux(hdl, "%s", strerror(error));
|
||||
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
|
||||
|
|
|
@ -1360,7 +1360,9 @@
|
|||
<enumerator name='ZFS_IOC_EVENTS_SEEK' value='23171'/>
|
||||
<enumerator name='ZFS_IOC_NEXTBOOT' value='23172'/>
|
||||
<enumerator name='ZFS_IOC_JAIL' value='23173'/>
|
||||
<enumerator name='ZFS_IOC_USERNS_ATTACH' value='23173'/>
|
||||
<enumerator name='ZFS_IOC_UNJAIL' value='23174'/>
|
||||
<enumerator name='ZFS_IOC_USERNS_DETACH' value='23174'/>
|
||||
<enumerator name='ZFS_IOC_SET_BOOTENV' value='23175'/>
|
||||
<enumerator name='ZFS_IOC_GET_BOOTENV' value='23176'/>
|
||||
<enumerator name='ZFS_IOC_LAST' value='23177'/>
|
||||
|
@ -1376,7 +1378,8 @@
|
|||
<enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
|
||||
<enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
|
||||
<enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
|
||||
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
|
||||
<enumerator name='ZPOOL_WAIT_RAIDZ_EXPAND' value='8'/>
|
||||
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='9'/>
|
||||
</enum-decl>
|
||||
<typedef-decl name='zpool_wait_activity_t' type-id='849338e3' id='73446457'/>
|
||||
<enum-decl name='zfs_wait_activity_t' naming-typedef-id='3024501a' id='527d5dc6'>
|
||||
|
@ -2538,6 +2541,13 @@
|
|||
<function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_fini'>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='lzc_scrub' mangled-name='lzc_scrub' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_scrub'>
|
||||
<parameter type-id='5b35941c' name='ioc'/>
|
||||
<parameter type-id='80f4b756' name='name'/>
|
||||
<parameter type-id='5ce45b60' name='source'/>
|
||||
<parameter type-id='857bb57e' name='resultp'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_create'>
|
||||
<parameter type-id='80f4b756' name='fsname'/>
|
||||
<parameter type-id='bc9887f1' name='type'/>
|
||||
|
|
|
@ -43,6 +43,7 @@ nodist_libzpool_la_SOURCES = \
|
|||
module/os/linux/zfs/arc_os.c \
|
||||
module/os/linux/zfs/trace.c \
|
||||
module/os/linux/zfs/vdev_file.c \
|
||||
module/os/linux/zfs/vdev_label_os.c \
|
||||
module/os/linux/zfs/zfs_debug.c \
|
||||
module/os/linux/zfs/zfs_racct.c \
|
||||
module/os/linux/zfs/zfs_znode.c \
|
||||
|
|
|
@ -52,6 +52,16 @@
|
|||
.Op Fl T Ar time
|
||||
.Op Fl z Ar zil_failure_rate
|
||||
.
|
||||
.Nm
|
||||
.Fl X
|
||||
.Op Fl VG
|
||||
.Op Fl s Ar size_of_each_vdev
|
||||
.Op Fl a Ar alignment_shift
|
||||
.Op Fl r Ar raidz_disks
|
||||
.Op Fl R Ar raid_parity
|
||||
.Op Fl d Ar datasets
|
||||
.Op Fl t Ar threads
|
||||
.
|
||||
.Sh DESCRIPTION
|
||||
.Nm
|
||||
was written by the ZFS Developers as a ZFS unit test.
|
||||
|
@ -122,11 +132,17 @@ Number of mirror copies.
|
|||
Number of raidz/draid disks.
|
||||
.It Fl R , -raid-parity Ns = (default: Sy 1 )
|
||||
Raid parity (raidz & draid).
|
||||
.It Fl K , -raid-kind Ns = Ns Sy raidz Ns | Ns Sy draid Ns | Ns Sy random No (default : Sy random )
|
||||
.It Xo
|
||||
.Fl K , -raid-kind Ns = Ns
|
||||
.Sy raidz Ns | Ns Sy eraidz Ns | Ns Sy draid Ns | Ns Sy random
|
||||
(default:
|
||||
.Sy random Ns
|
||||
)
|
||||
.Xc
|
||||
The kind of RAID config to use.
|
||||
With
|
||||
.Sy random
|
||||
the kind alternates between raidz and draid.
|
||||
the kind alternates between raidz, eraidz (expandable raidz) and draid.
|
||||
.It Fl D , -draid-data Ns = (default: Sy 4 )
|
||||
Number of data disks in a dRAID redundancy group.
|
||||
.It Fl S , -draid-spares Ns = (default: Sy 1 )
|
||||
|
@ -181,6 +197,8 @@ to an unsigned 32-bit integer
|
|||
Dump zfs_dbgmsg buffer before exiting due to an error.
|
||||
.It Fl V , -verbose
|
||||
Verbose (use multiple times for ever more verbosity).
|
||||
.It Fl X , -raidz-expansion
|
||||
Perform a dedicated raidz expansion test.
|
||||
.El
|
||||
.
|
||||
.Sh EXAMPLES
|
||||
|
|
|
@ -430,6 +430,19 @@ TXGs must pass before unloading will occur.
.It Sy reference_history Ns = Ns Sy 3 Pq uint
Maximum reference holders being tracked when reference_tracking_enable is
active.
.It Sy raidz_expand_max_copy_bytes Ns = Ns Sy 160MB Pq ulong
Max amount of memory to use for RAID-Z expansion I/O.
This limits how much I/O can be outstanding at once.
.
.It Sy raidz_expand_max_reflow_bytes Ns = Ns Sy 0 Pq ulong
For testing, pause RAID-Z expansion when reflow amount reaches this value.
.
.It Sy raidz_io_aggregate_rows Ns = Ns Sy 4 Pq ulong
For expanded RAID-Z, aggregate reads that have more rows than this.
.
.It Sy reference_history Ns = Ns Sy 3 Pq int
Maximum reference holders being tracked when reference_tracking_enable is
active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
@ -1781,6 +1794,12 @@ even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_after_expand Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub after a RAIDZ expansion completes
in order to verify the checksums of all blocks which have been
copied during the expansion.
This is enabled by default and strongly recommended.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1 s Pc Pq uint
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
|
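These knobs are ordinary module parameters, so on Linux their current values can be read back from sysfs. A small stand-alone sketch; the /sys/module/zfs/parameters path is the standard Linux location and is an assumption here, not something this patch adds:

#include <stdio.h>

/* Sketch: print the current zfs_scrub_after_expand setting. */
int
main(void)
{
	const char *p = "/sys/module/zfs/parameters/zfs_scrub_after_expand";
	FILE *fp = fopen(p, "r");
	int val;

	if (fp == NULL) {
		perror(p);
		return (1);
	}
	if (fscanf(fp, "%d", &val) == 1)
		printf("zfs_scrub_after_expand = %d\n", val);
	fclose(fp);
	return (0);
}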
|
|
@ -767,6 +767,14 @@ as soon as it is enabled and will never return to being
.Sy disabled .
\*[remount-upgrade]
.
.feature org.openzfs raidz_expansion no none
This feature enables the
.Nm zpool Cm attach
subcommand to attach a new device to a RAID-Z group, expanding the total
amount of usable space in the pool.
See
.Xr zpool-attach 8 .
.
.feature com.delphix redaction_bookmarks no bookmarks extensible_dataset
|
||||
This feature enables the use of redacted
|
||||
.Nm zfs Cm send Ns s ,
|
||||
|
@ -784,6 +792,18 @@ and so cannot be safely mounted, and their contents cannot be safely read.
|
|||
For more information about redacted receives, see
|
||||
.Xr zfs-send 8 .
|
||||
.
|
||||
.feature com.delphix redaction_list_spill no redaction_bookmarks
|
||||
This feature enables the redaction list created by zfs redact to store
|
||||
many more entries.
|
||||
It becomes
|
||||
.Sy active
|
||||
when a redaction list is created with more than 36 entries,
|
||||
and returns to being
|
||||
.Sy enabled
|
||||
when no long redaction lists remain in the pool.
|
||||
For more information about redacted sends, see
|
||||
.Xr zfs-send 8 .
|
||||
.
|
||||
.feature com.datto resilver_defer yes
|
||||
This feature allows ZFS to postpone new resilvers if an existing one is already
|
||||
in progress.
|
||||
|
@ -947,18 +967,6 @@ once all filesystems that have ever had their
|
|||
property set to
|
||||
.Sy zstd
|
||||
are destroyed.
|
||||
.
|
||||
.feature com.delphix redaction_list_spill no redaction_bookmarks
|
||||
This feature enables the redaction list created by zfs redact to store
|
||||
many more entries.
|
||||
It becomes
|
||||
.Sy active
|
||||
when a redaction list is created with more than 36 entries,
|
||||
and returns to being
|
||||
.Sy enabled
|
||||
when no long redaction lists remain in the pool.
|
||||
For more information about redacted sends, see
|
||||
.Xr zfs-send 8 .
|
||||
.El
|
||||
.
|
||||
.Sh SEE ALSO
|
||||
|
|
|
@ -26,7 +26,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 15, 2020
.Dd June 28, 2023
.Dt ZPOOL-ATTACH 8
.Os
.
@ -45,7 +45,15 @@ Attaches
.Ar new_device
to the existing
.Ar device .
The existing device cannot be part of a raidz configuration.
The behavior differs depending on whether the existing
.Ar device
is a RAID-Z device, or a mirror/plain device.
.Pp
If the existing device is a mirror or plain device
.Pq e.g. specified as Qo Li sda Qc or Qq Li mirror-7 ,
the new device will be mirrored with the existing device, a resilver will be
initiated, and the new device will contribute to additional redundancy once the
resilver completes.
If
.Ar device
is not currently part of a mirrored configuration,
@ -62,6 +70,42 @@ creates a three-way mirror, and so on.
In either case,
.Ar new_device
begins to resilver immediately and any running scrub is cancelled.
.Pp
If the existing device is a RAID-Z device
.Pq e.g. specified as Qq Ar raidz2-0 ,
the new device will become part of that RAID-Z group.
A "raidz expansion" will be initiated, and once the expansion completes,
the new device will contribute additional space to the RAID-Z group.
The expansion entails reading all allocated space from existing disks in the
RAID-Z group, and rewriting it to the new disks in the RAID-Z group (including
the newly added
.Ar device ) .
Its progress can be monitored with
.Nm zpool Cm status .
.Pp
Data redundancy is maintained during and after the expansion.
If a disk fails while the expansion is in progress, the expansion pauses until
the health of the RAID-Z vdev is restored (e.g. by replacing the failed disk
and waiting for reconstruction to complete).
Expansion does not change the number of failures that can be tolerated
without data loss (e.g. a RAID-Z2 is still a RAID-Z2 even after expansion).
A RAID-Z vdev can be expanded multiple times.
.Pp
After the expansion completes, old blocks retain their old data-to-parity
ratio
.Pq e.g. 5-wide RAID-Z2 has 3 data and 2 parity
but distributed among the larger set of disks.
New blocks will be written with the new data-to-parity ratio (e.g. a 5-wide
RAID-Z2 which has been expanded once to 6-wide, has 4 data and 2 parity).
However, the vdev's assumed parity ratio does not change, so slightly less
space than is expected may be reported for newly-written blocks, according to
.Nm zfs Cm list ,
.Nm df ,
.Nm ls Fl s ,
and similar tools.
.Pp
A pool-wide scrub is initiated at the end of the expansion in order to verify
the checksums of all blocks which have been copied during the expansion.
.Bl -tag -width Ds
.It Fl f
Forces use of
@ -76,16 +120,15 @@ manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .
.It Fl s
The
When attaching to a mirror or plain device, the
.Ar new_device
is reconstructed sequentially to restore redundancy as quickly as possible.
Checksums are not verified during sequential reconstruction so a scrub is
started when the resilver completes.
Sequential reconstruction is not supported for raidz configurations.
.It Fl w
Waits until
.Ar new_device
has finished resilvering before returning.
has finished resilvering or expanding before returning.
.El
.
.Sh SEE ALSO
@ -20,7 +20,7 @@
.\"
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
@ -57,7 +57,7 @@ immediately.
These are the possible values for
.Ar activity ,
along with what each one waits for:
.Bl -tag -compact -offset Ds -width "initialize"
.Bl -tag -compact -offset Ds -width "raidz_expand"
.It Sy discard
Checkpoint to be discarded
.It Sy free
@ -76,6 +76,8 @@ Resilver to cease
Scrub to cease
.It Sy trim
Manual trim to cease
.It Sy raidz_expand
Attaching to a RAID-Z vdev to complete
.El
.Pp
If an
|
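The new raidz_expand activity can also be waited on programmatically through libzfs_core. A hedged sketch, assuming the existing lzc_wait() interface and using the pool name only as a placeholder:

#include <libzfs_core.h>

/*
 * Sketch: block until any raidz expansion on the named pool completes;
 * roughly the programmatic counterpart of `zpool wait -t raidz_expand`.
 */
static int
wait_for_raidz_expand(const char *pool)
{
	boolean_t waited;
	int err = libzfs_core_init();

	if (err != 0)
		return (err);
	err = lzc_wait(pool, ZPOOL_WAIT_RAIDZ_EXPAND, &waited);
	libzfs_core_fini();
	return (err);
}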
|
|
@ -445,6 +445,7 @@ ZFS_OBJS_OS := \
|
|||
trace.o \
|
||||
vdev_disk.o \
|
||||
vdev_file.o \
|
||||
vdev_label_os.o \
|
||||
zfs_acl.o \
|
||||
zfs_ctldir.o \
|
||||
zfs_debug.o \
|
||||
|
|
|
@ -72,3 +72,62 @@ retry:
|
|||
abd_free(pad2);
|
||||
return (error);
|
||||
}
|
||||
|
||||
static void
|
||||
vdev_child_done(zio_t *zio)
|
||||
{
|
||||
zio_t *pio = zio->io_private;
|
||||
|
||||
mutex_enter(&pio->io_lock);
|
||||
pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
|
||||
mutex_exit(&pio->io_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the reserved boot area is in-use.
|
||||
*
|
||||
* When booting FreeBSD with an MBR partition with ZFS, the zfsboot file
|
||||
* (which understands the ZFS file system) is written to the ZFS BOOT
|
||||
* reserve area (at offset 512K). We check for that here before attaching
|
||||
* a disk to raidz which would then corrupt this boot data.
|
||||
*/
|
||||
int
|
||||
vdev_check_boot_reserve(spa_t *spa, vdev_t *childvd)
|
||||
{
|
||||
ASSERT(childvd->vdev_ops->vdev_op_leaf);
|
||||
|
||||
size_t size = SPA_MINBLOCKSIZE;
|
||||
abd_t *abd = abd_alloc_linear(size, B_FALSE);
|
||||
|
||||
zio_t *pio = zio_root(spa, NULL, NULL, 0);
|
||||
/*
|
||||
* Note: zio_vdev_child_io() adds VDEV_LABEL_START_SIZE to the offset
|
||||
* to calculate the physical offset to write to. Passing in a negative
|
||||
* offset lets us access the boot area.
|
||||
*/
|
||||
zio_nowait(zio_vdev_child_io(pio, NULL, childvd,
|
||||
VDEV_BOOT_OFFSET - VDEV_LABEL_START_SIZE, abd, size, ZIO_TYPE_READ,
|
||||
ZIO_PRIORITY_ASYNC_READ, 0, vdev_child_done, pio));
|
||||
zio_wait(pio);
|
||||
|
||||
unsigned char *buf = abd_to_buf(abd);
|
||||
|
||||
/*
|
||||
* The BTX server has a special header at the beginning.
|
||||
*
|
||||
* btx_hdr: .byte 0xeb # Machine ID
|
||||
* .byte 0xe # Header size
|
||||
* .ascii "BTX" # Magic
|
||||
* .byte 0x1 # Major version
|
||||
* .byte 0x2 # Minor version
|
||||
* .byte BTX_FLAGS # Flags
|
||||
*/
|
||||
if (buf[0] == 0xeb && buf[1] == 0x0e &&
|
||||
buf[2] == 'B' && buf[3] == 'T' && buf[4] == 'X') {
|
||||
abd_free(abd);
|
||||
return (EBUSY);
|
||||
}
|
||||
|
||||
abd_free(abd);
|
||||
return (0);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (c) 2023 by iXsystems, Inc.
|
||||
*/
|
||||
|
||||
#include <sys/zfs_context.h>
|
||||
#include <sys/spa.h>
|
||||
#include <sys/spa_impl.h>
|
||||
#include <sys/vdev.h>
|
||||
#include <sys/vdev_impl.h>
|
||||
|
||||
/*
|
||||
* Check if the reserved boot area is in-use.
|
||||
*
|
||||
* This function always returns 0, as there are no known external uses
|
||||
* of the reserved area on Linux.
|
||||
*/
|
||||
int
|
||||
vdev_check_boot_reserve(spa_t *spa, vdev_t *childvd)
|
||||
{
|
||||
(void) spa;
|
||||
(void) childvd;
|
||||
|
||||
return (0);
|
||||
}
|
|
@ -175,7 +175,8 @@ __dprintf(boolean_t dprint, const char *file, const char *func,
|
|||
newfile = file;
|
||||
}
|
||||
|
||||
i = snprintf(buf, size, "%s%s:%d:%s(): ", prefix, newfile, line, func);
|
||||
i = snprintf(buf, size, "%px %s%s:%d:%s(): ",
|
||||
curthread, prefix, newfile, line, func);
|
||||
|
||||
if (i < size) {
|
||||
va_start(adx, fmt);
|
||||
|
|
|
@ -749,6 +749,11 @@ zpool_feature_init(void)
	    redact_list_spill_deps, sfeatures);
	}

	zfeature_register(SPA_FEATURE_RAIDZ_EXPANSION,
	    "org.openzfs:raidz_expansion", "raidz_expansion",
	    "Support for raidz expansion",
	    ZFEATURE_FLAG_MOS, ZFEATURE_TYPE_BOOLEAN, NULL, sfeatures);

	zfs_mod_list_supported_free(sfeatures);
}
|
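The feature is registered with ZFEATURE_FLAG_MOS, so turning it from enabled to active on disk goes through the usual spa_feature_incr() path in sync context. A sketch of that pattern under stated assumptions; the helper name and call site are illustrative, not code from this change:

#include <sys/zfeature.h>

/*
 * Sketch (assumed pattern): bump the MOS feature refcount so the pool
 * records raidz_expansion as active once an expansion has started.
 */
static void
raidz_expansion_activate_feature(spa_t *spa, dmu_tx_t *tx)
{
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION));
	if (!spa_feature_is_active(spa, SPA_FEATURE_RAIDZ_EXPANSION))
		spa_feature_incr(spa, SPA_FEATURE_RAIDZ_EXPANSION, tx);
}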
||||
|
|
|
@ -439,6 +439,9 @@ vdev_prop_init(void)
|
|||
zprop_register_index(VDEV_PROP_ALLOCATING, "allocating", 1,
|
||||
PROP_DEFAULT, ZFS_TYPE_VDEV, "on | off", "ALLOCATING",
|
||||
boolean_na_table, sfeatures);
|
||||
zprop_register_index(VDEV_PROP_RAIDZ_EXPANDING, "raidz_expanding", 0,
|
||||
PROP_READONLY, ZFS_TYPE_VDEV, "on | off", "RAIDZ_EXPANDING",
|
||||
boolean_table, sfeatures);
|
||||
|
||||
/* default index properties */
|
||||
zprop_register_index(VDEV_PROP_FAILFAST, "failfast", B_TRUE,
|
||||
|
|
|
@ -4518,7 +4518,7 @@ arc_evict_cb_check(void *arg, zthr_t *zthr)
|
|||
static void
|
||||
arc_evict_cb(void *arg, zthr_t *zthr)
|
||||
{
|
||||
(void) arg, (void) zthr;
|
||||
(void) arg;
|
||||
|
||||
uint64_t evicted = 0;
|
||||
fstrans_cookie_t cookie = spl_fstrans_mark();
|
||||
|
@ -4542,9 +4542,13 @@ arc_evict_cb(void *arg, zthr_t *zthr)
|
|||
* infinite loop. Additionally, zthr_iscancelled() is
|
||||
* checked here so that if the arc is shutting down, the
|
||||
* broadcast will wake any remaining arc evict waiters.
|
||||
*
|
||||
* Note we cancel using zthr instead of arc_evict_zthr
|
||||
* because the latter may not yet be initializd when the
|
||||
* callback is first invoked.
|
||||
*/
|
||||
mutex_enter(&arc_evict_lock);
|
||||
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
|
||||
arc_evict_needed = !zthr_iscancelled(zthr) &&
|
||||
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
|
||||
if (!arc_evict_needed) {
|
||||
/*
|
||||
|
|
|
@ -3066,7 +3066,6 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
|
|||
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
|
||||
dsl_scan_visit_rootbp(scn, NULL,
|
||||
&dp->dp_meta_rootbp, tx);
|
||||
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
|
||||
if (scn->scn_suspending)
|
||||
return;
|
||||
|
||||
|
|
|
@ -4342,7 +4342,8 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
|
|||
|
||||
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
|
||||
metaslab_class_get_alloc(spa_normal_class(spa));
|
||||
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
|
||||
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing ||
|
||||
vd->vdev_rz_expanding) {
|
||||
defer_allowed = B_FALSE;
|
||||
}
|
||||
|
||||
|
@ -4650,6 +4651,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
|
|||
ASSERT(MUTEX_HELD(&msp->ms_lock));
|
||||
VERIFY(!msp->ms_condensing);
|
||||
VERIFY0(msp->ms_disabled);
|
||||
VERIFY0(msp->ms_new);
|
||||
|
||||
start = mc->mc_ops->msop_alloc(msp, size);
|
||||
if (start != -1ULL) {
|
||||
|
@ -4721,10 +4723,10 @@ find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
|
|||
}
|
||||
|
||||
/*
|
||||
* If the selected metaslab is condensing or disabled,
|
||||
* skip it.
|
||||
* If the selected metaslab is condensing or disabled, or
|
||||
* hasn't gone through a metaslab_sync_done(), then skip it.
|
||||
*/
|
||||
if (msp->ms_condensing || msp->ms_disabled > 0)
|
||||
if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new)
|
||||
continue;
|
||||
|
||||
*was_active = msp->ms_allocator != -1;
|
||||
|
@ -5270,7 +5272,7 @@ top:
|
|||
|
||||
ASSERT(mg->mg_class == mc);
|
||||
|
||||
uint64_t asize = vdev_psize_to_asize(vd, psize);
|
||||
uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg);
|
||||
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
|
||||
|
||||
/*
|
||||
|
|
242
module/zfs/spa.c
242
module/zfs/spa.c
|
@ -63,6 +63,7 @@
|
|||
#include <sys/vdev_rebuild.h>
|
||||
#include <sys/vdev_trim.h>
|
||||
#include <sys/vdev_disk.h>
|
||||
#include <sys/vdev_raidz.h>
|
||||
#include <sys/vdev_draid.h>
|
||||
#include <sys/metaslab.h>
|
||||
#include <sys/metaslab_impl.h>
|
||||
|
@ -1709,6 +1710,10 @@ spa_destroy_aux_threads(spa_t *spa)
|
|||
zthr_destroy(spa->spa_livelist_condense_zthr);
|
||||
spa->spa_livelist_condense_zthr = NULL;
|
||||
}
|
||||
if (spa->spa_raidz_expand_zthr != NULL) {
|
||||
zthr_destroy(spa->spa_raidz_expand_zthr);
|
||||
spa->spa_raidz_expand_zthr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1861,6 +1866,8 @@ spa_unload(spa_t *spa)
|
|||
spa->spa_compatibility = NULL;
|
||||
}
|
||||
|
||||
spa->spa_raidz_expand = NULL;
|
||||
|
||||
spa_config_exit(spa, SCL_ALL, spa);
|
||||
}
|
||||
|
||||
|
@ -2999,6 +3006,7 @@ spa_spawn_aux_threads(spa_t *spa)
|
|||
|
||||
ASSERT(MUTEX_HELD(&spa_namespace_lock));
|
||||
|
||||
spa_start_raidz_expansion_thread(spa);
|
||||
spa_start_indirect_condensing_thread(spa);
|
||||
spa_start_livelist_destroy_thread(spa);
|
||||
spa_start_livelist_condensing_thread(spa);
|
||||
|
@ -3753,6 +3761,12 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
|
|||
}
|
||||
spa_load_note(spa, "using uberblock with txg=%llu",
|
||||
(u_longlong_t)ub->ub_txg);
|
||||
if (ub->ub_raidz_reflow_info != 0) {
|
||||
spa_load_note(spa, "uberblock raidz_reflow_info: "
|
||||
"state=%u offset=%llu",
|
||||
(int)RRSS_GET_STATE(ub),
|
||||
(u_longlong_t)RRSS_GET_OFFSET(ub));
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
|
@ -5091,6 +5105,13 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
|
|||
|
||||
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
|
||||
|
||||
/*
|
||||
* Before we do any zio_write's, complete the raidz expansion
|
||||
* scratch space copying, if necessary.
|
||||
*/
|
||||
if (RRSS_GET_STATE(&spa->spa_uberblock) == RRSS_SCRATCH_VALID)
|
||||
vdev_raidz_reflow_copy_scratch(spa);
|
||||
|
||||
/*
|
||||
* In case of a checkpoint rewind, log the original txg
|
||||
* of the checkpointed uberblock.
|
||||
|
@ -6905,9 +6926,10 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
|
|||
}
|
||||
|
||||
/*
|
||||
* Attach a device to a mirror. The arguments are the path to any device
|
||||
* in the mirror, and the nvroot for the new device. If the path specifies
|
||||
* a device that is not mirrored, we automatically insert the mirror vdev.
|
||||
* Attach a device to a vdev specified by its guid. The vdev type can be
|
||||
* a mirror, a raidz, or a leaf device that is also a top-level (e.g. a
|
||||
* single device). When the vdev is a single device, a mirror vdev will be
|
||||
* automatically inserted.
|
||||
*
|
||||
* If 'replacing' is specified, the new device is intended to replace the
|
||||
* existing device; in this case the two devices are made into their own
|
||||
|
@ -6930,7 +6952,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
|
|||
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
|
||||
vdev_ops_t *pvops;
|
||||
char *oldvdpath, *newvdpath;
|
||||
int newvd_isspare;
|
||||
int newvd_isspare = B_FALSE;
|
||||
int error;
|
||||
|
||||
ASSERT(spa_writeable(spa));
|
||||
|
@ -6961,16 +6983,35 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
|
|||
ZFS_ERR_REBUILD_IN_PROGRESS));
|
||||
}
|
||||
|
||||
if (spa->spa_vdev_removal != NULL)
|
||||
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
|
||||
if (spa->spa_vdev_removal != NULL) {
|
||||
return (spa_vdev_exit(spa, NULL, txg,
|
||||
ZFS_ERR_DEVRM_IN_PROGRESS));
|
||||
}
|
||||
|
||||
if (oldvd == NULL)
|
||||
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
|
||||
|
||||
if (!oldvd->vdev_ops->vdev_op_leaf)
|
||||
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
|
||||
boolean_t raidz = oldvd->vdev_ops == &vdev_raidz_ops;
|
||||
|
||||
pvd = oldvd->vdev_parent;
|
||||
if (raidz) {
|
||||
if (!spa_feature_is_enabled(spa, SPA_FEATURE_RAIDZ_EXPANSION))
|
||||
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
|
||||
|
||||
/*
|
||||
* Can't expand a raidz while prior expand is in progress.
|
||||
*/
|
||||
if (spa->spa_raidz_expand != NULL) {
|
||||
return (spa_vdev_exit(spa, NULL, txg,
|
||||
ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));
|
||||
}
|
||||
} else if (!oldvd->vdev_ops->vdev_op_leaf) {
|
||||
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
|
||||
}
|
||||
|
||||
if (raidz)
|
||||
pvd = oldvd;
|
||||
else
|
||||
pvd = oldvd->vdev_parent;
|
||||
|
||||
if (spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
|
||||
VDEV_ALLOC_ATTACH) != 0)
|
||||
|
@ -7026,6 +7067,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
|
|||
* vdev.
|
||||
*/
|
||||
if (pvd->vdev_ops != &vdev_mirror_ops &&
|
||||
pvd->vdev_ops != &vdev_raidz_ops &&
|
||||
pvd->vdev_ops != &vdev_root_ops)
|
||||
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
|
||||
|
||||
|
@ -7065,7 +7107,8 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
|
|||
/*
|
||||
* Make sure the new device is big enough.
|
||||
*/
|
||||
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
|
||||
vdev_t *min_vdev = raidz ? oldvd->vdev_child[0] : oldvd;
|
||||
if (newvd->vdev_asize < vdev_get_min_asize(min_vdev))
|
||||
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
|
||||
|
||||
/*
|
||||
|
@ -7075,32 +7118,75 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
|
|||
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
|
||||
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
|
||||
|
||||
/*
|
||||
* RAIDZ-expansion-specific checks.
|
||||
*/
|
||||
if (raidz) {
|
||||
if (vdev_raidz_attach_check(newvd) != 0)
|
||||
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
|
||||
|
||||
/*
|
||||
* Fail early if a child is not healthy or being replaced
|
||||
*/
|
||||
for (int i = 0; i < oldvd->vdev_children; i++) {
|
||||
if (vdev_is_dead(oldvd->vdev_child[i]) ||
|
||||
!oldvd->vdev_child[i]->vdev_ops->vdev_op_leaf) {
|
||||
return (spa_vdev_exit(spa, newrootvd, txg,
|
||||
ENXIO));
|
||||
}
|
||||
/* Also fail if reserved boot area is in-use */
|
||||
if (vdev_check_boot_reserve(spa, oldvd->vdev_child[i])
|
||||
!= 0) {
|
||||
return (spa_vdev_exit(spa, newrootvd, txg,
|
||||
EADDRINUSE));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (raidz) {
|
||||
/*
|
||||
* Note: oldvdpath is freed by spa_strfree(), but
|
||||
* kmem_asprintf() is freed by kmem_strfree(), so we have to
|
||||
* move it to a spa_strdup-ed string.
|
||||
*/
|
||||
char *tmp = kmem_asprintf("raidz%u-%u",
|
||||
(uint_t)vdev_get_nparity(oldvd), (uint_t)oldvd->vdev_id);
|
||||
oldvdpath = spa_strdup(tmp);
|
||||
kmem_strfree(tmp);
|
||||
} else {
|
||||
oldvdpath = spa_strdup(oldvd->vdev_path);
|
||||
}
|
||||
newvdpath = spa_strdup(newvd->vdev_path);
|
||||
|
||||
/*
|
||||
* If this is an in-place replacement, update oldvd's path and devid
|
||||
* to make it distinguishable from newvd, and unopenable from now on.
|
||||
*/
|
||||
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
|
||||
if (strcmp(oldvdpath, newvdpath) == 0) {
|
||||
spa_strfree(oldvd->vdev_path);
|
||||
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
|
||||
oldvd->vdev_path = kmem_alloc(strlen(newvdpath) + 5,
|
||||
KM_SLEEP);
|
||||
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
|
||||
"%s/%s", newvd->vdev_path, "old");
|
||||
(void) sprintf(oldvd->vdev_path, "%s/old",
|
||||
newvdpath);
|
||||
if (oldvd->vdev_devid != NULL) {
|
||||
spa_strfree(oldvd->vdev_devid);
|
||||
oldvd->vdev_devid = NULL;
|
||||
}
|
||||
spa_strfree(oldvdpath);
oldvdpath = spa_strdup(oldvd->vdev_path);
}

/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
if (!raidz && pvd->vdev_ops != pvops) {
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
}

ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);

/*
* Extract the new device from its root and add it to pvd.

@@ -7128,41 +7214,66 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;

vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
if (raidz) {
/*
* Wait for the youngest allocations and frees to sync,
* and then wait for the deferral of those frees to finish.
*/
spa_vdev_config_exit(spa, NULL,
txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
vdev_initialize_stop_all(tvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(tvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_wait(tvd);

oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
dtl_max_txg = spa_vdev_config_enter(spa);

/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
tvd->vdev_rz_expanding = B_TRUE;

/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_dirty_leaves(tvd, VDD_DTL, dtl_max_txg);
vdev_config_dirty(tvd);

vdev_rebuild(tvd);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool,
dtl_max_txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_raidz_attach_sync,
newvd, tx);
dmu_tx_commit(tx);
} else {
newvd->vdev_resilver_txg = txg;
vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
dtl_max_txg - TXG_INITIAL);

if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}

newvd_isspare = newvd->vdev_isspare;

/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);

/*
* Schedule the resilver or rebuild to restart in the future.
* We do this to ensure that dmu_sync-ed blocks have been
* stitched into the respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;

vdev_rebuild(tvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
newvd->vdev_resilver_txg = txg;

if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa,
SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
}

@@ -7487,7 +7598,7 @@ spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
vd->vdev_top->vdev_removing || vd->vdev_top->vdev_rz_expanding)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&

@@ -7609,7 +7720,8 @@ spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
* which has completed but the thread is not exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing ||
vd->vdev_top->vdev_rz_expanding)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&

@@ -8512,6 +8624,10 @@ spa_async_suspend(spa_t *spa)
if (condense_thread != NULL)
zthr_cancel(condense_thread);

zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
if (raidz_expand_thread != NULL)
zthr_cancel(raidz_expand_thread);

zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);

@@ -8538,6 +8654,10 @@ spa_async_resume(spa_t *spa)
if (condense_thread != NULL)
zthr_resume(condense_thread);

zthr_t *raidz_expand_thread = spa->spa_raidz_expand_zthr;
if (raidz_expand_thread != NULL)
zthr_resume(raidz_expand_thread);

zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);

@@ -9343,6 +9463,27 @@ spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
!= NULL)
vdev_sync(vd, txg);

if (pass == 1) {
/*
* dsl_pool_sync() -> dp_sync_tasks may have dirtied
* the config. If that happens, this txg should not
* be a no-op. So we must sync the config to the MOS
* before checking for no-op.
*
* Note that when the config is dirty, it will
* be written to the MOS (i.e. the MOS will be
* dirtied) every time we call spa_sync_config_object()
* in this txg. Therefore we can't call this after
* dsl_pool_sync() every pass, because it would
* prevent us from converging, since we'd dirty
* the MOS every pass.
*
* Sync tasks can only be processed in pass 1, so
* there's no need to do this in later passes.
*/
spa_sync_config_object(spa, tx);
}

/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock

@@ -10100,7 +10241,8 @@ spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
*in_progress = vdev_rebuild_active(spa->spa_root_vdev);
if (*in_progress)
break;
zfs_fallthrough;
case ZPOOL_WAIT_SCRUB:

@@ -10115,6 +10257,12 @@ spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
case ZPOOL_WAIT_RAIDZ_EXPAND:
{
vdev_raidz_expand_t *vre = spa->spa_raidz_expand;
*in_progress = (vre != NULL && vre->vre_state == DSS_SCANNING);
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
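The new ZPOOL_WAIT_RAIDZ_EXPAND activity above is what the userland zpool wait command polls. A rough usage sketch (the pool name is a placeholder), matching the invocation the test scripts later in this change rely on:

# Block until any in-progress RAID-Z expansion on the pool has finished.
zpool wait -t raidz_expand tank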
@@ -465,6 +465,9 @@ spa_checkpoint_check(void *arg, dmu_tx_t *tx)
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(ZFS_ERR_DEVRM_IN_PROGRESS));

if (spa->spa_raidz_expand != NULL)
return (SET_ERROR(ZFS_ERR_RAIDZ_EXPAND_IN_PROGRESS));

if (spa->spa_checkpoint_txg != 0)
return (SET_ERROR(ZFS_ERR_CHECKPOINT_EXISTS));

@@ -58,6 +58,7 @@
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"

@@ -305,13 +306,13 @@ vdev_derive_alloc_bias(const char *bias)
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
vdev_default_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;

for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
csize = vdev_psize_to_asize_txg(vd->vdev_child[c], psize, txg);
asize = MAX(asize, csize);
}

@@ -930,6 +931,8 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
vd->vdev_rz_expanding = nvlist_exists(nv,
ZPOOL_CONFIG_RAIDZ_EXPANDING);
} else {
ASSERT0(vd->vdev_top_zap);
}

@@ -1692,6 +1695,8 @@ vdev_probe_done(zio_t *zio)

vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
vdev_dbgmsg(vd, "probe done, cant_read=%u cant_write=%u",
vd->vdev_cant_read, vd->vdev_cant_write);

if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {

@@ -1913,17 +1918,20 @@ vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
}

/*
* Compute the raidz-deflation ratio. Note, we hard-code
* in 128k (1 << 17) because it is the "typical" blocksize.
* Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
* otherwise it would inconsistently account for existing bp's.
* Compute the raidz-deflation ratio. Note, we hard-code 128k (1 << 17)
* because it is the "typical" blocksize. Even though SPA_MAXBLOCKSIZE
* changed, this algorithm can not change, otherwise it would inconsistently
* account for existing bp's. We also hard-code txg 0 for the same reason
* since expanded RAIDZ vdevs can use a different asize for different birth
* txg's.
*/
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
(vdev_psize_to_asize_txg(vd, 1 << 17, 0) >>
SPA_MINBLOCKSHIFT);
}
}

@@ -3228,32 +3236,43 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,

if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
} else {
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB) {
/* leaf vdevs only */
continue;
}
if (t == DTL_PARTIAL) {
/* i.e. non-zero */
minref = 1;
} else if (vdev_get_nparity(vd) != 0) {
/* RAIDZ, DRAID */
minref = vdev_get_nparity(vd) + 1;
} else {
/* any kind of mirror */
minref = vd->vdev_children;
}
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree,
cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree,
vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}

mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
if (vd->vdev_top->vdev_ops == &vdev_raidz_ops) {
raidz_dtl_reassessed(vd);
}
mutex_exit(&vd->vdev_dtl_lock);
}

/*

@@ -3628,6 +3647,12 @@ vdev_load(vdev_t *vd)

vdev_set_deflate_ratio(vd);

if (vd->vdev_ops == &vdev_raidz_ops) {
error = vdev_raidz_load(vd);
if (error != 0)
return (error);
}

/*
* On spa_load path, grab the allocation bias from our zap
*/

@@ -4005,10 +4030,22 @@ vdev_sync(vdev_t *vd, uint64_t txg)

dmu_tx_commit(tx);
}
/*
* Return the amount of space that should be (or was) allocated for the given
* psize (compressed block size) in the given TXG. Note that for expanded
* RAIDZ vdevs, the size allocated for older BP's may be larger. See
* vdev_raidz_asize().
*/
uint64_t
vdev_psize_to_asize_txg(vdev_t *vd, uint64_t psize, uint64_t txg)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize, txg));
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
return (vdev_psize_to_asize_txg(vd, psize, 0));
}
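/*
 * Note (illustrative, not part of the change itself): vdev_psize_to_asize()
 * is now a thin wrapper that assumes a birth txg of 0, i.e. it reports the
 * pre-expansion allocation size, while callers that care about the
 * post-expansion geometry pass the block's birth txg to
 * vdev_psize_to_asize_txg().
 */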

/*

@@ -4174,9 +4211,6 @@ vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));

wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;

@@ -5457,7 +5491,9 @@ vdev_expand(vdev_t *vd, uint64_t txg)

vdev_set_deflate_ratio(vd);

if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
if ((vd->vdev_spa->spa_raidz_expand == NULL ||
vd->vdev_spa->spa_raidz_expand->vre_vdev_id != vd->vdev_id) &&
(vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);

@@ -6209,6 +6245,14 @@ vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
vdev_prop_add_list(outnvl, propname, NULL,
vd->vdev_removing, ZPROP_SRC_NONE);
continue;
case VDEV_PROP_RAIDZ_EXPANDING:
/* Only expose this for raidz */
if (vd->vdev_ops == &vdev_raidz_ops) {
vdev_prop_add_list(outnvl, propname,
NULL, vd->vdev_rz_expanding,
ZPROP_SRC_NONE);
}
continue;
/* Numeric Properties */
case VDEV_PROP_ALLOCATING:
/* Leaf vdevs cannot have this property */

@@ -577,8 +577,9 @@ vdev_draid_permute_id(vdev_draid_config_t *vdc,
* i.e. vdev_draid_psize_to_asize().
*/
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize)
vdev_draid_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
{
(void) txg;
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_ashift;

@@ -960,7 +961,7 @@ vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t io_size = abd_size;
uint64_t io_asize = vdev_draid_asize(vd, io_size);
uint64_t io_asize = vdev_draid_asize(vd, io_size, 0);
uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);

@@ -1025,15 +1026,9 @@ vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,

ASSERT3U(vdc->vdc_nparity, >, 0);

raidz_row_t *rr;
rr = kmem_alloc(offsetof(raidz_row_t, rr_col[groupwidth]), KM_SLEEP);
rr->rr_cols = groupwidth;
rr->rr_scols = groupwidth;
raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth);
rr->rr_bigcols = bc;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
rr->rr_firstdatacol = vdc->vdc_nparity;
rr->rr_abd_empty = NULL;
#ifdef ZFS_DEBUG
rr->rr_offset = io_offset;
rr->rr_size = io_size;

@@ -1053,14 +1048,6 @@ vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,

rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
rc->rc_offset = physical_offset;
rc->rc_abd = NULL;
rc->rc_orig_data = NULL;
rc->rc_error = 0;
rc->rc_tried = 0;
rc->rc_skipped = 0;
rc->rc_force_repair = 0;
rc->rc_allow_repair = 1;
rc->rc_need_orig_restore = B_FALSE;

if (q == 0 && i >= bc)
rc->rc_size = 0;

@@ -1129,7 +1116,7 @@ vdev_draid_map_alloc(zio_t *zio)
if (size < abd_size) {
vdev_t *vd = zio->io_vd;

io_offset += vdev_draid_asize(vd, size);
io_offset += vdev_draid_asize(vd, size, 0);
abd_offset += size;
abd_size -= size;
nrows++;

@@ -1151,7 +1138,6 @@ vdev_draid_map_alloc(zio_t *zio)
rm->rm_row[0] = rr[0];
if (nrows == 2)
rm->rm_row[1] = rr[1];

return (rm);
}

@@ -1783,7 +1769,7 @@ vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = vdev_draid_asize(vd, psize);
uint64_t asize = vdev_draid_asize(vd, psize, 0);

if (phys_birth == TXG_UNKNOWN) {
/*

@@ -1840,7 +1826,7 @@ vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_draid_asize(vd, rr->rr_size);
vdev_draid_asize(vd, rr->rr_size, 0);

raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];

@@ -48,7 +48,8 @@ static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing);
vd->vdev_detached || vd->vdev_top->vdev_removing ||
vd->vdev_top->vdev_rz_expanding);
}

static void

@@ -67,7 +68,8 @@ vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
kmem_free(arg, sizeof (uint64_t));

vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
if (vd == NULL || vd->vdev_top->vdev_removing ||
!vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
return;

uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];

@@ -631,6 +633,7 @@ vdev_initialize(vdev_t *vd)
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
ASSERT(!vd->vdev_top->vdev_rz_expanding);

vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
vd->vdev_initialize_thread = thread_create(NULL, 0,

@@ -791,13 +794,14 @@ vdev_initialize_restart(vdev_t *vd)
ASSERT(err == 0 || err == ENOENT);
vd->vdev_initialize_action_time = timestamp;

if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vd->vdev_offline) {
if ((vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_initialize_load(vd));
} else if (vd->vdev_initialize_state ==
VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
!vd->vdev_top->vdev_removing &&
!vd->vdev_top->vdev_rz_expanding &&
vd->vdev_initialize_thread == NULL) {
vdev_initialize(vd);
}

@@ -142,6 +142,7 @@
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>

@@ -423,6 +424,13 @@ root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
sizeof (pcs) / sizeof (uint64_t));
}

pool_raidz_expand_stat_t pres;
if (spa_raidz_expand_get_stats(spa, &pres) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t *)&pres,
sizeof (pres) / sizeof (uint64_t));
}
}

static void

@@ -1504,7 +1512,8 @@ vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
}

struct ubl_cbdata {
uberblock_t *ubl_ubbest; /* Best uberblock */
uberblock_t ubl_latest; /* Most recent uberblock */
uberblock_t *ubl_ubbest; /* Best uberblock (w/r/t max_txg) */
vdev_t *ubl_vd; /* vdev associated with the above */
};

@@ -1521,6 +1530,9 @@ vdev_uberblock_load_done(zio_t *zio)

if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
mutex_enter(&rio->io_lock);
if (vdev_uberblock_compare(ub, &cbp->ubl_latest) > 0) {
cbp->ubl_latest = *ub;
}
if (ub->ub_txg <= spa->spa_load_max_txg &&
vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
/*

@@ -1578,10 +1590,10 @@ vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
ASSERT(config);

memset(ub, 0, sizeof (uberblock_t));
memset(&cb, 0, sizeof (cb));
*config = NULL;

cb.ubl_ubbest = ub;
cb.ubl_vd = NULL;

spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
zio = zio_root(spa, NULL, &cb, flags);

@@ -1598,6 +1610,22 @@ vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
"txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);

if (ub->ub_raidz_reflow_info !=
cb.ubl_latest.ub_raidz_reflow_info) {
vdev_dbgmsg(cb.ubl_vd,
"spa=%s best uberblock (txg=%llu info=0x%llx) "
"has different raidz_reflow_info than latest "
"uberblock (txg=%llu info=0x%llx)",
spa->spa_name,
(u_longlong_t)ub->ub_txg,
(u_longlong_t)ub->ub_raidz_reflow_info,
(u_longlong_t)cb.ubl_latest.ub_txg,
(u_longlong_t)cb.ubl_latest.ub_raidz_reflow_info);
memset(ub, 0, sizeof (uberblock_t));
spa_config_exit(spa, SCL_ALL, FTAG);
return;
}

*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
if (*config == NULL && spa->spa_extreme_rewind) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "

@@ -1719,8 +1747,23 @@ vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
vd->vdev_copy_uberblocks = B_FALSE;
}

/*
* We chose a slot based on the txg. If this uberblock has a special
* RAIDZ expansion state, then it is essentially an update of the
* current uberblock (it has the same txg). However, the current
* state is committed, so we want to write it to a different slot. If
* we overwrote the same slot, and we lose power during the uberblock
* write, and the disk does not do single-sector overwrites
* atomically (even though it is required to - i.e. we should see
* either the old or the new uberblock), then we could lose this
* txg's uberblock. Rewinding to the previous txg's uberblock may not
* be possible because RAIDZ expansion may have already overwritten
* some of the data, so we need the progress indicator in the
* uberblock.
*/
int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
int n = (ub->ub_txg - (RRSS_GET_STATE(ub) == RRSS_SCRATCH_VALID)) %
(VDEV_UBERBLOCK_COUNT(vd) - m);
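/*
 * Worked example (illustrative; assumes 128 uberblock slots and MMP
 * disabled, so m == 0): txg 1000 normally maps to slot 1000 % 128 = 104.
 * If that same txg is rewritten while the scratch space is valid
 * (RRSS_SCRATCH_VALID), it maps to slot (1000 - 1) % 128 = 103 instead,
 * leaving the already-committed copy in slot 104 untouched.
 */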

/* Copy the uberblock_t into the ABD */
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);

@@ -1737,7 +1780,7 @@ vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
}

/* Sync the uberblocks to all vdevs in svd[] */
static int
int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
spa_t *spa = svd[0]->vdev_spa;

(File diff suppressed because it is too large.)

@@ -169,7 +169,8 @@ static boolean_t
vdev_trim_should_stop(vdev_t *vd)
{
return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing);
vd->vdev_detached || vd->vdev_top->vdev_removing ||
vd->vdev_top->vdev_rz_expanding);
}

/*

@@ -180,6 +181,7 @@ vdev_autotrim_should_stop(vdev_t *tvd)
{
return (tvd->vdev_autotrim_exit_wanted ||
!vdev_writeable(tvd) || tvd->vdev_removing ||
tvd->vdev_rz_expanding ||
spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
}

@@ -222,7 +224,8 @@ vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
kmem_free(arg, sizeof (uint64_t));

vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
if (vd == NULL || vd->vdev_top->vdev_removing ||
!vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
return;

uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];

@@ -1005,6 +1008,7 @@ vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
ASSERT(!vd->vdev_rz_expanding);

vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
vd->vdev_trim_thread = thread_create(NULL, 0,

@@ -1162,12 +1166,13 @@ vdev_trim_restart(vdev_t *vd)
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_action_time = timestamp;

if (vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
vd->vdev_offline) {
if ((vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_trim_load(vd));
} else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
!vd->vdev_top->vdev_rz_expanding &&
vd->vdev_trim_thread == NULL) {
VERIFY0(vdev_trim_load(vd));
vdev_trim(vd, vd->vdev_trim_rate,

@@ -1492,7 +1497,8 @@ vdev_autotrim(spa_t *spa)

mutex_enter(&tvd->vdev_autotrim_lock);
if (vdev_writeable(tvd) && !tvd->vdev_removing &&
tvd->vdev_autotrim_thread == NULL) {
tvd->vdev_autotrim_thread == NULL &&
!tvd->vdev_rz_expanding) {
ASSERT3P(tvd->vdev_top, ==, tvd);

tvd->vdev_autotrim_thread = thread_create(NULL, 0,

@@ -1717,6 +1723,7 @@ vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_top->vdev_removing);
ASSERT(!vd->vdev_top->vdev_rz_expanding);

ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);

@@ -252,38 +252,57 @@ while (( timeout == 0 )) || (( curtime <= (starttime + timeout) )); do
or_die rm -rf "$workdir"
or_die mkdir "$workdir"

# switch between three types of configs
# 1/3 basic, 1/3 raidz mix, and 1/3 draid mix
choice=$((RANDOM % 3))

# ashift range 9 - 15
align=$(((RANDOM % 2) * 3 + 9))

# choose parity value
parity=$(((RANDOM % 3) + 1))

draid_data=0
draid_spares=0

# randomly use special classes
class="special=random"

if [[ $choice -eq 0 ]]; then
# basic mirror only
parity=1
# choose between four types of configs
# (basic, raidz mix, raidz expansion, and draid mix)
case $((RANDOM % 4)) in

# basic mirror configuration
0) parity=1
mirrors=2
draid_data=0
draid_spares=0
raid_children=0
vdevs=2
raid_type="raidz"
elif [[ $choice -eq 1 ]]; then
# fully randomized mirror/raidz (sans dRAID)
parity=$(((RANDOM % 3) + 1))
mirrors=$(((RANDOM % 3) * 1))
draid_data=0
draid_spares=0
;;

# fully randomized mirror/raidz (sans dRAID)
1) mirrors=$(((RANDOM % 3) * 1))
raid_children=$((((RANDOM % 9) + parity + 1) * (RANDOM % 2)))
vdevs=$(((RANDOM % 3) + 3))
raid_type="raidz"
else
# fully randomized dRAID (sans mirror/raidz)
parity=$(((RANDOM % 3) + 1))
mirrors=0
;;

# randomized raidz expansion (one top-level raidz vdev)
2) mirrors=0
vdevs=1
# derive initial raidz disk count based on parity choice
# P1: 3 - 7 disks
# P2: 5 - 9 disks
# P3: 7 - 11 disks
raid_children=$(((RANDOM % 5) + (parity * 2) + 1))

# 1/3 of the time use a dedicated '-X' raidz expansion test
if [[ $((RANDOM % 3)) -eq 0 ]]; then
zopt="$zopt -X -t 16"
raid_type="raidz"
else
raid_type="eraidz"
fi
;;
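# Sanity check of the disk-count ranges above (illustrative only;
# RANDOM % 5 yields 0-4):
#   parity=1 -> (0..4) + 2 + 1 = 3..7 disks
#   parity=2 -> (0..4) + 4 + 1 = 5..9 disks
#   parity=3 -> (0..4) + 6 + 1 = 7..11 disks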

# fully randomized dRAID (sans mirror/raidz)
3) mirrors=0
draid_data=$(((RANDOM % 8) + 3))
draid_spares=$(((RANDOM % 2) + parity))
stripe=$((draid_data + parity))

@@ -291,7 +310,11 @@ while (( timeout == 0 )) || (( curtime <= (starttime + timeout) )); do
raid_children=$(((((RANDOM % 4) + 1) * stripe) + extra))
vdevs=$((RANDOM % 3))
raid_type="draid"
fi
;;
*)
# avoid shellcheck SC2249
;;
esac

zopt="$zopt -K $raid_type"
zopt="$zopt -m $mirrors"

@@ -769,8 +769,12 @@ tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
tags = ['functional', 'redacted_send']

[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_expand_001_pos',
'raidz_expand_002_pos', 'raidz_expand_003_neg', 'raidz_expand_003_pos',
'raidz_expand_004_pos', 'raidz_expand_005_pos', 'raidz_expand_006_neg',
'raidz_expand_007_neg']
tags = ['functional', 'raidz']
timeout = 1200

[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',

@@ -34,6 +34,7 @@ DEADMAN_SYNCTIME_MS deadman.synctime_ms zfs_deadman_synctime_ms
DEADMAN_ZIOTIME_MS deadman.ziotime_ms zfs_deadman_ziotime_ms
DISABLE_IVSET_GUID_CHECK disable_ivset_guid_check zfs_disable_ivset_guid_check
DMU_OFFSET_NEXT_SYNC dmu_offset_next_sync zfs_dmu_offset_next_sync
EMBEDDED_SLOG_MIN_MS embedded_slog_min_ms zfs_embedded_slog_min_ms
INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size
INITIALIZE_VALUE initialize_value zfs_initialize_value
KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export

@@ -62,6 +63,7 @@ MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_inter
MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval
OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable
RAIDZ_EXPAND_MAX_REFLOW_BYTES vdev.expand_max_reflow_bytes raidz_expand_max_reflow_bytes
REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled
REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment

@@ -69,6 +71,7 @@ RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms
SCAN_LEGACY scan_legacy zfs_scan_legacy
SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress
SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit
SCRUB_AFTER_EXPAND scrub_after_expand zfs_scrub_after_expand
SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time
SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second
SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation

@@ -1668,8 +1668,14 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/raidz/cleanup.ksh \
functional/raidz/raidz_001_neg.ksh \
functional/raidz/raidz_002_pos.ksh \
functional/raidz/raidz_003_pos.ksh \
functional/raidz/raidz_004_pos.ksh \
functional/raidz/raidz_expand_001_pos.ksh \
functional/raidz/raidz_expand_002_pos.ksh \
functional/raidz/raidz_expand_003_neg.ksh \
functional/raidz/raidz_expand_003_pos.ksh \
functional/raidz/raidz_expand_004_pos.ksh \
functional/raidz/raidz_expand_005_pos.ksh \
functional/raidz/raidz_expand_006_neg.ksh \
functional/raidz/raidz_expand_007_neg.ksh \
functional/raidz/setup.ksh \
functional/redacted_send/cleanup.ksh \
functional/redacted_send/redacted_compressed.ksh \

@@ -106,5 +106,6 @@ if is_linux || is_freebsd; then
"feature@blake3"
"feature@block_cloning"
"feature@vdev_zaps_v2"
"feature@raidz_expansion"
)
fi

@@ -23,19 +23,39 @@
#
# Copyright (c) 2016 by Gvozden Neskovic. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2020 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Call the raidz_test tool with -S to test all supported raidz
# implementations. This options will test several raidz block geometries
# Call the raidz_test tool with sweep to test all supported raidz
# implementations. This will test several raidz block geometries
# and several zio parameters that affect raidz block layout. Data
# reconstruction performs all combinations of failed disks. Wall time
# is set to 5min, but actual runtime might be longer.
# reconstruction performs all combinations of failed disks. Wall
# time is set to 5 min, but actual runtime might be longer.
#

log_must raidz_test -S -t 300

log_pass "raidz_test parameter sweep test succeeded."
case $((RANDOM % 3)) in
0)
# Basic sweep test
log_must raidz_test -S -t 300
log_pass "raidz_test parameter sweep test succeeded."
;;
1)
# Using expanded raidz map to test all supported raidz
# implementations with expanded map and default reflow offset.
log_must raidz_test -S -e -t 300
log_pass "raidz_test sweep test with expanded map succeeded."
;;
2)
# Using expanded raidz map ('-e') to test all supported raidz
# implementations with expanded map and zero reflow offset.
log_must raidz_test -S -e -r 0 -t 300
log_pass "raidz_test sweep test with expanded map succeeded."
;;
*)
# avoid shellcheck SC2249
;;
esac

@@ -1,41 +0,0 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2020 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Call the raidz_test tool with -S and -e to test all supported raidz
# implementations with expanded map and default reflow offset.
# This options will test several raidz block geometries and several zio
# parameters that affect raidz block layout. Data reconstruction performs
# all combinations of failed disks. Wall time is set to 5min, but actual
# runtime might be longer.
#

log_must raidz_test -S -e -t 60

log_pass "raidz_test parameter sweep test with expanded map succeeded."

@@ -1,41 +0,0 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2020 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Call the raidz_test tool with -S and -e to test all supported raidz
# implementations with expanded map and zero reflow offset.
# This options will test several raidz block geometries and several zio
# parameters that affect raidz block layout. Data reconstruction performs
# all combinations of failed disks. Wall time is set to 5min, but actual
# runtime might be longer.
#

log_must raidz_test -S -e -r 0 -t 60

log_pass "raidz_test parameter sweep test with expanded map succeeded."

@@ -0,0 +1,215 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2020 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# 'zpool attach poolname raidz ...' should attach new device to the pool.
#
# STRATEGY:
# 1. Create block device files for the test raidz pool
# 2. For each parity value [1..3]
# - create raidz pool
# - fill it with some directories/files
# - attach device to the raidz pool
# - verify that the device attached and the raidz pool size increased
# - verify resilver by replacing parity devices
# - verify resilver by replacing data devices
# - verify scrub by zeroing parity devices
# - verify scrub by zeroing data devices
# - verify the raidz pool
# - destroy the raidz pool
typeset -r devs=6
typeset -r dev_size_mb=128

typeset -a disks

prefetch_disable=$(get_tunable PREFETCH_DISABLE)

function cleanup
{
log_pos zpool status $TESTPOOL

poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

for i in {0..$devs}; do
log_must rm -f "$TEST_BASE_DIR/dev-$i"
done

log_must set_tunable32 PREFETCH_DISABLE $prefetch_disable
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES 0
}

function wait_expand_paused
{
oldcopied='0'
newcopied='1'
while [[ $oldcopied != $newcopied ]]; do
oldcopied=$newcopied
sleep 2
newcopied=$(zpool status $TESTPOOL | \
grep 'copied out of' | \
awk '{print $1}')
log_note "newcopied=$newcopied"
done
log_note "paused at $newcopied"
}
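# Note (descriptive): wait_expand_paused polls "zpool status" until the
# "copied out of" counter stops changing, i.e. the reflow has stalled at the
# pause point the caller set via the RAIDZ_EXPAND_MAX_REFLOW_BYTES tunable.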

function test_resilver # <pool> <parity> <dir>
{
typeset pool=$1
typeset nparity=$2
typeset dir=$3

for (( i=0; i<$nparity; i=i+1 )); do
log_must zpool offline $pool $dir/dev-$i
done

log_must zpool export $pool

for (( i=0; i<$nparity; i=i+1 )); do
log_must zpool labelclear -f $dir/dev-$i
done

log_must zpool import -o cachefile=none -d $dir $pool

for (( i=0; i<$nparity; i=i+1 )); do
log_must zpool replace -f $pool $dir/dev-$i
done

log_must zpool wait -t resilver $pool

log_must check_pool_status $pool "errors" "No known data errors"

log_must zpool clear $pool

for (( i=$nparity; i<$nparity*2; i=i+1 )); do
log_must zpool offline $pool $dir/dev-$i
done

log_must zpool export $pool

for (( i=$nparity; i<$nparity*2; i=i+1 )); do
log_must zpool labelclear -f $dir/dev-$i
done

log_must zpool import -o cachefile=none -d $dir $pool

for (( i=$nparity; i<$nparity*2; i=i+1 )); do
log_must zpool replace -f $pool $dir/dev-$i
done

log_must zpool wait -t resilver $pool

log_must check_pool_status $pool "errors" "No known data errors"

log_must zpool clear $pool
}

function test_scrub # <pool> <parity> <dir>
{
typeset pool=$1
typeset nparity=$2
typeset dir=$3
typeset combrec=$4

reflow_size=$(get_pool_prop allocated $pool)
randbyte=$(( ((RANDOM<<15) + RANDOM) % $reflow_size ))
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $randbyte
log_must zpool attach $TESTPOOL ${raid}-0 $dir/dev-$devs
wait_expand_paused

log_must zpool export $pool

# zero out parity disks
for (( i=0; i<$nparity; i=i+1 )); do
dd conv=notrunc if=/dev/zero of=$dir/dev-$i \
bs=1M seek=4 count=$(($dev_size_mb-4))
done

log_must zpool import -o cachefile=none -d $dir $pool

log_must zpool scrub -w $pool
log_must zpool clear $pool
log_must zpool export $pool

# zero out parity count worth of data disks
for (( i=$nparity; i<$nparity*2; i=i+1 )); do
dd conv=notrunc if=/dev/zero of=$dir/dev-$i \
bs=1M seek=4 count=$(($dev_size_mb-4))
done

log_must zpool import -o cachefile=none -d $dir $pool

log_must zpool scrub -w $pool

log_must check_pool_status $pool "errors" "No known data errors"

log_must zpool clear $pool
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES 0
log_must zpool wait -t raidz_expand $TESTPOOL
}

log_onexit cleanup

log_must set_tunable32 PREFETCH_DISABLE 1

# Disk files which will be used by pool
for i in {0..$(($devs - 1))}; do
device=$TEST_BASE_DIR/dev-$i
log_must truncate -s ${dev_size_mb}M $device
disks[${#disks[*]}+1]=$device
done

# Disk file which will be attached
log_must truncate -s 512M $TEST_BASE_DIR/dev-$devs

nparity=$((RANDOM%(3) + 1))
raid=raidz$nparity
dir=$TEST_BASE_DIR

log_must zpool create -f -o cachefile=none $TESTPOOL $raid ${disks[@]}
log_must zfs set primarycache=metadata $TESTPOOL

log_must zfs create $TESTPOOL/fs
log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R

log_must zfs create -o compress=on $TESTPOOL/fs2
log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R

log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R

log_must check_pool_status $TESTPOOL "errors" "No known data errors"

test_scrub $TESTPOOL $nparity $dir
test_resilver $TESTPOOL $nparity $dir

zpool destroy "$TESTPOOL"

log_pass "raidz expansion test succeeded."

@@ -0,0 +1,115 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2020 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# 'zpool attach poolname raidz ...' should attach new device to the pool.
#
# STRATEGY:
# 1. Create block device files for the test raidz pool
# 2. For each parity value [1..3]
# - create raidz pool with minimum block device files required
# - for each free test block device
# - attach to the pool
# - verify the raidz pool
# - destroy the raidz pool

typeset -r devs=6
typeset -r dev_size_mb=512

typeset -a disks

prefetch_disable=$(get_tunable PREFETCH_DISABLE)

function cleanup
{
poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

for i in {0..$devs}; do
log_must rm -f "$TEST_BASE_DIR/dev-$i"
done

log_must set_tunable32 PREFETCH_DISABLE $prefetch_disable
}

log_onexit cleanup

log_must set_tunable32 PREFETCH_DISABLE 1

# Disk files which will be used by pool
for i in {0..$(($devs))}; do
device=$TEST_BASE_DIR/dev-$i
log_must truncate -s ${dev_size_mb}M $device
disks[${#disks[*]}+1]=$device
done

nparity=$((RANDOM%(3) + 1))
raid=raidz$nparity
dir=$TEST_BASE_DIR
pool=$TESTPOOL
opts="-o cachefile=none"

log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
log_must zfs set primarycache=metadata $pool

log_must zfs create $pool/fs
log_must fill_fs /$pool/fs 1 512 100 1024 R

log_must zfs create -o compress=on $pool/fs2
log_must fill_fs /$pool/fs2 1 512 100 1024 R

log_must zfs create -o compress=on -o recordsize=8k $pool/fs3
log_must fill_fs /$pool/fs3 1 512 100 1024 R

typeset pool_size=$(get_pool_prop size $pool)

for disk in ${disks[$(($nparity+2))..$devs]}; do
log_must dd if=/dev/urandom of=/${pool}/FILE-$RANDOM bs=1M \
count=64

log_must zpool attach -w $pool ${raid}-0 $disk

# Wait some time for pool size increase
sleep 5

# Confirm that disk was attached to the pool
log_must zpool get -H path $TESTPOOL $disk

typeset expand_size=$(get_pool_prop size $pool)
if [[ "$expand_size" -le "$pool_size" ]]; then
log_fail "pool $pool not expanded"
fi

verify_pool $pool

pool_size=$expand_size
done

zpool destroy "$pool"

log_pass "raidz expansion test succeeded."

@@ -0,0 +1,102 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2021 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# 'zpool attach poolname raidz ...' should reject device attach if pool
# is in checkpointed state. If checkpoint creation requested on
# expanding pool, the request should be rejected.

#
# STRATEGY:
# 1. Create block device files for the test raidz pool.
# 2. Create pool and checkpoint it.
# 3. Try to expand raidz, ensure that the request is rejected.
# 4. Recreate the pool.
# 5. Apply raidz expansion.
# 6. Ensure that a checkpoint cannot be created.

typeset -r devs=6
typeset -r dev_size_mb=512

typeset -a disks

prefetch_disable=$(get_tunable PREFETCH_DISABLE)

function cleanup
{
poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

for i in {0..$devs}; do
log_must rm -f "$TEST_BASE_DIR/dev-$i"
done

log_must set_tunable32 PREFETCH_DISABLE $prefetch_disable
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES 0
}

log_onexit cleanup

log_must set_tunable32 PREFETCH_DISABLE 1

# Disk files which will be used by pool
for i in {0..$(($devs))}; do
device=$TEST_BASE_DIR/dev-$i
log_must truncate -s ${dev_size_mb}M $device
disks[${#disks[*]}+1]=$device
done

nparity=1
raid=raidz$nparity
pool=$TESTPOOL
opts="-o cachefile=none"

# case 1: checkpoint exists, try to expand
log_must zpool create -f $opts $pool $raid ${disks[1..$(($devs-1))]}
log_must zfs set primarycache=metadata $pool
log_must zpool checkpoint $pool
log_mustnot zpool attach $pool ${raid}-0 ${disks[$devs]}
log_must zpool destroy $pool

#
# case 2: expansion in progress, try to checkpoint
#
# Sets pause point at 25% of allocated space so that we know an
# expansion is still in progress when we attempt the checkpoint
#
log_must zpool create -f $opts $pool $raid ${disks[1..$(($devs-1))]}
log_must zfs set primarycache=metadata $pool
log_must zfs create $pool/fs
log_must fill_fs /$pool/fs 1 512 100 1024 R
allocated=$(zpool list -Hp -o allocated $pool)
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $((allocated / 4))
log_must zpool attach $pool ${raid}-0 ${disks[$devs]}
log_mustnot zpool checkpoint $pool
log_must zpool destroy $pool

log_pass "raidz expansion test succeeded."

@@ -0,0 +1,141 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2021 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_wait/zfs_wait.kshlib

#
# DESCRIPTION:
# Check raidz expansion is able to work correctly under i/o load.
#
# STRATEGY:
# 1. Create block device files for the test raidz pool
# 2. For each parity value [1..3]
# - create raidz pool with minimum block device files required
# - create a couple of datasets with different recordsizes and fill them
# - set a max reflow value near pool capacity
# - wait for reflow to reach this max
# - verify pool
# - set reflow bytes to max value to complete the expansion

typeset -r devs=10
typeset -r dev_size_mb=256

typeset -a disks

embedded_slog_min_ms=$(get_tunable EMBEDDED_SLOG_MIN_MS)

function cleanup
{
poolexists "$TESTPOOL" && zpool status -v "$TESTPOOL"
poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

for i in {0..$devs}; do
log_must rm -f "$TEST_BASE_DIR/dev-$i"
done

log_must set_tunable32 EMBEDDED_SLOG_MIN_MS $embedded_slog_min_ms
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES 0
}

function wait_expand_paused
{
oldcopied='0'
newcopied='1'
# wait until reflow copied value stops changing
while [[ $oldcopied != $newcopied ]]; do
oldcopied=$newcopied
sleep 1
newcopied=$(zpool status $TESTPOOL | \
grep 'copied out of' | \
awk '{print $1}')
done
}

log_onexit cleanup

log_must set_tunable32 EMBEDDED_SLOG_MIN_MS 99999

# Disk files which will be used by pool
for i in {0..$(($devs))}; do
device=$TEST_BASE_DIR/dev-$i
log_must truncate -s ${dev_size_mb}M $device
disks[${#disks[*]}+1]=$device
done

nparity=$((RANDOM%(3) + 1))
raid=raidz$nparity
pool=$TESTPOOL
opts="-o cachefile=none"

log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}

log_must zfs create -o recordsize=8k $pool/fs
log_must fill_fs /$pool/fs 1 256 100 1024 R

log_must zfs create -o recordsize=128k $pool/fs2
log_must fill_fs /$pool/fs2 1 256 100 1024 R

for disk in ${disks[$(($nparity+2))..$devs]}; do
log_must mkfile -n 400m /$pool/fs/file
log_bkgrnd randwritecomp /$pool/fs/file 250
pid0=$!

# start some random writes in the background during expansion
log_must mkfile -n 400m /$pool/fs2/file2
log_bkgrnd randwritecomp /$pool/fs2/file2 250
pid1=$!
sleep 10

# Pause at half total bytes to be copied for expansion
reflow_size=$(get_pool_prop allocated $pool)
log_note need to reflow $reflow_size bytes
pause=$((reflow_size/2))
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

log_must zpool attach $pool ${raid}-0 $disk
wait_expand_paused

kill_if_running $pid0
kill_if_running $pid1

log_must zpool scrub -w $pool

log_must check_pool_status $pool "errors" "No known data errors"
log_must check_pool_status $pool "scan" "with 0 errors"
log_must check_pool_status $pool "scan" "repaired 0B"

# Set pause past largest possible value for this pool
pause=$((devs*dev_size_mb*1024*1024))
log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

log_must zpool wait -t raidz_expand $pool
done

log_must zpool destroy "$pool"

log_pass "raidz expansion test succeeded."

@ -0,0 +1,121 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2021 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Check device replacement during raidz expansion.
#
# STRATEGY:
# 1. Create block device files for the test raidz pool
# 2. For each parity value [1..3]
#    - create a raidz pool with the minimum number of block device files required
#    - create a couple of datasets with different recordsizes and fill them
#    - attach a new device to the pool
#    - offline and zero as many vdevs as parity allows
#    - wait some time and start replacing the offlined vdevs
#    - wait for the replacement to complete and verify the pool status

typeset -r devs=10
typeset -r dev_size_mb=128

typeset -a disks

embedded_slog_min_ms=$(get_tunable EMBEDDED_SLOG_MIN_MS)
original_scrub_after_expand=$(get_tunable SCRUB_AFTER_EXPAND)

function cleanup
{
	poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

	for i in {0..$devs}; do
		log_must rm -f "$TEST_BASE_DIR/dev-$i"
	done

	log_must set_tunable32 EMBEDDED_SLOG_MIN_MS $embedded_slog_min_ms
	log_must set_tunable32 SCRUB_AFTER_EXPAND $original_scrub_after_expand
}

log_onexit cleanup

log_must set_tunable32 EMBEDDED_SLOG_MIN_MS 99999

# Disk files which will be used by pool
for i in {0..$(($devs))}; do
	device=$TEST_BASE_DIR/dev-$i
	log_must truncate -s ${dev_size_mb}M $device
	disks[${#disks[*]}+1]=$device
done

nparity=$((RANDOM%(3) + 1))
raid=raidz$nparity
pool=$TESTPOOL
opts="-o cachefile=none"

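# Disable the automatic post-expansion scrub; this test issues its own
# scrub and status checks below.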
log_must set_tunable32 SCRUB_AFTER_EXPAND 0

log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}

log_must zfs create -o recordsize=8k $pool/fs
log_must fill_fs /$pool/fs 1 128 100 1024 R

log_must zfs create -o recordsize=128k $pool/fs2
log_must fill_fs /$pool/fs2 1 128 100 1024 R

for disk in ${disks[$(($nparity+2))..$devs]}; do
	log_must zpool attach $pool ${raid}-0 $disk

	sleep 10
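
	# Degrade the raidz by offlining and zeroing as many disks as its
	# parity allows while the expansion is still running.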
	for (( i=1; i<=$nparity; i=i+1 )); do
		log_must zpool offline $pool ${disks[$i]}
		log_must dd if=/dev/zero of=${disks[$i]} \
		    bs=1024k count=$dev_size_mb conv=notrunc
	done

	sleep 3

	for (( i=1; i<=$nparity; i=i+1 )); do
		log_must zpool replace $pool ${disks[$i]}
	done

	log_must zpool wait -t replace $pool
	log_must check_pool_status $pool "scan" "with 0 errors"

	log_must zpool wait -t raidz_expand $pool

	log_must zpool clear $pool
	log_must zpool scrub -w $pool

	log_must zpool status -v
	log_must check_pool_status $pool "scan" "with 0 errors"
done

log_must zpool destroy "$pool"

log_pass "raidz expansion test succeeded."

@ -0,0 +1,177 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2021 by vStack. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Check device replacement during raidz expansion using expansion pausing.
#
# STRATEGY:
# 1. Create block device files for the test raidz pool
# 2. For each parity value [1..3]
#    - create a raidz pool with the minimum number of block device files required
#    - create a couple of datasets with different recordsizes and fill them
#    - set the raidz expand maximum reflow bytes
#    - attach a new device to the pool
#    - wait for the reflow bytes to reach the maximum
#    - offline and zero as many vdevs as parity allows
#    - wait some time and start replacing the offlined vdevs
#    - wait for the replacement to complete and verify the pool status
#    - loop through vdev replacements while increasing the max reflow bytes
#    - verify the pool
#    - set the reflow bytes to the max value to complete the expansion

typeset -r devs=10
typeset -r dev_size_mb=128

typeset -a disks

embedded_slog_min_ms=$(get_tunable EMBEDDED_SLOG_MIN_MS)
original_scrub_after_expand=$(get_tunable SCRUB_AFTER_EXPAND)

function cleanup
{
	poolexists "$TESTPOOL" && zpool status -v "$TESTPOOL"
	poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

	for i in {0..$devs}; do
		log_must rm -f "$TEST_BASE_DIR/dev-$i"
	done

	log_must set_tunable32 EMBEDDED_SLOG_MIN_MS $embedded_slog_min_ms
	log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES 0
	log_must set_tunable32 SCRUB_AFTER_EXPAND $original_scrub_after_expand
}

function wait_expand_paused
{
	oldcopied='0'
	newcopied='1'
	# wait until reflow copied value stops changing
	while [[ $oldcopied != $newcopied ]]; do
		oldcopied=$newcopied
		sleep 1
		newcopied=$(zpool status $TESTPOOL | \
		    grep 'copied out of' | \
		    awk '{print $1}')
	done
}

log_onexit cleanup

function test_replace # <pool> <devices> <parity>
{
	pool=${1}
	devices=${2}
	nparity=${3}
	device_count=0

	log_must echo "devices=$devices"

	for dev in ${devices}; do
		device_count=$((device_count+1))
	done

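	# Choose a random starting index so that the $nparity consecutive
	# disks offlined below are all current members of the pool.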
	index=$((RANDOM%(device_count-nparity)))
	for (( j=1; j<=$nparity; j=j+1 )); do
		log_must zpool offline $pool ${disks[$((index+j))]}
		log_must dd if=/dev/zero of=${disks[$((index+j))]} \
		    bs=1024k count=$dev_size_mb conv=notrunc
	done

	for (( j=1; j<=$nparity; j=j+1 )); do
		log_must zpool replace $pool ${disks[$((index+j))]}
	done

	log_must zpool wait -t replace $pool
	log_must check_pool_status $pool "scan" "with 0 errors"

	log_must zpool clear $pool
	log_must zpool scrub -w $pool

	log_must zpool status -v
	log_must check_pool_status $pool "scan" "repaired 0B"
}

log_must set_tunable32 EMBEDDED_SLOG_MIN_MS 99999

# Disk files which will be used by pool
for i in {0..$(($devs))}; do
	device=$TEST_BASE_DIR/dev-$i
	log_must truncate -s ${dev_size_mb}M $device
	disks[${#disks[*]}+1]=$device
done

nparity=$((RANDOM%(3) + 1))
raid=raidz$nparity
pool=$TESTPOOL
opts="-o cachefile=none"
devices=""

log_must set_tunable32 SCRUB_AFTER_EXPAND 0

log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]}
devices="${disks[1..$(($nparity+1))]}"

log_must zfs create -o recordsize=8k $pool/fs
log_must fill_fs /$pool/fs 1 128 100 1024 R

log_must zfs create -o recordsize=128k $pool/fs2
log_must fill_fs /$pool/fs2 1 128 100 1024 R

for disk in ${disks[$(($nparity+2))..$devs]}; do
	# Set pause to some random value near the halfway point
	reflow_size=$(get_pool_prop allocated $pool)
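	# RANDOM in ksh is only 15 bits wide, so two draws are combined into a
	# ~30-bit value before taking it modulo the allocated size and halving.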
	pause=$((((RANDOM << 15) + RANDOM) % reflow_size / 2))
	log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

	log_must zpool attach $pool ${raid}-0 $disk
	devices="$devices $disk"

	wait_expand_paused

	for (( i=0; i<2; i++ )); do
		test_replace $pool "$devices" $nparity

		# Increase pause by a random amount of up to 25% of the
		# allocated size
		pause=$((pause + (((RANDOM << 15) + RANDOM) % \
		    reflow_size) / 4))
		log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

		wait_expand_paused
	done

	# Set pause past the largest possible value for this pool
	pause=$((devs*dev_size_mb*1024*1024))
	log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $pause

	log_must zpool wait -t raidz_expand $pool
done

log_must zpool destroy "$pool"

log_pass "raidz expansion test succeeded."

@ -0,0 +1,78 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2023 by iXsystems, Inc.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# 'zpool attach poolname raidz ...' should fail if the raidz_expansion
# feature is not enabled.
#
# STRATEGY:
# 1. Create a raidz pool with the raidz_expansion feature disabled
# 2. Attempt to attach a device to the raidz vdev
# 3. Verify that the device attach fails
# 4. Destroy the raidz pool

typeset -r devs=4
typeset -r dev_size_mb=128
typeset -a disks

function cleanup
{
	log_pos zpool status "$TESTPOOL"

	poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

	for i in {0..$devs}; do
		log_must rm -f "$TEST_BASE_DIR/dev-$i"
	done
}

log_onexit cleanup

for i in {0..$devs}; do
	device=$TEST_BASE_DIR/dev-$i
	log_must truncate -s ${dev_size_mb}M "$device"
	if [[ $i -ne $devs ]]; then
		disks[${#disks[*]}+1]=$device
	fi
done

# create a pool with raidz_expansion feature disabled
log_must zpool create -f -o cachefile=none -o feature@raidz_expansion=disabled \
	"$TESTPOOL" raidz1 "${disks[@]}"
status=$(zpool list -H -o feature@raidz_expansion "$TESTPOOL")
if [[ "$status" != "disabled" ]]; then
	log_fail "raidz_expansion feature was not disabled"
fi

# expecting attach to fail
log_mustnot_expect "raidz_expansion feature must be enabled" zpool attach -f \
	"$TESTPOOL" raidz1-0 "$TEST_BASE_DIR/dev-$devs"
log_must zpool destroy "$TESTPOOL"

log_pass "raidz attach failed with feature disabled as expected"
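
For reference, the failure exercised above is recoverable without recreating the pool: the feature flag can be enabled on the existing pool and the attach retried. A minimal sketch reusing this test's pool and device names (not part of the test itself):

	# enable the feature flag, then retry the attach that failed above
	zpool set feature@raidz_expansion=enabled "$TESTPOOL"
	zpool attach -f "$TESTPOOL" raidz1-0 "$TEST_BASE_DIR/dev-$devs"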
@ -0,0 +1,86 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2023 by iXsystems, Inc.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Negative test for FreeBSD only.
#
# Attempting to expand a RAIDZ should fail if the scratch area on the
# existing disks contains the BTX Server binary (used to boot FreeBSD when
# using MBR partitions with ZFS).
#
# STRATEGY:
# 1. Create a raidz pool
# 2. Add a BTX header to the reserved boot area
# 3. Attempt to attach a device to the raidz vdev
# 4. Verify that the device attach fails on FreeBSD and succeeds elsewhere
# 5. Destroy the raidz pool

typeset -r devs=4
typeset -r dev_size_mb=128
typeset -a disks

function cleanup
{
	log_pos zpool status "$TESTPOOL"

	poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

	for i in {0..$devs}; do
		log_must rm -f "$TEST_BASE_DIR/dev-$i"
	done
}

log_onexit cleanup

for i in {0..$devs}; do
	device=$TEST_BASE_DIR/dev-$i
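	# The reserved boot area starts 512 KiB into the device (right after
	# the two front vdev labels); raidz expansion reuses it as scratch
	# space, so a BTX signature planted there should block the attach on
	# FreeBSD.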
	# simulate active BTX Server data by inserting a BTX header
	printf "\xeb\x0e%s\x01\x02\x80" "BTX" | dd of="$device" \
	    bs=512 seek=1024 status=none
	log_must truncate -s ${dev_size_mb}M "$device"
	if [[ $i -ne $devs ]]; then
		disks[${#disks[*]}+1]=$device
	fi
done

log_must zpool create -f -o cachefile=none "$TESTPOOL" raidz1 "${disks[@]}"

if is_freebsd; then
	# expecting attach to fail
	log_mustnot_expect "the reserved boot area" zpool attach -f \
	    "$TESTPOOL" raidz1-0 "$TEST_BASE_DIR/dev-$devs"
	log_must zpool destroy "$TESTPOOL"
	log_pass "raidz attach failed with in-use reserved boot area"
else
	# expecting attach to pass everywhere else
	log_must zpool attach -f "$TESTPOOL" raidz1-0 "$TEST_BASE_DIR/dev-$devs"
	log_must zpool destroy "$TESTPOOL"
	log_pass "raidz attach passed with in-use reserved boot area"
fi