Distributed Spare (dRAID) Feature
This patch adds a new top-level vdev type called dRAID, which stands
for Distributed parity RAID. This pool configuration allows all dRAID
vdevs to participate when rebuilding to a distributed hot spare device.
This can substantially reduce the total time required to restore full
parity to a pool with a failed device.
A dRAID pool can be created using the new top-level `draid` type.
Like `raidz`, the desired redundancy is specified after the type:
`draid[1,2,3]`. No additional information is required to create the
pool and reasonable default values will be chosen based on the number
of child vdevs in the dRAID vdev.
zpool create <pool> draid[1,2,3] <vdevs...>
Unlike raidz, additional optional dRAID configuration values can be
provided as part of the draid type as colon-separated values. This
allows administrators to fully specify a layout for either performance
or capacity reasons. The supported options include:
zpool create <pool> \
draid[<parity>][:<data>d][:<children>c][:<spares>s] \
<vdevs...>
- draid[<parity>] - Parity level (default 1)
- draid[:<data>d] - Data devices per group (default 8)
- draid[:<children>c] - Expected number of child vdevs
- draid[:<spares>s] - Distributed hot spares (default 0)
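As a concrete illustration (the device list is a placeholder, not part of
this patch), the 68-disk layout shown in the `zpool status` output below
could be requested with:
zpool create <pool> draid2:8d:68c:2s <68 vdevs...>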
Abbreviated example `zpool status` output for a 68-disk dRAID pool
with two distributed spares using special allocation classes.
```
  pool: tank
 state: ONLINE
config:

    NAME                    STATE     READ WRITE CKSUM
    slag7                   ONLINE       0     0     0
      draid2:8d:68c:2s-0    ONLINE       0     0     0
        L0                  ONLINE       0     0     0
        L1                  ONLINE       0     0     0
        ...
        U25                 ONLINE       0     0     0
        U26                 ONLINE       0     0     0
        spare-53            ONLINE       0     0     0
          U27               ONLINE       0     0     0
          draid2-0-0        ONLINE       0     0     0
        U28                 ONLINE       0     0     0
        U29                 ONLINE       0     0     0
        ...
        U42                 ONLINE       0     0     0
        U43                 ONLINE       0     0     0
    special
      mirror-1              ONLINE       0     0     0
        L5                  ONLINE       0     0     0
        U5                  ONLINE       0     0     0
      mirror-2              ONLINE       0     0     0
        L6                  ONLINE       0     0     0
        U6                  ONLINE       0     0     0
    spares
      draid2-0-0            INUSE     currently in use
      draid2-0-1            AVAIL
```
When adding test coverage for the new dRAID vdev type, the following
options were added to the ztest command. These options are leveraged
by zloop.sh to test a wide range of dRAID configurations.
-K draid|raidz|random - kind of RAID to test
-D <value> - dRAID data drives per group
-S <value> - dRAID distributed hot spares
-R <value> - RAID parity (raidz or dRAID)
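For example, an illustrative invocation (values arbitrary, and any other
ztest arguments omitted) exercising a draid2 layout with 8 data drives per
group and 2 distributed spares could be:
ztest -K draid -R 2 -D 8 -S 2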
The zpool_create, zpool_import, redundancy, replacement and fault
test groups have all been updated to provide test coverage for the
dRAID feature.
Co-authored-by: Isaac Huang <he.huang@intel.com>
Co-authored-by: Mark Maybee <mmaybee@cray.com>
Co-authored-by: Don Brady <don.brady@delphix.com>
Co-authored-by: Matthew Ahrens <mahrens@delphix.com>
Co-authored-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Mark Maybee <mmaybee@cray.com>
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #10102

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016, Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 */

#ifndef _SYS_VDEV_DRAID_H
#define _SYS_VDEV_DRAID_H

#include <sys/types.h>
#include <sys/abd.h>
#include <sys/nvpair.h>
#include <sys/zio.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Constants required to generate and use dRAID permutations.
 */
#define VDEV_DRAID_SEED 0xd7a1d5eed
#define VDEV_DRAID_MAX_MAPS 254
#define VDEV_DRAID_ROWSHIFT SPA_MAXBLOCKSHIFT
#define VDEV_DRAID_ROWHEIGHT (1ULL << VDEV_DRAID_ROWSHIFT)
#define VDEV_DRAID_REFLOW_RESERVE (2 * VDEV_DRAID_ROWHEIGHT)

/*
 * dRAID permutation map.
 */
typedef struct draid_map {
        uint64_t dm_children;   /* # of permutation columns */
        uint64_t dm_nperms;     /* # of permutation rows */
        uint64_t dm_seed;       /* dRAID map seed */
        uint64_t dm_checksum;   /* Checksum of generated map */
        uint8_t *dm_perms;      /* base permutation array */
} draid_map_t;
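
A note on dm_perms: the field comments give the dimensions (dm_nperms rows
by dm_children columns) but not the memory layout. The helper below is a
minimal sketch assuming a row-major uint8_t array; both the layout and the
function name are illustrative, not part of this header.

```
/*
 * Hypothetical helper: return the physical child index stored at
 * permutation row `row', column `col', assuming dm_perms is laid out
 * row-major with dm_children entries per row.
 */
static inline uint8_t
draid_map_child(const draid_map_t *dm, uint64_t row, uint64_t col)
{
        return (dm->dm_perms[(row % dm->dm_nperms) * dm->dm_children + col]);
}
```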

/*
 * dRAID configuration.
 */
typedef struct vdev_draid_config {
        /*
         * Values read from the dRAID nvlist configuration.
         */
        uint64_t vdc_ndata;       /* # of data devices in group */
        uint64_t vdc_nparity;     /* # of parity devices in group */
        uint64_t vdc_nspares;     /* # of distributed spares */
        uint64_t vdc_children;    /* # of children */
        uint64_t vdc_ngroups;     /* # groups per slice */

        /*
         * Immutable derived constants.
         */
        uint8_t *vdc_perms;       /* permutation array */
        uint64_t vdc_nperms;      /* # of permutations */
        uint64_t vdc_groupwidth;  /* = data + parity */
        uint64_t vdc_ndisks;      /* = children - spares */
        uint64_t vdc_groupsz;     /* = groupwidth * DRAID_ROWSIZE */
        uint64_t vdc_devslicesz;  /* = (groupsz * groups) / ndisks */
} vdev_draid_config_t;
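
The comments on the derived fields spell out how they follow from the nvlist
values. Below is a minimal sketch of that derivation; DRAID_ROWSIZE is not
defined in this header, so VDEV_DRAID_ROWHEIGHT is assumed as its stand-in,
and the helper name is invented for illustration.

```
/*
 * Illustrative only: fill in the derived constants of a vdev_draid_config_t
 * from the values read out of the dRAID nvlist, following the formulas in
 * the structure comments above.
 */
static void
draid_config_derive(vdev_draid_config_t *vdc)
{
        vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
        vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
        vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
        vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
            vdc->vdc_ndisks;
}
```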

/*
 * Functions for handling dRAID permutation maps.
 */
extern uint64_t vdev_draid_rand(uint64_t *);
extern int vdev_draid_lookup_map(uint64_t, const draid_map_t **);
extern int vdev_draid_generate_perms(const draid_map_t *, uint8_t **);
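
Only the signatures above come from this header; the sketch below shows one
plausible way they fit together, assuming vdev_draid_lookup_map() is keyed by
the child count and that both functions return 0 on success.

```
/*
 * Illustrative sketch: obtain the expanded permutation array for a dRAID
 * vdev with `children' drives.  The lookup key and the return conventions
 * are assumptions, not guarantees from this header.
 */
static int
draid_get_perms(uint64_t children, uint8_t **permsp)
{
        const draid_map_t *map;
        int error;

        error = vdev_draid_lookup_map(children, &map);
        if (error != 0)
                return (error);

        return (vdev_draid_generate_perms(map, permsp));
}
```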

/*
 * General dRAID support functions.
 */
extern boolean_t vdev_draid_readable(vdev_t *, uint64_t);
extern boolean_t vdev_draid_missing(vdev_t *, uint64_t, uint64_t, uint64_t);
extern uint64_t vdev_draid_asize_to_psize(vdev_t *, uint64_t);
extern void vdev_draid_map_alloc_empty(zio_t *, struct raidz_row *);
extern nvlist_t *vdev_draid_read_config_spare(vdev_t *);

/* Functions for dRAID distributed spares. */
extern vdev_t *vdev_draid_spare_get_child(vdev_t *, uint64_t);
extern vdev_t *vdev_draid_spare_get_parent(vdev_t *);
extern int vdev_draid_spare_create(nvlist_t *, vdev_t *, uint64_t *, uint64_t);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_VDEV_DRAID_H */