Use a helper function to clarify gang block size

For gang blocks, `DVA_GET_ASIZE()` is the total space allocated for the
gang DVA, including its child BPs.  The space allocated at each DVA's
vdev/offset is only `vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE)`.

This commit makes that relationship clearer by introducing a helper
function, `vdev_gang_header_asize()`, for the space allocated at the
gang block's vdev/offset.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #11744
Matthew Ahrens, 2021-03-26 11:19:35 -07:00, committed by GitHub
commit 2b56a63457 (parent b85f47efd0)
4 changed files with 30 additions and 11 deletions
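
To make the size relationship concrete before the diff, here is a minimal sketch. It is not part of the commit, and the wrapper name `dva_allocated_size_at_offset()` is hypothetical; it only illustrates how the two quantities differ for a gang DVA:

```c
#include <sys/spa.h>	/* dva_t, DVA_GET_ASIZE(), DVA_GET_GANG() */
#include <sys/vdev.h>	/* vdev_t, vdev_gang_header_asize() */

/*
 * Hypothetical wrapper: return the space allocated at this DVA's
 * vdev/offset.  For a gang DVA, DVA_GET_ASIZE() also counts the child
 * BPs' allocations, so only the gang header size applies at the offset.
 */
static uint64_t
dva_allocated_size_at_offset(vdev_t *vd, const dva_t *dva)
{
	if (DVA_GET_GANG(dva))
		return (vdev_gang_header_asize(vd));	/* header only */
	return (DVA_GET_ASIZE(dva));	/* entire allocation at this offset */
}
```

The hunks below in metaslab.c and zio.c apply exactly this distinction, substituting `vdev_gang_header_asize(vd)` wherever the on-disk footprint at a gang DVA's offset is needed.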

include/sys/spa.h

@@ -405,6 +405,12 @@ typedef struct blkptr {
 /*
  * Macros to get and set fields in a bp or DVA.
  */
+
+/*
+ * Note, for gang blocks, DVA_GET_ASIZE() is the total space allocated for
+ * this gang DVA including its children BP's.  The space allocated at this
+ * DVA's vdev/offset is vdev_gang_header_asize(vdev).
+ */
 #define DVA_GET_ASIZE(dva) \
         BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
 #define DVA_SET_ASIZE(dva, x) \

include/sys/vdev.h

@@ -133,6 +133,15 @@ extern int64_t vdev_deflated_space(vdev_t *vd, int64_t space);
 extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize);
 
+/*
+ * Return the amount of space allocated for a gang block header.
+ */
+static inline uint64_t
+vdev_gang_header_asize(vdev_t *vd)
+{
+        return (vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE));
+}
+
 extern int vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux);
 extern int vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux);
 extern int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,

module/zfs/metaslab.c

@@ -5544,7 +5544,7 @@ metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
         ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
 
         if (DVA_GET_GANG(dva))
-                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+                size = vdev_gang_header_asize(vd);
 
         msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
@@ -5579,7 +5579,7 @@ metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
         ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
 
         if (DVA_GET_GANG(dva)) {
-                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+                size = vdev_gang_header_asize(vd);
         }
 
         metaslab_free_impl(vd, offset, size, checkpoint);
@@ -5768,7 +5768,7 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
         ASSERT(DVA_IS_VALID(dva));
 
         if (DVA_GET_GANG(dva))
-                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+                size = vdev_gang_header_asize(vd);
 
         return (metaslab_claim_impl(vd, offset, size, txg));
 }
@@ -6034,7 +6034,7 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
                 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
 
                 if (DVA_GET_GANG(&bp->blk_dva[i]))
-                        size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+                        size = vdev_gang_header_asize(vd);
 
                 ASSERT3P(vd, !=, NULL);

module/zfs/zio.c

@@ -995,7 +995,8 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
          * that are in the log) to be arbitrarily large.
          */
         for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
-                uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
+                const dva_t *dva = &bp->blk_dva[i];
+                uint64_t vdevid = DVA_GET_VDEV(dva);
 
                 if (vdevid >= spa->spa_root_vdev->vdev_children) {
                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
@@ -1024,10 +1025,10 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
                          */
                         continue;
                 }
-                uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
-                uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
-                if (BP_IS_GANG(bp))
-                        asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+                uint64_t offset = DVA_GET_OFFSET(dva);
+                uint64_t asize = DVA_GET_ASIZE(dva);
+                if (DVA_GET_GANG(dva))
+                        asize = vdev_gang_header_asize(vd);
                 if (offset + asize > vd->vdev_asize) {
                         errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
                             "blkptr at %p DVA %u has invalid OFFSET %llu",
@@ -1064,8 +1065,8 @@ zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
         uint64_t offset = DVA_GET_OFFSET(dva);
         uint64_t asize = DVA_GET_ASIZE(dva);
-        if (BP_IS_GANG(bp))
-                asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+        if (DVA_GET_GANG(dva))
+                asize = vdev_gang_header_asize(vd);
 
         if (offset + asize > vd->vdev_asize)
                 return (B_FALSE);
@@ -3984,6 +3985,9 @@ zio_vdev_io_assess(zio_t *zio)
          */
         if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
             vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
+                vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
+                    "cant_write=TRUE due to write failure with ENXIO",
+                    zio);
                 vd->vdev_cant_write = B_TRUE;
         }