Fix and disable block statistics during scrub

Block statistics calculation during scrub I/O issue, in the case of sorted
scrub, accounted ditto blocks several times.  Embedded blocks, on the other
hand, were not accounted at all.  This change moves the accounting from the
issue stage to the scan stage, which fixes both problems and also avoids the
pool-wide locking and the lock contention it created.

Since these statistics are quite specific and are not currently exposed
anywhere, disable their calculation by default so as not to waste CPU time.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored-By: iXsystems, Inc.
Closes #13579
This commit is contained in:
Alexander Motin 2022-06-28 14:23:31 -04:00 committed by Brian Behlendorf
parent 5e06805d8e
commit 4b8f16072d
3 changed files with 30 additions and 34 deletions

View File

@ -82,7 +82,6 @@ typedef struct zfs_blkstat {
typedef struct zfs_all_blkstats { typedef struct zfs_all_blkstats {
zfs_blkstat_t zab_type[DN_MAX_LEVELS + 1][DMU_OT_TOTAL + 1]; zfs_blkstat_t zab_type[DN_MAX_LEVELS + 1][DMU_OT_TOTAL + 1];
kmutex_t zab_lock;
} zfs_all_blkstats_t; } zfs_all_blkstats_t;

View File

@ -418,10 +418,8 @@ dsl_pool_close(dsl_pool_t *dp)
cv_destroy(&dp->dp_spaceavail_cv); cv_destroy(&dp->dp_spaceavail_cv);
taskq_destroy(dp->dp_unlinked_drain_taskq); taskq_destroy(dp->dp_unlinked_drain_taskq);
taskq_destroy(dp->dp_zrele_taskq); taskq_destroy(dp->dp_zrele_taskq);
if (dp->dp_blkstats != NULL) { if (dp->dp_blkstats != NULL)
mutex_destroy(&dp->dp_blkstats->zab_lock);
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t)); vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
}
kmem_free(dp, sizeof (dsl_pool_t)); kmem_free(dp, sizeof (dsl_pool_t));
} }

View File

@ -129,6 +129,7 @@ static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(vdev_t *vd); static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
extern int zfs_vdev_async_write_active_min_dirty_percent; extern int zfs_vdev_async_write_active_min_dirty_percent;
static int zfs_scan_blkstats = 0;
/* /*
* By default zfs will check to ensure it is not over the hard memory * By default zfs will check to ensure it is not over the hard memory
@ -788,13 +789,19 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
/* back to the generic stuff */ /* back to the generic stuff */
if (zfs_scan_blkstats) {
if (dp->dp_blkstats == NULL) { if (dp->dp_blkstats == NULL) {
dp->dp_blkstats = dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP); vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
mutex_init(&dp->dp_blkstats->zab_lock, NULL,
MUTEX_DEFAULT, NULL);
} }
bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type)); memset(&dp->dp_blkstats->zab_type, 0,
sizeof (dp->dp_blkstats->zab_type));
} else {
if (dp->dp_blkstats) {
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
dp->dp_blkstats = NULL;
}
}
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER; ot = DMU_OT_ZAP_OTHER;
@ -3779,10 +3786,8 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
} }
static void static void
count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp) count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all)
{ {
int i;
/* /*
* Don't count embedded bp's, since we already did the work of * Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block. * scanning these when we scanned the containing block.
@ -3797,18 +3802,13 @@ count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
* zio code will only try the first one unless there is an issue. * zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs. * Therefore, we should only count the first DVA for these IOs.
*/ */
if (scn->scn_is_sorted) {
atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
DVA_GET_ASIZE(&bp->blk_dva[0]));
} else {
spa_t *spa = scn->scn_dp->dp_spa;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
atomic_add_64(&spa->spa_scan_pass_issued, atomic_add_64(&spa->spa_scan_pass_issued,
DVA_GET_ASIZE(&bp->blk_dva[i])); all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0]));
}
} }
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
/* /*
* If we resume after a reboot, zab will be NULL; don't record * If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case. * incomplete stats in that case.
@ -3816,9 +3816,7 @@ count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
if (zab == NULL) if (zab == NULL)
return; return;
mutex_enter(&zab->zab_lock); for (int i = 0; i < 4; i++) {
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
@ -3853,8 +3851,6 @@ count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
break; break;
} }
} }
mutex_exit(&zab->zab_lock);
} }
static void static void
@ -3952,10 +3948,10 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
boolean_t needs_io = B_FALSE; boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
count_block(dp->dp_blkstats, bp);
if (phys_birth <= scn->scn_phys.scn_min_txg || if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) { phys_birth >= scn->scn_phys.scn_max_txg) {
count_block(scn, dp->dp_blkstats, bp); count_block_issued(spa, bp, B_TRUE);
return (0); return (0);
} }
@ -3996,7 +3992,7 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
if (needs_io && !zfs_no_scrub_io) { if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb); dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else { } else {
count_block(scn, dp->dp_blkstats, bp); count_block_issued(spa, bp, B_TRUE);
} }
/* do not relocate this block */ /* do not relocate this block */
@ -4070,7 +4066,7 @@ scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
} }
ASSERT(pio != NULL); ASSERT(pio != NULL);
count_block(scn, dp->dp_blkstats, bp); count_block_issued(spa, bp, queue == NULL);
zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done, zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done,
queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
} }
@ -4355,7 +4351,7 @@ dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
/* count the block as though we issued it */ /* count the block as though we issued it */
sio2bp(sio, &tmpbp); sio2bp(sio, &tmpbp);
count_block(scn, dp->dp_blkstats, &tmpbp); count_block_issued(spa, &tmpbp, B_FALSE);
sio_free(sio); sio_free(sio);
} }
@ -4447,6 +4443,9 @@ ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj"); "Enable processing of the free_bpobj");
ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW,
"Enable block statistics calculation during scrub");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
"Fraction of RAM for scan hard limit"); "Fraction of RAM for scan hard limit");