Redesign scan/scrub interface to allow scrubbing a range of TXGs
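
The new spa_scan_range()/dsl_scan() entry points take a TXG window: txgstart/txgend
bound the scan, txgend == 0 preserves the old behaviour of scanning up to the current
syncing TXG, and any function other than POOL_SCAN_SCRUB must pass 0/0. A minimal
sketch of a caller follows; the scrub_txg_range() helper is hypothetical and only
spa_scan_range() itself comes from this change.

#include <sys/spa.h>

/*
 * Hypothetical helper, for illustration only: scrub just the blocks
 * written between txg_start and txg_end.  txg_end == 0 means "up to
 * the current syncing TXG"; non-scrub scan functions must pass a
 * 0/0 range or spa_scan_range() rejects the request.
 */
static int
scrub_txg_range(spa_t *spa, uint64_t txg_start, uint64_t txg_end)
{
        return (spa_scan_range(spa, POOL_SCAN_SCRUB, txg_start, txg_end));
}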

Sponsored-By: Wasabi Technology, Inc.
Sponsored-By: Klara Inc.
Signed-off-by: Mariusz Zaborski <mariusz.zaborski@klarasystems.com>
Author: Mariusz Zaborski
Date: 2023-08-21 16:35:24 +02:00
parent b3b7491615
commit 065e76e2d2
4 changed files with 53 additions and 15 deletions


@@ -189,7 +189,8 @@ void dsl_scan_setup_sync(void *, dmu_tx_t *);
 void dsl_scan_fini(struct dsl_pool *dp);
 void dsl_scan_sync(struct dsl_pool *, dmu_tx_t *);
 int dsl_scan_cancel(struct dsl_pool *);
-int dsl_scan(struct dsl_pool *, pool_scan_func_t);
+int dsl_scan(struct dsl_pool *, pool_scan_func_t, uint64_t starttxg,
+    uint64_t txgend);
 void dsl_scan_assess_vdev(struct dsl_pool *dp, vdev_t *vd);
 boolean_t dsl_scan_scrubbing(const struct dsl_pool *dp);
 boolean_t dsl_errorscrubbing(const struct dsl_pool *dp);


@@ -821,6 +821,8 @@ extern void spa_l2cache_drop(spa_t *spa);
 
 /* scanning */
 extern int spa_scan(spa_t *spa, pool_scan_func_t func);
+extern int spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
+    uint64_t txgend);
 extern int spa_scan_stop(spa_t *spa);
 extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
 


@@ -848,18 +848,24 @@ dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
         return (0);
 }
 
+typedef struct {
+        pool_scan_func_t func;
+        uint64_t txgstart;
+        uint64_t txgend;
+} setup_sync_arg_t;
+
 void
 dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
 {
-        (void) arg;
+        setup_sync_arg_t *setup_sync_arg = (setup_sync_arg_t *)arg;
         dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
-        pool_scan_func_t *funcp = arg;
         dmu_object_type_t ot = 0;
         dsl_pool_t *dp = scn->scn_dp;
         spa_t *spa = dp->dp_spa;
 
         ASSERT(!dsl_scan_is_running(scn));
-        ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
+        ASSERT(setup_sync_arg->func > POOL_SCAN_NONE &&
+            setup_sync_arg->func < POOL_SCAN_FUNCS);
         memset(&scn->scn_phys, 0, sizeof (scn->scn_phys));
 
         /*
@@ -869,10 +875,14 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
         memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys));
         dsl_errorscrub_sync_state(scn, tx);
 
-        scn->scn_phys.scn_func = *funcp;
+        scn->scn_phys.scn_func = setup_sync_arg->func;
         scn->scn_phys.scn_state = DSS_SCANNING;
-        scn->scn_phys.scn_min_txg = 0;
-        scn->scn_phys.scn_max_txg = tx->tx_txg;
+        scn->scn_phys.scn_min_txg = setup_sync_arg->txgstart;
+        if (setup_sync_arg->txgend == 0) {
+                scn->scn_phys.scn_max_txg = tx->tx_txg;
+        } else {
+                scn->scn_phys.scn_max_txg = setup_sync_arg->txgend;
+        }
         scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
         scn->scn_phys.scn_start_time = gethrestime_sec();
         scn->scn_phys.scn_errors = 0;
@@ -959,7 +969,7 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
 
         spa_history_log_internal(spa, "scan setup", tx,
             "func=%u mintxg=%llu maxtxg=%llu",
-            *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
+            setup_sync_arg->func, (u_longlong_t)scn->scn_phys.scn_min_txg,
             (u_longlong_t)scn->scn_phys.scn_max_txg);
 }
 
@@ -969,10 +979,16 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
  * error scrub.
  */
 int
-dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
+dsl_scan(dsl_pool_t *dp, pool_scan_func_t func, uint64_t txgstart,
+    uint64_t txgend)
 {
         spa_t *spa = dp->dp_spa;
         dsl_scan_t *scn = dp->dp_scan;
+        setup_sync_arg_t setup_sync_arg;
+
+        if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0)) {
+                return (EINVAL);
+        }
 
         /*
          * Purge all vdev caches and probe all devices. We do this here
@@ -1023,8 +1039,13 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
                 return (SET_ERROR(err));
         }
 
+        setup_sync_arg.func = func;
+        setup_sync_arg.txgstart = txgstart;
+        setup_sync_arg.txgend = txgend;
+
         return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
-            dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
+            dsl_scan_setup_sync, &setup_sync_arg, 0,
+            ZFS_SPACE_CHECK_EXTRA_RESERVED));
 }
 
 static void
@@ -4301,13 +4322,16 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
          */
         if (dsl_scan_restarting(scn, tx) ||
             (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
-                pool_scan_func_t func = POOL_SCAN_SCRUB;
+                setup_sync_arg_t setup_sync_arg = {
+                        .func = POOL_SCAN_SCRUB,
+                };
                 dsl_scan_done(scn, B_FALSE, tx);
                 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
-                        func = POOL_SCAN_RESILVER;
+                        setup_sync_arg.func = POOL_SCAN_RESILVER;
                 zfs_dbgmsg("restarting scan func=%u on %s txg=%llu",
-                    func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg);
-                dsl_scan_setup_sync(&func, tx);
+                    setup_sync_arg.func, dp->dp_spa->spa_name,
+                    (longlong_t)tx->tx_txg);
+                dsl_scan_setup_sync(&setup_sync_arg, tx);
         }
 
         /*


@@ -8877,6 +8877,13 @@ spa_scan_stop(spa_t *spa)
 
 int
 spa_scan(spa_t *spa, pool_scan_func_t func)
+{
+        return (spa_scan_range(spa, func, 0, 0));
+}
+
+int
+spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
+    uint64_t txgend)
 {
         ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
 
@@ -8887,6 +8894,9 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
             !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
                 return (SET_ERROR(ENOTSUP));
 
+        if (func != POOL_SCAN_SCRUB && (txgstart != 0 || txgend != 0))
+                return (SET_ERROR(ENOTSUP));
+
         /*
          * If a resilver was requested, but there is no DTL on a
          * writeable leaf device, we have nothing to do.
@@ -8901,7 +8911,7 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
             !spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG))
                 return (SET_ERROR(ENOTSUP));
 
-        return (dsl_scan(spa->spa_dsl_pool, func));
+        return (dsl_scan(spa->spa_dsl_pool, func, txgstart, txgend));
 }
 
 /*
@@ -10982,6 +10992,7 @@ EXPORT_SYMBOL(spa_l2cache_drop);
 
 /* scanning */
 EXPORT_SYMBOL(spa_scan);
+EXPORT_SYMBOL(spa_scan_range);
 EXPORT_SYMBOL(spa_scan_stop);
 
 /* spa syncing */