Optimize txg_kick() process (#12274)

Use dp_dirty_pertxg[] for txg_kick() instead of dp_dirty_total, which the
original code used. An extra "txg" parameter is added to txg_kick() so it
knows which txg to kick. The txg_kick() call is also moved from
dsl_pool_need_dirty_delay() to dsl_pool_dirty_space(), where the txg number
to pass to txg_kick() is known.

Some unnecessary code regarding dp_dirty_total in txg_sync_thread() is
also cleaned up.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: jxdking <lostking2008@hotmail.com>
Closes #12274
Authored by Kevin Jin on 2021-07-01 11:20:27 -04:00; committed by GitHub
parent 42afb12da7
commit 50e09eddd0
3 changed files with 34 additions and 29 deletions
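For readers skimming the diff below, a minimal userspace sketch of the idea: dirty data is tracked per open txg in a small ring indexed by (txg & TXG_MASK), and a txg is kicked to sync as soon as its own dirty bytes cross the zfs_dirty_data_sync_percent threshold. The constants and standalone helpers here are illustrative stand-ins, not the in-tree kernel code.

/*
 * Toy model of the new per-txg kick decision (userspace sketch only).
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define	TXG_SIZE	4			/* same power-of-two ring size as ZFS */
#define	TXG_MASK	(TXG_SIZE - 1)

static uint64_t zfs_dirty_data_max = 4ULL << 30;	/* 4 GiB, example value */
static uint64_t zfs_dirty_data_sync_percent = 20;	/* usual default */

static uint64_t dirty_pertxg[TXG_SIZE];			/* models dp_dirty_pertxg[] */

/* Mirrors the shape of the new dsl_pool_need_dirty_sync(). */
static bool
need_dirty_sync(uint64_t txg)
{
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	return (dirty_pertxg[txg & TXG_MASK] > dirty_min_bytes);
}

/* Models dsl_pool_dirty_space(): account the dirty bytes, then maybe kick. */
static void
dirty_space(uint64_t txg, uint64_t space)
{
	dirty_pertxg[txg & TXG_MASK] += space;
	if (need_dirty_sync(txg))
		printf("txg_kick(dp, %llu)\n", (unsigned long long)txg);
}

int
main(void)
{
	dirty_space(100, 900ULL << 20);	/* 900 MiB of dirty data in txg 100 */
	return (0);
}

With zfs_dirty_data_sync_percent at 20 and a 4 GiB zfs_dirty_data_max, the threshold works out to roughly 819 MiB, so the 900 MiB write above triggers the kick.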

include/sys/txg.h

@@ -78,7 +78,7 @@ extern void txg_register_callbacks(txg_handle_t *txghp, list_t *tx_callbacks);
 extern void txg_delay(struct dsl_pool *dp, uint64_t txg, hrtime_t delta,
     hrtime_t resolution);
-extern void txg_kick(struct dsl_pool *dp);
+extern void txg_kick(struct dsl_pool *dp, uint64_t txg);
 
 /*
  * Wait until the given transaction group has finished syncing.

module/zfs/dsl_pool.c

@@ -898,18 +898,26 @@ dsl_pool_need_dirty_delay(dsl_pool_t *dp)
 {
 	uint64_t delay_min_bytes =
 	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
-	uint64_t dirty_min_bytes =
-	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
-	uint64_t dirty;
 
 	mutex_enter(&dp->dp_lock);
-	dirty = dp->dp_dirty_total;
+	uint64_t dirty = dp->dp_dirty_total;
 	mutex_exit(&dp->dp_lock);
-	if (dirty > dirty_min_bytes)
-		txg_kick(dp);
+
 	return (dirty > delay_min_bytes);
 }
 
+static boolean_t
+dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
+{
+	ASSERT(MUTEX_HELD(&dp->dp_lock));
+
+	uint64_t dirty_min_bytes =
+	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
+	uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
+
+	return (dirty > dirty_min_bytes);
+}
+
 void
 dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
 {
@@ -917,7 +925,12 @@ dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
 		mutex_enter(&dp->dp_lock);
 		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
 		dsl_pool_dirty_delta(dp, space);
+		boolean_t needsync = !dmu_tx_is_syncing(tx) &&
+		    dsl_pool_need_dirty_sync(dp, tx->tx_txg);
 		mutex_exit(&dp->dp_lock);
+
+		if (needsync)
+			txg_kick(dp, tx->tx_txg);
 	}
 }
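A design point visible in the hunk above: the needsync decision is evaluated while dp_lock is still held, so the per-txg counter cannot change underneath it, but txg_kick(), which takes tx_sync_lock, is only called after dp_lock has been dropped, so the two locks are never held together. A self-contained sketch of that shape, using pthreads and hypothetical names (kick(), dirty()) as stand-ins for the kernel primitives:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t dp_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for dp->dp_lock */
static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for tx->tx_sync_lock */
static pthread_cond_t sync_more = PTHREAD_COND_INITIALIZER;
static uint64_t dirty_bytes;		/* models one dp_dirty_pertxg[] slot */
static uint64_t sync_txg_waiting;	/* models tx_sync_txg_waiting */

/* Hypothetical kick(): takes only sync_lock, never dp_lock. */
static void
kick(uint64_t txg)
{
	pthread_mutex_lock(&sync_lock);
	if (sync_txg_waiting < txg) {
		sync_txg_waiting = txg;
		pthread_cond_broadcast(&sync_more);	/* wake the sync side */
	}
	pthread_mutex_unlock(&sync_lock);
}

/* Hypothetical dirty(): decide under dp_lock, act after dropping it. */
static void
dirty(uint64_t txg, uint64_t space, uint64_t threshold)
{
	pthread_mutex_lock(&dp_lock);
	dirty_bytes += space;
	bool needsync = (dirty_bytes > threshold);	/* counter is stable here */
	pthread_mutex_unlock(&dp_lock);

	if (needsync)		/* second lock taken only after the first is gone */
		kick(txg);
}

int
main(void)
{
	dirty(100, 1024, 512);
	printf("sync requested through txg %llu\n",
	    (unsigned long long)sync_txg_waiting);
	return (0);
}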

module/zfs/txg.c

@@ -498,14 +498,6 @@ txg_wait_callbacks(dsl_pool_t *dp)
 		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
 }
 
-static boolean_t
-txg_is_syncing(dsl_pool_t *dp)
-{
-	tx_state_t *tx = &dp->dp_tx;
-	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
-	return (tx->tx_syncing_txg != 0);
-}
-
 static boolean_t
 txg_is_quiescing(dsl_pool_t *dp)
 {
@@ -539,8 +531,6 @@ txg_sync_thread(void *arg)
 		clock_t timeout = zfs_txg_timeout * hz;
 		clock_t timer;
 		uint64_t txg;
-		uint64_t dirty_min_bytes =
-		    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
 
 		/*
 		 * We sync when we're scanning, there's someone waiting
@@ -551,8 +541,7 @@
 		while (!dsl_scan_active(dp->dp_scan) &&
 		    !tx->tx_exiting && timer > 0 &&
 		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
-		    !txg_has_quiesced_to_sync(dp) &&
-		    dp->dp_dirty_total < dirty_min_bytes) {
+		    !txg_has_quiesced_to_sync(dp)) {
 			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
 			    (u_longlong_t)tx->tx_synced_txg,
 			    (u_longlong_t)tx->tx_sync_txg_waiting, dp);
@@ -566,6 +555,11 @@
 		 * prompting it to do so if necessary.
 		 */
 		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
+			if (txg_is_quiescing(dp)) {
+				txg_thread_wait(tx, &cpr,
+				    &tx->tx_quiesce_done_cv, 0);
+				continue;
+			}
 			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
 				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
 			cv_broadcast(&tx->tx_quiesce_more_cv);
@@ -791,24 +785,22 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
 }
 
 /*
- * If there isn't a txg syncing or in the pipeline, push another txg through
- * the pipeline by quiescing the open txg.
+ * Pass in the txg number that should be synced.
  */
 void
-txg_kick(dsl_pool_t *dp)
+txg_kick(dsl_pool_t *dp, uint64_t txg)
 {
 	tx_state_t *tx = &dp->dp_tx;
 
 	ASSERT(!dsl_pool_config_held(dp));
 
+	if (tx->tx_sync_txg_waiting >= txg)
+		return;
+
 	mutex_enter(&tx->tx_sync_lock);
-	if (!txg_is_syncing(dp) &&
-	    !txg_is_quiescing(dp) &&
-	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
-	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
-	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
-		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
-		cv_broadcast(&tx->tx_quiesce_more_cv);
+	if (tx->tx_sync_txg_waiting < txg) {
+		tx->tx_sync_txg_waiting = txg;
+		cv_broadcast(&tx->tx_sync_more_cv);
 	}
 	mutex_exit(&tx->tx_sync_lock);
 }
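The rewritten txg_kick() no longer pushes the quiesce machinery at all; it simply raises tx_sync_txg_waiting and broadcasts tx_sync_more_cv, and the unlocked early return is a fast-path optimization that is re-validated under tx_sync_lock before anything is modified. A minimal sketch of that check-then-recheck shape, again with userspace stand-ins rather than the in-tree code:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;	/* models tx_sync_lock */
static pthread_cond_t sync_more = PTHREAD_COND_INITIALIZER;	/* models tx_sync_more_cv */
static uint64_t sync_txg_waiting;				/* models tx_sync_txg_waiting */

static void
kick(uint64_t txg)
{
	/* Unlocked fast path: this txg (or a later one) was already requested. */
	if (sync_txg_waiting >= txg)
		return;

	pthread_mutex_lock(&sync_lock);
	/* Re-check under the lock before publishing the new sync target. */
	if (sync_txg_waiting < txg) {
		sync_txg_waiting = txg;
		pthread_cond_broadcast(&sync_more);
	}
	pthread_mutex_unlock(&sync_lock);
}

int
main(void)
{
	kick(100);
	kick(99);	/* no-op: a later txg is already pending */
	printf("waiting to sync through txg %llu\n",
	    (unsigned long long)sync_txg_waiting);
	return (0);
}

The fast path may read a slightly stale value, but the worst case is taking the lock unnecessarily; correctness comes from the re-check done while holding it.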