Refactor txg history kstat
It was observed that even when txg history collection is disabled by setting `zfs_txg_history=0`, the txg_sync thread still fetched the vdev stats unnecessarily. This patch refactors the code so that vdev_get_stats() is no longer called when `zfs_txg_history=0`, and it further reduces the differences between the upstream and ZoL versions of txg_sync_thread().

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #5412
commit baf67d15a5
parent 6c09d3e5a0
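In short, the per-txg stat collection is moved behind an init/fini pair that bails out early when the kstat is disabled. A minimal sketch of the calling pattern, condensed from the txg_sync_thread() hunks below (surrounding locking and sync-loop bookkeeping omitted):

	txg_stat_t *ts;

	/* Snapshots vdev stats only if txg history is enabled; returns NULL otherwise. */
	ts = spa_txg_history_init_io(spa, txg, dp);

	spa_sync(spa, txg);

	/* No-op when ts == NULL, so vdev_get_stats() is skipped entirely. */
	spa_txg_history_fini_io(spa, ts);

Because both helpers check zfs_txg_history internally, the sync thread itself no longer needs to allocate vdev_stat_t buffers or take the config lock just to feed the kstat.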
@@ -725,6 +725,13 @@ typedef enum txg_state {
 	TXG_STATE_COMMITTED	= 5,
 } txg_state_t;
 
+typedef struct txg_stat {
+	vdev_stat_t	vs1;
+	vdev_stat_t	vs2;
+	uint64_t	txg;
+	uint64_t	ndirty;
+} txg_stat_t;
+
 extern void spa_stats_init(spa_t *spa);
 extern void spa_stats_destroy(spa_t *spa);
 extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
@@ -732,8 +739,9 @@ extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
 extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
 extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
     txg_state_t completed_state, hrtime_t completed_time);
-extern int spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
-    uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t ndirty);
+extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
+    struct dsl_pool *);
+extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
 extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
 
 /* Pool configuration locks */
@@ -474,7 +474,7 @@ spa_txg_history_set(spa_t *spa, uint64_t txg, txg_state_t completed_state,
 /*
  * Set txg IO stats.
  */
-int
+static int
 spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
     uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t ndirty)
 {
@@ -503,6 +503,54 @@ spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
 	return (error);
 }
 
+txg_stat_t *
+spa_txg_history_init_io(spa_t *spa, uint64_t txg, dsl_pool_t *dp)
+{
+	txg_stat_t *ts;
+
+	if (zfs_txg_history == 0)
+		return (NULL);
+
+	ts = kmem_alloc(sizeof (txg_stat_t), KM_SLEEP);
+
+	spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
+	vdev_get_stats(spa->spa_root_vdev, &ts->vs1);
+	spa_config_exit(spa, SCL_ALL, FTAG);
+
+	ts->txg = txg;
+	ts->ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
+
+	spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC, gethrtime());
+
+	return (ts);
+}
+
+void
+spa_txg_history_fini_io(spa_t *spa, txg_stat_t *ts)
+{
+	if (ts == NULL)
+		return;
+
+	if (zfs_txg_history == 0) {
+		kmem_free(ts, sizeof (txg_stat_t));
+		return;
+	}
+
+	spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
+	vdev_get_stats(spa->spa_root_vdev, &ts->vs2);
+	spa_config_exit(spa, SCL_ALL, FTAG);
+
+	spa_txg_history_set(spa, ts->txg, TXG_STATE_SYNCED, gethrtime());
+	spa_txg_history_set_io(spa, ts->txg,
+	    ts->vs2.vs_bytes[ZIO_TYPE_READ] - ts->vs1.vs_bytes[ZIO_TYPE_READ],
+	    ts->vs2.vs_bytes[ZIO_TYPE_WRITE] - ts->vs1.vs_bytes[ZIO_TYPE_WRITE],
+	    ts->vs2.vs_ops[ZIO_TYPE_READ] - ts->vs1.vs_ops[ZIO_TYPE_READ],
+	    ts->vs2.vs_ops[ZIO_TYPE_WRITE] - ts->vs1.vs_ops[ZIO_TYPE_WRITE],
+	    ts->ndirty);
+
+	kmem_free(ts, sizeof (txg_stat_t));
+}
+
 /*
  * ==========================================================================
  * SPA TX Assign Histogram Routines
@@ -481,22 +481,17 @@ txg_sync_thread(dsl_pool_t *dp)
 	spa_t *spa = dp->dp_spa;
 	tx_state_t *tx = &dp->dp_tx;
 	callb_cpr_t cpr;
-	vdev_stat_t *vs1, *vs2;
 	clock_t start, delta;
 
 	(void) spl_fstrans_mark();
 	txg_thread_enter(tx, &cpr);
 
-	vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
-	vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
-
 	start = delta = 0;
 	for (;;) {
-		clock_t timer, timeout;
+		clock_t timeout = zfs_txg_timeout * hz;
+		clock_t timer;
 		uint64_t txg;
-		uint64_t ndirty;
-
-		timeout = zfs_txg_timeout * hz;
+		txg_stat_t *ts;
 
 		/*
 		 * We sync when we're scanning, there's someone waiting
@@ -527,15 +522,8 @@ txg_sync_thread(dsl_pool_t *dp)
 			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
 		}
 
-		if (tx->tx_exiting) {
-			kmem_free(vs2, sizeof (vdev_stat_t));
-			kmem_free(vs1, sizeof (vdev_stat_t));
+		if (tx->tx_exiting)
 			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
-		}
-
-		spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
-		vdev_get_stats(spa->spa_root_vdev, vs1);
-		spa_config_exit(spa, SCL_ALL, FTAG);
 
 		/*
 		 * Consume the quiesced txg which has been handed off to
@@ -546,16 +534,13 @@ txg_sync_thread(dsl_pool_t *dp)
 		tx->tx_quiesced_txg = 0;
 		tx->tx_syncing_txg = txg;
 		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
+		ts = spa_txg_history_init_io(spa, txg, dp);
 		cv_broadcast(&tx->tx_quiesce_more_cv);
 
 		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
 		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
 		mutex_exit(&tx->tx_sync_lock);
 
-		spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC,
-		    gethrtime());
-		ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
-
 		start = ddi_get_lbolt();
 		spa_sync(spa, txg);
 		delta = ddi_get_lbolt() - start;
@@ -564,23 +549,13 @@ txg_sync_thread(dsl_pool_t *dp)
 		tx->tx_synced_txg = txg;
 		tx->tx_syncing_txg = 0;
 		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
+		spa_txg_history_fini_io(spa, ts);
 		cv_broadcast(&tx->tx_sync_done_cv);
 
 		/*
 		 * Dispatch commit callbacks to worker threads.
 		 */
 		txg_dispatch_callbacks(dp, txg);
-
-		spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
-		vdev_get_stats(spa->spa_root_vdev, vs2);
-		spa_config_exit(spa, SCL_ALL, FTAG);
-		spa_txg_history_set_io(spa, txg,
-		    vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
-		    vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
-		    vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
-		    vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
-		    ndirty);
-		spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
 	}
 }
 