Simplify log vdev removal code
Get rid of the majority of the metaslab metadata when removing log
vdevs in spa_vdev_remove_log() with a call to metaslab_fini() instead
of duplicating a lot of that in vdev_remove_empty_log().

Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Serapheim Dimitropoulos <serapheim@delphix.com>
Closes #8347
commit 6c926f426a
parent 7558997d2f
module/zfs/vdev.c

@@ -3179,47 +3179,6 @@ vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
 	ASSERT(vd == vd->vdev_top);
 	ASSERT3U(txg, ==, spa_syncing_txg(spa));
 
-	if (vd->vdev_ms != NULL) {
-		metaslab_group_t *mg = vd->vdev_mg;
-
-		metaslab_group_histogram_verify(mg);
-		metaslab_class_histogram_verify(mg->mg_class);
-
-		for (int m = 0; m < vd->vdev_ms_count; m++) {
-			metaslab_t *msp = vd->vdev_ms[m];
-
-			if (msp == NULL || msp->ms_sm == NULL)
-				continue;
-
-			mutex_enter(&msp->ms_lock);
-			/*
-			 * If the metaslab was not loaded when the vdev
-			 * was removed then the histogram accounting may
-			 * not be accurate. Update the histogram information
-			 * here so that we ensure that the metaslab group
-			 * and metaslab class are up-to-date.
-			 */
-			metaslab_group_histogram_remove(mg, msp);
-
-			VERIFY0(space_map_allocated(msp->ms_sm));
-			space_map_close(msp->ms_sm);
-			msp->ms_sm = NULL;
-			mutex_exit(&msp->ms_lock);
-		}
-
-		if (vd->vdev_checkpoint_sm != NULL) {
-			ASSERT(spa_has_checkpoint(spa));
-			space_map_close(vd->vdev_checkpoint_sm);
-			vd->vdev_checkpoint_sm = NULL;
-		}
-
-		metaslab_group_histogram_verify(mg);
-		metaslab_class_histogram_verify(mg->mg_class);
-
-		for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
-			ASSERT0(mg->mg_histogram[i]);
-	}
-
 	dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
 
 	vdev_destroy_spacemaps(vd, tx);
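The block removed above is the metaslab and checkpoint space-map teardown that is now driven from spa_vdev_remove_log() via vdev_metaslab_fini() (see the vdev_removal.c hunks below), so only the space-map destruction is left here. A minimal sketch of how the tail of the function now reads; only the context lines of the hunk are verbatim, while the declaration of spa, the closing dmu_tx_commit(), and the elided remainder are assumptions, not quoted source:

/*
 * Sketch of vdev_remove_empty_log() after this change (assumed
 * scaffolding around the hunk's context lines, not a verbatim copy).
 */
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;	/* assumed local, used by the asserts below */

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(txg, ==, spa_syncing_txg(spa));

	dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	vdev_destroy_spacemaps(vd, tx);

	/* ... remainder of the function, not shown in this hunk ... */

	dmu_tx_commit(tx);	/* assumed: the assigned tx must be committed here */
}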
@@ -3253,17 +3212,14 @@ vdev_sync(vdev_t *vd, uint64_t txg)
 	spa_t *spa = vd->vdev_spa;
 	vdev_t *lvd;
 	metaslab_t *msp;
-	dmu_tx_t *tx;
 
+	ASSERT3U(txg, ==, spa->spa_syncing_txg);
+	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
 	if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
-		dmu_tx_t *tx;
-
 		ASSERT(vd->vdev_removing ||
 		    vd->vdev_ops == &vdev_indirect_ops);
 
-		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
 		vdev_indirect_sync_obsolete(vd, tx);
-		dmu_tx_commit(tx);
 
 		/*
 		 * If the vdev is indirect, it can't have dirty
@@ -3272,6 +3228,7 @@ vdev_sync(vdev_t *vd, uint64_t txg)
 		if (vd->vdev_ops == &vdev_indirect_ops) {
 			ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
 			ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
+			dmu_tx_commit(tx);
 			return;
 		}
 	}
@@ -3282,12 +3239,10 @@ vdev_sync(vdev_t *vd, uint64_t txg)
 	    !vd->vdev_removing) {
 		ASSERT(vd == vd->vdev_top);
 		ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
-		tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
 		vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
 		    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
 		ASSERT(vd->vdev_ms_array != 0);
 		vdev_config_dirty(vd);
-		dmu_tx_commit(tx);
 	}
 
 	while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
@@ -3306,6 +3261,7 @@ vdev_sync(vdev_t *vd, uint64_t txg)
 	vdev_remove_empty_log(vd, txg);
 
 	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
+	dmu_tx_commit(tx);
 }
 
 uint64_t
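Taken together, the vdev_sync() hunks above move transaction handling to the function boundaries: a single dmu_tx is now created up front (alongside a new assertion that we are in the syncing txg) and committed on every exit path, instead of being created and committed separately for the obsolete-segment sync and the ms_array allocation. A condensed sketch of the resulting shape, stitched together from the context and added lines above; the signature and anything marked as elided are reconstructions, not verbatim source:

/*
 * Condensed sketch of vdev_sync() after this change; only the
 * transaction lifecycle is shown, everything not visible in the
 * hunks above is elided or assumed.
 */
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(txg, ==, spa->spa_syncing_txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
		vdev_indirect_sync_obsolete(vd, tx);

		/* An indirect vdev has no dirty metaslabs or DTLs. */
		if (vd->vdev_ops == &vdev_indirect_ops) {
			dmu_tx_commit(tx);
			return;
		}
	}

	/*
	 * ... allocate vd->vdev_ms_array with the same tx if needed,
	 * sync the dirty metaslabs and DTLs for this txg, and handle
	 * empty log vdev removal (elided) ...
	 */

	(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
	dmu_tx_commit(tx);
}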
module/zfs/vdev_removal.c

@@ -1858,6 +1858,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
 
 	ASSERT(vd->vdev_islog);
 	ASSERT(vd == vd->vdev_top);
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
 	/*
 	 * Stop allocating from this vdev.
@@ -1872,15 +1873,14 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
 	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
 
 	/*
-	 * Evacuate the device. We don't hold the config lock as writer
-	 * since we need to do I/O but we do keep the
+	 * Evacuate the device. We don't hold the config lock as
+	 * writer since we need to do I/O but we do keep the
 	 * spa_namespace_lock held. Once this completes the device
 	 * should no longer have any blocks allocated on it.
 	 */
-	if (vd->vdev_islog) {
-		if (vd->vdev_stat.vs_alloc != 0)
-			error = spa_reset_logs(spa);
-	}
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
+	if (vd->vdev_stat.vs_alloc != 0)
+		error = spa_reset_logs(spa);
 
 	*txg = spa_vdev_config_enter(spa);
 
@@ -1899,6 +1899,8 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
 	vdev_dirty_leaves(vd, VDD_DTL, *txg);
 	vdev_config_dirty(vd);
 
+	vdev_metaslab_fini(vd);
+
 	spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
 
 	/* Stop initializing */
@@ -1923,6 +1925,8 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
 	if (list_link_active(&vd->vdev_config_dirty_node))
 		vdev_config_clean(vd);
 
+	ASSERT0(vd->vdev_stat.vs_alloc);
+
 	/*
 	 * Clean up the vdev namespace.
 	 */
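The net effect in spa_vdev_remove_log() is that the metaslabs are torn down with vdev_metaslab_fini() while the config lock is still held, and a new assertion documents that evacuation left nothing allocated on the vdev. A condensed sketch of the resulting flow, stitched together from the hunks above; the signature, local declarations, return statement, and the steps marked as elided are reconstructions, not verbatim source:

/*
 * Condensed sketch of spa_vdev_remove_log() after this change;
 * everything not visible in the hunks above is elided or assumed.
 */
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
	spa_t *spa = vd->vdev_spa;	/* assumed local */
	int error = 0;			/* assumed local */

	ASSERT(vd->vdev_islog);
	ASSERT(vd == vd->vdev_top);
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/* ... stop allocating from this vdev, wait for frees to sync ... */

	/* Evacuate the device; afterwards it has no allocated blocks. */
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (vd->vdev_stat.vs_alloc != 0)
		error = spa_reset_logs(spa);

	*txg = spa_vdev_config_enter(spa);

	/* ... mark the vdev as removing and dirty it ... */
	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);

	/*
	 * Tear down the metaslabs here instead of duplicating their
	 * space-map and histogram cleanup in vdev_remove_empty_log().
	 */
	vdev_metaslab_fini(vd);

	spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);

	/* ... stop initializing, wait for the removal to sync ... */

	ASSERT0(vd->vdev_stat.vs_alloc);

	/* ... clean up the vdev namespace ... */
	return (error);
}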