OpenZFS 8023 - Panic destroying a metaslab deferred range tree
Authored by: George Wilson <george.wilson@delphix.com>
Approved by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Brad Lewis <brad.lewis@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Saso Kiselkov <saso.kiselkov@nexenta.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Ported-by: Giuseppe Di Natale <dinatale2@llnl.gov>

We don't want to dirty any data when we're in the final txgs of the
pool export logic. This change introduces checks to make sure that no
data is dirtied after a certain point. It also addresses the culprit
of this specific bug: the space map cannot be upgraded when we're in
the final stages of pool export. If we encounter a space map that
wants to be upgraded in this phase, we simply ignore the request, as
it will be retried the next time we set the fragmentation metric on
that metaslab.

OpenZFS-issue: https://www.illumos.org/issues/8023
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/2ef00f5
Closes #5991
commit 3b7f360c96
parent 4c3c6b6c73
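For readers skimming the diff, here is a minimal, self-contained sketch of the guard this change introduces. The names spa_final_dirty_txg, spa_final_txg, and TXG_DEFER_SIZE are taken from the hunks below; the stub spa_t, the hard-coded TXG_DEFER_SIZE value, and the main() driver are illustrative only and not part of the commit.

/*
 * Minimal sketch of the final-dirty-txg guard; the real definitions
 * live in spa_misc.c and the real caller in metaslab.c (see hunks
 * below). The simplified spa_t and main() are for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define TXG_DEFER_SIZE	2	/* assumed value of the deferred-free depth */

typedef struct spa {
	uint64_t spa_final_txg;	/* last txg of the export/unload sequence */
} spa_t;

/* Last txg in which new data may be dirtied (as in the spa_misc.c hunk). */
static uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

int
main(void)
{
	spa_t spa = { .spa_final_txg = 100 };
	uint64_t syncing_txg = 99;

	/* Mirrors the check added in metaslab_set_fragmentation(). */
	if (syncing_txg < spa_final_dirty_txg(&spa))
		printf("txg %llu: ok to dirty metaslab state\n",
		    (unsigned long long)syncing_txg);
	else
		printf("final txgs: skip dirtying, retry on next import\n");
	return (0);
}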
@@ -802,6 +802,7 @@ extern uint64_t spa_load_guid(spa_t *spa);
 extern uint64_t spa_last_synced_txg(spa_t *spa);
 extern uint64_t spa_first_txg(spa_t *spa);
 extern uint64_t spa_syncing_txg(spa_t *spa);
+extern uint64_t spa_final_dirty_txg(spa_t *spa);
 extern uint64_t spa_version(spa_t *spa);
 extern pool_state_t spa_state(spa_t *spa);
 extern spa_load_state_t spa_load_state(spa_t *spa);
@@ -1639,6 +1639,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	 * this assertion only if we're not already dirty.
 	 */
 	os = dn->dn_objset;
+	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
 #ifdef DEBUG
 	if (dn->dn_objset->os_dsl_dataset != NULL)
 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
@@ -1582,15 +1582,22 @@ metaslab_set_fragmentation(metaslab_t *msp)
 	 * so that we upgrade next time we encounter it.
 	 */
 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
+		uint64_t txg = spa_syncing_txg(spa);
 		vdev_t *vd = msp->ms_group->mg_vd;
 
-		if (spa_writeable(vd->vdev_spa)) {
-			uint64_t txg = spa_syncing_txg(spa);
-
+		/*
+		 * If we've reached the final dirty txg, then we must
+		 * be shutting down the pool. We don't want to dirty
+		 * any data past this point so skip setting the condense
+		 * flag. We can retry this action the next time the pool
+		 * is imported.
+		 */
+		if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
 			msp->ms_condense_wanted = B_TRUE;
 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
 			spa_dbgmsg(spa, "txg %llu, requesting force condense: "
-			    "msp %p, vd %p", txg, msp, vd);
+			    "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
+			    vd->vdev_id);
 		}
 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
 		return;
@@ -2217,13 +2224,17 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
 	/*
 	 * Normally, we don't want to process a metaslab if there
 	 * are no allocations or frees to perform. However, if the metaslab
-	 * is being forced to condense we need to let it through.
+	 * is being forced to condense and it's loaded, we need to let it
+	 * through.
 	 */
 	if (range_tree_space(alloctree) == 0 &&
 	    range_tree_space(msp->ms_freeingtree) == 0 &&
-	    !msp->ms_condense_wanted)
+	    !(msp->ms_loaded && msp->ms_condense_wanted))
 		return;
 
+
+	VERIFY(txg <= spa_final_dirty_txg(spa));
+
 	/*
 	 * The only state that can actually be changing concurrently with
 	 * metaslab_sync() is the metaslab's ms_tree. No other thread can
@@ -1596,6 +1596,16 @@ spa_syncing_txg(spa_t *spa)
 	return (spa->spa_syncing_txg);
 }
 
+/*
+ * Return the last txg where data can be dirtied. The final txgs
+ * will be used to just clear out any deferred frees that remain.
+ */
+uint64_t
+spa_final_dirty_txg(spa_t *spa)
+{
+	return (spa->spa_final_txg - TXG_DEFER_SIZE);
+}
+
 pool_state_t
 spa_state(spa_t *spa)
 {
@@ -23,7 +23,7 @@
  * Use is subject to license terms.
  */
 /*
- * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -411,6 +411,7 @@ space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
 
 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
 	ASSERT(dmu_tx_is_syncing(tx));
+	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));
 
 	dmu_object_info_from_db(sm->sm_dbuf, &doi);
 
@@ -425,9 +426,10 @@ space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
 	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
 	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
 	    doi.doi_data_block_size != space_map_blksz) {
-		zfs_dbgmsg("txg %llu, spa %s, reallocating: "
-		    "old bonus %llu, old blocksz %u", dmu_tx_get_txg(tx),
-		    spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);
+		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
+		    "object[%llu]: old bonus %u, old blocksz %u",
+		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
+		    doi.doi_bonus_size, doi.doi_data_block_size);
 
 		space_map_free(sm, tx);
 		dmu_buf_rele(sm->sm_dbuf, sm);