OpenZFS 7613 - ms_freetree[4] is only used in syncing context
metaslab_t:ms_freetree[TXG_SIZE] is only used in syncing context. We
should replace it with two trees: the freeing tree (ranges that we are
freeing this syncing txg) and the freed tree (ranges which have been
freed this txg).

Authored by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Alex Reece <alex@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Ported-by: Tim Chase <tim@chase2k.com>
OpenZFS-issue: https://www.illumos.org/issues/7613
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/a8698da2
Closes #5598
parent 9c9531cb6f
commit 258553d3d7
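At its core, the patch replaces txg-indexed slots with two named trees. Below is a minimal standalone sketch of the before/after shape; it is not part of the commit. The stub range_tree_t, the struct names, and the main() driver are invented for illustration, and the TXG macro values follow sys/txg.h (where TXG_CLEAN(txg) names the just-synced txg):

#include <stdio.h>
#include <stdint.h>

#define TXG_SIZE        4                  /* as in sys/txg.h */
#define TXG_MASK        (TXG_SIZE - 1)
#define TXG_CLEAN(txg)  ((txg) - 1)        /* the just-synced txg */

typedef struct range_tree range_tree_t;    /* opaque in this sketch */

/* Before: one tree per txg slot, though only two slots are ever live. */
struct metaslab_before {
        range_tree_t *ms_freetree[TXG_SIZE];
};

/* After: one tree per role, both touched only in syncing context. */
struct metaslab_after {
        range_tree_t *ms_freeingtree;   /* to free this syncing txg */
        range_tree_t *ms_freedtree;     /* already freed this syncing txg */
};

int
main(void)
{
        uint64_t txg = 50;

        /* The old code reached the same two roles through txg arithmetic: */
        printf("freeing role (old): ms_freetree[%llu]\n",
            (unsigned long long)(txg & TXG_MASK));
        printf("freed role   (old): ms_freetree[%llu]\n",
            (unsigned long long)(TXG_CLEAN(txg) & TXG_MASK));
        return (0);
}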
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -254,21 +254,24 @@ struct metaslab_group {
 #define MAX_LBAS 64
 
 /*
- * Each metaslab maintains a set of in-core trees to track metaslab operations.
- * The in-core free tree (ms_tree) contains the current list of free segments.
- * As blocks are allocated, the allocated segment are removed from the ms_tree
- * and added to a per txg allocation tree (ms_alloctree). As blocks are freed,
- * they are added to the per txg free tree (ms_freetree). These per txg
- * trees allow us to process all allocations and frees in syncing context
- * where it is safe to update the on-disk space maps. One additional in-core
- * tree is maintained to track deferred frees (ms_defertree). Once a block
- * is freed it will move from the ms_freetree to the ms_defertree. A deferred
- * free means that a block has been freed but cannot be used by the pool
- * until TXG_DEFER_SIZE transactions groups later. For example, a block
- * that is freed in txg 50 will not be available for reallocation until
- * txg 52 (50 + TXG_DEFER_SIZE). This provides a safety net for uberblock
- * rollback. A pool could be safely rolled back TXG_DEFERS_SIZE
- * transactions groups and ensure that no block has been reallocated.
+ * Each metaslab maintains a set of in-core trees to track metaslab
+ * operations. The in-core free tree (ms_tree) contains the list of
+ * free segments which are eligible for allocation. As blocks are
+ * allocated, the allocated segments are removed from the ms_tree and
+ * added to a per txg allocation tree (ms_alloctree). This allows us to
+ * process all allocations in syncing context where it is safe to update
+ * the on-disk space maps. Frees are also processed in syncing context.
+ * Most frees are generated from syncing context, and those that are not
+ * are held in the spa_free_bplist for processing in syncing context.
+ * An additional set of in-core trees is maintained to track deferred
+ * frees (ms_defertree). Once a block is freed it will move from the
+ * ms_freedtree to the ms_defertree. A deferred free means that a block
+ * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
+ * transactions groups later. For example, a block that is freed in txg
+ * 50 will not be available for reallocation until txg 52 (50 +
+ * TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
+ * A pool could be safely rolled back TXG_DEFERS_SIZE transactions
+ * groups and ensure that no block has been reallocated.
  *
  * The simplified transition diagram looks like this:
  *
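The TXG_DEFER_SIZE safety net in the rewritten comment is plain arithmetic; here is a tiny hedged example (TXG_DEFER_SIZE is 2 per sys/txg.h, and the txg value mirrors the comment's example):

#include <stdio.h>
#include <stdint.h>

#define TXG_DEFER_SIZE  2       /* as in sys/txg.h */

int
main(void)
{
        uint64_t freed_txg = 50;

        /* A block freed in txg 50 remains deferred through txg 51... */
        printf("eligible for reallocation in txg %llu\n",
            (unsigned long long)(freed_txg + TXG_DEFER_SIZE));

        /* ...and frees rotate round-robin through the defer slots. */
        printf("defer slot for txg %llu: ms_defertree[%llu]\n",
            (unsigned long long)freed_txg,
            (unsigned long long)(freed_txg % TXG_DEFER_SIZE));
        return (0);
}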
@@ -276,33 +279,34 @@ struct metaslab_group {
  *      ALLOCATE
  *         |
  *         V
- *    free segment (ms_tree) --------> ms_alloctree ----> (write to space map)
+ *    free segment (ms_tree) -----> ms_alloctree[4] ----> (write to space map)
  *         ^
- *         |
- *         |                           ms_freetree <--- FREE
- *         |                                 |
- *         |                                 |
- *         |                                 |
- *         +----------- ms_defertree <-------+---------> (write to space map)
+ *         |                        ms_freeingtree <--- FREE
+ *         |                              |
+ *         |                              v
+ *         |                        ms_freedtree
+ *         |                              |
+ *         +-------- ms_defertree[2] <-------+---------> (write to space map)
  *
  *
  * Each metaslab's space is tracked in a single space map in the MOS,
  * which is only updated in syncing context. Each time we sync a txg,
- * we append the allocs and frees from that txg to the space map.
- * The pool space is only updated once all metaslabs have finished syncing.
+ * we append the allocs and frees from that txg to the space map. The
+ * pool space is only updated once all metaslabs have finished syncing.
  *
- * To load the in-core free tree we read the space map from disk.
- * This object contains a series of alloc and free records that are
- * combined to make up the list of all free segments in this metaslab. These
+ * To load the in-core free tree we read the space map from disk. This
+ * object contains a series of alloc and free records that are combined
+ * to make up the list of all free segments in this metaslab. These
  * segments are represented in-core by the ms_tree and are stored in an
  * AVL tree.
  *
  * As the space map grows (as a result of the appends) it will
- * eventually become space-inefficient. When the metaslab's in-core free tree
- * is zfs_condense_pct/100 times the size of the minimal on-disk
- * representation, we rewrite it in its minimized form. If a metaslab
- * needs to condense then we must set the ms_condensing flag to ensure
- * that allocations are not performed on the metaslab that is being written.
+ * eventually become space-inefficient. When the metaslab's in-core
+ * free tree is zfs_condense_pct/100 times the size of the minimal
+ * on-disk representation, we rewrite it in its minimized form. If a
+ * metaslab needs to condense then we must set the ms_condensing flag to
+ * ensure that allocations are not performed on the metaslab that is
+ * being written.
  */
 struct metaslab {
         kmutex_t        ms_lock;
@@ -314,10 +318,17 @@ struct metaslab {
         uint64_t        ms_fragmentation;
 
         range_tree_t    *ms_alloctree[TXG_SIZE];
-        range_tree_t    *ms_freetree[TXG_SIZE];
-        range_tree_t    *ms_defertree[TXG_DEFER_SIZE];
         range_tree_t    *ms_tree;
 
+        /*
+         * The following range trees are accessed only from syncing context.
+         * ms_free*tree only have entries while syncing, and are empty
+         * between syncs.
+         */
+        range_tree_t    *ms_freeingtree; /* to free this syncing txg */
+        range_tree_t    *ms_freedtree; /* already freed this syncing txg */
+        range_tree_t    *ms_defertree[TXG_DEFER_SIZE];
+
         boolean_t       ms_condensing;  /* condensing? */
         boolean_t       ms_condense_wanted;
 
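From here on the patch is a mostly mechanical substitution through metaslab.c. For orientation, here is a toy model of the four range-tree operations it leans on, with each tree reduced to a byte counter. This is an illustrative stand-in, not the AVL-backed implementation in range_tree.c, although the pointer exchange in range_tree_swap() mirrors the real one:

#include <assert.h>
#include <stdint.h>

/* Toy stand-in: the real range_tree_t keeps an AVL tree of segments. */
typedef struct range_tree {
        uint64_t rt_space;      /* total bytes tracked */
} range_tree_t;

static uint64_t
range_tree_space(range_tree_t *rt)
{
        return (rt->rt_space);
}

static void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
        (void) start;
        ((range_tree_t *)arg)->rt_space += size;
}

/* Empty rt, handing its contents to func (or discarding if func is NULL). */
static void
range_tree_vacate(range_tree_t *rt,
    void (*func)(void *, uint64_t, uint64_t), void *arg)
{
        if (func != NULL)
                func(arg, 0, rt->rt_space);
        rt->rt_space = 0;
}

/* O(1) pointer exchange; the destination must be empty. */
static void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
        range_tree_t *rt;

        assert(range_tree_space(*rtdst) == 0);
        rt = *rtsrc;
        *rtsrc = *rtdst;
        *rtdst = rt;
}

int
main(void)
{
        range_tree_t a = { 100 }, b = { 0 };
        range_tree_t *freeing = &a, *freed = &b;

        range_tree_swap(&freeing, &freed);      /* sync pass 1: O(1) */
        assert(range_tree_space(freed) == 100);

        /* Later sync passes migrate contents instead of swapping: */
        range_tree_vacate(freed, range_tree_add, freeing);
        assert(range_tree_space(freeing) == 100);
        return (0);
}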
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -442,7 +442,6 @@ metaslab_verify_space(metaslab_t *msp, uint64_t txg)
 {
         spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
         uint64_t allocated = 0;
-        uint64_t freed = 0;
         uint64_t sm_free_space, msp_free_space;
         int t;
 
@@ -473,10 +472,9 @@ metaslab_verify_space(metaslab_t *msp, uint64_t txg)
                 allocated +=
                     range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
         }
-        freed = range_tree_space(msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]);
 
         msp_free_space = range_tree_space(msp->ms_tree) + allocated +
-            msp->ms_deferspace + freed;
+            msp->ms_deferspace + range_tree_space(msp->ms_freedtree);
 
         VERIFY3U(sm_free_space, ==, msp_free_space);
 }
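The verifier change restates the metaslab space identity, with the freed component now read from ms_freedtree rather than the TXG_CLEAN slot. A sketch of the invariant as a pure function; the helper name and the worked numbers are invented:

#include <assert.h>
#include <stdint.h>

/*
 * The identity metaslab_verify_space() enforces: free space recorded in
 * the on-disk space map equals in-core free space plus open-txg
 * allocations, deferred frees, and space freed this syncing txg.
 */
static int
metaslab_space_consistent(uint64_t sm_free_space, uint64_t ms_tree_space,
    uint64_t allocated, uint64_t deferspace, uint64_t freedtree_space)
{
        return (sm_free_space ==
            ms_tree_space + allocated + deferspace + freedtree_space);
}

int
main(void)
{
        /* 900 free on disk = 700 in-core + 100 allocating this txg
         * + 50 deferred + 50 freed this syncing txg. */
        assert(metaslab_space_consistent(900, 700, 100, 50, 50));
        return (0);
}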
@@ -1423,7 +1421,7 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
 
         /*
          * We create the main range tree here, but we don't create the
-         * alloctree and freetree until metaslab_sync_done(). This serves
+         * other range trees until metaslab_sync_done(). This serves
          * two purposes: it allows metaslab_sync_done() to detect the
          * addition of new space; and for debugging, it ensures that we'd
          * data fault on any attempt to use this metaslab before it's ready.
@@ -1483,10 +1481,11 @@ metaslab_fini(metaslab_t *msp)
 
         metaslab_unload(msp);
         range_tree_destroy(msp->ms_tree);
+        range_tree_destroy(msp->ms_freeingtree);
+        range_tree_destroy(msp->ms_freedtree);
 
         for (t = 0; t < TXG_SIZE; t++) {
                 range_tree_destroy(msp->ms_alloctree[t]);
-                range_tree_destroy(msp->ms_freetree[t]);
         }
 
         for (t = 0; t < TXG_DEFER_SIZE; t++) {
@@ -2103,7 +2102,6 @@ static void
 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
 {
         spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
-        range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
         range_tree_t *condense_tree;
         space_map_t *sm = msp->ms_sm;
         int t;
@@ -2135,9 +2133,9 @@ metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
         /*
          * Remove what's been freed in this txg from the condense_tree.
          * Since we're in sync_pass 1, we know that all the frees from
-         * this txg are in the freetree.
+         * this txg are in the freeingtree.
          */
-        range_tree_walk(freetree, range_tree_remove, condense_tree);
+        range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);
 
         for (t = 0; t < TXG_DEFER_SIZE; t++) {
                 range_tree_walk(msp->ms_defertree[t],
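A toy model of how metaslab_condense() assembles condense_tree, with every range tree shrunk to a 64-chunk bitmask (set bit = chunk present in the tree). All values are invented; note that the real function also subtracts the alloctrees of the not-yet-synced txgs, a step outside the hunk above:

#include <assert.h>
#include <stdint.h>

#define TXG_SIZE                4
#define TXG_MASK                (TXG_SIZE - 1)
#define TXG_DEFER_SIZE          2
#define TXG_CONCURRENT_STATES   3

int
main(void)
{
        uint64_t txg = 50;
        uint64_t alloctree[TXG_SIZE] = { 0, 0, 0x30ULL, 0 };
        uint64_t freeingtree = 0x0f00ULL;  /* all frees from this txg */
        uint64_t defertree[TXG_DEFER_SIZE] = { 0x3ULL, 0xcULL };
        uint64_t condense_tree;
        int t;

        /* Start from the whole metaslab ("everything allocated")... */
        condense_tree = ~0ULL;

        /* ...then carve out everything known not to be allocated: */
        condense_tree &= ~freeingtree;
        for (t = 0; t < TXG_DEFER_SIZE; t++)
                condense_tree &= ~defertree[t];
        for (t = 1; t < TXG_CONCURRENT_STATES; t++)
                condense_tree &= ~alloctree[(txg + t) & TXG_MASK];

        /*
         * The space map is then truncated and rewritten as one ALLOC
         * record (condense_tree) followed by FREE records for ms_tree.
         */
        assert((condense_tree & freeingtree) == 0);
        return (0);
}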
@@ -2193,9 +2191,6 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
         spa_t *spa = vd->vdev_spa;
         objset_t *mos = spa_meta_objset(spa);
         range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
-        range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
-        range_tree_t **freed_tree =
-            &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
         dmu_tx_t *tx;
         uint64_t object = space_map_object(msp->ms_sm);
 
@@ -2204,14 +2199,14 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
         /*
          * This metaslab has just been added so there's no work to do now.
          */
-        if (*freetree == NULL) {
+        if (msp->ms_freeingtree == NULL) {
                 ASSERT3P(alloctree, ==, NULL);
                 return;
         }
 
         ASSERT3P(alloctree, !=, NULL);
-        ASSERT3P(*freetree, !=, NULL);
-        ASSERT3P(*freed_tree, !=, NULL);
+        ASSERT3P(msp->ms_freeingtree, !=, NULL);
+        ASSERT3P(msp->ms_freedtree, !=, NULL);
 
         /*
          * Normally, we don't want to process a metaslab if there
@@ -2219,14 +2214,14 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
          * is being forced to condense we need to let it through.
          */
         if (range_tree_space(alloctree) == 0 &&
-            range_tree_space(*freetree) == 0 &&
+            range_tree_space(msp->ms_freeingtree) == 0 &&
             !msp->ms_condense_wanted)
                 return;
 
         /*
          * The only state that can actually be changing concurrently with
          * metaslab_sync() is the metaslab's ms_tree. No other thread can
-         * be modifying this txg's alloctree, freetree, freed_tree, or
+         * be modifying this txg's alloctree, freeingtree, freedtree, or
          * space_map_phys_t. Therefore, we only hold ms_lock to satify
          * space map ASSERTs. We drop it whenever we call into the DMU,
          * because the DMU can call down to us (e.g. via zio_free()) at
@@ -2263,7 +2258,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
                 metaslab_condense(msp, txg, tx);
         } else {
                 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
-                space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
+                space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx);
         }
 
         if (msp->ms_loaded) {
@@ -2285,7 +2280,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
                  * to accurately reflect all free space even if some space
                  * is not yet available for allocation (i.e. deferred).
                  */
-                space_map_histogram_add(msp->ms_sm, *freed_tree, tx);
+                space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx);
 
                 /*
                  * Add back any deferred free space that has not been
@@ -2307,7 +2302,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
          * then we will lose some accuracy but will correct it the next
          * time we load the space map.
          */
-        space_map_histogram_add(msp->ms_sm, *freetree, tx);
+        space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx);
 
         metaslab_group_histogram_add(mg, msp);
         metaslab_group_histogram_verify(mg);
@@ -2315,20 +2310,21 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
 
         /*
          * For sync pass 1, we avoid traversing this txg's free range tree
-         * and instead will just swap the pointers for freetree and
-         * freed_tree. We can safely do this since the freed_tree is
+         * and instead will just swap the pointers for freeingtree and
+         * freedtree. We can safely do this since the freed_tree is
          * guaranteed to be empty on the initial pass.
          */
         if (spa_sync_pass(spa) == 1) {
-                range_tree_swap(freetree, freed_tree);
+                range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree);
         } else {
-                range_tree_vacate(*freetree, range_tree_add, *freed_tree);
+                range_tree_vacate(msp->ms_freeingtree,
+                    range_tree_add, msp->ms_freedtree);
         }
         range_tree_vacate(alloctree, NULL, NULL);
 
         ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
         ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
-        ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
+        ASSERT0(range_tree_space(msp->ms_freeingtree));
 
         mutex_exit(&msp->ms_lock);
 
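The pass-1 pointer swap above is the payoff of naming the trees: no txg indexing, just an O(1) exchange with a tree that is guaranteed empty. A self-contained sketch of the two paths; the byte-counter trees and the sync_frees() wrapper are invented:

#include <assert.h>
#include <stdint.h>

typedef struct range_tree { uint64_t rt_space; } range_tree_t;

static void
sync_frees(range_tree_t **freeingtree, range_tree_t **freedtree,
    int sync_pass)
{
        if (sync_pass == 1) {
                /* Freed tree is empty on pass 1: just exchange pointers. */
                range_tree_t *rt = *freeingtree;

                assert((*freedtree)->rt_space == 0);
                *freeingtree = *freedtree;
                *freedtree = rt;
        } else {
                /* Later passes migrate the contents (vacate). */
                (*freedtree)->rt_space += (*freeingtree)->rt_space;
                (*freeingtree)->rt_space = 0;
        }
}

int
main(void)
{
        range_tree_t a = { 128 }, b = { 0 };
        range_tree_t *freeing = &a, *freed = &b;

        sync_frees(&freeing, &freed, 1);
        assert(freeing->rt_space == 0 && freed->rt_space == 128);
        return (0);
}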
@@ -2350,7 +2346,6 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
         metaslab_group_t *mg = msp->ms_group;
         vdev_t *vd = mg->mg_vd;
         spa_t *spa = vd->vdev_spa;
-        range_tree_t **freed_tree;
         range_tree_t **defer_tree;
         int64_t alloc_delta, defer_delta;
         uint64_t free_space;
@@ -2363,20 +2358,24 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
 
         /*
          * If this metaslab is just becoming available, initialize its
-         * alloctrees, freetrees, and defertree and add its capacity to
-         * the vdev.
+         * range trees and add its capacity to the vdev.
          */
-        if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
+        if (msp->ms_freedtree == NULL) {
                 for (t = 0; t < TXG_SIZE; t++) {
                         ASSERT(msp->ms_alloctree[t] == NULL);
-                        ASSERT(msp->ms_freetree[t] == NULL);
 
                         msp->ms_alloctree[t] = range_tree_create(NULL, msp,
                             &msp->ms_lock);
-                        msp->ms_freetree[t] = range_tree_create(NULL, msp,
-                            &msp->ms_lock);
                 }
 
+                ASSERT3P(msp->ms_freeingtree, ==, NULL);
+                msp->ms_freeingtree = range_tree_create(NULL, msp,
+                    &msp->ms_lock);
+
+                ASSERT3P(msp->ms_freedtree, ==, NULL);
+                msp->ms_freedtree = range_tree_create(NULL, msp,
+                    &msp->ms_lock);
+
                 for (t = 0; t < TXG_DEFER_SIZE; t++) {
                         ASSERT(msp->ms_defertree[t] == NULL);
 
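Creating the trees lazily here doubles as a sentinel: metaslab_sync() treats a NULL ms_freeingtree as "metaslab just added, nothing to sync yet", and this function keys off ms_freedtree the same way. A sketch of that pattern; range_tree_create_stub() is an invented stand-in for range_tree_create():

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct range_tree { uint64_t rt_space; } range_tree_t;

typedef struct metaslab_stub {
        range_tree_t *ms_freeingtree;
        range_tree_t *ms_freedtree;
} metaslab_stub_t;

static range_tree_t *
range_tree_create_stub(void)
{
        return (calloc(1, sizeof (range_tree_t)));
}

static void
metaslab_sync_done_init(metaslab_stub_t *msp)
{
        if (msp->ms_freedtree == NULL) {        /* just becoming available */
                assert(msp->ms_freeingtree == NULL);
                msp->ms_freeingtree = range_tree_create_stub();
                msp->ms_freedtree = range_tree_create_stub();
        }
}

int
main(void)
{
        metaslab_stub_t m = { NULL, NULL };

        metaslab_sync_done_init(&m);
        assert(m.ms_freeingtree != NULL && m.ms_freedtree != NULL);
        free(m.ms_freeingtree);
        free(m.ms_freedtree);
        return (0);
}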
@@ -2387,7 +2386,6 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
                 vdev_space_update(vd, 0, 0, msp->ms_size);
         }
 
-        freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
         defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
 
         free_space = metaslab_class_get_space(spa_normal_class(spa)) -
@@ -2399,7 +2397,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
         defer_delta = 0;
         alloc_delta = space_map_alloc_delta(msp->ms_sm);
         if (defer_allowed) {
-                defer_delta = range_tree_space(*freed_tree) -
+                defer_delta = range_tree_space(msp->ms_freedtree) -
                     range_tree_space(*defer_tree);
         } else {
                 defer_delta -= range_tree_space(*defer_tree);
@@ -2407,9 +2405,6 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
 
         vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
 
-        ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
-        ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
-
         /*
          * If there's a metaslab_load() in progress, wait for it to complete
          * so that we have a consistent view of the in-core space map.
@@ -2425,9 +2420,9 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
         range_tree_vacate(*defer_tree,
             msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
         if (defer_allowed) {
-                range_tree_swap(freed_tree, defer_tree);
+                range_tree_swap(&msp->ms_freedtree, defer_tree);
         } else {
-                range_tree_vacate(*freed_tree,
+                range_tree_vacate(msp->ms_freedtree,
                     msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
         }
 
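At the end of sync the trees rotate: the defer slot this txg maps to drains back into ms_tree, and the freed tree takes its place for the next TXG_DEFER_SIZE txgs. A toy byte-counter model of the rotation; all numbers are invented, and the real code swaps tree pointers rather than copying counts:

#include <assert.h>
#include <stdint.h>

#define TXG_DEFER_SIZE  2

typedef struct range_tree { uint64_t rt_space; } range_tree_t;

int
main(void)
{
        range_tree_t ms_tree = { 1000 };
        range_tree_t freedtree = { 64 };
        range_tree_t defertree[TXG_DEFER_SIZE] = { { 16 }, { 32 } };
        uint64_t txg = 50;
        range_tree_t *defer_tree = &defertree[txg % TXG_DEFER_SIZE];

        /* Space deferred TXG_DEFER_SIZE txgs ago is allocatable again. */
        ms_tree.rt_space += defer_tree->rt_space;
        defer_tree->rt_space = 0;

        /* This txg's frees begin their wait; in the real code this is
         * range_tree_swap(&msp->ms_freedtree, defer_tree). */
        defer_tree->rt_space = freedtree.rt_space;
        freedtree.rt_space = 0;

        assert(ms_tree.rt_space == 1016);
        assert(defer_tree->rt_space == 64);
        assert(freedtree.rt_space == 0);
        return (0);
}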
@@ -3252,10 +3247,10 @@ metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
                 range_tree_add(msp->ms_tree, offset, size);
                 msp->ms_max_size = metaslab_block_maxsize(msp);
         } else {
-                if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
+                VERIFY3U(txg, ==, spa->spa_syncing_txg);
+                if (range_tree_space(msp->ms_freeingtree) == 0)
                         vdev_dirty(vd, VDD_METASLAB, msp, txg);
-                range_tree_add(msp->ms_freetree[txg & TXG_MASK],
-                    offset, size);
+                range_tree_add(msp->ms_freeingtree, offset, size);
         }
 
         mutex_exit(&msp->ms_lock);
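The added VERIFY3U pins down the new contract: with a single ms_freeingtree there is no per-txg slot to absorb stray frees, so any free that is not an immediate undo must already be running in the syncing txg. A condensed sketch of the dispatch; the stub types are invented, and locking plus vdev_dirty() are elided:

#include <assert.h>
#include <stdint.h>

typedef struct range_tree { uint64_t rt_space; } range_tree_t;

typedef struct metaslab_stub {
        range_tree_t ms_tree;           /* allocatable space */
        range_tree_t ms_freeingtree;    /* to free this syncing txg */
} metaslab_stub_t;

static void
free_dva_sketch(metaslab_stub_t *msp, uint64_t size, uint64_t txg,
    uint64_t syncing_txg, int now)
{
        if (now) {
                /* Undo an allocation from this open txg immediately. */
                msp->ms_tree.rt_space += size;
        } else {
                /* Otherwise we must be in syncing context. */
                assert(txg == syncing_txg);     /* the new VERIFY3U */
                msp->ms_freeingtree.rt_space += size;
        }
}

int
main(void)
{
        metaslab_stub_t m = { { 0 }, { 0 } };

        free_dva_sketch(&m, 512, 50, 50, 0);
        assert(m.ms_freeingtree.rt_space == 512);
        return (0);
}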
@@ -3543,8 +3538,8 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
                 if (msp->ms_loaded)
                         range_tree_verify(msp->ms_tree, offset, size);
 
-                for (j = 0; j < TXG_SIZE; j++)
-                        range_tree_verify(msp->ms_freetree[j], offset, size);
+                range_tree_verify(msp->ms_freeingtree, offset, size);
+                range_tree_verify(msp->ms_freedtree, offset, size);
                 for (j = 0; j < TXG_DEFER_SIZE; j++)
                         range_tree_verify(msp->ms_defertree[j], offset, size);
         }