Simplified the scope of the namespace lock
If we wait until after we check for no spa references to drop the namespace lock, then we know that spa consumers will need to call spa_lookup() and end up waiting on the spa_namespace_cv until we finish. This narrows the external checks to spa_lookup and we no longer need to worry about the spa_vdev_enter case. Sponsored-By: Klara Inc. Sponsored-by: Wasabi Technology, Inc. Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Reviewed-by: George Wilson <gwilson@delphix.com> Signed-off-by: Don Brady <don.brady@klarasystems.com> Closes #16153
This commit is contained in:
parent
975a13259b
commit
89acef992b
|
@ -6994,15 +6994,11 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
|
||||||
mutex_enter(&spa_namespace_lock);
|
mutex_enter(&spa_namespace_lock);
|
||||||
spa->spa_export_thread = curthread;
|
spa->spa_export_thread = curthread;
|
||||||
spa_close(spa, FTAG);
|
spa_close(spa, FTAG);
|
||||||
|
|
||||||
|
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
|
||||||
mutex_exit(&spa_namespace_lock);
|
mutex_exit(&spa_namespace_lock);
|
||||||
|
|
||||||
/*
|
|
||||||
* At this point we no longer hold the spa_namespace_lock and
|
|
||||||
* the spa_export_thread indicates that an export is in progress.
|
|
||||||
*/
|
|
||||||
|
|
||||||
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
|
|
||||||
goto export_spa;
|
goto export_spa;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The pool will be in core if it's openable, in which case we can
|
* The pool will be in core if it's openable, in which case we can
|
||||||
|
@ -7024,6 +7020,14 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mutex_exit(&spa_namespace_lock);
|
||||||
|
/*
|
||||||
|
* At this point we no longer hold the spa_namespace_lock and
|
||||||
|
* there were no references on the spa. Future spa_lookups will
|
||||||
|
* notice the spa->spa_export_thread and wait until we signal
|
||||||
|
* that we are finished.
|
||||||
|
*/
|
||||||
|
|
||||||
if (spa->spa_sync_on) {
|
if (spa->spa_sync_on) {
|
||||||
vdev_t *rvd = spa->spa_root_vdev;
|
vdev_t *rvd = spa->spa_root_vdev;
|
||||||
/*
|
/*
|
||||||
|
@ -7035,6 +7039,7 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
|
||||||
if (!force && new_state == POOL_STATE_EXPORTED &&
|
if (!force && new_state == POOL_STATE_EXPORTED &&
|
||||||
spa_has_active_shared_spare(spa)) {
|
spa_has_active_shared_spare(spa)) {
|
||||||
error = SET_ERROR(EXDEV);
|
error = SET_ERROR(EXDEV);
|
||||||
|
mutex_enter(&spa_namespace_lock);
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -7102,6 +7107,9 @@ export_spa:
|
||||||
if (new_state == POOL_STATE_EXPORTED)
|
if (new_state == POOL_STATE_EXPORTED)
|
||||||
zio_handle_export_delay(spa, gethrtime() - export_start);
|
zio_handle_export_delay(spa, gethrtime() - export_start);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Take the namespace lock for the actual spa_t removal
|
||||||
|
*/
|
||||||
mutex_enter(&spa_namespace_lock);
|
mutex_enter(&spa_namespace_lock);
|
||||||
if (new_state != POOL_STATE_UNINITIALIZED) {
|
if (new_state != POOL_STATE_UNINITIALIZED) {
|
||||||
if (!hardforce)
|
if (!hardforce)
|
||||||
|
@ -7118,20 +7126,20 @@ export_spa:
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Wake up any waiters on spa_namespace_lock
|
* Wake up any waiters in spa_lookup()
|
||||||
* They need to re-attempt a spa_lookup()
|
|
||||||
*/
|
*/
|
||||||
cv_broadcast(&spa_namespace_cv);
|
cv_broadcast(&spa_namespace_cv);
|
||||||
mutex_exit(&spa_namespace_lock);
|
mutex_exit(&spa_namespace_lock);
|
||||||
return (0);
|
return (0);
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
mutex_enter(&spa_namespace_lock);
|
|
||||||
spa->spa_is_exporting = B_FALSE;
|
spa->spa_is_exporting = B_FALSE;
|
||||||
spa->spa_export_thread = NULL;
|
spa->spa_export_thread = NULL;
|
||||||
spa_async_resume(spa);
|
|
||||||
|
|
||||||
/* Wake up any waiters on spa_namespace_lock */
|
spa_async_resume(spa);
|
||||||
|
/*
|
||||||
|
* Wake up any waiters in spa_lookup()
|
||||||
|
*/
|
||||||
cv_broadcast(&spa_namespace_cv);
|
cv_broadcast(&spa_namespace_cv);
|
||||||
mutex_exit(&spa_namespace_lock);
|
mutex_exit(&spa_namespace_lock);
|
||||||
return (error);
|
return (error);
|
||||||
|
|
|
@ -1240,20 +1240,7 @@ spa_vdev_enter(spa_t *spa)
|
||||||
mutex_enter(&spa->spa_vdev_top_lock);
|
mutex_enter(&spa->spa_vdev_top_lock);
|
||||||
mutex_enter(&spa_namespace_lock);
|
mutex_enter(&spa_namespace_lock);
|
||||||
|
|
||||||
/*
|
ASSERT0(spa->spa_export_thread);
|
||||||
* We have a reference on the spa and a spa export could be
|
|
||||||
* starting but no longer holding the spa_namespace_lock. So
|
|
||||||
* check if there is an export and if so wait. It will fail
|
|
||||||
* fast (EBUSY) since we are still holding a spa reference.
|
|
||||||
*
|
|
||||||
* Note that we can be woken by a different spa transitioning
|
|
||||||
* through an import/export, so we must wait for our condition
|
|
||||||
* to change before proceeding.
|
|
||||||
*/
|
|
||||||
while (spa->spa_export_thread != NULL &&
|
|
||||||
spa->spa_export_thread != curthread) {
|
|
||||||
cv_wait(&spa_namespace_cv, &spa_namespace_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
vdev_autotrim_stop_all(spa);
|
vdev_autotrim_stop_all(spa);
|
||||||
|
|
||||||
|
@ -1272,11 +1259,7 @@ spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
|
||||||
mutex_enter(&spa->spa_vdev_top_lock);
|
mutex_enter(&spa->spa_vdev_top_lock);
|
||||||
mutex_enter(&spa_namespace_lock);
|
mutex_enter(&spa_namespace_lock);
|
||||||
|
|
||||||
/* See comment in spa_vdev_enter() */
|
ASSERT0(spa->spa_export_thread);
|
||||||
while (spa->spa_export_thread != NULL &&
|
|
||||||
spa->spa_export_thread != curthread) {
|
|
||||||
cv_wait(&spa_namespace_cv, &spa_namespace_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
vdev_autotrim_stop_all(spa);
|
vdev_autotrim_stop_all(spa);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue