diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 1f047dcd2a..9eb14b4f1d 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -6994,15 +6994,11 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
 	mutex_enter(&spa_namespace_lock);
 	spa->spa_export_thread = curthread;
 	spa_close(spa, FTAG);
-	mutex_exit(&spa_namespace_lock);
 
-	/*
-	 * At this point we no longer hold the spa_namespace_lock and
-	 * the spa_export_thread indicates that an export is in progress.
-	 */
-
-	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
+	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
+		mutex_exit(&spa_namespace_lock);
 		goto export_spa;
+	}
 
 	/*
 	 * The pool will be in core if it's openable, in which case we can
@@ -7024,6 +7020,14 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
 		goto fail;
 	}
 
+	mutex_exit(&spa_namespace_lock);
+	/*
+	 * At this point we no longer hold the spa_namespace_lock and
+	 * there were no references on the spa. Future spa_lookups will
+	 * notice the spa->spa_export_thread and wait until we signal
+	 * that we are finished.
+	 */
+
 	if (spa->spa_sync_on) {
 		vdev_t *rvd = spa->spa_root_vdev;
 		/*
@@ -7035,6 +7039,7 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
 		if (!force && new_state == POOL_STATE_EXPORTED &&
 		    spa_has_active_shared_spare(spa)) {
 			error = SET_ERROR(EXDEV);
+			mutex_enter(&spa_namespace_lock);
 			goto fail;
 		}
 
@@ -7102,6 +7107,9 @@ export_spa:
 	if (new_state == POOL_STATE_EXPORTED)
 		zio_handle_export_delay(spa, gethrtime() - export_start);
 
+	/*
+	 * Take the namespace lock for the actual spa_t removal
+	 */
 	mutex_enter(&spa_namespace_lock);
 	if (new_state != POOL_STATE_UNINITIALIZED) {
 		if (!hardforce)
@@ -7118,20 +7126,20 @@ export_spa:
 	}
 
 	/*
-	 * Wake up any waiters on spa_namespace_lock
-	 * They need to re-attempt a spa_lookup()
+	 * Wake up any waiters in spa_lookup()
 	 */
 	cv_broadcast(&spa_namespace_cv);
 	mutex_exit(&spa_namespace_lock);
 	return (0);
 
 fail:
-	mutex_enter(&spa_namespace_lock);
 	spa->spa_is_exporting = B_FALSE;
 	spa->spa_export_thread = NULL;
-	spa_async_resume(spa);
 
-	/* Wake up any waiters on spa_namespace_lock */
+	spa_async_resume(spa);
+	/*
+	 * Wake up any waiters in spa_lookup()
+	 */
 	cv_broadcast(&spa_namespace_cv);
 	mutex_exit(&spa_namespace_lock);
 	return (error);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 6d7667cf3e..d1d41bbe72 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -1240,20 +1240,7 @@ spa_vdev_enter(spa_t *spa)
 	mutex_enter(&spa->spa_vdev_top_lock);
 	mutex_enter(&spa_namespace_lock);
 
-	/*
-	 * We have a reference on the spa and a spa export could be
-	 * starting but no longer holding the spa_namespace_lock. So
-	 * check if there is an export and if so wait. It will fail
-	 * fast (EBUSY) since we are still holding a spa reference.
-	 *
-	 * Note that we can be woken by a different spa transitioning
-	 * through an import/export, so we must wait for our condition
-	 * to change before proceeding.
-	 */
-	while (spa->spa_export_thread != NULL &&
-	    spa->spa_export_thread != curthread) {
-		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
-	}
+	ASSERT0(spa->spa_export_thread);
 
 	vdev_autotrim_stop_all(spa);
 
@@ -1272,11 +1259,7 @@ spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
 	mutex_enter(&spa->spa_vdev_top_lock);
 	mutex_enter(&spa_namespace_lock);
 
-	/* See comment in spa_vdev_enter() */
-	while (spa->spa_export_thread != NULL &&
-	    spa->spa_export_thread != curthread) {
-		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
-	}
+	ASSERT0(spa->spa_export_thread);
 
 	vdev_autotrim_stop_all(spa);
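
For readers of this patch, the waiter side of the handshake described in the new spa_export_common() comment looks roughly like the loop this change removes from spa_vdev_enter(): a thread that finds spa->spa_export_thread set sleeps on spa_namespace_cv with spa_namespace_lock held, and re-checks the condition after every wakeup because the broadcast may have been for a different pool's import/export. The sketch below is illustrative only: spa_export_wait_sketch() is a hypothetical helper, not the actual spa_lookup() body, and it assumes the usual declarations from sys/spa.h and sys/spa_impl.h.

/*
 * Illustrative sketch (hypothetical helper, not the real spa_lookup()
 * code).  With spa_namespace_lock held, block while another thread is
 * exporting this spa; spa_export_common() cv_broadcast()s
 * spa_namespace_cv once it clears spa_export_thread.
 */
static void
spa_export_wait_sketch(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	while (spa->spa_export_thread != NULL &&
	    spa->spa_export_thread != curthread) {
		/* Recheck after wakeup; other pools share this cv. */
		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
	}
}

Because spa_vdev_enter() and spa_vdev_detach_enter() can now only be reached by callers that already got past that wait in spa_lookup(), the patch downgrades their wait loops to ASSERT0(spa->spa_export_thread).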