diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index c624833bc9..24d52a7493 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -1223,7 +1223,7 @@ metaslab_group_fragmentation(metaslab_group_t *mg) */ static boolean_t metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, - uint64_t psize, int allocator, int d) + int flags, uint64_t psize, int allocator, int d) { spa_t *spa = mg->mg_vd->vdev_spa; metaslab_class_t *mc = mg->mg_class; @@ -1267,6 +1267,15 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, if (mg->mg_no_free_space) return (B_FALSE); + /* + * Some allocations (e.g., those coming from device removal + * where the allocations are not even counted in the + * metaslab allocation queues) are allowed to bypass + * the throttle. + */ + if (flags & METASLAB_DONT_THROTTLE) + return (B_TRUE); + /* + * Relax allocation throttling for ditto blocks. Due to + * random imbalances in allocation it tends to push copies @@ -5188,7 +5197,7 @@ top: */ if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { allocatable = metaslab_group_allocatable(mg, rotor, - psize, allocator, d); + flags, psize, allocator, d); } if (!allocatable) { diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c index 53592dbfdf..aaa88eb89e 100644 --- a/module/zfs/vdev_removal.c +++ b/module/zfs/vdev_removal.c @@ -1168,11 +1168,11 @@ spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs, metaslab_class_t *mc = mg->mg_class; if (mc->mc_groups == 0) mc = spa_normal_class(spa); - int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0, - zal, 0); + int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, + METASLAB_DONT_THROTTLE, zal, 0); if (error == ENOSPC && mc != spa_normal_class(spa)) { error = metaslab_alloc_dva(spa, spa_normal_class(spa), size, - &dst, 0, NULL, txg, 0, zal, 0); + &dst, 0, NULL, txg, METASLAB_DONT_THROTTLE, zal, 0); } if (error != 0) return (error);