spa_taskq_dispatch_ent: simplify arguments
This renames it to spa_taskq_dispatch(), and reduces and simplifies its arguments based on these observations from its two call sites:

- arg is always the zio, so it can be typed that way, and we don't need to provide it twice;
- ent is always &zio->io_tqent, and zio is always provided, so we can use it directly;
- the only flag used is TQ_FRONT, which can just be a bool;
- zio != NULL was part of the "use allocator" test, but it never would have got that far, because that arg was only set to NULL in the reexecute path, which is forced to type CLAIM, so the condition would fail at t == WRITE anyway.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #16151
This commit is contained in:
parent
515c4dd213
commit
0a543db371
|
@ -479,8 +479,8 @@ struct spa {
|
||||||
extern char *spa_config_path;
|
extern char *spa_config_path;
|
||||||
extern const char *zfs_deadman_failmode;
|
extern const char *zfs_deadman_failmode;
|
||||||
extern uint_t spa_slop_shift;
|
extern uint_t spa_slop_shift;
|
||||||
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
extern void spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
||||||
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent, zio_t *zio);
|
task_func_t *func, zio_t *zio, boolean_t cutinline);
|
||||||
extern void spa_load_spares(spa_t *spa);
|
extern void spa_load_spares(spa_t *spa);
|
||||||
extern void spa_load_l2cache(spa_t *spa);
|
extern void spa_load_l2cache(spa_t *spa);
|
||||||
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
|
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
|
||||||
|
|
|
@ -1491,9 +1491,8 @@ spa_taskq_write_param(ZFS_MODULE_PARAM_ARGS)
|
||||||
* on the taskq itself.
|
* on the taskq itself.
|
||||||
*/
|
*/
|
||||||
void
|
void
|
||||||
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
||||||
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent,
|
task_func_t *func, zio_t *zio, boolean_t cutinline)
|
||||||
zio_t *zio)
|
|
||||||
{
|
{
|
||||||
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
|
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
|
||||||
taskq_t *tq;
|
taskq_t *tq;
|
||||||
|
@ -1501,16 +1500,25 @@ spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
|
||||||
ASSERT3P(tqs->stqs_taskq, !=, NULL);
|
ASSERT3P(tqs->stqs_taskq, !=, NULL);
|
||||||
ASSERT3U(tqs->stqs_count, !=, 0);
|
ASSERT3U(tqs->stqs_count, !=, 0);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NB: We are assuming that the zio can only be dispatched
|
||||||
|
* to a single taskq at a time. It would be a grievous error
|
||||||
|
* to dispatch the zio to another taskq at the same time.
|
||||||
|
*/
|
||||||
|
ASSERT(zio);
|
||||||
|
ASSERT(taskq_empty_ent(&zio->io_tqent));
|
||||||
|
|
||||||
if (tqs->stqs_count == 1) {
|
if (tqs->stqs_count == 1) {
|
||||||
tq = tqs->stqs_taskq[0];
|
tq = tqs->stqs_taskq[0];
|
||||||
} else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) &&
|
} else if ((t == ZIO_TYPE_WRITE) && (q == ZIO_TASKQ_ISSUE) &&
|
||||||
(zio != NULL) && ZIO_HAS_ALLOCATOR(zio)) {
|
ZIO_HAS_ALLOCATOR(zio)) {
|
||||||
tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count];
|
tq = tqs->stqs_taskq[zio->io_allocator % tqs->stqs_count];
|
||||||
} else {
|
} else {
|
||||||
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
|
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
|
||||||
}
|
}
|
||||||
|
|
||||||
taskq_dispatch_ent(tq, func, arg, flags, ent);
|
taskq_dispatch_ent(tq, func, zio, cutinline ? TQ_FRONT : 0,
|
||||||
|
&zio->io_tqent);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
|
|
@ -2023,7 +2023,6 @@ zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
|
||||||
{
|
{
|
||||||
spa_t *spa = zio->io_spa;
|
spa_t *spa = zio->io_spa;
|
||||||
zio_type_t t = zio->io_type;
|
zio_type_t t = zio->io_type;
|
||||||
int flags = (cutinline ? TQ_FRONT : 0);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we're a config writer or a probe, the normal issue and
|
* If we're a config writer or a probe, the normal issue and
|
||||||
|
@ -2047,19 +2046,12 @@ zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
|
||||||
if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
|
if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
|
||||||
q++;
|
q++;
|
||||||
else
|
else
|
||||||
flags |= TQ_FRONT;
|
cutinline = B_TRUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
|
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
|
||||||
|
|
||||||
/*
|
spa_taskq_dispatch(spa, t, q, zio_execute, zio, cutinline);
|
||||||
* NB: We are assuming that the zio can only be dispatched
|
|
||||||
* to a single taskq at a time. It would be a grievous error
|
|
||||||
* to dispatch the zio to another taskq at the same time.
|
|
||||||
*/
|
|
||||||
ASSERT(taskq_empty_ent(&zio->io_tqent));
|
|
||||||
spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
|
|
||||||
&zio->io_tqent, zio);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static boolean_t
|
static boolean_t
|
||||||
|
@ -5007,10 +4999,9 @@ zio_done(zio_t *zio)
|
||||||
* Reexecution is potentially a huge amount of work.
|
* Reexecution is potentially a huge amount of work.
|
||||||
* Hand it off to the otherwise-unused claim taskq.
|
* Hand it off to the otherwise-unused claim taskq.
|
||||||
*/
|
*/
|
||||||
ASSERT(taskq_empty_ent(&zio->io_tqent));
|
spa_taskq_dispatch(zio->io_spa,
|
||||||
spa_taskq_dispatch_ent(zio->io_spa,
|
|
||||||
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
|
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
|
||||||
zio_reexecute, zio, 0, &zio->io_tqent, NULL);
|
zio_reexecute, zio, B_FALSE);
|
||||||
}
|
}
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue