Linux 6.11: enable queue flush through queue limits

In 6.11, struct queue_limits gains a 'features' field, through which, among
other things, flush and write-cache support are enabled. Detect it and use it.

Along the way, the blk_queue_set_write_cache() compat wrapper gets a
little cleanup. Since both flags are always set together, it's now a
single bool. Also, the very ancient version that sets q->flush_flags
directly couldn't actually turn it off, so I've fixed that. Not that we
use it, but still.

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Sponsored-by: https://despairlabs.com/sponsor/
Closes #16400
Author: Rob Norris
Date: 2024-07-30 21:40:35 +10:00
Committed by: Tony Hutter
Parent: 6961d4fb57
Commit: 4fa84563b8

3 changed files with 50 additions and 14 deletions
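For orientation, here is roughly what the new 6.11 interface looks like from a
driver's point of view. This is an illustrative sketch, not part of the patch;
it assumes a 6.11+ kernel, where BLK_FEAT_WRITE_CACHE and BLK_FEAT_FUA exist
and blk_alloc_disk() takes a queue_limits pointer:

#include <linux/blkdev.h>

/*
 * Sketch: flush/FUA support is declared up front in the limits,
 * instead of being toggled on a live request queue afterwards.
 */
static struct gendisk *
example_alloc_disk(void)
{
	struct queue_limits lim = {
		.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
	};

	/* The limits are handed over at allocation time. */
	return (blk_alloc_disk(&lim, NUMA_NO_NODE));
}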

config/kernel-make-request-fn.m4

@@ -58,6 +58,13 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN], [
 		disk = blk_alloc_disk(lim, NUMA_NO_NODE);
 	])

+	ZFS_LINUX_TEST_SRC([blkdev_queue_limits_features], [
+		#include <linux/blkdev.h>
+	],[
+		struct queue_limits *lim = NULL;
+		lim->features = 0;
+	])
+
 	ZFS_LINUX_TEST_SRC([blk_cleanup_disk], [
 		#include <linux/blkdev.h>
 	],[
@@ -114,6 +121,20 @@ AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
 		AC_MSG_RESULT(yes)
 		AC_DEFINE([HAVE_BLK_ALLOC_DISK_2ARG], 1, [blk_alloc_disk() exists and takes 2 args])

+		dnl #
+		dnl # Linux 6.11 API change:
+		dnl # struct queue_limits gains a 'features' field,
+		dnl # used to set flushing options
+		dnl #
+		AC_MSG_CHECKING([whether struct queue_limits has a features field])
+		ZFS_LINUX_TEST_RESULT([blkdev_queue_limits_features], [
+			AC_MSG_RESULT(yes)
+			AC_DEFINE([HAVE_BLKDEV_QUEUE_LIMITS_FEATURES], 1,
+			    [struct queue_limits has a features field])
+		], [
+			AC_MSG_RESULT(no)
+		])
+
 		dnl #
 		dnl # 5.20 API change,
 		dnl # Removed blk_cleanup_disk(), put_disk() should be used.
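The probe itself is just a compile test: if the fragment below builds against
the target kernel's headers, the field exists and
HAVE_BLKDEV_QUEUE_LIMITS_FEATURES gets defined; otherwise configure falls back
to the older interfaces. A condensed sketch of what ZFS_LINUX_TEST_SRC
generates (the real harness wraps it in a kernel module build):

#include <linux/blkdev.h>

void
test_queue_limits_features(void)
{
	struct queue_limits *lim = NULL;

	/* Only compiles if struct queue_limits has a 'features' member. */
	lim->features = 0;
}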

include/os/linux/kernel/linux/blkdev_compat.h

@@ -57,6 +57,11 @@ blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 #endif

 /*
+ * 6.11 API
+ * Setting the flush flags directly is no longer possible; flush flags are set
+ * on the queue_limits structure and passed to blk_alloc_disk(). In this case
+ * we remove this function entirely.
+ *
  * 4.7 API,
  * The blk_queue_write_cache() interface has replaced blk_queue_flush()
  * interface. However, the new interface is GPL-only thus we implement
@@ -68,31 +73,33 @@ blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
  * new one is GPL-only. Thus if the GPL-only version is detected we
  * implement our own trivial helper.
  */
+#if !defined(HAVE_BLK_ALLOC_DISK_2ARG) || \
+	!defined(HAVE_BLKDEV_QUEUE_LIMITS_FEATURES)
 static inline void
-blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
+blk_queue_set_write_cache(struct request_queue *q, bool on)
 {
 #if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
-	if (wc)
+	if (on) {
 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
-	else
-		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
-	if (fua)
 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
-	else
+	} else {
+		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
+	}
 #elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
-	blk_queue_write_cache(q, wc, fua);
+	blk_queue_write_cache(q, on, on);
 #elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
-	if (wc)
-		q->flush_flags |= REQ_FLUSH;
-	if (fua)
-		q->flush_flags |= REQ_FUA;
+	if (on)
+		q->flush_flags |= REQ_FLUSH | REQ_FUA;
+	else
+		q->flush_flags &= ~(REQ_FLUSH | REQ_FUA);
 #elif defined(HAVE_BLK_QUEUE_FLUSH)
-	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
+	blk_queue_flush(q, on ? (REQ_FLUSH | REQ_FUA) : 0);
 #else
 #error "Unsupported kernel"
 #endif
 }
+#endif /* !HAVE_BLK_ALLOC_DISK_2ARG || !HAVE_BLKDEV_QUEUE_LIMITS_FEATURES */

 static inline void
 blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)

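On kernels without the 6.11 limits interface the wrapper survives in its new
single-flag shape, and enabling and disabling are now symmetric across all
four fallback paths. Hypothetical call sites, for illustration only:

/* Enable the volatile write cache and FUA together, the only
 * combination ZFS ever asks for. */
blk_queue_set_write_cache(q, B_TRUE);

/* Unlike the old q->flush_flags path, this can now turn both off too. */
blk_queue_set_write_cache(q, B_FALSE);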
module/os/linux/zfs/zvol_os.c

@@ -1159,6 +1159,9 @@ zvol_queue_limits_convert(zvol_queue_limits_t *limits,
 	qlimits->max_segments = limits->zql_max_segments;
 	qlimits->max_segment_size = limits->zql_max_segment_size;
 	qlimits->io_opt = limits->zql_io_opt;
+#ifdef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
+	qlimits->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
+#endif
 }
 #else
 static void
@@ -1169,6 +1172,9 @@ zvol_queue_limits_apply(zvol_queue_limits_t *limits,
 	blk_queue_max_segments(queue, limits->zql_max_segments);
 	blk_queue_max_segment_size(queue, limits->zql_max_segment_size);
 	blk_queue_io_opt(queue, limits->zql_io_opt);
+#ifndef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
+	blk_queue_set_write_cache(queue, B_TRUE);
+#endif
 }
 #endif
@@ -1193,6 +1199,10 @@ zvol_alloc_non_blk_mq(struct zvol_state_os *zso, zvol_queue_limits_t *limits)
 		return (1);
 	}

+#ifndef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
+	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE);
+#endif
+
 	zso->zvo_disk = disk;
 	zso->zvo_disk->minors = ZVOL_MINORS;
 	zso->zvo_queue = zso->zvo_disk->queue;
@@ -1344,8 +1354,6 @@ zvol_alloc(dev_t dev, const char *name)
 	if (ret != 0)
 		goto out_kmem;

-	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
-
 	/* Limit read-ahead to a single page to prevent over-prefetching. */
 	blk_queue_set_read_ahead(zso->zvo_queue, 1);
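Taken together, the zvol changes reduce to one compile-time decision about
where flush/FUA gets enabled. A condensed sketch of the logic above (not
literal patch code; names borrowed from the diff):

#ifdef HAVE_BLKDEV_QUEUE_LIMITS_FEATURES
	/* 6.11+: request write cache and FUA in the limits, before
	 * the gendisk is allocated. */
	qlimits->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
#else
	/* Older kernels: flip the flags on the already-allocated queue. */
	blk_queue_set_write_cache(queue, B_TRUE);
#endif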