Linux 6.8 compat: replace MAX_ORDER define

In Linux 6.8, MAX_ORDER has been renamed to MAX_PAGE_ORDER. Rather than
just redefining it, define our own name (ABD_MAX_ORDER) and set it
consistently from the start, from whichever macro the running kernel
provides (a sketch of the pattern follows the commit metadata below).

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Sponsored-by: https://despairlabs.com/sponsor/
Closes #15805
(cherry picked from commit 09e6724e1e)
Rob Norris authored on 2024-01-23 16:41:05 +11:00; committed by Tony Hutter
parent 38f6f3ada1
commit 35252ae0fd
1 changed file with 17 additions and 9 deletions
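
The fix is a small compatibility shim rather than a redefinition of the kernel's macro: the ABD code now refers only to its own ABD_MAX_ORDER, which is bound once to whichever macro the kernel headers provide. A minimal sketch of the pattern, restating the first hunk of the diff with annotations (the version comments are an editorial assumption, not text from the commit):

#ifdef _KERNEL
#if defined(MAX_ORDER)
#define	ABD_MAX_ORDER	(MAX_ORDER)		/* kernels before the 6.8 rename */
#elif defined(MAX_PAGE_ORDER)
#define	ABD_MAX_ORDER	(MAX_PAGE_ORDER)	/* 6.8 and later */
#endif
#else
#define	ABD_MAX_ORDER	(1)			/* userspace build */
#endif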


@@ -60,8 +60,16 @@
 #ifdef _KERNEL
 #include <linux/kmap_compat.h>
 #include <linux/scatterlist.h>
+#endif
+
+#ifdef _KERNEL
+#if defined(MAX_ORDER)
+#define	ABD_MAX_ORDER	(MAX_ORDER)
+#elif defined(MAX_PAGE_ORDER)
+#define	ABD_MAX_ORDER	(MAX_PAGE_ORDER)
+#endif
 #else
-#define	MAX_ORDER	1
+#define	ABD_MAX_ORDER	(1)
 #endif
 
 typedef struct abd_stats {
@@ -71,7 +79,7 @@ typedef struct abd_stats {
 	kstat_named_t abdstat_scatter_cnt;
 	kstat_named_t abdstat_scatter_data_size;
 	kstat_named_t abdstat_scatter_chunk_waste;
-	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
+	kstat_named_t abdstat_scatter_orders[ABD_MAX_ORDER];
 	kstat_named_t abdstat_scatter_page_multi_chunk;
 	kstat_named_t abdstat_scatter_page_multi_zone;
 	kstat_named_t abdstat_scatter_page_alloc_retry;
@@ -139,7 +147,7 @@ struct {
 	wmsum_t abdstat_scatter_cnt;
 	wmsum_t abdstat_scatter_data_size;
 	wmsum_t abdstat_scatter_chunk_waste;
-	wmsum_t abdstat_scatter_orders[MAX_ORDER];
+	wmsum_t abdstat_scatter_orders[ABD_MAX_ORDER];
 	wmsum_t abdstat_scatter_page_multi_chunk;
 	wmsum_t abdstat_scatter_page_multi_zone;
 	wmsum_t abdstat_scatter_page_alloc_retry;
@@ -149,7 +157,7 @@
 #define	abd_for_each_sg(abd, sg, n, i)	\
 	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
 
-unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
+unsigned zfs_abd_scatter_max_order = ABD_MAX_ORDER - 1;
 
 /*
  * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
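
zfs_abd_scatter_max_order is a run-time tunable that caps the allocation order used for scatter ABDs; its default is now derived from ABD_MAX_ORDER instead of the kernel macro. The abd_alloc_chunks() hunk that follows clamps the tunable to the same limit before requesting higher-order pages. A standalone sketch of that clamp, using the names from the diff (the helper itself is hypothetical and not part of the commit):

/*
 * Never request an order the kernel cannot satisfy, regardless of how
 * zfs_abd_scatter_max_order has been tuned (mirrors the MIN() below).
 */
static inline int
abd_effective_max_order(unsigned int requested)
{
	return (MIN(requested, ABD_MAX_ORDER - 1));
}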
@@ -271,7 +279,7 @@ abd_alloc_chunks(abd_t *abd, size_t size)
 	struct page *page, *tmp_page = NULL;
 	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
 	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
-	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
+	int max_order = MIN(zfs_abd_scatter_max_order, ABD_MAX_ORDER - 1);
 	int nr_pages = abd_chunkcnt_for_bytes(size);
 	int chunks = 0, zones = 0;
 	size_t remaining_size;
@@ -728,7 +736,7 @@ abd_kstats_update(kstat_t *ksp, int rw)
 	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
 	as->abdstat_scatter_chunk_waste.value.ui64 =
 	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
-	for (int i = 0; i < MAX_ORDER; i++) {
+	for (int i = 0; i < ABD_MAX_ORDER; i++) {
 		as->abdstat_scatter_orders[i].value.ui64 =
 		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
 	}
@@ -757,7 +765,7 @@ abd_init(void)
 	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
 	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
 	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
-	for (i = 0; i < MAX_ORDER; i++)
+	for (i = 0; i < ABD_MAX_ORDER; i++)
 		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
 	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
 	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
@@ -767,7 +775,7 @@ abd_init(void)
 	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
 	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
 	if (abd_ksp != NULL) {
-		for (i = 0; i < MAX_ORDER; i++) {
+		for (i = 0; i < ABD_MAX_ORDER; i++) {
 			snprintf(abd_stats.abdstat_scatter_orders[i].name,
 			    KSTAT_STRLEN, "scatter_order_%d", i);
 			abd_stats.abdstat_scatter_orders[i].data_type =
@@ -797,7 +805,7 @@ abd_fini(void)
 	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
 	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
 	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
-	for (int i = 0; i < MAX_ORDER; i++)
+	for (int i = 0; i < ABD_MAX_ORDER; i++)
 		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
 	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
 	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
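
A follow-up note on observability: the per-order counters sized by ABD_MAX_ORDER are published through the abdstats kstat created in abd_init() above, as entries scatter_order_0 through scatter_order_<ABD_MAX_ORDER - 1> (typically readable from /proc/spl/kstat/zfs/abdstats), so on a 6.8 kernel the set of entries now follows MAX_PAGE_ORDER.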