Avoid small buffer copying on write
It is wrong for arc_write_ready() to use zfs_abd_scatter_enabled to decide whether to reallocate/copy the buffer, because the answer is OS-specific and depends on the buffer size. Instead, use abd_size_alloc_linear(), which has been moved into a public header. Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Reviewed-by: Brian Atkinson <batkinson@lanl.gov> Signed-off-by: Alexander Motin <mav@FreeBSD.org> Closes #12425
This commit is contained in:
parent b72611f0f6
commit 7eebcd2be6
@@ -91,6 +91,7 @@ abd_t *abd_alloc_linear(size_t, boolean_t);
 abd_t *abd_alloc_gang(void);
 abd_t *abd_alloc_for_io(size_t, boolean_t);
 abd_t *abd_alloc_sametype(abd_t *, size_t);
+boolean_t abd_size_alloc_linear(size_t);
 void abd_gang_add(abd_t *, abd_t *, boolean_t);
 void abd_free(abd_t *);
 abd_t *abd_get_offset(abd_t *, size_t);
@@ -68,7 +68,6 @@ abd_t *abd_get_offset_scatter(abd_t *, abd_t *, size_t, size_t);
 void abd_free_struct_impl(abd_t *);
 void abd_alloc_chunks(abd_t *, size_t);
 void abd_free_chunks(abd_t *);
-boolean_t abd_size_alloc_linear(size_t);
 void abd_update_scatter_stats(abd_t *, abd_stats_op_t);
 void abd_update_linear_stats(abd_t *, abd_stats_op_t);
 void abd_verify_scatter(abd_t *);
@@ -132,7 +132,7 @@ abd_scatter_chunkcnt(abd_t *abd)
boolean_t
|
boolean_t
|
||||||
abd_size_alloc_linear(size_t size)
|
abd_size_alloc_linear(size_t size)
|
||||||
{
|
{
|
||||||
return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
|
return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
|
|
@@ -632,7 +632,7 @@ abd_alloc_zero_scatter(void)
boolean_t
|
boolean_t
|
||||||
abd_size_alloc_linear(size_t size)
|
abd_size_alloc_linear(size_t size)
|
||||||
{
|
{
|
||||||
return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
|
return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
|
|
@@ -181,7 +181,7 @@ abd_free_struct(abd_t *abd)
 abd_t *
 abd_alloc(size_t size, boolean_t is_metadata)
 {
-	if (!zfs_abd_scatter_enabled || abd_size_alloc_linear(size))
+	if (abd_size_alloc_linear(size))
 		return (abd_alloc_linear(size, is_metadata));
 
 	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
|
@@ -6872,7 +6872,8 @@ arc_write_ready(zio_t *zio)
 		ASSERT(ARC_BUF_COMPRESSED(buf));
 		arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT|ARC_HDR_ALLOC_RDATA);
 		abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
-	} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
+	} else if (!abd_size_alloc_linear(arc_buf_size(buf)) ||
+	    !arc_can_share(hdr, buf)) {
 		/*
 		 * Ideally, we would always copy the io_abd into b_pabd, but the
 		 * user may have disabled compressed ARC, thus we must check the
Loading…
Reference in New Issue