Gang ABD Type
Adding the gang ABD type, which allows for linear and scatter ABDs to be
chained together into a single ABD. This can be used to avoid doing memory
copies to/from ABDs. An example of this can be found in vdev_queue.c in the
vdev_queue_aggregate() function.

Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Co-authored-by: Brian <bwa@clemson.edu>
Co-authored-by: Mark Maybee <mmaybee@cray.com>
Signed-off-by: Brian Atkinson <batkinson@lanl.gov>
Closes #10069
commit fb822260b1 (parent 501a1511ae)
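As a quick orientation before the diff, here is a minimal usage sketch of the
API this commit introduces. It is illustrative only: do_io() stands in for any
consumer that accepts an abd_t, and error handling is omitted.

    #include <sys/abd.h>

    extern void do_io(abd_t *);    /* hypothetical consumer */

    /*
     * Chain two existing ABDs into one logical ABD without copying.
     * abd_alloc_gang_abd(), abd_gang_add(), and abd_free() are the
     * interfaces added or extended by this commit.
     */
    static void
    gang_abd_example(abd_t *first, abd_t *second)
    {
        abd_t *gang = abd_alloc_gang_abd();

        /*
         * B_FALSE: the gang does not take ownership, so the caller
         * still frees the children. B_TRUE would set ABD_FLAG_GANG_FREE
         * and let abd_free(gang) reclaim them.
         */
        abd_gang_add(gang, first, B_FALSE);
        abd_gang_add(gang, second, B_FALSE);

        /* gang now presents first->abd_size + second->abd_size bytes. */
        do_io(gang);

        abd_free(gang);    /* frees only the gang head in this case */
    }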
include/sys/abd.h

@@ -42,6 +42,7 @@ typedef int abd_iter_func_t(void *buf, size_t len, void *private);
 typedef int abd_iter_func2_t(void *bufa, void *bufb, size_t len, void *private);
 
 extern int zfs_abd_scatter_enabled;
+extern abd_t *abd_zero_scatter;
 
 /*
  * Allocations and deallocations
@@ -49,13 +50,16 @@ extern int zfs_abd_scatter_enabled;
 
 abd_t *abd_alloc(size_t, boolean_t);
 abd_t *abd_alloc_linear(size_t, boolean_t);
+abd_t *abd_alloc_gang_abd(void);
 abd_t *abd_alloc_for_io(size_t, boolean_t);
 abd_t *abd_alloc_sametype(abd_t *, size_t);
+void abd_gang_add(abd_t *, abd_t *, boolean_t);
 void abd_free(abd_t *);
+void abd_put(abd_t *);
 abd_t *abd_get_offset(abd_t *, size_t);
 abd_t *abd_get_offset_size(abd_t *, size_t, size_t);
+abd_t *abd_get_zeros(size_t);
 abd_t *abd_get_from_buf(void *, size_t);
-void abd_put(abd_t *);
 
 /*
  * Conversion to and from a normal buffer
@@ -132,6 +136,7 @@ abd_zero(abd_t *abd, size_t size)
  * ABD type check functions
  */
 boolean_t abd_is_linear(abd_t *);
+boolean_t abd_is_gang(abd_t *);
 boolean_t abd_is_linear_page(abd_t *);
 
 /*
@@ -146,8 +151,7 @@ void abd_fini(void);
  * Linux ABD bio functions
  */
 #if defined(__linux__) && defined(_KERNEL)
-unsigned int abd_scatter_bio_map_off(struct bio *, abd_t *, unsigned int,
-    size_t);
+unsigned int abd_bio_map_off(struct bio *, abd_t *, unsigned int, size_t);
 unsigned long abd_nr_pages_off(abd_t *, unsigned int, size_t);
 #endif
 
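A note on the ownership pairings implied by these declarations (summarized
from the function comments later in this commit, not stated in the header
itself): the allocating constructors pair with abd_free(), while the
reference-taking constructors pair with abd_put().

    abd_t *g = abd_alloc_gang_abd();   /* owner: release with abd_free(g) */
    abd_t *z = abd_get_zeros(4096);    /* reference: release with abd_put(z) */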
include/sys/abd_impl.h

@@ -39,6 +39,9 @@ typedef enum abd_flags {
 	ABD_FLAG_MULTI_ZONE  = 1 << 3,	/* pages split over memory zones */
 	ABD_FLAG_MULTI_CHUNK = 1 << 4,	/* pages split over multiple chunks */
 	ABD_FLAG_LINEAR_PAGE = 1 << 5,	/* linear but allocd from page */
+	ABD_FLAG_GANG        = 1 << 6,	/* mult ABDs chained together */
+	ABD_FLAG_GANG_FREE   = 1 << 7,	/* gang ABD is responsible for mem */
+	ABD_FLAG_ZEROS       = 1 << 8,	/* ABD for zero-filled buffer */
 } abd_flags_t;
 
 typedef enum abd_stats_op {
@@ -49,8 +52,10 @@ typedef enum abd_stats_op {
 struct abd {
 	abd_flags_t	abd_flags;
 	uint_t		abd_size;	/* excludes scattered abd_offset */
+	list_node_t	abd_gang_link;
 	struct abd	*abd_parent;
 	zfs_refcount_t	abd_children;
+	kmutex_t	abd_mtx;
 	union {
 		struct abd_scatter {
 			uint_t		abd_offset;
@@ -66,6 +71,9 @@ struct abd {
 			void		*abd_buf;
 			struct scatterlist *abd_sgl; /* for LINEAR_PAGE */
 		} abd_linear;
+		struct abd_gang {
+			list_t abd_gang_chain;
+		} abd_gang;
 	} abd_u;
 };
 
@@ -84,6 +92,8 @@ struct abd_iter {
 	struct scatterlist *iter_sg;	/* current sg */
 };
 
+abd_t *abd_gang_get_offset(abd_t *, size_t *);
+
 /*
  * OS specific functions
  */
@@ -116,6 +126,7 @@ void abd_iter_unmap(struct abd_iter *);
 
 #define	ABD_SCATTER(abd)	(abd->abd_u.abd_scatter)
 #define	ABD_LINEAR_BUF(abd)	(abd->abd_u.abd_linear.abd_buf)
+#define	ABD_GANG(abd)		(abd->abd_u.abd_gang)
 
 #if defined(_KERNEL)
 #if defined(__FreeBSD__)
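Why struct abd grows abd_mtx and abd_gang_link: a child ABD records its
membership on a gang's chain through abd_gang_link, and list_link_active()
on that link is how abd_gang_add() (later in this diff) detects that an ABD
already belongs to some gang; the new abd_mtx makes that check-and-insert
atomic. A sketch of the pattern, condensed from abd_gang_add() below:

    mutex_enter(&cabd->abd_mtx);
    if (list_link_active(&cabd->abd_gang_link)) {
        /* already on another gang's chain: link a fresh reference instead */
        child = abd_get_offset(cabd, 0);
        child->abd_flags |= ABD_FLAG_GANG_FREE;
    } else {
        child = cabd;
    }
    list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child);
    mutex_exit(&cabd->abd_mtx);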
module/os/freebsd/zfs/abd_os.c

@@ -90,6 +90,15 @@ SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_chunk_size, CTLFLAG_RDTUN,
 kmem_cache_t *abd_chunk_cache;
 static kstat_t *abd_ksp;
 
+/*
+ * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
+ * just a single zero'd sized zfs_abd_chunk_size buffer. This
+ * allows us to conserve memory by only using a single zero buffer
+ * for the scatter chunks.
+ */
+abd_t *abd_zero_scatter = NULL;
+static char *abd_zero_buf = NULL;
+
 static void
 abd_free_chunk(void *c)
 {
@@ -193,6 +202,8 @@ abd_alloc_struct(size_t size)
 	    abd_u.abd_scatter.abd_chunks[chunkcnt]);
 	abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
 	ASSERT3P(abd, !=, NULL);
+	list_link_init(&abd->abd_gang_link);
+	mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
 	ABDSTAT_INCR(abdstat_struct_size, abd_size);
 
 	return (abd);
@@ -203,10 +214,53 @@ abd_free_struct(abd_t *abd)
 {
 	size_t chunkcnt = abd_is_linear(abd) ? 0 : abd_scatter_chunkcnt(abd);
 	int size = offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]);
+	mutex_destroy(&abd->abd_mtx);
+	ASSERT(!list_link_active(&abd->abd_gang_link));
 	kmem_free(abd, size);
 	ABDSTAT_INCR(abdstat_struct_size, -size);
 }
 
+/*
+ * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where
+ * each chunk in the scatterlist will be set to abd_zero_buf.
+ */
+static void
+abd_alloc_zero_scatter(void)
+{
+	size_t n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+	abd_zero_buf = kmem_zalloc(zfs_abd_chunk_size, KM_SLEEP);
+	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
+
+	abd_zero_scatter->abd_flags = ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
+	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
+	abd_zero_scatter->abd_parent = NULL;
+	zfs_refcount_create(&abd_zero_scatter->abd_children);
+
+	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
+	ABD_SCATTER(abd_zero_scatter).abd_chunk_size =
+	    zfs_abd_chunk_size;
+
+	for (int i = 0; i < n; i++) {
+		ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
+		    abd_zero_buf;
+	}
+
+	ABDSTAT_BUMP(abdstat_scatter_cnt);
+	ABDSTAT_INCR(abdstat_scatter_data_size, zfs_abd_chunk_size);
+}
+
+static void
+abd_free_zero_scatter(void)
+{
+	zfs_refcount_destroy(&abd_zero_scatter->abd_children);
+	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
+	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)zfs_abd_chunk_size);
+
+	abd_free_struct(abd_zero_scatter);
+	abd_zero_scatter = NULL;
+	kmem_free(abd_zero_buf, zfs_abd_chunk_size);
+}
+
 void
 abd_init(void)
 {
@@ -219,11 +273,15 @@ abd_init(void)
 		abd_ksp->ks_data = &abd_stats;
 		kstat_install(abd_ksp);
 	}
+
+	abd_alloc_zero_scatter();
 }
 
 void
 abd_fini(void)
 {
+	abd_free_zero_scatter();
+
 	if (abd_ksp != NULL) {
 		kstat_delete(abd_ksp);
 		abd_ksp = NULL;
@@ -271,12 +329,13 @@ abd_alloc_scatter_offset_chunkcnt(size_t chunkcnt)
 	    abd_u.abd_scatter.abd_chunks[chunkcnt]);
 	abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
 	ASSERT3P(abd, !=, NULL);
+	list_link_init(&abd->abd_gang_link);
+	mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
 	ABDSTAT_INCR(abdstat_struct_size, abd_size);
 
 	return (abd);
 }
 
-
 abd_t *
 abd_get_offset_scatter(abd_t *sabd, size_t off)
 {
@@ -332,6 +391,7 @@ abd_iter_scatter_chunk_index(struct abd_iter *aiter)
 void
 abd_iter_init(struct abd_iter *aiter, abd_t *abd)
 {
+	ASSERT(!abd_is_gang(abd));
 	abd_verify(abd);
 	aiter->iter_abd = abd;
 	aiter->iter_pos = 0;
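The FreeBSD zero-scatter ABD above is a memory-aliasing trick: every chunk
pointer of a SPA_MAXBLOCKSIZE scatter ABD points at the same zeroed
zfs_abd_chunk_size buffer. A standalone illustration of the idea (plain C,
not ZFS code):

    #include <stdlib.h>

    #define CHUNK   4096
    #define NCHUNKS 32

    int
    main(void)
    {
        char *zero_buf = calloc(1, CHUNK);  /* one real zeroed buffer */
        const char *chunks[NCHUNKS];

        for (int i = 0; i < NCHUNKS; i++)
            chunks[i] = zero_buf;           /* every chunk aliases it */

        /* 128 KiB of logical zeros backed by 4 KiB of real memory. */
        (void) chunks;
        free(zero_buf);
        return (0);
    }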
module/os/linux/zfs/abd_os.c

@@ -24,7 +24,7 @@
  */
 
 /*
- * See abd.c for an general overview of the arc buffered data (ABD).
+ * See abd.c for a general overview of the arc buffered data (ABD).
  *
  * Linear buffers act exactly like normal buffers and are always mapped into the
  * kernel's virtual memory space, while scattered ABD data chunks are allocated
@@ -48,7 +48,7 @@
  *
  * If we are not using HIGHMEM, scattered buffers which have only one chunk
  * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space. See abd_alloc_chunks() for details.
 */
 
 #include <sys/abd_impl.h>
@@ -160,6 +160,13 @@ unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
  */
 int zfs_abd_scatter_min_size = 512 * 3;
 
+/*
+ * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
+ * just a single zero'd page. This allows us to conserve memory by
+ * only using a single zero page for the scatterlist.
+ */
+abd_t *abd_zero_scatter = NULL;
+
 static kmem_cache_t *abd_cache = NULL;
 static kstat_t *abd_ksp;
 
@@ -178,6 +185,8 @@ abd_alloc_struct(size_t size)
 	 */
 	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
 	ASSERT3P(abd, !=, NULL);
+	list_link_init(&abd->abd_gang_link);
+	mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
 	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));
 
 	return (abd);
@@ -186,6 +195,8 @@ abd_alloc_struct(size_t size)
 void
 abd_free_struct(abd_t *abd)
 {
+	mutex_destroy(&abd->abd_mtx);
+	ASSERT(!list_link_active(&abd->abd_gang_link));
 	kmem_cache_free(abd_cache, abd);
 	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
 }
@@ -426,14 +437,59 @@ abd_free_chunks(abd_t *abd)
 	abd_free_sg_table(abd);
 }
 
+/*
+ * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
+ * the scatterlist will be set to ZERO_PAGE(0). ZERO_PAGE(0) returns
+ * a global shared page that is always zero'd out.
+ */
+static void
+abd_alloc_zero_scatter(void)
+{
+	struct scatterlist *sg = NULL;
+	struct sg_table table;
+	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
+	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+	int i = 0;
+
+	while (sg_alloc_table(&table, nr_pages, gfp)) {
+		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
+		schedule_timeout_interruptible(1);
+	}
+	ASSERT3U(table.nents, ==, nr_pages);
+
+	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
+	abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
+	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
+	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
+	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
+	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
+	abd_zero_scatter->abd_parent = NULL;
+	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
+	zfs_refcount_create(&abd_zero_scatter->abd_children);
+
+	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
+		sg_set_page(sg, ZERO_PAGE(0), PAGESIZE, 0);
+	}
+
+	ABDSTAT_BUMP(abdstat_scatter_cnt);
+	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
+	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
+}
+
 #else /* _KERNEL */
 
+struct page;
+
+/*
+ * In user space, abd_zero_page will be an allocated zero'd PAGESIZE
+ * buffer, which is assigned to set each of the pages of abd_zero_scatter.
+ */
+static struct page *abd_zero_page = NULL;
+
 #ifndef PAGE_SHIFT
 #define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
 #endif
 
-struct page;
-
 #define	zfs_kmap_atomic(chunk, km)	((void *)chunk)
 #define	zfs_kunmap_atomic(addr, km)	do { (void)(addr); } while (0)
 #define	local_irq_save(flags)		do { (void)(flags); } while (0)
@@ -527,6 +583,37 @@ abd_free_chunks(abd_t *abd)
 	abd_free_sg_table(abd);
 }
 
+static void
+abd_alloc_zero_scatter(void)
+{
+	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
+	struct scatterlist *sg;
+	int i;
+
+	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
+	memset(abd_zero_page, 0, PAGESIZE);
+	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
+	abd_zero_scatter->abd_flags = ABD_FLAG_OWNER;
+	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
+	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
+	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
+	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
+	abd_zero_scatter->abd_parent = NULL;
+	zfs_refcount_create(&abd_zero_scatter->abd_children);
+	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
+	    sizeof (struct scatterlist), KM_SLEEP);
+
+	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);
+
+	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
+		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
+	}
+
+	ABDSTAT_BUMP(abdstat_scatter_cnt);
+	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
+	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
+}
+
 #endif /* _KERNEL */
 
 boolean_t
@@ -582,6 +669,22 @@ abd_verify_scatter(abd_t *abd)
 	}
 }
 
+static void
+abd_free_zero_scatter(void)
+{
+	zfs_refcount_destroy(&abd_zero_scatter->abd_children);
+	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
+	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
+	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);
+
+	abd_free_sg_table(abd_zero_scatter);
+	abd_free_struct(abd_zero_scatter);
+	abd_zero_scatter = NULL;
+#if !defined(_KERNEL)
+	umem_free(abd_zero_page, PAGESIZE);
+#endif /* _KERNEL */
+}
+
 void
 abd_init(void)
 {
@@ -602,11 +705,15 @@ abd_init(void)
 		abd_ksp->ks_data = &abd_stats;
 		kstat_install(abd_ksp);
 	}
+
+	abd_alloc_zero_scatter();
 }
 
 void
 abd_fini(void)
 {
+	abd_free_zero_scatter();
+
 	if (abd_ksp != NULL) {
 		kstat_delete(abd_ksp);
 		abd_ksp = NULL;
@@ -692,6 +799,7 @@ abd_get_offset_scatter(abd_t *sabd, size_t off)
 void
 abd_iter_init(struct abd_iter *aiter, abd_t *abd)
 {
+	ASSERT(!abd_is_gang(abd));
 	abd_verify(abd);
 	aiter->iter_abd = abd;
 	aiter->iter_mapaddr = NULL;
@@ -813,6 +921,10 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
 {
 	unsigned long pos;
 
+	while (abd_is_gang(abd))
+		abd = abd_gang_get_offset(abd, &off);
+
+	ASSERT(!abd_is_gang(abd));
 	if (abd_is_linear(abd))
 		pos = (unsigned long)abd_to_buf(abd) + off;
 	else
@@ -822,20 +934,88 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
 	    (pos >> PAGE_SHIFT);
 }
 
+static unsigned int
+bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
+{
+	unsigned int offset, size, i;
+	struct page *page;
+
+	offset = offset_in_page(buf_ptr);
+	for (i = 0; i < bio->bi_max_vecs; i++) {
+		size = PAGE_SIZE - offset;
+
+		if (bio_size <= 0)
+			break;
+
+		if (size > bio_size)
+			size = bio_size;
+
+		if (is_vmalloc_addr(buf_ptr))
+			page = vmalloc_to_page(buf_ptr);
+		else
+			page = virt_to_page(buf_ptr);
+
+		/*
+		 * Some network related block device uses tcp_sendpage, which
+		 * doesn't behave well when using 0-count page, this is a
+		 * safety net to catch them.
+		 */
+		ASSERT3S(page_count(page), >, 0);
+
+		if (bio_add_page(bio, page, size, offset) != size)
+			break;
+
+		buf_ptr += size;
+		bio_size -= size;
+		offset = 0;
+	}
+
+	return (bio_size);
+}
+
 /*
- * bio_map for scatter ABD.
+ * bio_map for gang ABD.
+ */
+static unsigned int
+abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
+    unsigned int io_size, size_t off)
+{
+	ASSERT(abd_is_gang(abd));
+
+	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
+	    cabd != NULL;
+	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+		ASSERT3U(off, <, cabd->abd_size);
+		int size = MIN(io_size, cabd->abd_size - off);
+		int remainder = abd_bio_map_off(bio, cabd, size, off);
+		io_size -= (size - remainder);
+		if (io_size == 0 || remainder > 0)
+			return (io_size);
+		off = 0;
+	}
+	ASSERT0(io_size);
+	return (io_size);
+}
+
+/*
+ * bio_map for ABD.
  * @off is the offset in @abd
  * Remaining IO size is returned
  */
 unsigned int
-abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
+abd_bio_map_off(struct bio *bio, abd_t *abd,
     unsigned int io_size, size_t off)
 {
 	int i;
 	struct abd_iter aiter;
 
-	ASSERT(!abd_is_linear(abd));
 	ASSERT3U(io_size, <=, abd->abd_size - off);
+	if (abd_is_linear(abd))
+		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));
+
+	ASSERT(!abd_is_linear(abd));
+	if (abd_is_gang(abd))
+		return (abd_gang_bio_map_off(bio, abd, io_size, off));
+
 	abd_iter_init(&aiter, abd);
 	abd_iter_advance(&aiter, off);
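The gang-aware paths above (abd_nr_pages_off(), abd_gang_bio_map_off()) both
lean on abd_gang_get_offset(), defined later in module/zfs/abd.c, to translate
a gang-relative offset into a child ABD plus a child-relative offset. A
self-contained sketch of that translation:

    #include <stddef.h>

    struct child { size_t size; };

    /*
     * Walk the chain, consuming child sizes until the offset lands
     * inside a child; *off is rewritten to be child-relative.
     * Example: children {4096, 8192}, *off = 6144 -> returns index 1
     * with *off = 2048.
     */
    static int
    gang_offset_lookup(const struct child *c, int n, size_t *off)
    {
        for (int i = 0; i < n; i++) {
            if (*off >= c[i].size)
                *off -= c[i].size;  /* skip this child entirely */
            else
                return (i);         /* offset falls in this child */
        }
        return (-1);    /* past the end; the ZFS version VERIFYs instead */
    }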
module/os/linux/zfs/vdev_disk.c

@@ -396,54 +396,6 @@ BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
 	rc = vdev_disk_dio_put(dr);
 }
 
-static unsigned int
-bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
-{
-	unsigned int offset, size, i;
-	struct page *page;
-
-	offset = offset_in_page(bio_ptr);
-	for (i = 0; i < bio->bi_max_vecs; i++) {
-		size = PAGE_SIZE - offset;
-
-		if (bio_size <= 0)
-			break;
-
-		if (size > bio_size)
-			size = bio_size;
-
-		if (is_vmalloc_addr(bio_ptr))
-			page = vmalloc_to_page(bio_ptr);
-		else
-			page = virt_to_page(bio_ptr);
-
-		/*
-		 * Some network related block device uses tcp_sendpage, which
-		 * doesn't behave well when using 0-count page, this is a
-		 * safety net to catch them.
-		 */
-		ASSERT3S(page_count(page), >, 0);
-
-		if (bio_add_page(bio, page, size, offset) != size)
-			break;
-
-		bio_ptr += size;
-		bio_size -= size;
-		offset = 0;
-	}
-
-	return (bio_size);
-}
-
-static unsigned int
-bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off)
-{
-	if (abd_is_linear(abd))
-		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size));
-
-	return (abd_scatter_bio_map_off(bio, abd, size, off));
-}
-
 static inline void
 vdev_submit_bio_impl(struct bio *bio)
 {
@@ -603,7 +555,7 @@ retry:
 		bio_set_op_attrs(dr->dr_bio[i], rw, flags);
 
 		/* Remaining size is returned to become the new size */
-		bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
+		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
 		    bio_size, abd_offset);
 
 		/* Advance in buffer and construct another bio if needed */
module/zfs/abd.c (342 changed lines)

@@ -88,6 +88,10 @@
  * function which progressively accesses the whole ABD, use the abd_iterate_*
  * functions.
  *
+ * As an additional feature, linear and scatter ABD's can be stitched together
+ * by using the gang ABD type (abd_alloc_gang_abd()). This allows for
+ * multiple ABDs to be viewed as a singular ABD.
+ *
  * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
  * B_FALSE.
  */
@@ -114,6 +118,13 @@ abd_is_linear_page(abd_t *abd)
 	    B_TRUE : B_FALSE);
 }
 
+boolean_t
+abd_is_gang(abd_t *abd)
+{
+	return ((abd->abd_flags & ABD_FLAG_GANG) != 0 ? B_TRUE :
+	    B_FALSE);
+}
+
 void
 abd_verify(abd_t *abd)
 {
@@ -121,11 +132,18 @@ abd_verify(abd_t *abd)
 	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
 	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
 	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
-	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE));
+	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
+	    ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS));
 	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
 	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
 	if (abd_is_linear(abd)) {
 		ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
+	} else if (abd_is_gang(abd)) {
+		for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
+		    cabd != NULL;
+		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+			abd_verify(cabd);
+		}
 	} else {
 		abd_verify_scatter(abd);
 	}
@@ -177,6 +195,22 @@ abd_free_scatter(abd_t *abd)
 	abd_free_struct(abd);
 }
 
+static void
+abd_put_gang_abd(abd_t *abd)
+{
+	ASSERT(abd_is_gang(abd));
+	abd_t *cabd;
+
+	while ((cabd = list_remove_head(&ABD_GANG(abd).abd_gang_chain))
+	    != NULL) {
+		ASSERT0(cabd->abd_flags & ABD_FLAG_GANG_FREE);
+		abd->abd_size -= cabd->abd_size;
+		abd_put(cabd);
+	}
+	ASSERT0(abd->abd_size);
+	list_destroy(&ABD_GANG(abd).abd_gang_chain);
+}
+
 /*
  * Free an ABD allocated from abd_get_offset() or abd_get_from_buf(). Will not
  * free the underlying scatterlist or buffer.
@@ -195,6 +229,9 @@ abd_put(abd_t *abd)
 		    abd->abd_size, abd);
 	}
 
+	if (abd_is_gang(abd))
+		abd_put_gang_abd(abd);
+
 	zfs_refcount_destroy(&abd->abd_children);
 	abd_free_struct(abd);
 }
@@ -249,9 +286,31 @@ abd_free_linear(abd_t *abd)
 	abd_free_struct(abd);
 }
 
+static void
+abd_free_gang_abd(abd_t *abd)
+{
+	ASSERT(abd_is_gang(abd));
+	abd_t *cabd;
+
+	while ((cabd = list_remove_head(&ABD_GANG(abd).abd_gang_chain))
+	    != NULL) {
+		abd->abd_size -= cabd->abd_size;
+		if (cabd->abd_flags & ABD_FLAG_GANG_FREE) {
+			if (cabd->abd_flags & ABD_FLAG_OWNER)
+				abd_free(cabd);
+			else
+				abd_put(cabd);
+		}
+	}
+	ASSERT0(abd->abd_size);
+	list_destroy(&ABD_GANG(abd).abd_gang_chain);
+	zfs_refcount_destroy(&abd->abd_children);
+	abd_free_struct(abd);
+}
+
 /*
- * Free an ABD. Only use this on ABDs allocated with abd_alloc() or
- * abd_alloc_linear().
+ * Free an ABD. Only use this on ABDs allocated with abd_alloc(),
+ * abd_alloc_linear(), or abd_alloc_gang_abd().
  */
 void
 abd_free(abd_t *abd)
@@ -264,6 +323,8 @@ abd_free(abd_t *abd)
 	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
 	if (abd_is_linear(abd))
 		abd_free_linear(abd);
+	else if (abd_is_gang(abd))
+		abd_free_gang_abd(abd);
 	else
 		abd_free_scatter(abd);
 }
@@ -284,6 +345,109 @@ abd_alloc_sametype(abd_t *sabd, size_t size)
 	}
 }
 
+/*
+ * Create gang ABD that will be the head of a list of ABD's. This is used
+ * to "chain" scatter/gather lists together when constructing aggregated
+ * IO's. To free this abd, abd_free() must be called.
+ */
+abd_t *
+abd_alloc_gang_abd(void)
+{
+	abd_t *abd;
+
+	abd = abd_alloc_struct(0);
+	abd->abd_flags = ABD_FLAG_GANG | ABD_FLAG_OWNER;
+	abd->abd_size = 0;
+	abd->abd_parent = NULL;
+	list_create(&ABD_GANG(abd).abd_gang_chain,
+	    sizeof (abd_t), offsetof(abd_t, abd_gang_link));
+	zfs_refcount_create(&abd->abd_children);
+	return (abd);
+}
+
+/*
+ * Add a child ABD to a gang ABD's chained list.
+ */
+void
+abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
+{
+	ASSERT(abd_is_gang(pabd));
+	abd_t *child_abd = NULL;
+
+	/*
+	 * In order to verify that an ABD is not already part of
+	 * another gang ABD, we must lock the child ABD's abd_mtx
+	 * to check its abd_gang_link status. We unlock the abd_mtx
+	 * only after it has been added to a gang ABD, which
+	 * will update the abd_gang_link's status. See comment below
+	 * for how an ABD can be in multiple gang ABD's simultaneously.
+	 */
+	mutex_enter(&cabd->abd_mtx);
+	if (list_link_active(&cabd->abd_gang_link)) {
+		/*
+		 * If the child ABD is already part of another
+		 * gang ABD then we must allocate a new
+		 * ABD to use a separate link. We mark the newly
+		 * allocated ABD with ABD_FLAG_GANG_FREE, before
+		 * adding it to the gang ABD's list, to make the
+		 * gang ABD aware that it is responsible to call
+		 * abd_put(). We use abd_get_offset() in order
+		 * to just allocate a new ABD but avoid copying the
		 * data over into the newly allocated ABD.
+		 *
+		 * An ABD may become part of multiple gang ABD's. For
+		 * example, when writing ditto blocks, the same ABD
+		 * is used to write 2 or 3 locations with 2 or 3
+		 * zio_t's. Each of the zio's may be aggregated with
+		 * different adjacent zio's. zio aggregation uses gang
+		 * zio's, so the single ABD can become part of multiple
+		 * gang zio's.
+		 *
+		 * The ASSERT below is to make sure that if
+		 * free_on_free is passed as B_TRUE, the ABD can
+		 * not be in multiple gang ABD's. The gang ABD
+		 * can not be responsible for cleaning up the child
+		 * ABD memory allocation if the ABD can be in
+		 * multiple gang ABD's at one time.
+		 */
+		ASSERT3B(free_on_free, ==, B_FALSE);
+		child_abd = abd_get_offset(cabd, 0);
+		child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
+	} else {
+		child_abd = cabd;
+		if (free_on_free)
+			child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
+	}
+	ASSERT3P(child_abd, !=, NULL);
+
+	list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
+	mutex_exit(&cabd->abd_mtx);
+	pabd->abd_size += child_abd->abd_size;
+}
+
+/*
+ * Locate the ABD for the supplied offset in the gang ABD.
+ * Return a new offset relative to the returned ABD.
+ */
+abd_t *
+abd_gang_get_offset(abd_t *abd, size_t *off)
+{
+	abd_t *cabd;
+
+	ASSERT(abd_is_gang(abd));
+	ASSERT3U(*off, <, abd->abd_size);
+	for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
+	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+		if (*off >= cabd->abd_size)
+			*off -= cabd->abd_size;
+		else
+			return (cabd);
+	}
+	VERIFY3P(cabd, !=, NULL);
+	return (cabd);
+}
+
 /*
  * Allocate a new ABD to point to offset off of sabd. It shares the underlying
  * buffer data with sabd. Use abd_put() to free. sabd must not be freed while
@@ -308,6 +472,21 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)
 		abd->abd_flags = ABD_FLAG_LINEAR;
 
 		ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
+	} else if (abd_is_gang(sabd)) {
+		size_t left = size;
+		abd = abd_alloc_gang_abd();
+		abd->abd_flags &= ~ABD_FLAG_OWNER;
+		for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
+		    cabd != NULL && left > 0;
+		    cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
+			int csize = MIN(left, cabd->abd_size - off);
+
+			abd_t *nabd = abd_get_offset_impl(cabd, off, csize);
+			abd_gang_add(abd, nabd, B_FALSE);
+			left -= csize;
+			off = 0;
+		}
+		ASSERT3U(left, ==, 0);
 	} else {
 		abd = abd_get_offset_scatter(sabd, off);
 	}
@@ -334,6 +513,18 @@ abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
 	return (abd_get_offset_impl(sabd, off, size));
 }
 
+/*
+ * Return a scatter ABD of size `size`. In order to free the returned
+ * ABD, abd_put() must be called.
+ */
+abd_t *
+abd_get_zeros(size_t size)
+{
+	ASSERT3P(abd_zero_scatter, !=, NULL);
+	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+	return (abd_get_offset_size(abd_zero_scatter, 0, size));
+}
+
 /*
  * Allocate a linear ABD structure for buf. You must free this with abd_put()
  * since the resulting ABD doesn't own its own buffer.
@@ -477,20 +668,69 @@ abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
 	abd_update_linear_stats(abd, ABDSTAT_INCR);
 }
 
+/*
+ * Initializes an abd_iter based on whether the abd is a gang ABD
+ * or just a single ABD.
+ */
+static inline abd_t *
+abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
+{
+	abd_t *cabd = NULL;
+
+	if (abd_is_gang(abd)) {
+		cabd = abd_gang_get_offset(abd, &off);
+		if (cabd) {
+			abd_iter_init(aiter, cabd);
+			abd_iter_advance(aiter, off);
+		}
+	} else {
+		abd_iter_init(aiter, abd);
+		abd_iter_advance(aiter, off);
+	}
+	return (cabd);
+}
+
+/*
+ * Advances an abd_iter. We have to be careful with gang ABD as
+ * advancing could mean that we are at the end of a particular ABD and
+ * must grab the ABD in the gang ABD's list.
+ */
+static inline abd_t *
+abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
+    size_t len)
+{
+	abd_iter_advance(aiter, len);
+	if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
+		ASSERT3P(cabd, !=, NULL);
+		cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
+		if (cabd) {
+			abd_iter_init(aiter, cabd);
+			abd_iter_advance(aiter, 0);
+		}
+	}
+	return (cabd);
+}
+
 int
 abd_iterate_func(abd_t *abd, size_t off, size_t size,
     abd_iter_func_t *func, void *private)
 {
 	int ret = 0;
 	struct abd_iter aiter;
+	boolean_t abd_multi;
+	abd_t *c_abd;
 
 	abd_verify(abd);
 	ASSERT3U(off + size, <=, abd->abd_size);
 
-	abd_iter_init(&aiter, abd);
-	abd_iter_advance(&aiter, off);
+	abd_multi = abd_is_gang(abd);
+	c_abd = abd_init_abd_iter(abd, &aiter, off);
 
 	while (size > 0) {
+		/* If we are at the end of the gang ABD we are done */
+		if (abd_multi && !c_abd)
+			break;
+
 		abd_iter_map(&aiter);
 
 		size_t len = MIN(aiter.iter_mapsize, size);
@@ -504,7 +744,7 @@ abd_iterate_func(abd_t *abd, size_t off, size_t size,
 			break;
 
 		size -= len;
-		abd_iter_advance(&aiter, len);
+		c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
 	}
 
 	return (ret);
@@ -611,6 +851,8 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
 {
 	int ret = 0;
 	struct abd_iter daiter, saiter;
+	boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
+	abd_t *c_dabd, *c_sabd;
 
 	abd_verify(dabd);
 	abd_verify(sabd);
@@ -618,12 +860,17 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
 	ASSERT3U(doff + size, <=, dabd->abd_size);
 	ASSERT3U(soff + size, <=, sabd->abd_size);
 
-	abd_iter_init(&daiter, dabd);
-	abd_iter_init(&saiter, sabd);
-	abd_iter_advance(&daiter, doff);
-	abd_iter_advance(&saiter, soff);
+	dabd_is_gang_abd = abd_is_gang(dabd);
+	sabd_is_gang_abd = abd_is_gang(sabd);
+	c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
+	c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
 
 	while (size > 0) {
+		/* if we are at the end of the gang ABD we are done */
+		if ((dabd_is_gang_abd && !c_dabd) ||
+		    (sabd_is_gang_abd && !c_sabd))
+			break;
+
 		abd_iter_map(&daiter);
 		abd_iter_map(&saiter);
 
@@ -642,8 +889,10 @@ abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
 			break;
 
 		size -= len;
-		abd_iter_advance(&daiter, len);
-		abd_iter_advance(&saiter, len);
+		c_dabd =
+		    abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
+		c_sabd =
+		    abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
 	}
 
 	return (ret);
@@ -704,29 +953,46 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
 	struct abd_iter daiter = {0};
 	void *caddrs[3];
 	unsigned long flags __maybe_unused = 0;
+	abd_t *c_cabds[3];
+	abd_t *c_dabd = NULL;
+	boolean_t cabds_is_gang_abd[3];
+	boolean_t dabd_is_gang_abd = B_FALSE;
 
 	ASSERT3U(parity, <=, 3);
 
-	for (i = 0; i < parity; i++)
-		abd_iter_init(&caiters[i], cabds[i]);
+	for (i = 0; i < parity; i++) {
+		cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
+		c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
+	}
 
-	if (dabd)
-		abd_iter_init(&daiter, dabd);
+	if (dabd) {
+		dabd_is_gang_abd = abd_is_gang(dabd);
+		c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
+	}
 
 	ASSERT3S(dsize, >=, 0);
 
 	abd_enter_critical(flags);
 	while (csize > 0) {
-		len = csize;
-
-		if (dabd && dsize > 0)
-			abd_iter_map(&daiter);
+		/* if we are at the end of the gang ABD we are done */
+		if (dabd_is_gang_abd && !c_dabd)
+			break;
 
 		for (i = 0; i < parity; i++) {
+			/*
+			 * If we are at the end of the gang ABD we are
+			 * done.
+			 */
+			if (cabds_is_gang_abd[i] && !c_cabds[i])
+				break;
 			abd_iter_map(&caiters[i]);
 			caddrs[i] = caiters[i].iter_mapaddr;
 		}
 
+		len = csize;
+
+		if (dabd && dsize > 0)
+			abd_iter_map(&daiter);
+
 		switch (parity) {
 		case 3:
@@ -761,12 +1027,16 @@ abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
 
 		for (i = parity-1; i >= 0; i--) {
 			abd_iter_unmap(&caiters[i]);
-			abd_iter_advance(&caiters[i], len);
+			c_cabds[i] =
+			    abd_advance_abd_iter(cabds[i], c_cabds[i],
+			    &caiters[i], len);
 		}
 
 		if (dabd && dsize > 0) {
 			abd_iter_unmap(&daiter);
-			abd_iter_advance(&daiter, dlen);
+			c_dabd =
+			    abd_advance_abd_iter(dabd, c_dabd, &daiter,
+			    dlen);
 			dsize -= dlen;
 		}
 
@@ -801,18 +1071,34 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
 	struct abd_iter xiters[3];
 	void *caddrs[3], *xaddrs[3];
 	unsigned long flags __maybe_unused = 0;
+	boolean_t cabds_is_gang_abd[3];
+	boolean_t tabds_is_gang_abd[3];
+	abd_t *c_cabds[3];
+	abd_t *c_tabds[3];
 
 	ASSERT3U(parity, <=, 3);
 
 	for (i = 0; i < parity; i++) {
-		abd_iter_init(&citers[i], cabds[i]);
-		abd_iter_init(&xiters[i], tabds[i]);
+		cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
+		tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
+		c_cabds[i] =
+		    abd_init_abd_iter(cabds[i], &citers[i], 0);
+		c_tabds[i] =
+		    abd_init_abd_iter(tabds[i], &xiters[i], 0);
 	}
 
 	abd_enter_critical(flags);
 	while (tsize > 0) {
+
 		for (i = 0; i < parity; i++) {
+			/*
+			 * If we are at the end of the gang ABD we
+			 * are done.
+			 */
+			if (cabds_is_gang_abd[i] && !c_cabds[i])
+				break;
+			if (tabds_is_gang_abd[i] && !c_tabds[i])
+				break;
 			abd_iter_map(&citers[i]);
 			abd_iter_map(&xiters[i]);
 			caddrs[i] = citers[i].iter_mapaddr;
@@ -846,8 +1132,12 @@ abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
 		for (i = parity-1; i >= 0; i--) {
 			abd_iter_unmap(&xiters[i]);
 			abd_iter_unmap(&citers[i]);
-			abd_iter_advance(&xiters[i], len);
-			abd_iter_advance(&citers[i], len);
+			c_tabds[i] =
+			    abd_advance_abd_iter(tabds[i], c_tabds[i],
+			    &xiters[i], len);
+			c_cabds[i] =
+			    abd_advance_abd_iter(cabds[i], c_cabds[i],
+			    &citers[i], len);
 		}
 
 		tsize -= len;
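One consequence of the iterator changes above is that existing consumers need
no gang-specific code: abd_iterate_func() now walks a gang's child chain
transparently via abd_init_abd_iter()/abd_advance_abd_iter(). A sketch of a
callback-based consumer, assuming the ZFS kernel environment (count_nonzero()
is hypothetical):

    static int
    count_nonzero(void *buf, size_t len, void *private)
    {
        uint64_t *count = private;
        const char *p = buf;

        for (size_t i = 0; i < len; i++) {
            if (p[i] != 0)
                (*count)++;
        }
        return (0);    /* returning nonzero would stop the iteration */
    }

    /* Works identically for linear, scatter, and gang ABDs. */
    uint64_t n = 0;
    (void) abd_iterate_func(abd, 0, abd_get_size(abd), count_nonzero, &n);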
module/zfs/vdev_queue.c

@@ -535,15 +535,6 @@ vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
 static void
 vdev_queue_agg_io_done(zio_t *aio)
 {
-	if (aio->io_type == ZIO_TYPE_READ) {
-		zio_t *pio;
-		zio_link_t *zl = NULL;
-		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
-			abd_copy_off(pio->io_abd, aio->io_abd,
-			    0, pio->io_offset - aio->io_offset, pio->io_size);
-		}
-	}
-
 	abd_free(aio->io_abd);
 }
 
@@ -556,6 +547,14 @@ vdev_queue_agg_io_done(zio_t *aio)
 #define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
 #define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
 
+/*
+ * Sufficiently adjacent io_offset's in ZIOs will be aggregated. We do this
+ * by creating a gang ABD from the adjacent ZIOs io_abd's. By using
+ * a gang ABD we avoid doing memory copies to and from the parent,
+ * child ZIOs. The gang ABD also accounts for gaps between adjacent
+ * io_offsets by simply getting the zero ABD for writes or allocating
+ * a new ABD for reads and placing them in the gang ABD as well.
+ */
 static zio_t *
 vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 {
@@ -568,6 +567,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 	boolean_t stretch = B_FALSE;
 	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
 	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
+	uint64_t next_offset;
 	abd_t *abd;
 
 	maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
@@ -695,7 +695,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 	size = IO_SPAN(first, last);
 	ASSERT3U(size, <=, maxblocksize);
 
-	abd = abd_alloc_for_io(size, B_TRUE);
+	abd = abd_alloc_gang_abd();
 	if (abd == NULL)
 		return (NULL);
 
@@ -706,32 +706,58 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 	aio->io_timestamp = first->io_timestamp;
 
 	nio = first;
+	next_offset = first->io_offset;
 	do {
 		dio = nio;
 		nio = AVL_NEXT(t, dio);
 		zio_add_child(dio, aio);
 		vdev_queue_io_remove(vq, dio);
+
+		if (dio->io_offset != next_offset) {
+			/* allocate a buffer for a read gap */
+			ASSERT3U(dio->io_type, ==, ZIO_TYPE_READ);
+			ASSERT3U(dio->io_offset, >, next_offset);
+			abd = abd_alloc_for_io(
+			    dio->io_offset - next_offset, B_TRUE);
+			abd_gang_add(aio->io_abd, abd, B_TRUE);
+		}
+		if (dio->io_abd &&
+		    (dio->io_size != abd_get_size(dio->io_abd))) {
+			/* abd size not the same as IO size */
+			ASSERT3U(abd_get_size(dio->io_abd), >, dio->io_size);
+			abd = abd_get_offset_size(dio->io_abd, 0, dio->io_size);
+			abd_gang_add(aio->io_abd, abd, B_TRUE);
+		} else {
+			if (dio->io_flags & ZIO_FLAG_NODATA) {
+				/* allocate a buffer for a write gap */
+				ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
+				ASSERT3P(dio->io_abd, ==, NULL);
+				abd_gang_add(aio->io_abd,
+				    abd_get_zeros(dio->io_size), B_TRUE);
+			} else {
+				/*
+				 * We pass B_FALSE to abd_gang_add()
+				 * because we did not allocate a new
+				 * ABD, so it is assumed the caller
+				 * will free this ABD.
+				 */
+				abd_gang_add(aio->io_abd, dio->io_abd,
+				    B_FALSE);
+			}
+		}
+		next_offset = dio->io_offset + dio->io_size;
 	} while (dio != last);
+	ASSERT3U(abd_get_size(aio->io_abd), ==, aio->io_size);
 
 	/*
 	 * We need to drop the vdev queue's lock during zio_execute() to
 	 * avoid a deadlock that we could encounter due to lock order
 	 * reversal between vq_lock and io_lock in zio_change_priority().
-	 * Use the dropped lock to do memory copy without congestion.
 	 */
 	mutex_exit(&vq->vq_lock);
 	while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
 		ASSERT3U(dio->io_type, ==, aio->io_type);
 
-		if (dio->io_flags & ZIO_FLAG_NODATA) {
-			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
-			abd_zero_off(aio->io_abd,
-			    dio->io_offset - aio->io_offset, dio->io_size);
-		} else if (dio->io_type == ZIO_TYPE_WRITE) {
-			abd_copy_off(aio->io_abd, dio->io_abd,
-			    dio->io_offset - aio->io_offset, 0, dio->io_size);
-		}
-
 		zio_vdev_io_bypass(dio);
 		zio_execute(dio);
 	}
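A worked example of the aggregation rewrite above (illustrative values):
suppose three child writes at [0K,4K), [4K,8K), and [8K,12K), where the middle
zio carries ZIO_FLAG_NODATA (a write gap). The aggregate zio's buffer becomes
a 12K gang ABD built without any copying:

    /*
     * aio->io_abd = gang {
     *     dio1->io_abd,        B_FALSE  (caller still owns it)
     *     abd_get_zeros(4096), B_TRUE   (gang frees the zero reference)
     *     dio3->io_abd,        B_FALSE
     * }
     *
     * Before this change, the same aggregation allocated a 12K buffer
     * with abd_alloc_for_io() and copied each child in with
     * abd_copy_off(); reads copied back out in vdev_queue_agg_io_done().
     */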