OpenZFS 6950 - ARC should cache compressed data
Authored by: George Wilson <george.wilson@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Tom Caputi <tcaputi@datto.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Ported by: David Quigley <david.quigley@intel.com>

This review covers the reading and writing of compressed arc headers, sharing
data between the arc_hdr_t and the arc_buf_t, and the implementation of a new
dbuf cache to keep frequently accessed data uncompressed.

I've added a new member to the l1 arc hdr called b_pdata. The b_pdata always
hangs off the arc_buf_hdr_t (if an L1 hdr is in use) and points to the
physical block for that DVA. The physical block may or may not be compressed.
If compressed arc is enabled and the block on-disk is compressed, then the
b_pdata will match the block on-disk and remain compressed in memory. If the
block on disk is not compressed, then neither is the b_pdata. Lastly, if
compressed arc is disabled, then b_pdata will always be an uncompressed
version of the on-disk block.

Typically the arc will cache only the arc_buf_hdr_t and will aggressively
evict any arc_buf_t's that are no longer referenced. This means that the arc
will primarily have compressed blocks, as the arc_buf_t's are considered
overhead and are always uncompressed. When a consumer reads a block we first
look to see if the arc_buf_hdr_t is cached. If the hdr is cached then we
allocate a new arc_buf_t and decompress the b_pdata contents into the
arc_buf_t's b_data. If the hdr already has an arc_buf_t, then we will
allocate an additional arc_buf_t and bcopy the uncompressed contents from the
first arc_buf_t to the new one.

Writing to the compressed arc requires that we first discard the b_pdata
since the physical block is about to be rewritten. The new data contents will
be passed in via an arc_buf_t (uncompressed) and during the I/O pipeline
stages we will copy the physical block contents to a newly allocated b_pdata.

When an l2arc is in use it will also take advantage of the b_pdata. Now the
l2arc will always write the contents of b_pdata to the l2arc. This means that
when compressed arc is enabled, the l2arc blocks are identical to those
stored in the main data pool. This provides a significant advantage since we
can leverage the bp's checksum when reading from the l2arc to determine if
the contents are valid. If the compressed arc is disabled, then we must first
transform the read block to look like the physical block in the main data
pool before comparing the checksum and determining whether it is valid.

OpenZFS-issue: https://www.illumos.org/issues/6950
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7fc10f0
Issue #5078
Commit: d3c2ae1c08
Parent: b8eb3c4e3c
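As a rough illustration of the read path described in the commit message, the sketch below models the rule that the first consumer of a cached header decompresses b_pdata into a new uncompressed buffer, while later consumers simply copy from an existing uncompressed buffer. The toy_* types and functions are hypothetical simplifications for illustration only; they are not the real arc_buf_hdr_t/arc_buf_t definitions or ZFS APIs.

/*
 * Illustrative sketch only -- simplified, hypothetical types, not the real
 * ZFS structures or functions.
 */
#include <stdlib.h>
#include <string.h>

typedef struct toy_buf {
    struct toy_buf *b_next;    /* next uncompressed copy */
    void *b_data;              /* uncompressed data */
} toy_buf_t;

typedef struct toy_hdr {
    void *b_pdata;             /* cached physical (on-disk) bytes */
    size_t b_psize;            /* physical (possibly compressed) size */
    size_t b_lsize;            /* logical (uncompressed) size */
    toy_buf_t *b_buf;          /* list of uncompressed copies */
} toy_hdr_t;

/*
 * Stand-in for the real decompression routine; when the block is not
 * compressed (psize == lsize) it is just a copy.
 */
static void
toy_decompress(const void *src, size_t psize, void *dst, size_t lsize)
{
    if (psize == lsize)
        memcpy(dst, src, lsize);
    /* else: run the block's compression algorithm in reverse */
}

/* Hand a consumer an uncompressed view of the block cached in 'hdr'. */
static toy_buf_t *
toy_hdr_get_buf(toy_hdr_t *hdr)
{
    toy_buf_t *buf = calloc(1, sizeof (*buf));

    buf->b_data = malloc(hdr->b_lsize);
    if (hdr->b_buf != NULL) {
        /* An uncompressed copy already exists: copy from it. */
        memcpy(buf->b_data, hdr->b_buf->b_data, hdr->b_lsize);
    } else {
        /* First consumer: decompress the cached physical block. */
        toy_decompress(hdr->b_pdata, hdr->b_psize,
            buf->b_data, hdr->b_lsize);
    }
    buf->b_next = hdr->b_buf;
    hdr->b_buf = buf;
    return (buf);
}

The diff follows.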
@@ -1297,7 +1297,7 @@ visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
         }
         if (!err)
             ASSERT3U(fill, ==, BP_GET_FILL(bp));
-        (void) arc_buf_remove_ref(buf, &buf);
+        arc_buf_destroy(buf, &buf);
     }
 
     return (err);
@@ -192,6 +192,7 @@ static const ztest_shared_opts_t ztest_opts_defaults = {
 extern uint64_t metaslab_gang_bang;
 extern uint64_t metaslab_df_alloc_threshold;
 extern int metaslab_preload_limit;
+extern boolean_t zfs_compressed_arc_enabled;
 
 static ztest_shared_opts_t *ztest_shared_opts;
 static ztest_shared_opts_t ztest_opts;
@@ -5880,6 +5881,12 @@ ztest_resume_thread(void *arg)
         if (spa_suspended(spa))
             ztest_resume(spa);
         (void) poll(NULL, 0, 100);
+
+        /*
+         * Periodically change the zfs_compressed_arc_enabled setting.
+         */
+        if (ztest_random(10) == 0)
+            zfs_compressed_arc_enabled = ztest_random(2);
     }
 
     thread_exit();
@@ -44,12 +44,24 @@ extern "C" {
  */
 #define ARC_EVICT_ALL -1ULL
 
+#define HDR_SET_LSIZE(hdr, x) do { \
+    ASSERT(IS_P2ALIGNED(x, 1U << SPA_MINBLOCKSHIFT)); \
+    (hdr)->b_lsize = ((x) >> SPA_MINBLOCKSHIFT); \
+_NOTE(CONSTCOND) } while (0)
+
+#define HDR_SET_PSIZE(hdr, x) do { \
+    ASSERT(IS_P2ALIGNED((x), 1U << SPA_MINBLOCKSHIFT)); \
+    (hdr)->b_psize = ((x) >> SPA_MINBLOCKSHIFT); \
+_NOTE(CONSTCOND) } while (0)
+
+#define HDR_GET_LSIZE(hdr) ((hdr)->b_lsize << SPA_MINBLOCKSHIFT)
+#define HDR_GET_PSIZE(hdr) ((hdr)->b_psize << SPA_MINBLOCKSHIFT)
+
 typedef struct arc_buf_hdr arc_buf_hdr_t;
 typedef struct arc_buf arc_buf_t;
 typedef struct arc_prune arc_prune_t;
 typedef void arc_done_func_t(zio_t *zio, arc_buf_t *buf, void *private);
 typedef void arc_prune_func_t(int64_t bytes, void *private);
-typedef int arc_evict_func_t(void *private);
 
 /* Shared module parameters */
 extern int zfs_arc_average_blocksize;
@@ -58,6 +70,8 @@ extern int zfs_arc_average_blocksize;
 arc_done_func_t arc_bcopy_func;
 arc_done_func_t arc_getbuf_func;
 
+extern int zfs_arc_num_sublists_per_state;
+
 /* generic arc_prune_func_t wrapper for callbacks */
 struct arc_prune {
     arc_prune_func_t *p_pfunc;
@@ -77,37 +91,54 @@ typedef enum arc_flags
     /*
      * Public flags that can be passed into the ARC by external consumers.
      */
-    ARC_FLAG_NONE = 1 << 0, /* No flags set */
-    ARC_FLAG_WAIT = 1 << 1, /* perform sync I/O */
-    ARC_FLAG_NOWAIT = 1 << 2, /* perform async I/O */
-    ARC_FLAG_PREFETCH = 1 << 3, /* I/O is a prefetch */
-    ARC_FLAG_CACHED = 1 << 4, /* I/O was in cache */
-    ARC_FLAG_L2CACHE = 1 << 5, /* cache in L2ARC */
-    ARC_FLAG_L2COMPRESS = 1 << 6, /* compress in L2ARC */
-    ARC_FLAG_PREDICTIVE_PREFETCH = 1 << 7, /* I/O from zfetch */
+    ARC_FLAG_WAIT = 1 << 0, /* perform sync I/O */
+    ARC_FLAG_NOWAIT = 1 << 1, /* perform async I/O */
+    ARC_FLAG_PREFETCH = 1 << 2, /* I/O is a prefetch */
+    ARC_FLAG_CACHED = 1 << 3, /* I/O was in cache */
+    ARC_FLAG_L2CACHE = 1 << 4, /* cache in L2ARC */
+    ARC_FLAG_PREDICTIVE_PREFETCH = 1 << 5, /* I/O from zfetch */
 
     /*
      * Private ARC flags. These flags are private ARC only flags that
      * will show up in b_flags in the arc_hdr_buf_t. These flags should
      * only be set by ARC code.
      */
-    ARC_FLAG_IN_HASH_TABLE = 1 << 8, /* buffer is hashed */
-    ARC_FLAG_IO_IN_PROGRESS = 1 << 9, /* I/O in progress */
-    ARC_FLAG_IO_ERROR = 1 << 10, /* I/O failed for buf */
-    ARC_FLAG_FREED_IN_READ = 1 << 11, /* freed during read */
-    ARC_FLAG_BUF_AVAILABLE = 1 << 12, /* block not in use */
-    ARC_FLAG_INDIRECT = 1 << 13, /* indirect block */
+    ARC_FLAG_IN_HASH_TABLE = 1 << 6, /* buffer is hashed */
+    ARC_FLAG_IO_IN_PROGRESS = 1 << 7, /* I/O in progress */
+    ARC_FLAG_IO_ERROR = 1 << 8, /* I/O failed for buf */
+    ARC_FLAG_INDIRECT = 1 << 9, /* indirect block */
     /* Indicates that block was read with ASYNC priority. */
-    ARC_FLAG_PRIO_ASYNC_READ = 1 << 14,
-    ARC_FLAG_L2_WRITING = 1 << 15, /* write in progress */
-    ARC_FLAG_L2_EVICTED = 1 << 16, /* evicted during I/O */
-    ARC_FLAG_L2_WRITE_HEAD = 1 << 17, /* head of write list */
+    ARC_FLAG_PRIO_ASYNC_READ = 1 << 10,
+    ARC_FLAG_L2_WRITING = 1 << 11, /* write in progress */
+    ARC_FLAG_L2_EVICTED = 1 << 12, /* evicted during I/O */
+    ARC_FLAG_L2_WRITE_HEAD = 1 << 13, /* head of write list */
     /* indicates that the buffer contains metadata (otherwise, data) */
-    ARC_FLAG_BUFC_METADATA = 1 << 18,
+    ARC_FLAG_BUFC_METADATA = 1 << 14,
 
     /* Flags specifying whether optional hdr struct fields are defined */
-    ARC_FLAG_HAS_L1HDR = 1 << 19,
-    ARC_FLAG_HAS_L2HDR = 1 << 20,
+    ARC_FLAG_HAS_L1HDR = 1 << 15,
+    ARC_FLAG_HAS_L2HDR = 1 << 16,
+
+    /*
+     * Indicates the arc_buf_hdr_t's b_pdata matches the on-disk data.
+     * This allows the l2arc to use the blkptr's checksum to verify
+     * the data without having to store the checksum in the hdr.
+     */
+    ARC_FLAG_COMPRESSED_ARC = 1 << 17,
+    ARC_FLAG_SHARED_DATA = 1 << 18,
+
+    /*
+     * The arc buffer's compression mode is stored in the top 7 bits of the
+     * flags field, so these dummy flags are included so that MDB can
+     * interpret the enum properly.
+     */
+    ARC_FLAG_COMPRESS_0 = 1 << 24,
+    ARC_FLAG_COMPRESS_1 = 1 << 25,
+    ARC_FLAG_COMPRESS_2 = 1 << 26,
+    ARC_FLAG_COMPRESS_3 = 1 << 27,
+    ARC_FLAG_COMPRESS_4 = 1 << 28,
+    ARC_FLAG_COMPRESS_5 = 1 << 29,
+    ARC_FLAG_COMPRESS_6 = 1 << 30
 
 } arc_flags_t;
 
@@ -116,11 +147,10 @@ struct arc_buf {
     arc_buf_t *b_next;
     kmutex_t b_evict_lock;
     void *b_data;
-    arc_evict_func_t *b_efunc;
-    void *b_private;
 };
 
 typedef enum arc_buf_contents {
+    ARC_BUFC_INVALID, /* invalid type */
     ARC_BUFC_DATA, /* buffer contains data */
     ARC_BUFC_METADATA, /* buffer contains metadata */
     ARC_BUFC_NUMTYPES
@@ -154,7 +184,7 @@ typedef struct arc_buf_info {
     arc_state_type_t abi_state_type;
     arc_buf_contents_t abi_state_contents;
     uint32_t abi_flags;
-    uint32_t abi_datacnt;
+    uint32_t abi_bufcnt;
     uint64_t abi_size;
     uint64_t abi_spa;
     uint64_t abi_access;
@@ -171,13 +201,12 @@ typedef struct arc_buf_info {
 
 void arc_space_consume(uint64_t space, arc_space_type_t type);
 void arc_space_return(uint64_t space, arc_space_type_t type);
-arc_buf_t *arc_buf_alloc(spa_t *spa, uint64_t size, void *tag,
+arc_buf_t *arc_alloc_buf(spa_t *spa, int32_t size, void *tag,
     arc_buf_contents_t type);
 arc_buf_t *arc_loan_buf(spa_t *spa, uint64_t size);
 void arc_return_buf(arc_buf_t *buf, void *tag);
 void arc_loan_inuse_buf(arc_buf_t *buf, void *tag);
-void arc_buf_add_ref(arc_buf_t *buf, void *tag);
-boolean_t arc_buf_remove_ref(arc_buf_t *buf, void *tag);
+void arc_buf_destroy(arc_buf_t *buf, void *tag);
 void arc_buf_info(arc_buf_t *buf, arc_buf_info_t *abi, int state_index);
 uint64_t arc_buf_size(arc_buf_t *buf);
 void arc_release(arc_buf_t *buf, void *tag);
@@ -185,7 +214,6 @@ int arc_released(arc_buf_t *buf);
 void arc_buf_sigsegv(int sig, siginfo_t *si, void *unused);
 void arc_buf_freeze(arc_buf_t *buf);
 void arc_buf_thaw(arc_buf_t *buf);
-boolean_t arc_buf_eviction_needed(arc_buf_t *buf);
 #ifdef ZFS_DEBUG
 int arc_referenced(arc_buf_t *buf);
 #endif
@@ -194,8 +222,7 @@ int arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
     arc_done_func_t *done, void *private, zio_priority_t priority, int flags,
     arc_flags_t *arc_flags, const zbookmark_phys_t *zb);
 zio_t *arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
-    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
-    const zio_prop_t *zp,
+    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
     arc_done_func_t *ready, arc_done_func_t *child_ready,
     arc_done_func_t *physdone, arc_done_func_t *done,
     void *private, zio_priority_t priority, int zio_flags,
@@ -205,13 +232,11 @@ arc_prune_t *arc_add_prune_callback(arc_prune_func_t *func, void *private);
 void arc_remove_prune_callback(arc_prune_t *p);
 void arc_freed(spa_t *spa, const blkptr_t *bp);
 
-void arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private);
-boolean_t arc_clear_callback(arc_buf_t *buf);
-
 void arc_flush(spa_t *spa, boolean_t retry);
 void arc_tempreserve_clear(uint64_t reserve);
 int arc_tempreserve_space(uint64_t reserve, uint64_t txg);
 
+uint64_t arc_max_bytes(void);
 void arc_init(void);
 void arc_fini(void);
 
@@ -74,7 +74,7 @@ typedef struct arc_state {
     /*
      * total amount of evictable data in this state
      */
-    uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];
+    refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
     /*
      * total amount of data in this state; this includes: evictable,
      * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
@@ -140,11 +140,13 @@ struct arc_write_callback {
  */
 typedef struct l1arc_buf_hdr {
     kmutex_t b_freeze_lock;
+    zio_cksum_t *b_freeze_cksum;
 
     arc_buf_t *b_buf;
-    uint32_t b_datacnt;
+    uint32_t b_bufcnt;
     /* for waiting on writes to complete */
     kcondvar_t b_cv;
+    uint8_t b_byteswap;
 
 
     /* protected by arc state mutex */
@@ -163,8 +165,7 @@ typedef struct l1arc_buf_hdr {
     refcount_t b_refcnt;
 
     arc_callback_t *b_acb;
-    /* temporary buffer holder for in-flight compressed data */
-    void *b_tmp_cdata;
+    void *b_pdata;
 } l1arc_buf_hdr_t;
 
 typedef struct l2arc_dev {
@@ -185,10 +186,7 @@ typedef struct l2arc_buf_hdr {
     /* protected by arc_buf_hdr mutex */
     l2arc_dev_t *b_dev; /* L2ARC device */
     uint64_t b_daddr; /* disk address, offset byte */
-    /* real alloc'd buffer size depending on b_compress applied */
     uint32_t b_hits;
-    int32_t b_asize;
-    uint8_t b_compress;
 
     list_node_t b_l2node;
 } l2arc_buf_hdr_t;
@@ -202,20 +200,37 @@ struct arc_buf_hdr {
     /* protected by hash lock */
     dva_t b_dva;
     uint64_t b_birth;
-    /*
-     * Even though this checksum is only set/verified when a buffer is in
-     * the L1 cache, it needs to be in the set of common fields because it
-     * must be preserved from the time before a buffer is written out to
-     * L2ARC until after it is read back in.
-     */
-    zio_cksum_t *b_freeze_cksum;
 
+    arc_buf_contents_t b_type;
     arc_buf_hdr_t *b_hash_next;
     arc_flags_t b_flags;
 
-    /* immutable */
-    int32_t b_size;
-    uint64_t b_spa;
+    /*
+     * This field stores the size of the data buffer after
+     * compression, and is set in the arc's zio completion handlers.
+     * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
+     *
+     * While the block pointers can store up to 32MB in their psize
+     * field, we can only store up to 32MB minus 512B. This is due
+     * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
+     * a field of zeros represents 512B in the bp). We can't use a
+     * bias of 1 since we need to reserve a psize of zero, here, to
+     * represent holes and embedded blocks.
+     *
+     * This isn't a problem in practice, since the maximum size of a
+     * buffer is limited to 16MB, so we never need to store 32MB in
+     * this field. Even in the upstream illumos code base, the
+     * maximum size of a buffer is limited to 16MB.
+     */
+    uint16_t b_psize;
+
+    /*
+     * This field stores the size of the data buffer before
+     * compression, and cannot change once set. It is in units
+     * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
+     */
+    uint16_t b_lsize; /* immutable */
+    uint64_t b_spa; /* immutable */
 
     /* L2ARC fields. Undefined when not in L2ARC. */
     l2arc_buf_hdr_t b_l2hdr;
@@ -36,6 +36,7 @@
 #include <sys/zfs_context.h>
 #include <sys/refcount.h>
 #include <sys/zrlock.h>
+#include <sys/multilist.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -228,6 +229,11 @@ typedef struct dmu_buf_impl {
      */
     avl_node_t db_link;
 
+    /*
+     * Link in dbuf_cache.
+     */
+    multilist_node_t db_cache_link;
+
     /* Data which is unique to data (leaf) blocks: */
 
     /* User callback information. */
@@ -303,8 +309,7 @@ void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
     bp_embedded_type_t etype, enum zio_compress comp,
     int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
 
-void dbuf_clear(dmu_buf_impl_t *db);
-void dbuf_evict(dmu_buf_impl_t *db);
+void dbuf_destroy(dmu_buf_impl_t *db);
 
 void dbuf_unoverride(dbuf_dirty_record_t *dr);
 void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
@@ -342,10 +347,6 @@ boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
     (dbuf_is_metadata(_db) && \
     ((_db)->db_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
 
-#define DBUF_IS_L2COMPRESSIBLE(_db) \
-    ((_db)->db_objset->os_compress != ZIO_COMPRESS_OFF || \
-    (dbuf_is_metadata(_db) && zfs_mdcomp_disable == B_FALSE))
-
 #ifdef ZFS_DEBUG
 
 /*
@@ -70,6 +70,7 @@ int64_t refcount_remove(refcount_t *rc, void *holder_tag);
 int64_t refcount_add_many(refcount_t *rc, uint64_t number, void *holder_tag);
 int64_t refcount_remove_many(refcount_t *rc, uint64_t number, void *holder_tag);
 void refcount_transfer(refcount_t *dst, refcount_t *src);
+void refcount_transfer_ownership(refcount_t *, void *, void *);
 
 void refcount_init(void);
 void refcount_fini(void);
@@ -97,6 +98,7 @@ typedef struct refcount {
     atomic_add_64(&(src)->rc_count, -__tmp); \
     atomic_add_64(&(dst)->rc_count, __tmp); \
 }
+#define refcount_transfer_ownership(rc, current_holder, new_holder)
 
 #define refcount_init()
 #define refcount_fini()
@@ -135,6 +135,8 @@ _NOTE(CONSTCOND) } while (0)
 #define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
 #define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
 
+#define SPA_COMPRESSBITS 7
+
 /*
  * All SPA data is represented by 128-bit data virtual addresses (DVAs).
  * The members of the dva_t should be considered opaque outside the SPA.
@@ -363,8 +365,10 @@ _NOTE(CONSTCOND) } while (0)
     16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
 _NOTE(CONSTCOND) } while (0)
 
-#define BP_GET_COMPRESS(bp) BF64_GET((bp)->blk_prop, 32, 7)
-#define BP_SET_COMPRESS(bp, x) BF64_SET((bp)->blk_prop, 32, 7, x)
+#define BP_GET_COMPRESS(bp) \
+    BF64_GET((bp)->blk_prop, 32, SPA_COMPRESSBITS)
+#define BP_SET_COMPRESS(bp, x) \
+    BF64_SET((bp)->blk_prop, 32, SPA_COMPRESSBITS, x)
 
 #define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)
 #define BP_SET_EMBEDDED(bp, x) BF64_SET((bp)->blk_prop, 39, 1, x)
@@ -50,9 +50,10 @@ DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
     __array(uint64_t, hdr_dva_word, 2)
     __field(uint64_t, hdr_birth)
     __field(uint32_t, hdr_flags)
-    __field(uint32_t, hdr_datacnt)
+    __field(uint32_t, hdr_bufcnt)
     __field(arc_buf_contents_t, hdr_type)
-    __field(uint64_t, hdr_size)
+    __field(uint16_t, hdr_psize)
+    __field(uint16_t, hdr_lsize)
     __field(uint64_t, hdr_spa)
     __field(arc_state_type_t, hdr_state_type)
     __field(clock_t, hdr_access)
@@ -68,8 +69,9 @@ DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
     __entry->hdr_dva_word[1] = ab->b_dva.dva_word[1];
     __entry->hdr_birth = ab->b_birth;
     __entry->hdr_flags = ab->b_flags;
-    __entry->hdr_datacnt = ab->b_l1hdr.b_datacnt;
-    __entry->hdr_size = ab->b_size;
+    __entry->hdr_bufcnt = ab->b_l1hdr.b_bufcnt;
+    __entry->hdr_psize = ab->b_psize;
+    __entry->hdr_lsize = ab->b_lsize;
     __entry->hdr_spa = ab->b_spa;
     __entry->hdr_state_type = ab->b_l1hdr.b_state->arcs_state;
     __entry->hdr_access = ab->b_l1hdr.b_arc_access;
@@ -81,13 +83,13 @@ DECLARE_EVENT_CLASS(zfs_arc_buf_hdr_class,
     __entry->hdr_refcount = ab->b_l1hdr.b_refcnt.rc_count;
     ),
     TP_printk("hdr { dva 0x%llx:0x%llx birth %llu "
-    "flags 0x%x datacnt %u type %u size %llu spa %llu "
+    "flags 0x%x bufcnt %u type %u psize %u lsize %u spa %llu "
     "state_type %u access %lu mru_hits %u mru_ghost_hits %u "
     "mfu_hits %u mfu_ghost_hits %u l2_hits %u refcount %lli }",
     __entry->hdr_dva_word[0], __entry->hdr_dva_word[1],
     __entry->hdr_birth, __entry->hdr_flags,
-    __entry->hdr_datacnt, __entry->hdr_type, __entry->hdr_size,
-    __entry->hdr_spa, __entry->hdr_state_type,
+    __entry->hdr_bufcnt, __entry->hdr_type, __entry->hdr_psize,
+    __entry->hdr_lsize, __entry->hdr_spa, __entry->hdr_state_type,
     __entry->hdr_access, __entry->hdr_mru_hits,
     __entry->hdr_mru_ghost_hits, __entry->hdr_mfu_hits,
     __entry->hdr_mfu_ghost_hits, __entry->hdr_l2_hits,
@@ -185,9 +187,10 @@ DECLARE_EVENT_CLASS(zfs_arc_miss_class,
     __array(uint64_t, hdr_dva_word, 2)
     __field(uint64_t, hdr_birth)
     __field(uint32_t, hdr_flags)
-    __field(uint32_t, hdr_datacnt)
+    __field(uint32_t, hdr_bufcnt)
     __field(arc_buf_contents_t, hdr_type)
-    __field(uint64_t, hdr_size)
+    __field(uint16_t, hdr_psize)
+    __field(uint16_t, hdr_lsize)
     __field(uint64_t, hdr_spa)
     __field(arc_state_type_t, hdr_state_type)
     __field(clock_t, hdr_access)
@@ -215,8 +218,9 @@ DECLARE_EVENT_CLASS(zfs_arc_miss_class,
     __entry->hdr_dva_word[1] = hdr->b_dva.dva_word[1];
     __entry->hdr_birth = hdr->b_birth;
     __entry->hdr_flags = hdr->b_flags;
-    __entry->hdr_datacnt = hdr->b_l1hdr.b_datacnt;
-    __entry->hdr_size = hdr->b_size;
+    __entry->hdr_bufcnt = hdr->b_l1hdr.b_bufcnt;
+    __entry->hdr_psize = hdr->b_psize;
+    __entry->hdr_lsize = hdr->b_lsize;
     __entry->hdr_spa = hdr->b_spa;
     __entry->hdr_state_type = hdr->b_l1hdr.b_state->arcs_state;
     __entry->hdr_access = hdr->b_l1hdr.b_arc_access;
@@ -246,7 +250,7 @@ DECLARE_EVENT_CLASS(zfs_arc_miss_class,
     __entry->zb_blkid = zb->zb_blkid;
     ),
     TP_printk("hdr { dva 0x%llx:0x%llx birth %llu "
-    "flags 0x%x datacnt %u size %llu spa %llu state_type %u "
+    "flags 0x%x bufcnt %u psize %u lsize %u spa %llu state_type %u "
     "access %lu mru_hits %u mru_ghost_hits %u mfu_hits %u "
     "mfu_ghost_hits %u l2_hits %u refcount %lli } "
     "bp { dva0 0x%llx:0x%llx dva1 0x%llx:0x%llx dva2 "
@@ -255,7 +259,7 @@ DECLARE_EVENT_CLASS(zfs_arc_miss_class,
     "blkid %llu }",
     __entry->hdr_dva_word[0], __entry->hdr_dva_word[1],
     __entry->hdr_birth, __entry->hdr_flags,
-    __entry->hdr_datacnt, __entry->hdr_size,
+    __entry->hdr_bufcnt, __entry->hdr_psize, __entry->hdr_lsize,
     __entry->hdr_spa, __entry->hdr_state_type, __entry->hdr_access,
     __entry->hdr_mru_hits, __entry->hdr_mru_ghost_hits,
     __entry->hdr_mfu_hits, __entry->hdr_mfu_ghost_hits,
@@ -92,6 +92,20 @@ DEFINE_EVENT(zfs_dbuf_class, name, \
     TP_ARGS(db, zio))
 DEFINE_DBUF_EVENT(zfs_blocked__read);
 
+DECLARE_EVENT_CLASS(zfs_dbuf_evict_one_class,
+    TP_PROTO(dmu_buf_impl_t *db, multilist_sublist_t *mls),
+    TP_ARGS(db, mls),
+    TP_STRUCT__entry(DBUF_TP_STRUCT_ENTRY),
+    TP_fast_assign(DBUF_TP_FAST_ASSIGN),
+    TP_printk(DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS)
+);
+
+#define DEFINE_DBUF_EVICT_ONE_EVENT(name) \
+DEFINE_EVENT(zfs_dbuf_evict_one_class, name, \
+    TP_PROTO(dmu_buf_impl_t *db, multilist_sublist_t *mls), \
+    TP_ARGS(db, mls))
+DEFINE_DBUF_EVICT_ONE_EVENT(zfs_dbuf__evict__one);
+
 #endif /* _TRACE_DBUF_H */
 
 #undef TRACE_INCLUDE_PATH
@@ -527,6 +527,10 @@ extern void *zio_data_buf_alloc(size_t size);
 extern void zio_data_buf_free(void *buf, size_t size);
 extern void *zio_buf_alloc_flags(size_t size, int flags);
 
+extern void zio_push_transform(zio_t *zio, void *data, uint64_t size,
+    uint64_t bufsize, zio_transform_func_t *transform);
+extern void zio_pop_transforms(zio_t *zio);
+
 extern void zio_resubmit_stage_async(void *);
 
 extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
@@ -64,8 +64,12 @@ extern zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS];
  */
 extern zio_checksum_func_t zio_checksum_SHA256;
 
+extern int zio_checksum_equal(spa_t *, blkptr_t *, enum zio_checksum,
+    void *, uint64_t, uint64_t, zio_bad_cksum_t *);
 extern void zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
     void *data, uint64_t size);
+extern int zio_checksum_error_impl(spa_t *, blkptr_t *, enum zio_checksum,
+    void *, uint64_t, uint64_t, zio_bad_cksum_t *);
 extern int zio_checksum_error(zio_t *zio, zio_bad_cksum_t *out);
 extern enum zio_checksum spa_dedup_checksum(spa_t *spa);
 
@@ -96,20 +96,6 @@ successfully compressed before writing. A value of 100 disables this feature.
 Default value: \fB200\fR.
 .RE
 
-.sp
-.ne 2
-.na
-\fBl2arc_max_block_size\fR (ulong)
-.ad
-.RS 12n
-The maximum block size which may be written to an L2ARC device, after
-compression and other factors. This setting is used to prevent a small
-number of large blocks from pushing a larger number of small blocks out
-of the cache.
-.sp
-Default value: \fB16,777,216\fR.
-.RE
-
 .sp
 .ne 2
 .na
module/zfs/arc.c: 3520 lines changed (diff suppressed because it is too large).
@@ -95,7 +95,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
     abi.abi_state_type,
     abi.abi_state_contents,
     abi.abi_flags,
-    (ulong_t)abi.abi_datacnt,
+    (ulong_t)abi.abi_bufcnt,
     (u_longlong_t)abi.abi_size,
     (u_longlong_t)abi.abi_access,
     (ulong_t)abi.abi_mru_hits,
@@ -1337,7 +1337,7 @@ void
 dmu_return_arcbuf(arc_buf_t *buf)
 {
     arc_return_buf(buf, FTAG);
-    VERIFY(arc_buf_remove_ref(buf, FTAG));
+    arc_buf_destroy(buf, FTAG);
 }
 
 /*
@@ -1681,8 +1681,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
 
     zio_nowait(arc_write(pio, os->os_spa, txg,
         bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
-        DBUF_IS_L2COMPRESSIBLE(db), &zp, dmu_sync_ready,
-        NULL, NULL, dmu_sync_done, dsa,
+        &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
         ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
 
     return (0);
@@ -2040,11 +2039,11 @@ dmu_init(void)
     xuio_stat_init();
     dmu_objset_init();
     dnode_init();
-    dbuf_init();
     zfetch_init();
     dmu_tx_init();
     l2arc_init();
     arc_init();
+    dbuf_init();
 }
 
 void
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
  */
 
 #include <sys/dmu.h>
@@ -146,7 +146,7 @@ diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
             if (err)
                 break;
         }
-        (void) arc_buf_remove_ref(abuf, &abuf);
+        arc_buf_destroy(abuf, &abuf);
         if (err)
             return (err);
         /* Don't care about the data blocks */
@@ -358,8 +358,6 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
 
         if (DMU_OS_IS_L2CACHEABLE(os))
             aflags |= ARC_FLAG_L2CACHE;
-        if (DMU_OS_IS_L2COMPRESSIBLE(os))
-            aflags |= ARC_FLAG_L2COMPRESS;
 
         dprintf_bp(os->os_rootbp, "reading %s", "");
         err = arc_read(NULL, spa, os->os_rootbp,
@@ -376,14 +374,13 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
     /* Increase the blocksize if we are permitted. */
     if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
         arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
-        arc_buf_t *buf = arc_buf_alloc(spa,
+        arc_buf_t *buf = arc_alloc_buf(spa,
             sizeof (objset_phys_t), &os->os_phys_buf,
             ARC_BUFC_METADATA);
         bzero(buf->b_data, sizeof (objset_phys_t));
         bcopy(os->os_phys_buf->b_data, buf->b_data,
             arc_buf_size(os->os_phys_buf));
-        (void) arc_buf_remove_ref(os->os_phys_buf,
-            &os->os_phys_buf);
+        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
         os->os_phys_buf = buf;
     }
 
@@ -392,7 +389,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
     } else {
         int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
             sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
-        os->os_phys_buf = arc_buf_alloc(spa, size,
+        os->os_phys_buf = arc_alloc_buf(spa, size,
             &os->os_phys_buf, ARC_BUFC_METADATA);
         os->os_phys = os->os_phys_buf->b_data;
         bzero(os->os_phys, size);
@@ -475,8 +472,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
     if (needlock)
         dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
     if (err != 0) {
-        VERIFY(arc_buf_remove_ref(os->os_phys_buf,
-            &os->os_phys_buf));
+        arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
         kmem_free(os, sizeof (objset_t));
         return (err);
     }
@@ -787,7 +783,7 @@ dmu_objset_evict_done(objset_t *os)
     }
     zil_free(os->os_zil);
 
-    VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf));
+    arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
 
     /*
      * This is a barrier to prevent the objset from going away in
@@ -1183,7 +1179,6 @@ dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
 
     zio = arc_write(pio, os->os_spa, tx->tx_txg,
         os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
-        DMU_OS_IS_L2COMPRESSIBLE(os),
         &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
         os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
 
@@ -647,7 +647,7 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
             if (err != 0)
                 break;
         }
-        (void) arc_buf_remove_ref(abuf, &abuf);
+        arc_buf_destroy(abuf, &abuf);
     } else if (type == DMU_OT_SA) {
         arc_flags_t aflags = ARC_FLAG_WAIT;
         arc_buf_t *abuf;
@@ -659,7 +659,7 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
             return (SET_ERROR(EIO));
 
         err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
-        (void) arc_buf_remove_ref(abuf, &abuf);
+        arc_buf_destroy(abuf, &abuf);
     } else if (backup_do_embed(dsa, bp)) {
         /* it's an embedded level-0 block of a regular object */
         int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
@@ -684,7 +684,7 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
         if (zfs_send_corrupt_data) {
             uint64_t *ptr;
             /* Send a block filled with 0x"zfs badd bloc" */
-            abuf = arc_buf_alloc(spa, blksz, &abuf,
+            abuf = arc_alloc_buf(spa, blksz, &abuf,
                 ARC_BUFC_DATA);
             for (ptr = abuf->b_data;
                 (char *)ptr < (char *)abuf->b_data + blksz;
@@ -713,7 +713,7 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
             err = dump_write(dsa, type, zb->zb_object,
                 offset, blksz, bp, abuf->b_data);
         }
-        (void) arc_buf_remove_ref(abuf, &abuf);
+        arc_buf_destroy(abuf, &abuf);
     }
 
     ASSERT(err == 0 || err == EINTR);
@@ -386,7 +386,7 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
     }
 
     if (buf)
-        (void) arc_buf_remove_ref(buf, &buf);
+        arc_buf_destroy(buf, &buf);
 
 post:
     if (err == 0 && (td->td_flags & TRAVERSE_POST))
@@ -610,7 +610,7 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
 
         osp = buf->b_data;
         traverse_zil(td, &osp->os_zil_header);
-        (void) arc_buf_remove_ref(buf, &buf);
+        arc_buf_destroy(buf, &buf);
     }
 
     if (!(flags & TRAVERSE_PREFETCH_DATA) ||
@@ -502,7 +502,7 @@ dnode_destroy(dnode_t *dn)
     }
     if (dn->dn_bonus != NULL) {
         mutex_enter(&dn->dn_bonus->db_mtx);
-        dbuf_evict(dn->dn_bonus);
+        dbuf_destroy(dn->dn_bonus);
         dn->dn_bonus = NULL;
     }
     dn->dn_zio = NULL;
@@ -421,7 +421,7 @@ dnode_evict_dbufs(dnode_t *dn)
             avl_insert_here(&dn->dn_dbufs, db_marker, db,
                 AVL_BEFORE);
 
-            dbuf_clear(db);
+            dbuf_destroy(db);
 
             db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
             avl_remove(&dn->dn_dbufs, db_marker);
@@ -445,7 +445,7 @@ dnode_evict_bonus(dnode_t *dn)
     if (dn->dn_bonus != NULL) {
         if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
             mutex_enter(&dn->dn_bonus->db_mtx);
-            dbuf_evict(dn->dn_bonus);
+            dbuf_destroy(dn->dn_bonus);
             dn->dn_bonus = NULL;
         } else {
             dn->dn_bonus->db_pending_evict = TRUE;
@@ -692,7 +692,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
             dsl_scan_visitbp(cbp, &czb, dnp,
                 ds, scn, ostype, tx);
         }
-        (void) arc_buf_remove_ref(buf, &buf);
+        arc_buf_destroy(buf, &buf);
     } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
         arc_flags_t flags = ARC_FLAG_WAIT;
         dnode_phys_t *cdnp;
@@ -722,7 +722,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
                 cdnp, zb->zb_blkid * epb + i, tx);
         }
 
-        (void) arc_buf_remove_ref(buf, &buf);
+        arc_buf_destroy(buf, &buf);
     } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
         arc_flags_t flags = ARC_FLAG_WAIT;
         objset_phys_t *osp;
@@ -754,7 +754,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
                 &osp->os_userused_dnode,
                 DMU_USERUSED_OBJECT, tx);
         }
-        (void) arc_buf_remove_ref(buf, &buf);
+        arc_buf_destroy(buf, &buf);
     }
 
     return (0);
@@ -227,4 +227,28 @@ refcount_transfer(refcount_t *dst, refcount_t *src)
     list_destroy(&removed);
 }
 
+void
+refcount_transfer_ownership(refcount_t *rc, void *current_holder,
+    void *new_holder)
+{
+    reference_t *ref;
+    boolean_t found = B_FALSE;
+
+    mutex_enter(&rc->rc_mtx);
+    if (!rc->rc_tracked) {
+        mutex_exit(&rc->rc_mtx);
+        return;
+    }
+
+    for (ref = list_head(&rc->rc_list); ref;
+        ref = list_next(&rc->rc_list, ref)) {
+        if (ref->ref_holder == current_holder) {
+            ref->ref_holder = new_holder;
+            found = B_TRUE;
+            break;
+        }
+    }
+    ASSERT(found);
+    mutex_exit(&rc->rc_mtx);
+}
 #endif /* ZFS_DEBUG */
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
  */
 
 /* Portions Copyright 2010 Robert Milkowski */
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
VERIFY(arc_buf_remove_ref(abuf, &abuf));
|
arc_buf_destroy(abuf, &abuf);
|
||||||
}
|
}
|
||||||
|
|
||||||
return (error);
|
return (error);
|
||||||
|
@@ -297,7 +297,7 @@ zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
     if (error == 0) {
         if (wbuf != NULL)
             bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
-        (void) arc_buf_remove_ref(abuf, &abuf);
+        arc_buf_destroy(abuf, &abuf);
     }
 
     return (error);
@@ -301,7 +301,7 @@ zio_data_buf_free(void *buf, size_t size)
  * Push and pop I/O transform buffers
  * ==========================================================================
  */
-static void
+void
 zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
     zio_transform_func_t *transform)
 {
@@ -319,7 +319,7 @@ zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
     zio->io_size = size;
 }
 
-static void
+void
 zio_pop_transforms(zio_t *zio)
 {
     zio_transform_t *zt;
@@ -2390,7 +2390,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
                 bcmp(abuf->b_data, zio->io_orig_data,
                 zio->io_orig_size) != 0)
                 error = SET_ERROR(EEXIST);
-            VERIFY(arc_buf_remove_ref(abuf, &abuf));
+            arc_buf_destroy(abuf, &abuf);
         }
 
         ddt_enter(ddt);
@@ -187,25 +187,19 @@ zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
 }
 
 int
-zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
+zio_checksum_error_impl(spa_t *spa, blkptr_t *bp, enum zio_checksum checksum,
+    void *data, uint64_t size, uint64_t offset, zio_bad_cksum_t *info)
 {
-    blkptr_t *bp = zio->io_bp;
-    uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
-        (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
-    int byteswap;
-    int error;
-    uint64_t size = (bp == NULL ? zio->io_size :
-        (BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
-    uint64_t offset = zio->io_offset;
-    void *data = zio->io_data;
     zio_checksum_info_t *ci = &zio_checksum_table[checksum];
-    zio_cksum_t actual_cksum, expected_cksum, verifier;
+    zio_cksum_t actual_cksum, expected_cksum;
+    int byteswap;
 
     if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
         return (SET_ERROR(EINVAL));
 
     if (ci->ci_eck) {
         zio_eck_t *eck;
+        zio_cksum_t verifier;
 
         if (checksum == ZIO_CHECKSUM_ZILOG2) {
             zil_chain_t *zilc = data;
@@ -244,32 +238,51 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
         ci->ci_func[byteswap](data, size, &actual_cksum);
         eck->zec_cksum = expected_cksum;
 
-        if (byteswap)
+        if (byteswap) {
             byteswap_uint64_array(&expected_cksum,
                 sizeof (zio_cksum_t));
+        }
     } else {
-        ASSERT(!BP_IS_GANG(bp));
         byteswap = BP_SHOULD_BYTESWAP(bp);
         expected_cksum = bp->blk_cksum;
         ci->ci_func[byteswap](data, size, &actual_cksum);
     }
 
-    info->zbc_expected = expected_cksum;
-    info->zbc_actual = actual_cksum;
-    info->zbc_checksum_name = ci->ci_name;
-    info->zbc_byteswapped = byteswap;
-    info->zbc_injected = 0;
-    info->zbc_has_cksum = 1;
+    if (info != NULL) {
+        info->zbc_expected = expected_cksum;
+        info->zbc_actual = actual_cksum;
+        info->zbc_checksum_name = ci->ci_name;
+        info->zbc_byteswapped = byteswap;
+        info->zbc_injected = 0;
+        info->zbc_has_cksum = 1;
+    }
 
     if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
         return (SET_ERROR(ECKSUM));
 
-    if (zio_injection_enabled && !zio->io_error &&
+    return (0);
+}
+
+int
+zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
+{
+    blkptr_t *bp = zio->io_bp;
+    uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
+        (BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
+    int error;
+    uint64_t size = (bp == NULL ? zio->io_size :
+        (BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
+    uint64_t offset = zio->io_offset;
+    void *data = zio->io_data;
+    spa_t *spa = zio->io_spa;
+
+    error = zio_checksum_error_impl(spa, bp, checksum, data, size,
+        offset, info);
+    if (error != 0 && zio_injection_enabled && !zio->io_error &&
         (error = zio_handle_fault_injection(zio, ECKSUM)) != 0) {
-
         info->zbc_injected = 1;
         return (error);
     }
-    return (0);
+    return (error);
 }
 