Handle compressed buffers in __dbuf_hold_impl()
In __dbuf_hold_impl(), if a buffer is currently syncing and is still
referenced from db_data, a copy is made in case it is dirtied again in
the txg. Previously, the buffer for the copy was simply allocated with
arc_alloc_buf(), which doesn't handle compressed or encrypted buffers
(encrypted buffers are a special case of compressed buffers). The
result was typically an invalid memory access, because the newly
allocated buffer was of the uncompressed size.

This commit fixes the problem by handling the two compressed cases,
encrypted and unencrypted, with arc_alloc_raw_buf() and
arc_alloc_compressed_buf() respectively.

Although using the proper allocation functions fixes the invalid memory
access by allocating a buffer of the compressed size, another unrelated
issue made it impossible to detect compressed buffers in the first
place: the header's compression flag was set to ZIO_COMPRESS_OFF in
arc_write() even when an attached buffer might actually be compressed.
This commit adds logic to set ZIO_COMPRESS_OFF only in the non-ZIO_RAW
case, which handles both kinds of compressed buffers (encrypted or
unencrypted).

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes #5742
Closes #6797
parent 951e62169e
commit d7881a6dca
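For context, a compressed ARC buffer is allocated at its physical
(compressed) size, while the dbuf's logical block size can be much
larger; copying the logical size out of a physical-size allocation is
the invalid access described above. A minimal userspace sketch of that
size mismatch (all names and sizes here are illustrative, not from the
ZFS code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int
	main(void)
	{
		size_t lsize = 128 * 1024;	/* logical (uncompressed) size */
		size_t psize = 16 * 1024;	/* physical (compressed) size */

		/* Stand-in for a compressed buffer: only psize bytes exist. */
		char *src = malloc(psize);
		memset(src, 0xab, psize);

		/* Broken pattern: destination sized and copied at lsize. */
		char *bad_dst = malloc(lsize);
		/* memcpy(bad_dst, src, lsize); -- reads past the end of src */

		/* Fixed pattern: size the copy by the actual allocation. */
		char *good_dst = malloc(psize);
		memcpy(good_dst, src, psize);

		printf("copied %zu of %zu logical bytes\n", psize, lsize);
		free(src); free(bad_dst); free(good_dst);
		return (0);
	}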
module/zfs/arc.c:

@@ -6087,6 +6087,10 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
 		VERIFY3P(buf->b_data, !=, NULL);
 		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
 	}
+
+	if (!(zio_flags & ZIO_FLAG_RAW))
+		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
+
 	ASSERT(!arc_buf_is_shared(buf));
 	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
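The arc.c hunk above is what makes detection possible at all: before
it, arc_write() cleared the header's compression type unconditionally,
so arc_get_compression() on the attached buffer could never report
anything but ZIO_COMPRESS_OFF. A schematic of the before/after
behavior (the comments are ours; only the two live lines come from the
hunk):

	/* Before: compression type clobbered for every write. */
	/* arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF); */

	/*
	 * After: a ZIO_FLAG_RAW write hands ARC already-compressed
	 * (and possibly encrypted) bytes, so the header must keep the
	 * real compression type for later readers to detect.
	 */
	if (!(zio_flags & ZIO_FLAG_RAW))
		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);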
module/zfs/dbuf.c:

@@ -63,7 +63,6 @@ struct dbuf_hold_impl_data {
 	blkptr_t *dh_bp;
 	int dh_err;
 	dbuf_dirty_record_t *dh_dr;
-	arc_buf_contents_t dh_type;
 	int dh_depth;
 };
 
@@ -2628,6 +2627,36 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
 
 #define	DBUF_HOLD_IMPL_MAX_DEPTH	20
 
+/*
+ * Helper function for __dbuf_hold_impl() to copy a buffer. Handles
+ * the case of compressed and uncompressed buffers by allocating the
+ * new buffer, respectively, with arc_alloc_raw_buf(),
+ * arc_alloc_compressed_buf() or arc_alloc_buf().*
+ *
+ * NOTE: Declared noinline to avoid stack bloat in __dbuf_hold_impl().
+ */
+noinline static void
+dbuf_hold_copy(struct dbuf_hold_impl_data *dh)
+{
+	dnode_t *dn = dh->dh_dn;
+	dmu_buf_impl_t *db = dh->dh_db;
+	dbuf_dirty_record_t *dr = dh->dh_dr;
+	arc_buf_t *data = dr->dt.dl.dr_data;
+
+	enum zio_compress compress_type = arc_get_compression(data);
+
+	if (compress_type != ZIO_COMPRESS_OFF) {
+		dbuf_set_data(db, arc_alloc_compressed_buf(
+		    dn->dn_objset->os_spa, db, arc_buf_size(data),
+		    arc_buf_lsize(data), compress_type));
+	} else {
+		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
+		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
+	}
+
+	bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
+}
+
 /*
  * Returns with db_holds incremented, and db_mtx not held.
  * Note: dn_struct_rwlock must be held.
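The new dbuf_hold_copy() above dispatches only on compression, while
the commit message (and the function's own comment, which names
arc_alloc_raw_buf()) also covers the encrypted case. In the upstream
master version of dbuf_hold_copy(), an encrypted buffer is detected
first and reallocated raw; the following is a sketch from memory of
that upstream shape, with the arc_is_encrypted(), arc_get_raw_params()
and arc_alloc_raw_buf() signatures assumed from OpenZFS of that era,
not taken from this diff:

	enum zio_compress compress_type = arc_get_compression(data);
	int psize = arc_buf_size(data);
	int lsize = arc_buf_lsize(data);

	if (arc_is_encrypted(data)) {
		boolean_t byteorder;
		uint8_t salt[ZIO_DATA_SALT_LEN];
		uint8_t iv[ZIO_DATA_IV_LEN];
		uint8_t mac[ZIO_DATA_MAC_LEN];

		/* Recover the crypto parameters carried by the raw buf. */
		arc_get_raw_params(data, &byteorder, salt, iv, mac);
		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa,
		    db, dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
		    mac, dn->dn_type, psize, lsize, compress_type));
	} else if (compress_type != ZIO_COMPRESS_OFF) {
		/* Unencrypted but compressed: allocate at psize. */
		dbuf_set_data(db, arc_alloc_compressed_buf(
		    dn->dn_objset->os_spa, db, psize, lsize, compress_type));
	} else {
		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
	}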
@@ -2693,16 +2722,8 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
 	    dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
 	    dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
 		dh->dh_dr = dh->dh_db->db_data_pending;
-		if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
-			dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);
-
-			dbuf_set_data(dh->dh_db,
-			    arc_alloc_buf(dh->dh_dn->dn_objset->os_spa,
-			    dh->dh_db, dh->dh_type, dh->dh_db->db.db_size));
-			bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
-			    dh->dh_db->db.db_data, dh->dh_db->db.db_size);
-		}
+		if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf)
+			dbuf_hold_copy(dh);
 	}
 
 	if (multilist_link_active(&dh->dh_db->db_cache_link)) {
@@ -2775,7 +2796,6 @@ __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
 	dh->dh_bp = NULL;
 	dh->dh_err = 0;
 	dh->dh_dr = NULL;
-	dh->dh_type = 0;
 
 	dh->dh_depth = depth;
 }