Raw receive functions must not decrypt data
This patch fixes a small bug where receive_spill() sometimes attempted to decrypt spill blocks when doing a raw receive. In addition, this patch fixes another small issue in arc_buf_fill()'s error handling where a decryption failure (which could be caused by the first bug) would attempt to set the arc header's IO_ERROR flag without holding the header's lock. Reviewed-by: Matthew Thode <prometheanfire@gentoo.org> Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Reviewed-by: Matthew Ahrens <mahrens@delphix.com> Signed-off-by: Tom Caputi <tcaputi@datto.com> Closes #7564 Closes #7584 Closes #7592
This commit is contained in:
parent
6969afcefd
commit
e7504d7a18
|
@@ -514,7 +514,8 @@ int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
|
|||
* Special spill buffer support used by "SA" framework
|
||||
*/
|
||||
|
||||
int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
|
||||
int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
|
||||
dmu_buf_t **dbp);
|
||||
int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
|
||||
void *tag, dmu_buf_t **dbp);
|
||||
int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
|
||||
|
|
|
@@ -2144,7 +2144,11 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
|
|||
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
|
||||
zb, !!(flags & ARC_FILL_NOAUTH));
|
||||
if (error != 0) {
|
||||
if (hash_lock != NULL)
|
||||
mutex_enter(hash_lock);
|
||||
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
|
||||
if (hash_lock != NULL)
|
||||
mutex_exit(hash_lock);
|
||||
return (error);
|
||||
}
|
||||
}
|
||||
|
@@ -2247,7 +2251,11 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
|
|||
"hdr %p, compress %d, psize %d, lsize %d",
|
||||
hdr, arc_hdr_get_compress(hdr),
|
||||
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
|
||||
if (hash_lock != NULL)
|
||||
mutex_enter(hash_lock);
|
||||
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
|
||||
if (hash_lock != NULL)
|
||||
mutex_exit(hash_lock);
|
||||
return (SET_ERROR(EIO));
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -459,15 +459,20 @@ dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
|
|||
}
|
||||
|
||||
int
|
||||
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
|
||||
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
|
||||
dmu_buf_t **dbp)
|
||||
{
|
||||
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
|
||||
dnode_t *dn;
|
||||
int err;
|
||||
uint32_t db_flags = DB_RF_CANFAIL;
|
||||
|
||||
if (flags & DMU_READ_NO_DECRYPT)
|
||||
db_flags |= DB_RF_NO_DECRYPT;
|
||||
|
||||
DB_DNODE_ENTER(db);
|
||||
dn = DB_DNODE(db);
|
||||
err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
|
||||
err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
|
||||
DB_DNODE_EXIT(db);
|
||||
|
||||
return (err);
|
||||
|
|
|
@@ -2936,6 +2936,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
|
|||
dmu_tx_t *tx;
|
||||
dmu_buf_t *db, *db_spill;
|
||||
int err;
|
||||
uint32_t flags = 0;
|
||||
|
||||
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
|
||||
drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
|
||||
|
@@ -2946,6 +2947,8 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
|
|||
drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
|
||||
drrs->drr_compressed_size == 0)
|
||||
return (SET_ERROR(EINVAL));
|
||||
|
||||
flags |= DMU_READ_NO_DECRYPT;
|
||||
}
|
||||
|
||||
if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
|
||||
|
@@ -2955,7 +2958,8 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
|
|||
rwa->max_object = drrs->drr_object;
|
||||
|
||||
VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
|
||||
if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
|
||||
if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
|
||||
&db_spill)) != 0) {
|
||||
dmu_buf_rele(db, FTAG);
|
||||
return (err);
|
||||
}
|
||||
|
@@ -2971,7 +2975,6 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
|
|||
dmu_tx_abort(tx);
|
||||
return (err);
|
||||
}
|
||||
dmu_buf_will_dirty(db_spill, tx);
|
||||
|
||||
if (db_spill->db_size < drrs->drr_length)
|
||||
VERIFY(0 == dbuf_spill_set_blksz(db_spill,
|
||||
|
|
|
@@ -698,7 +698,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
|
|||
boolean_t dummy;
|
||||
|
||||
if (hdl->sa_spill == NULL) {
|
||||
VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, NULL,
|
||||
VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
|
||||
&hdl->sa_spill) == 0);
|
||||
}
|
||||
dmu_buf_will_dirty(hdl->sa_spill, tx);
|
||||
|
|
Loading…
Reference in New Issue