Fix raw receive with different indirect block size.
Unlike regular receive, raw receive requires the destination to have
the same block structure as the source. In case of dnode reclaim this
triggers two special cases, each requiring its own handling:

- If dn_nlevels == 1, we can change the indirect block shift (ibs),
  but dnode_set_blksz() should not dirty the data buffer if the block
  size does not change, or during receive dbuf_dirty_lightweight()
  will trigger an assertion.
- If dn_nlevels > 1, we simply can't change the ibs: dnode_set_blksz()
  would fail and receive_object() would trigger an assertion, so we
  have to destroy and recreate the dnode from scratch.

Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by: iXsystems, Inc.
Closes #15039
parent 67c5e1ba4f
commit c4e8742149
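To make the reclaim decision concrete before reading the diff, here is a
minimal sketch of the check the first hunk introduces. The struct and
helper names (dst_layout, src_layout, can_reclaim_raw) are invented for
illustration only; the real logic lives inline in
receive_handle_existing_object() in module/zfs/dmu_recv.c:

#include <stdbool.h>
#include <stdint.h>

/* Destination object layout, as reported via dmu_object_info_t. */
struct dst_layout {
	uint64_t indirection;		/* doi_indirection: nlevels */
	uint64_t metadata_block_size;	/* doi_metadata_block_size: ibs */
};

/* Send-side layout taken from the stream's object record. */
struct src_layout {
	uint64_t nlevels;		/* drr_nlevels */
	uint64_t indblksz;		/* indirect block size of the source */
};

/*
 * Returns true if the existing dnode can be reclaimed in place during
 * a raw receive.  It cannot if nlevels would have to shrink, or if the
 * object already has indirect blocks (indirection > 1) whose size
 * differs from the send side.  With indirection == 1 no indirect
 * blocks exist yet, so dnode_set_blksz() can still change the shift.
 */
static bool
can_reclaim_raw(const struct dst_layout *dst, const struct src_layout *src)
{
	if (src->nlevels < dst->indirection)
		return (false);		/* can't decrease nlevels */
	if (dst->indirection > 1 &&
	    src->indblksz != dst->metadata_block_size)
		return (false);		/* can't change an existing ibs */
	return (true);
}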
module/zfs/dmu_recv.c
@@ -1795,17 +1795,19 @@ receive_handle_existing_object(const struct receive_writer_arg *rwa,
 	}
 
 	/*
-	 * The dmu does not currently support decreasing nlevels
-	 * or changing the number of dnode slots on an object. For
-	 * non-raw sends, this does not matter and the new object
-	 * can just use the previous one's nlevels. For raw sends,
-	 * however, the structure of the received dnode (including
-	 * nlevels and dnode slots) must match that of the send
-	 * side. Therefore, instead of using dmu_object_reclaim(),
-	 * we must free the object completely and call
-	 * dmu_object_claim_dnsize() instead.
+	 * The dmu does not currently support decreasing nlevels or changing
+	 * indirect block size if there is already one, same as changing the
+	 * number of dnode slots on an object. For non-raw sends this
+	 * does not matter and the new object can just use the previous one's
+	 * parameters. For raw sends, however, the structure of the received
+	 * dnode (including indirects and dnode slots) must match that of the
+	 * send side. Therefore, instead of using dmu_object_reclaim(), we
+	 * must free the object completely and call dmu_object_claim_dnsize()
+	 * instead.
 	 */
-	if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
+	if ((rwa->raw && ((doi->doi_indirection > 1 &&
+	    indblksz != doi->doi_metadata_block_size) ||
+	    drro->drr_nlevels < doi->doi_indirection)) ||
 	    dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
 		err = dmu_free_long_object(rwa->os, drro->drr_object);
 		if (err != 0)
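When this widened check fails (or the dnode slot count differs), the
receiver falls back to the path already used for slot mismatches:
dmu_free_long_object() destroys the old object, and the stream's record
is then applied through dmu_object_claim_dnsize(), so the recreated
dnode carries the send side's nlevels and indirect block size from the
start instead of being reclaimed in place.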
module/zfs/dnode.c
@@ -1882,7 +1882,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 	if (ibs == dn->dn_indblkshift)
 		ibs = 0;
 
-	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
+	if (size == dn->dn_datablksz && ibs == 0)
 		return (0);
 
 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
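The rewritten early return is the same check in clearer units:
dn_datablkszsec stores the data block size in 512-byte sectors, while
dn_datablksz stores it in bytes, and dnode_set_blksz() has already
rounded size up to a sector multiple by this point. A throwaway
standalone demonstration of the equivalence (constants inlined; the
local variables merely stand in for the real dnode_t fields):

#include <assert.h>
#include <stdint.h>

#define	SPA_MINBLOCKSHIFT	9	/* as in ZFS: 512-byte sectors */

int
main(void)
{
	/* Stand-ins for dnode_t fields: a 128K block, in both units. */
	uint16_t dn_datablkszsec = 256;		/* 256 sectors */
	uint32_t dn_datablksz =
	    (uint32_t)dn_datablkszsec << SPA_MINBLOCKSHIFT;	/* 131072 */

	/* size is always a sector multiple once rounded by the caller. */
	for (uint64_t size = 512; size <= (1 << 20); size += 512) {
		int old_check = (size >> SPA_MINBLOCKSHIFT ==
		    dn_datablkszsec);
		int new_check = (size == dn_datablksz);
		assert(old_check == new_check);
	}
	return (0);
}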
@@ -1905,6 +1905,8 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 	if (ibs && dn->dn_nlevels != 1)
 		goto fail;
 
+	dnode_setdirty(dn, tx);
+	if (size != dn->dn_datablksz) {
 	/* resize the old block */
 	err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
 	if (err == 0) {
@@ -1914,15 +1916,14 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 	}
 
 	dnode_setdblksz(dn, size);
-	dnode_setdirty(dn, tx);
 	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
+	if (db)
+		dbuf_rele(db, FTAG);
+	}
 	if (ibs) {
 		dn->dn_indblkshift = ibs;
 		dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
 	}
-	/* release after we have fixed the blocksize in the dnode */
-	if (db)
-		dbuf_rele(db, FTAG);
 
 	rw_exit(&dn->dn_struct_rwlock);
 	return (0);
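Net effect of the last two hunks, restated as a simplified fragment of
the resulting dnode_set_blksz() tail (not the verbatim source):
dnode_setdirty() now runs unconditionally up front, while the level-0
data buffer is held, resized, and released only inside the
size-changed branch. An ibs-only call (the dn_nlevels == 1 case from
the commit message) therefore never touches the data buffer, which is
what avoids the dbuf_dirty_lightweight() assertion during raw receive.

	dnode_setdirty(dn, tx);		/* always dirty the dnode itself */
	if (size != dn->dn_datablksz) {
		/* Resize the existing level-0 block, if there is one. */
		err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
		if (err == 0)
			dbuf_new_size(db, size, tx);
		else if (err != ENOENT)
			goto fail;

		dnode_setdblksz(dn, size);
		dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
		if (db != NULL)
			dbuf_rele(db, FTAG); /* after blocksize is fixed */
	}
	if (ibs != 0) {
		/* Reachable only when dn_nlevels == 1 (checked earlier). */
		dn->dn_indblkshift = ibs;
		dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
	}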