Fix raw receive with different indirect block size.

Unlike regular receive, raw receive requires the destination to have
the same block structure as the source.  In the case of dnode reclaim
this triggers two special cases that need special handling:
 - If dn_nlevels == 1, we can change the ibs, but dnode_set_blksz()
should not dirty the data buffer if the block size does not change, or
during receive dbuf_dirty_lightweight() will trigger an assertion.
 - If dn_nlevels > 1, we simply can't change the ibs; dnode_set_blksz()
would fail and receive_object() would trigger an assertion, so we must
destroy and recreate the dnode from scratch (see the sketch after this
list).
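
To make the case split concrete, here is a minimal stand-alone sketch
of the receive-side decision described above.  The names here
(existing_obj, recv_record, must_recreate) are hypothetical stand-ins
for the dmu_object_info_t and drr_object fields that the real
receive_handle_existing_object() consults; this illustrates the logic,
it is not the actual OpenZFS code.

#include <stdbool.h>
#include <stdio.h>

struct existing_obj {		/* stand-in for dmu_object_info_t */
	int nlevels;		/* doi_indirection */
	int indblksz;		/* doi_metadata_block_size */
};

struct recv_record {		/* stand-in for the drr_object record */
	int nlevels;		/* drr_nlevels */
	int indblksz;		/* indirect block size from the stream */
};

/*
 * Raw receive: if the object already has indirect blocks (nlevels > 1)
 * of a different size, or fewer levels were sent than exist on disk,
 * the block structure cannot be adjusted in place, so the dnode must
 * be freed and claimed from scratch.  With nlevels == 1 the ibs can
 * still be changed in place by dnode_set_blksz().
 */
static bool
must_recreate(const struct existing_obj *o, const struct recv_record *r)
{
	return ((o->nlevels > 1 && r->indblksz != o->indblksz) ||
	    r->nlevels < o->nlevels);
}

int
main(void)
{
	struct existing_obj o = { .nlevels = 2, .indblksz = 128 * 1024 };
	struct recv_record r = { .nlevels = 2, .indblksz = 32 * 1024 };

	/* nlevels > 1 with a mismatched ibs: must destroy and recreate */
	printf("recreate: %s\n", must_recreate(&o, &r) ? "yes" : "no");
	return (0);
}

With nlevels > 1 and a mismatched indirect block size this prints
"recreate: yes", matching the second bullet above; with a single level
it prints "recreate: no" and the receive can fall through to
dnode_set_blksz().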

Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Signed-off-by:	Alexander Motin <mav@FreeBSD.org>
Sponsored by:	iXsystems, Inc.
Closes #15039
module/zfs/dmu_recv.c

@@ -1533,17 +1533,19 @@ receive_handle_existing_object(const struct receive_writer_arg *rwa,
 	}
 
 	/*
-	 * The dmu does not currently support decreasing nlevels
-	 * or changing the number of dnode slots on an object. For
-	 * non-raw sends, this does not matter and the new object
-	 * can just use the previous one's nlevels. For raw sends,
-	 * however, the structure of the received dnode (including
-	 * nlevels and dnode slots) must match that of the send
-	 * side. Therefore, instead of using dmu_object_reclaim(),
-	 * we must free the object completely and call
-	 * dmu_object_claim_dnsize() instead.
+	 * The dmu does not currently support decreasing nlevels or changing
+	 * indirect block size if there is already one, same as changing the
+	 * number of dnode slots on an object.  For non-raw sends this
+	 * does not matter and the new object can just use the previous one's
+	 * parameters.  For raw sends, however, the structure of the received
+	 * dnode (including indirects and dnode slots) must match that of the
+	 * send side.  Therefore, instead of using dmu_object_reclaim(), we
+	 * must free the object completely and call dmu_object_claim_dnsize()
+	 * instead.
 	 */
-	if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
+	if ((rwa->raw && ((doi->doi_indirection > 1 &&
+	    indblksz != doi->doi_metadata_block_size) ||
+	    drro->drr_nlevels < doi->doi_indirection)) ||
 	    dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
 		err = dmu_free_long_object(rwa->os, drro->drr_object);
 		if (err != 0)

module/zfs/dnode.c

@@ -1891,7 +1891,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 	if (ibs == dn->dn_indblkshift)
 		ibs = 0;
 
-	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
+	if (size == dn->dn_datablksz && ibs == 0)
 		return (0);
 
 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
@@ -1914,6 +1914,8 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 	if (ibs && dn->dn_nlevels != 1)
 		goto fail;
 
+	dnode_setdirty(dn, tx);
+	if (size != dn->dn_datablksz) {
 		/* resize the old block */
 		err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
 		if (err == 0) {
@@ -1923,15 +1925,14 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 	}
 
 	dnode_setdblksz(dn, size);
-	dnode_setdirty(dn, tx);
 	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = size;
+		if (db)
+			dbuf_rele(db, FTAG);
+	}
 	if (ibs) {
 		dn->dn_indblkshift = ibs;
 		dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
 	}
-	/* release after we have fixed the blocksize in the dnode */
-	if (db)
-		dbuf_rele(db, FTAG);
 
 	rw_exit(&dn->dn_struct_rwlock);
 
 	return (0);
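
The dnode.c half of the change amounts to: dirty the dnode
unconditionally, but hold, resize, and release the level-0 dbuf only
when the data block size actually changes, so an unchanged block size
no longer dirties the data buffer.  Below is a toy model of the new
control flow under hypothetical simplified types (toy_dnode, toy_buf,
toy_set_blksz); it sketches the reordering only and uses none of the
real DMU interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical toy stand-ins for dnode_t and its level-0 dbuf. */
struct toy_buf { int size; };
struct toy_dnode {
	int datablksz;		/* dn_datablksz */
	int indblkshift;	/* dn_indblkshift */
	bool dirty;
	struct toy_buf buf;
};

/*
 * Models the reordered dnode_set_blksz(): the dnode itself is dirtied
 * up front, but the data buffer is touched only when the block size
 * really changes.  Before the fix, an unchanged size still went
 * through the buffer-resize path, which a raw receive's later
 * dbuf_dirty_lightweight() would trip an assertion on.
 */
static void
toy_set_blksz(struct toy_dnode *dn, int size, int ibs)
{
	dn->dirty = true;		/* dnode_setdirty() */
	if (size != dn->datablksz) {	/* only then touch the buffer */
		dn->buf.size = size;	/* dbuf_new_size() */
		dn->datablksz = size;	/* dnode_setdblksz() */
	}
	if (ibs != 0)
		dn->indblkshift = ibs;
}

int
main(void)
{
	struct toy_dnode dn = { .datablksz = 4096, .buf = { 4096 } };

	/* same data block size: the buffer is left alone, ibs changes */
	toy_set_blksz(&dn, 4096, 14);
	printf("buf size %d, ibs %d\n", dn.buf.size, dn.indblkshift);
	return (0);
}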