Fix 'zfs recv' of non large_dnode send streams

Currently, there is a bug where older send streams without the
DMU_BACKUP_FEATURE_LARGE_DNODE flag are not handled correctly.
The code in receive_object() fails to handle cases where
drro->drr_dn_slots is set to 0, which is always the case when the
sending code does not support this feature flag. This patch fixes
the issue by ensuring that a value of 0 is treated as
DNODE_MIN_SLOTS.

Tested-by: DHE <git@dehacked.net>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #7617
Closes #7662
commit 45f0437912
parent dc3eea871a
Author: Tom Caputi
Date: 2018-06-28 17:55:11 -04:00
Committed by: Tony Hutter

2 changed files with 30 additions and 6 deletions
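
For illustration, below is a minimal, self-contained C sketch of the normalization this patch applies on the receive side. The helper normalize_dn_slots() is hypothetical (it does not exist in ZFS); the DNODE_* constants mirror their definitions in the ZFS headers, where DNODE_MIN_SLOTS works out to 1.

#include <stdint.h>
#include <stdio.h>

#define DNODE_SHIFT	9				/* 512-byte dnode slot */
#define DNODE_MIN_SIZE	(1 << DNODE_SHIFT)
#define DNODE_MIN_SLOTS	(DNODE_MIN_SIZE >> DNODE_SHIFT)	/* == 1 */

/*
 * Hypothetical helper mirroring the patch: senders without
 * DMU_BACKUP_FEATURE_LARGE_DNODE put 0 in drr_dn_slots, and
 * that 0 must be treated as the minimum of one slot.
 */
static uint8_t
normalize_dn_slots(uint8_t drr_dn_slots)
{
	return (drr_dn_slots != 0 ? drr_dn_slots : DNODE_MIN_SLOTS);
}

int
main(void)
{
	printf("%u\n", normalize_dn_slots(0));	/* old stream -> 1 */
	printf("%u\n", normalize_dn_slots(4));	/* large_dnode sender -> 4 */
	return (0);
}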


@@ -261,6 +261,9 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
 	int dn_slots = dnodesize >> DNODE_SHIFT;
 	int err;
 
+	if (dn_slots == 0)
+		dn_slots = DNODE_MIN_SLOTS;
+
 	if (object == DMU_META_DNODE_OBJECT)
 		return (SET_ERROR(EBADF));
 


@@ -2139,6 +2139,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	dmu_tx_t *tx;
 	uint64_t object;
 	int err;
+	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
+	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
 
 	if (drro->drr_type == DMU_OT_NONE ||
 	    !DMU_OT_IS_VALID(drro->drr_type) ||
@@ -2150,7 +2152,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
 	    drro->drr_bonuslen >
 	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
-	    drro->drr_dn_slots >
+	    dn_slots >
 	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
 		return (SET_ERROR(EINVAL));
 	}
@@ -2177,12 +2179,31 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 
 		if (drro->drr_blksz != doi.doi_data_block_size ||
 		    nblkptr < doi.doi_nblkptr ||
-		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
 			err = dmu_free_long_range(rwa->os, drro->drr_object,
 			    0, DMU_OBJECT_END);
 			if (err != 0)
 				return (SET_ERROR(EINVAL));
 		}
+
+		/*
+		 * The dmu does not currently support decreasing nlevels
+		 * on an object. For non-raw sends, this does not matter
+		 * and the new object can just use the previous one's nlevels.
+		 * For raw sends, however, the structure of the received dnode
+		 * (including nlevels) must match that of the send side.
+		 * Therefore, instead of using dmu_object_reclaim(), we must
+		 * free the object completely and call dmu_object_claim_dnsize()
+		 * instead.
+		 */
+		if (dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+			err = dmu_free_long_object(rwa->os, drro->drr_object);
+			if (err != 0)
+				return (SET_ERROR(EINVAL));
+
+			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
+			object = DMU_NEW_OBJECT;
+		}
 	} else if (err == EEXIST) {
 		/*
 		 * The object requested is currently an interior slot of a
@@ -2204,9 +2225,9 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		 * another object from the previous snapshot. We must free
 		 * these objects before we attempt to allocate the new dnode.
 		 */
-		if (drro->drr_dn_slots > 1) {
+		if (dn_slots > 1) {
 			for (uint64_t slot = drro->drr_object + 1;
-			    slot < drro->drr_object + drro->drr_dn_slots;
+			    slot < drro->drr_object + dn_slots;
 			    slot++) {
 				dmu_object_info_t slot_doi;
@@ -2238,7 +2259,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	} else if (drro->drr_type != doi.doi_type ||
 	    drro->drr_blksz != doi.doi_data_block_size ||
 	    drro->drr_bonustype != doi.doi_bonus_type ||
@@ -2247,7 +2268,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	}
 	if (err != 0) {
 		dmu_tx_commit(tx);