Fix unallocated object detection for large_dnode datasets

Fix dmu_object_next() to correctly handle unallocated objects on
large_dnode datasets.

We implement this by scanning the dnode block that contains *objectp
from its first slot, stepping over each allocated dnode, until we pass
*objectp; that gives the correct starting offset to use in
dnode_next_offset(). This is necessary because *objectp may lie inside
a large dnode, so we can't assume it is a hole even if
dmu_object_info() returns ENOENT.
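
For illustration only, here is a minimal user-space sketch of that scan,
not the in-kernel code: the slots_per_obj table and the next_start_obj()
helper are invented for this example, and only the constants and the
shape of the loop mirror the patch below.

#include <stdint.h>
#include <stdio.h>

#define	DNODE_SHIFT		9		/* 512-byte dnode slots */
#define	DNODE_BLOCK_SIZE	(16 * 1024)	/* 16K meta-dnode block */

/*
 * slots_per_obj[i] is the slot count of the dnode starting at slot i;
 * 0 means slot i is free or is an interior slot of a larger dnode.
 * Object 2 is a hypothetical 1K dnode occupying slots 2-3.
 */
static int slots_per_obj[32] = { [0] = 1, [1] = 1, [2] = 2, [4] = 1 };

static uint64_t
next_start_obj(uint64_t obj)
{
	int epb = DNODE_BLOCK_SIZE >> DNODE_SHIFT;	/* 32 slots per block */
	uint64_t i;
	int skip;

	/* Walk the block from its first slot until we step past obj. */
	for (i = obj & ~(uint64_t)(epb - 1); i <= obj; i += skip)
		skip = slots_per_obj[i % epb] ? slots_per_obj[i % epb] : 1;

	return (i);
}

int
main(void)
{
	/*
	 * A lookup of slot 3 would report ENOENT because slot 3 is the
	 * interior of object 2, yet slot 3 is not a hole; the scan
	 * correctly resumes the search at object 4.
	 */
	printf("start_obj = %llu\n", (unsigned long long)next_start_obj(3));
	return (0);
}

With this layout, a search resumed from object 3 (an interior slot of the
1K dnode at object 2) continues at object 4, on a real dnode boundary,
rather than restarting inside the large dnode.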

This fixes a couple of issues with zfs receive on large_dnode datasets.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ned Bass <bass6@llnl.gov>
Signed-off-by: loli10K <ezomori.nozomu@gmail.com>
Closes #5027 
Closes #5532
Authored by LOLi on 2017-01-14 00:47:34 +01:00; committed by Brian Behlendorf
commit 08f0510d87, parent 5043684ae5
3 changed files with 42 additions and 19 deletions


@@ -237,28 +237,39 @@ int
 dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
 {
 	uint64_t offset;
-	dmu_object_info_t doi;
+	uint64_t start_obj;
 	struct dsl_dataset *ds = os->os_dsl_dataset;
-	int dnodesize;
 	int error;
 
-	/*
-	 * Avoid expensive dnode hold if this dataset doesn't use large dnodes.
-	 */
-	if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
-		error = dmu_object_info(os, *objectp, &doi);
-		if (error && !(error == EINVAL && *objectp == 0))
-			return (SET_ERROR(error));
-		else
-			dnodesize = doi.doi_dnodesize;
-	} else {
-		dnodesize = DNODE_MIN_SIZE;
-	}
+	if (*objectp == 0) {
+		start_obj = 1;
+	} else if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
+		/*
+		 * For large_dnode datasets, scan from the beginning of the
+		 * dnode block to find the starting offset. This is needed
+		 * because objectp could be part of a large dnode so we can't
+		 * assume it's a hole even if dmu_object_info() returns ENOENT.
+		 */
+		int epb = DNODE_BLOCK_SIZE >> DNODE_SHIFT;
+		int skip;
+		uint64_t i;
 
-	if (*objectp == 0)
-		offset = 1 << DNODE_SHIFT;
-	else
-		offset = (*objectp << DNODE_SHIFT) + dnodesize;
+		for (i = *objectp & ~(epb - 1); i <= *objectp; i += skip) {
+			dmu_object_info_t doi;
+
+			error = dmu_object_info(os, i, &doi);
+			if (error)
+				skip = 1;
+			else
+				skip = doi.doi_dnodesize >> DNODE_SHIFT;
+		}
+
+		start_obj = i;
+	} else {
+		start_obj = *objectp + 1;
+	}
+
+	offset = start_obj << DNODE_SHIFT;
 
 	error = dnode_next_offset(DMU_META_DNODE(os),
 	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);


@@ -1184,6 +1184,7 @@ dnode_is_free(dmu_buf_impl_t *db, int idx, int slots)
  * errors:
  * EINVAL - invalid object number.
  * ENOSPC - hole too small to fulfill "slots" request
+ * ENOENT - the requested dnode is not allocated
  * EIO - i/o error.
  * succeeds even for free dnodes.
  */


@@ -27,9 +27,11 @@ verify_runnable "both"
 TEST_SEND_FS=$TESTPOOL/send_large_dnode
 TEST_RECV_FS=$TESTPOOL/recv_large_dnode
 TEST_SNAP=$TEST_SEND_FS@ldnsnap
+TEST_SNAPINCR=$TEST_SEND_FS@ldnsnap_incr
 TEST_STREAM=$TESTDIR/ldnsnap
+TEST_STREAMINCR=$TESTDIR/ldnsnap_incr
 TEST_FILE=foo
+TEST_FILEINCR=bar
 
 function cleanup
 {
@@ -42,6 +44,7 @@ function cleanup
 	fi
 
 	rm -f $TEST_STREAM
+	rm -f $TEST_STREAMINCR
 }
 
 log_onexit cleanup
@@ -49,10 +52,13 @@ log_onexit cleanup
 log_assert "zfs send stream with large dnodes accepted by new pool"
 
 log_must $ZFS create -o dnodesize=1k $TEST_SEND_FS
-log_must touch /$TEST_SEND_FS/$TEST_FILE
-log_must $ZFS umount $TEST_SEND_FS
+log_must $TOUCH /$TEST_SEND_FS/$TEST_FILE
 log_must $ZFS snap $TEST_SNAP
 log_must $ZFS send $TEST_SNAP > $TEST_STREAM
+log_must $RM -f /$TEST_SEND_FS/$TEST_FILE
+log_must $TOUCH /$TEST_SEND_FS/$TEST_FILEINCR
+log_must $ZFS snap $TEST_SNAPINCR
+log_must $ZFS send -i $TEST_SNAP $TEST_SNAPINCR > $TEST_STREAMINCR
 
 log_must eval "$ZFS recv $TEST_RECV_FS < $TEST_STREAM"
 
 inode=$(ls -li /$TEST_RECV_FS/$TEST_FILE | awk '{print $1}')
@@ -61,4 +67,9 @@ if [[ "$dnsize" != "1K" ]]; then
 	log_fail "dnode size is $dnsize (expected 1K)"
 fi
 
+log_must eval "$ZFS recv -F $TEST_RECV_FS < $TEST_STREAMINCR"
+log_must $DIFF -r /$TEST_SEND_FS /$TEST_RECV_FS
+log_must $ZFS umount $TEST_SEND_FS
+log_must $ZFS umount $TEST_RECV_FS
+
 log_pass