Add defensive assertions
Coverity complains about possible bugs involving dereferencing NULL return values and division by zero. The division-by-zero bugs require that a block pointer be corrupt, either from in-memory corruption or on-disk corruption. The NULL return value complaints are only bugs if assumptions that we make about the state of data structures are wrong. Some seem impossible to be wrong and thus are false positives, while others are hard to analyze. Rather than dismiss these as false positives by assuming we know better, we add defensive assertions to let us know when our assumptions are wrong. Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu> Closes #13972
This commit is contained in:
parent
bfaa1d98f4
commit
a6ccb36b94
|
@ -182,6 +182,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
|
|||
return;
|
||||
}
|
||||
|
||||
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
|
||||
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os),
|
||||
lr->lr_foid, ZB_ZIL_LEVEL,
|
||||
lr->lr_offset / BP_GET_LSIZE(bp));
|
||||
|
|
|
@ -1133,6 +1133,7 @@ zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
|
|||
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
|
||||
cb->cb_acl_node);
|
||||
}
|
||||
ASSERT3P(cb->cb_acl_node, !=, NULL);
|
||||
*dataptr = cb->cb_acl_node->z_acldata;
|
||||
*length = cb->cb_acl_node->z_size;
|
||||
}
|
||||
|
|
|
@ -1163,6 +1163,7 @@ zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
|
|||
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
|
||||
cb->cb_acl_node);
|
||||
}
|
||||
ASSERT3P(cb->cb_acl_node, !=, NULL);
|
||||
*dataptr = cb->cb_acl_node->z_acldata;
|
||||
*length = cb->cb_acl_node->z_size;
|
||||
}
|
||||
|
|
|
@ -8539,6 +8539,7 @@ l2arc_dev_get_next(void)
|
|||
else if (next == first)
|
||||
break;
|
||||
|
||||
ASSERT3P(next, !=, NULL);
|
||||
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
|
||||
next->l2ad_trim_all);
|
||||
|
||||
|
|
|
@ -2687,6 +2687,7 @@ dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
|
|||
dbuf_dirty_record_t *dr;
|
||||
|
||||
dr = list_head(&db->db_dirty_records);
|
||||
ASSERT3P(dr, !=, NULL);
|
||||
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
|
||||
dl = &dr->dt.dl;
|
||||
dl->dr_overridden_by = *bp;
|
||||
|
@ -2748,6 +2749,7 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
|
|||
dmu_buf_will_not_fill(dbuf, tx);
|
||||
|
||||
dr = list_head(&db->db_dirty_records);
|
||||
ASSERT3P(dr, !=, NULL);
|
||||
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
|
||||
dl = &dr->dt.dl;
|
||||
encode_embedded_bp_compressed(&dl->dr_overridden_by,
|
||||
|
|
|
@ -111,6 +111,7 @@ traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
|
|||
if (claim_txg == 0 || bp->blk_birth < claim_txg)
|
||||
return (0);
|
||||
|
||||
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
|
||||
SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
|
||||
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
|
||||
|
||||
|
|
|
@ -542,6 +542,7 @@ dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
|
|||
dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
|
||||
ASSERT3P(dle, !=, NULL);
|
||||
dle_prev = AVL_PREV(&dl->dl_tree, dle);
|
||||
ASSERT3P(dle_prev, !=, NULL);
|
||||
|
||||
dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);
|
||||
|
||||
|
|
|
@ -1470,6 +1470,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
|
|||
if (claim_txg == 0 || bp->blk_birth < claim_txg)
|
||||
return (0);
|
||||
|
||||
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
|
||||
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
|
||||
lr->lr_foid, ZB_ZIL_LEVEL,
|
||||
lr->lr_offset / BP_GET_LSIZE(bp));
|
||||
|
|
|
@ -303,8 +303,10 @@ mmp_next_leaf(spa_t *spa)
|
|||
|
||||
do {
|
||||
leaf = list_next(&spa->spa_leaf_list, leaf);
|
||||
if (leaf == NULL)
|
||||
if (leaf == NULL) {
|
||||
leaf = list_head(&spa->spa_leaf_list);
|
||||
ASSERT3P(leaf, !=, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* We skip unwritable, offline, detached, and dRAID spare
|
||||
|
|
|
@ -369,6 +369,7 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
|
|||
* invalid as soon as we do any mutating btree operations.
|
||||
*/
|
||||
rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
|
||||
ASSERT3P(rs_after, !=, NULL);
|
||||
rs_set_start_raw(rs_after, rt, before_start);
|
||||
rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
|
||||
rs = rs_after;
|
||||
|
|
|
@ -507,6 +507,7 @@ void
|
|||
spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
|
||||
{
|
||||
log_summary_entry_t *e = list_head(&spa->spa_log_summary);
|
||||
ASSERT3P(e, !=, NULL);
|
||||
if (e->lse_txgcount > 0)
|
||||
e->lse_txgcount--;
|
||||
for (; e != NULL; e = list_head(&spa->spa_log_summary)) {
|
||||
|
|
|
@ -1319,6 +1319,7 @@ vdev_indirect_io_start(zio_t *zio)
|
|||
vdev_indirect_gather_splits, zio);
|
||||
|
||||
indirect_split_t *first = list_head(&iv->iv_splits);
|
||||
ASSERT3P(first, !=, NULL);
|
||||
if (first->is_size == zio->io_size) {
|
||||
/*
|
||||
* This is not a split block; we are pointing to the entire
|
||||
|
|
|
@ -756,6 +756,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
|
|||
do {
|
||||
dio = nio;
|
||||
nio = AVL_NEXT(t, dio);
|
||||
ASSERT3P(dio, !=, NULL);
|
||||
zio_add_child(dio, aio);
|
||||
vdev_queue_io_remove(vq, dio);
|
||||
|
||||
|
|
|
@ -339,6 +339,7 @@ zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
|
|||
if (wbuf == NULL)
|
||||
zio_flags |= ZIO_FLAG_RAW;
|
||||
|
||||
ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
|
||||
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
|
||||
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
|
||||
|
||||
|
|
Loading…
Reference in New Issue