diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index 202b5a6197..a5fbdb2454 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -734,13 +734,19 @@ dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
 static void
 dump_all_ddts(spa_t *spa)
 {
-	ddt_histogram_t ddh_total = { 0 };
-	ddt_stat_t dds_total = { 0 };
+	ddt_histogram_t ddh_total;
+	ddt_stat_t dds_total;
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	bzero(&ddh_total, sizeof (ddt_histogram_t));
+	bzero(&dds_total, sizeof (ddt_stat_t));
+
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				dump_ddt(ddt, type, class);
 			}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 9b643062af..ebce811d37 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -1432,10 +1432,11 @@ arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
 static void
 arc_hdr_destroy(arc_buf_hdr_t *hdr)
 {
+	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
+
 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
 	ASSERT3P(hdr->b_state, ==, arc_anon);
 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
 
 	if (l2hdr != NULL) {
 		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 22e7188bcf..5b19e750f9 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -107,9 +107,9 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
 {
 	dbuf_hash_table_t *h = &dbuf_hash_table;
 	objset_t *os = dn->dn_objset;
-	uint64_t obj = dn->dn_object;
-	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
-	uint64_t idx = hv & h->hash_table_mask;
+	uint64_t obj;
+	uint64_t hv;
+	uint64_t idx;
 	dmu_buf_impl_t *db;
 
+	obj = dn->dn_object;
diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c
index cd4e8476c2..7420173cc9 100644
--- a/module/zfs/ddt.c
+++ b/module/zfs/ddt.c
@@ -446,11 +446,15 @@ ddt_histogram_empty(const ddt_histogram_t *ddh)
 void
 ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
 {
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
+
 	/* Sum the statistics we cached in ddt_object_sync(). */
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				ddt_object_t *ddo =
 				    &ddt->ddt_object_stats[type][class];
@@ -474,10 +478,14 @@ ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
 void
 ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
 {
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
+
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				ddt_histogram_add(ddh,
 				    &ddt->ddt_histogram_cache[type][class]);
@@ -650,9 +658,10 @@ ddt_alloc(const ddt_key_t *ddk)
 static void
 ddt_free(ddt_entry_t *dde)
 {
-	ASSERT(!dde->dde_loading);
 	int p;
 
+	ASSERT(!dde->dde_loading);
+
 	for (p = 0; p < DDT_PHYS_TYPES; p++)
 		ASSERT(dde->dde_lead_zio[p] == NULL);
@@ -741,6 +750,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp)
 {
 	ddt_t *ddt;
 	ddt_entry_t dde;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	if (!BP_GET_DEDUP(bp))
 		return;
@@ -753,8 +764,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp)
 	ddt = ddt_select(spa, bp);
 	ddt_key_fill(&dde.dde_key, bp);
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+	for (type = 0; type < DDT_TYPES; type++) {
+		for (class = 0; class < DDT_CLASSES; class++) {
 			ddt_object_prefetch(ddt, type, class, &dde);
 		}
 	}
@@ -812,15 +823,20 @@ ddt_table_free(ddt_t *ddt)
 void
 ddt_create(spa_t *spa)
 {
+	enum zio_checksum c;
+
 	spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
 		spa->spa_ddt[c] = ddt_table_alloc(spa, c);
 }
 
 int
 ddt_load(spa_t *spa)
 {
+	enum zio_checksum c;
+	enum ddt_type type;
+	enum ddt_class class;
 	int error;
 
 	ddt_create(spa);
@@ -832,10 +848,10 @@ ddt_load(spa_t *spa)
 	if (error)
 		return (error == ENOENT ? 0 : error);
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
-		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-			for (enum ddt_class class = 0; class < DDT_CLASSES;
+		for (type = 0; type < DDT_TYPES; type++) {
+			for (class = 0; class < DDT_CLASSES;
 			    class++) {
 				error = ddt_object_load(ddt, type, class);
 				if (error != 0 && error != ENOENT)
@@ -856,7 +872,9 @@ ddt_load(spa_t *spa)
 void
 ddt_unload(spa_t *spa)
 {
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	enum zio_checksum c;
+
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		if (spa->spa_ddt[c]) {
 			ddt_table_free(spa->spa_ddt[c]);
 			spa->spa_ddt[c] = NULL;
@@ -869,6 +887,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
 {
 	ddt_t *ddt;
 	ddt_entry_t dde;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	if (!BP_GET_DEDUP(bp))
 		return (B_FALSE);
@@ -880,8 +900,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
 	ddt_key_fill(&dde.dde_key, bp);
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++)
-		for (enum ddt_class class = 0; class <= max_class; class++)
+	for (type = 0; type < DDT_TYPES; type++)
+		for (class = 0; class <= max_class; class++)
 			if (ddt_object_lookup(ddt, type, class, &dde) == 0)
 				return (B_TRUE);
@@ -893,13 +913,15 @@ ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
 {
 	ddt_key_t ddk;
 	ddt_entry_t *dde;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	ddt_key_fill(&ddk, bp);
 	dde = ddt_alloc(&ddk);
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+	for (type = 0; type < DDT_TYPES; type++) {
+		for (class = 0; class < DDT_CLASSES; class++) {
 			/*
 			 * We can only do repair if there are multiple copies
 			 * of the block.  For anything in the UNIQUE class,
@@ -1067,6 +1089,8 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
 	spa_t *spa = ddt->ddt_spa;
 	ddt_entry_t *dde;
 	void *cookie = NULL;
+	enum ddt_type type;
+	enum ddt_class class;
 
 	if (avl_numnodes(&ddt->ddt_tree) == 0)
 		return;
@@ -1086,8 +1110,8 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
 		ddt_free(dde);
 	}
 
-	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
-		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
+	for (type = 0; type < DDT_TYPES; type++) {
+		for (class = 0; class < DDT_CLASSES; class++) {
 			if (!ddt_object_exists(ddt, type, class))
 				continue;
 			ddt_object_sync(ddt, type, class, tx);
@@ -1106,12 +1130,13 @@ ddt_sync(spa_t *spa, uint64_t txg)
 	dmu_tx_t *tx;
 	zio_t *rio = zio_root(spa, NULL, NULL,
 	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
+	enum zio_checksum c;
 
 	ASSERT(spa_syncing_txg(spa) == txg);
 
 	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
 
-	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
 		ddt_t *ddt = spa->spa_ddt[c];
 		if (ddt == NULL)
 			continue;
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 2e1fff35a9..6a86bbca8a 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -92,7 +92,7 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
 	int used, compressed, uncompressed;
 	int64_t delta;
 
-	used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
+	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
 	compressed = BP_GET_PSIZE(bp);
 	uncompressed = BP_GET_UCSIZE(bp);
@@ -136,15 +136,17 @@ int
 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
     boolean_t async)
 {
+	int used, compressed, uncompressed;
+
 	if (BP_IS_HOLE(bp))
 		return (0);
 
 	ASSERT(dmu_tx_is_syncing(tx));
 	ASSERT(bp->blk_birth <= tx->tx_txg);
 
-	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
-	int compressed = BP_GET_PSIZE(bp);
-	int uncompressed = BP_GET_UCSIZE(bp);
+	used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
+	compressed = BP_GET_PSIZE(bp);
+	uncompressed = BP_GET_UCSIZE(bp);
 
 	ASSERT(used > 0);
 	if (ds == NULL) {
@@ -1753,6 +1755,7 @@ dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
 	if (dsl_dataset_is_snapshot(ds_next)) {
 		dsl_dataset_t *ds_nextnext;
+		dsl_dataset_t *hds;
 
 		/*
 		 * Update next's unique to include blocks which
@@ -1775,7 +1778,6 @@ dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
 		ASSERT3P(ds_next->ds_prev, ==, NULL);
 
 		/* Collapse range in this head. */
-		dsl_dataset_t *hds;
 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
 		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds));
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 2cd21a102b..b50c1518cd 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -692,9 +692,10 @@ upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
 void
 dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
 {
-	ASSERT(dmu_tx_is_syncing(tx));
 	uint64_t obj;
 
+	ASSERT(dmu_tx_is_syncing(tx));
+
 	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
 	VERIFY(0 == dsl_pool_open_special_dir(dp,
 	    FREE_DIR_NAME, &dp->dp_free_dir));
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index e402dde7c3..0756f57f37 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -1072,6 +1072,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
 {
 	dsl_pool_t *dp = scn->scn_dp;
 	dsl_dataset_t *ds;
+	char *dsname;
 
 	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
@@ -1081,7 +1082,7 @@
 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
 	dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);
 
-	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
+	dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
 	dsl_dataset_name(ds, dsname);
 	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
 	    "pausing=%u",
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index a91b379f99..0ce2b9cd5c 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -1402,8 +1402,8 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, void *data)
 	/* Verify header size is consistent with layout information */
 	ASSERT(tb);
-	ASSERT(IS_SA_BONUSTYPE(bonustype) &&
-	    SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
+	ASSERT((IS_SA_BONUSTYPE(bonustype) &&
+	    SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) ||
 	    (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
 
 	/*
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index b0236e49f9..c90607a089 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1677,7 +1677,6 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
 	int orig_mode = spa->spa_mode;
 	int parse;
 	uint64_t obj;
-	int c;
 
 	/*
 	 * If this is an untrusted config, access the pool in read-only mode.
@@ -2707,6 +2706,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
 	nvlist_t **spares, **l2cache;
 	uint_t nspares, nl2cache;
 	uint64_t version, obj;
+	int c;
 
 	/*
 	 * If this pool already exists, return failure.
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 20946c4e75..2726d6994f 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -884,10 +884,9 @@ spa_vdev_config_enter(spa_t *spa)
 void
 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
 {
-	ASSERT(MUTEX_HELD(&spa_namespace_lock));
-
 	int config_changed = B_FALSE;
 
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 	ASSERT(txg > spa_last_synced_txg(spa));
 
 	spa->spa_pending_vdev = NULL;
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 023a36b7aa..cc6343cfd1 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -1075,12 +1075,13 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
 	zio = zio_root(spa, NULL, NULL, flags);
 
 	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
-		uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t),
-		    KM_SLEEP);
+		uint64_t *good_writes;
+		zio_t *vio;
 
 		ASSERT(!vd->vdev_ishole);
 
-		zio_t *vio = zio_null(zio, spa, NULL,
+		good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
+		vio = zio_null(zio, spa, NULL,
 		    (vd->vdev_islog || vd->vdev_aux != NULL) ?
 		    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
 		    good_writes, flags);
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 24bd3ddcdd..17fdb9b7af 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1892,8 +1892,8 @@ zfs_obj_to_pobj(objset_t *osp, uint64_t obj, uint64_t *pobjp, int *is_xattrdir,
 	dmu_object_info_from_db(db, &doi);
 	if ((doi.doi_bonus_type != DMU_OT_SA &&
 	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
-	    doi.doi_bonus_type == DMU_OT_ZNODE &&
-	    doi.doi_bonus_size < sizeof (znode_phys_t)) {
+	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
+	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
 		sa_buf_rele(db, FTAG);
 		return (EINVAL);
 	}
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 520639e063..c70786c42b 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -2171,6 +2171,8 @@ zio_dva_claim(zio_t *zio)
 static void
 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
 {
+	int g;
+
 	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
 	ASSERT(zio->io_bp_override == NULL);