diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index c2321378de..e4a208bb44 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -3221,6 +3221,7 @@ out:
 	umem_free(snap3name, MAXNAMELEN);
 }
 
+#undef OD_ARRAY_SIZE
 #define OD_ARRAY_SIZE 4
 
 /*
@@ -3255,14 +3256,21 @@ ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
 	umem_free(od, size);
 }
 
+#undef OD_ARRAY_SIZE
+#define OD_ARRAY_SIZE 2
+
 /*
  * Verify that dmu_{read,write} work as expected.
  */
 void
 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 {
+	int size;
+	ztest_od_t *od;
+
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[2];
+	size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
+	od = umem_alloc(size, UMEM_NOFAIL);
 	dmu_tx_t *tx;
 	int i, freeit, error;
 	uint64_t n, s, txg;
@@ -3301,11 +3309,13 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 	/*
 	 * Read the directory info.  If it's the first time, set things up.
 	 */
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
-	ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
+	ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
 
-	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+	if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
+		umem_free(od, size);
 		return;
+	}
 
 	bigobj = od[0].od_object;
 	packobj = od[1].od_object;
@@ -3369,6 +3379,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 	if (txg == 0) {
 		umem_free(packbuf, packsize);
 		umem_free(bigbuf, bigsize);
+		umem_free(od, size);
 		return;
 	}
 
@@ -3469,6 +3480,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 
 	umem_free(packbuf, packsize);
 	umem_free(bigbuf, bigsize);
+	umem_free(od, size);
 }
 
 void
@@ -3520,14 +3532,18 @@ compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
 	}
 }
 
+#undef OD_ARRAY_SIZE
+#define OD_ARRAY_SIZE 2
+
 void
 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 {
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[2];
+	ztest_od_t *od;
 	dmu_tx_t *tx;
 	uint64_t i;
 	int error;
+	int size;
 	uint64_t n, s, txg;
 	bufwad_t *packbuf, *bigbuf;
 	uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
@@ -3540,6 +3556,9 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 	arc_buf_t **bigbuf_arcbufs;
 	dmu_object_info_t doi;
 
+	size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
+	od = umem_alloc(size, UMEM_NOFAIL);
+
 	/*
 	 * This test uses two objects, packobj and bigobj, that are always
 	 * updated together (i.e. in the same tx) so that their contents are
@@ -3559,11 +3578,14 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 	/*
 	 * Read the directory info.  If it's the first time, set things up.
 	 */
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
-	ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+	ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
 
-	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+
+	if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
+		umem_free(od, size);
 		return;
+	}
 
 	bigobj = od[0].od_object;
 	packobj = od[1].od_object;
@@ -3649,6 +3671,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 			}
 		}
 		umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
+		umem_free(od, size);
 		dmu_buf_rele(bonus_db, FTAG);
 		return;
 	}
@@ -3745,13 +3768,16 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 	umem_free(packbuf, packsize);
 	umem_free(bigbuf, bigsize);
 	umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
+	umem_free(od, size);
 }
 
 /* ARGSUSED */
 void
 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
 {
-	ztest_od_t od[1];
+	ztest_od_t *od;
+
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
 	uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
 	    (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
 
@@ -3760,47 +3786,56 @@ ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
 	 * to verify that parallel writes to an object -- even to the
 	 * same blocks within the object -- doesn't cause any trouble.
 	 */
-	ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+	ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
 
-	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
 		return;
 
 	while (ztest_random(10) != 0)
-		ztest_io(zd, od[0].od_object, offset);
+		ztest_io(zd, od->od_object, offset);
+
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 void
 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
 {
-	ztest_od_t od[1];
+	ztest_od_t *od;
 	uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
 	    (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
 	uint64_t count = ztest_random(20) + 1;
 	uint64_t blocksize = ztest_random_blocksize();
 	void *data;
 
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
 
-	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) {
+		umem_free(od, sizeof(ztest_od_t));
 		return;
+	}
 
-	if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
+	if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
+		umem_free(od, sizeof(ztest_od_t));
 		return;
+	}
 
-	ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
+	ztest_prealloc(zd, od->od_object, offset, count * blocksize);
 
 	data = umem_zalloc(blocksize, UMEM_NOFAIL);
 
 	while (ztest_random(count) != 0) {
 		uint64_t randoff = offset + (ztest_random(count) * blocksize);
-		if (ztest_write(zd, od[0].od_object, randoff, blocksize,
+		if (ztest_write(zd, od->od_object, randoff, blocksize,
 		    data) != 0)
 			break;
 		while (ztest_random(4) != 0)
-			ztest_io(zd, od[0].od_object, randoff);
+			ztest_io(zd, od->od_object, randoff);
 	}
 
 	umem_free(data, blocksize);
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 /*
@@ -3814,7 +3849,7 @@ void
 ztest_zap(ztest_ds_t *zd, uint64_t id)
 {
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[1];
+	ztest_od_t *od;
 	uint64_t object;
 	uint64_t txg, last_txg;
 	uint64_t value[ZTEST_ZAP_MAX_INTS];
@@ -3825,12 +3860,14 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 	int error;
 	char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
 
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
 
-	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
-		return;
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t),
+	    !ztest_random(2)) != 0)
+		goto out;
 
-	object = od[0].od_object;
+	object = od->od_object;
 
 	/*
 	 * Generate a known hash collision, and verify that
@@ -3840,7 +3877,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
 	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
 	if (txg == 0)
-		return;
+		goto out;
 	for (i = 0; i < 2; i++) {
 		value[i] = i;
 		VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
@@ -3908,7 +3945,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
 	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
 	if (txg == 0)
-		return;
+		goto out;
 	if (last_txg > txg)
 		fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
 
@@ -3933,7 +3970,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 
 	error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
 	if (error == ENOENT)
-		return;
+		goto out;
 
 	ASSERT3U(error, ==, 0);
 
@@ -3941,10 +3978,12 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
 	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
 	if (txg == 0)
-		return;
+		goto out;
 	VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
 	VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
 	dmu_tx_commit(tx);
+out:
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 /*
@@ -3954,16 +3993,17 @@ void
 ztest_fzap(ztest_ds_t *zd, uint64_t id)
 {
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[1];
+	ztest_od_t *od;
 	uint64_t object, txg;
 	int i;
 
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
 
-	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
-		return;
-
-	object = od[0].od_object;
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t),
+	    !ztest_random(2)) != 0)
+		goto out;
+	object = od->od_object;
 
 	/*
 	 * Add entries to this ZAP and make sure it spills over
@@ -3983,12 +4023,14 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
 		dmu_tx_hold_zap(tx, object, B_TRUE, name);
 		txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
 		if (txg == 0)
-			return;
+			goto out;
 		error = zap_add(os, object, name, sizeof (uint64_t), 1,
 		    &value, tx);
 		ASSERT(error == 0 || error == EEXIST);
 		dmu_tx_commit(tx);
 	}
+out:
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 /* ARGSUSED */
@@ -3996,7 +4038,7 @@ void
 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
 {
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[1];
+	ztest_od_t *od;
 	uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
 	dmu_tx_t *tx;
 	int i, namelen, error;
@@ -4004,12 +4046,15 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
 	char name[20], string_value[20];
 	void *data;
 
-	ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+	ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
 
-	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+		umem_free(od, sizeof(ztest_od_t));
 		return;
+	}
 
-	object = od[0].od_object;
+	object = od->od_object;
 
 	/*
 	 * Generate a random name of the form 'xxx.....' where each
@@ -4098,6 +4143,8 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
 
 	if (tx != NULL)
 		dmu_tx_commit(tx);
+
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 /*
@@ -4187,23 +4234,26 @@ void
 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 {
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[1];
+	ztest_od_t *od;
 	dmu_tx_t *tx;
 	ztest_cb_data_t *cb_data[3], *tmp_cb;
 	uint64_t old_txg, txg;
 	int i, error;
 
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
 
-	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+		umem_free(od, sizeof(ztest_od_t));
 		return;
+	}
 
 	tx = dmu_tx_create(os);
 
 	cb_data[0] = ztest_create_cb_data(os, 0);
 	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
 
-	dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
+	dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
 
 	/* Every once in a while, abort the transaction on purpose */
 	if (ztest_random(100) == 0)
@@ -4237,6 +4287,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 			umem_free(cb_data[i], sizeof (ztest_cb_data_t));
 		}
 
+		umem_free(od, sizeof(ztest_od_t));
 		return;
 	}
 
@@ -4246,14 +4297,14 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 	/*
 	 * Read existing data to make sure there isn't a future leak.
 	 */
-	VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
+	VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t),
 	    &old_txg, DMU_READ_PREFETCH));
 
 	if (old_txg > txg)
 		fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
 		    old_txg, txg);
 
-	dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
+	dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
 
 	(void) mutex_enter(&zcl.zcl_callbacks_lock);
 
@@ -4305,6 +4356,8 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 	(void) mutex_exit(&zcl.zcl_callbacks_lock);
 
 	dmu_tx_commit(tx);
+
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 /* ARGSUSED */
@@ -4655,7 +4708,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
 	ztest_shared_t *zs = ztest_shared;
 	spa_t *spa = zs->zs_spa;
 	objset_t *os = zd->zd_os;
-	ztest_od_t od[1];
+	ztest_od_t *od;
 	uint64_t object, blocksize, txg, pattern, psize;
 	enum zio_checksum checksum = spa_dedup_checksum(spa);
 	dmu_buf_t *db;
@@ -4668,10 +4721,13 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
 	blocksize = ztest_random_blocksize();
 	blocksize = MIN(blocksize, 2048);	/* because we write so many */
 
-	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+	od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
+	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
 
-	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
+		umem_free(od, sizeof(ztest_od_t));
 		return;
+	}
 
 	/*
 	 * Take the name lock as writer to prevent anyone else from changing
@@ -4684,6 +4740,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
 	    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
 	    B_FALSE) != 0) {
 		(void) rw_exit(&zs->zs_name_lock);
+		umem_free(od, sizeof(ztest_od_t));
 		return;
 	}
 
@@ -4698,6 +4755,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
 	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
 	if (txg == 0) {
 		(void) rw_exit(&zs->zs_name_lock);
+		umem_free(od, sizeof(ztest_od_t));
 		return;
 	}
 
@@ -4742,6 +4800,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
 	zio_buf_free(buf, psize);
 
 	(void) rw_exit(&zs->zs_name_lock);
+	umem_free(od, sizeof(ztest_od_t));
 }
 
 /*
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index c45bcb399c..b7a4adae9c 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1501,7 +1501,8 @@ dbuf_clear(dmu_buf_impl_t *db)
 		dbuf_rele(parent, db);
 }
 
-static int
+__attribute__((always_inline))
+static inline int
 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
     dmu_buf_impl_t **parentp, blkptr_t **bpp)
 {
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 6b90cc2309..a77118203c 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1669,7 +1669,8 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
  * Load an existing storage pool, using the pool's builtin spa_config as a
  * source of configuration information.
  */
-static int
+__attribute__((always_inline))
+static inline int
 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
     spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
     char **ereport)
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index a7d8b03c5a..201e9ccce0 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -436,7 +436,8 @@ zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
 	return (waiting);
 }
 
-static void
+__attribute__((always_inline))
+static inline void
 zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
 {
 	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
@@ -1121,7 +1122,8 @@ zio_interrupt(zio_t *zio)
  */
 static zio_pipe_stage_t *zio_pipeline[];
 
-void
+__attribute__((always_inline))
+inline void
 zio_execute(zio_t *zio)
 {
 	zio->io_executor = curthread;
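The pattern repeated throughout the ztest.c hunks above is a stack-to-heap conversion: each on-stack `ztest_od_t od[N]` array becomes a `umem_alloc(sizeof(ztest_od_t) * N, UMEM_NOFAIL)` buffer that is released on every exit path, either with a per-branch `umem_free()` or by funneling early returns through a single `out:` label as in `ztest_zap()` and `ztest_fzap()`. The sketch below is a minimal, standalone illustration of that shape only; `demo_state_t`, `demo_init()`, and `demo_use()` are hypothetical stand-ins, not code from the patch, and it assumes libumem is available (link with `-lumem`).

```c
/*
 * Minimal sketch of the stack-to-heap pattern used in the patch:
 * allocate per-call scratch state with umem_alloc(..., UMEM_NOFAIL)
 * and free it on every exit path via a single "out:" label.
 */
#include <stdio.h>
#include <stdint.h>
#include <umem.h>

#define	DEMO_ARRAY_SIZE	2		/* hypothetical, mirrors OD_ARRAY_SIZE */

typedef struct demo_state {		/* stand-in for ztest_od_t */
	uint64_t	ds_object;
	uint64_t	ds_blocksize;
} demo_state_t;

static int
demo_init(demo_state_t *ds, uint64_t id)	/* stand-in for ztest_od_init() */
{
	ds->ds_object = id;
	ds->ds_blocksize = 4096;
	return (id == 0 ? -1 : 0);		/* pretend id 0 is an error */
}

static void
demo_use(uint64_t id)
{
	int size = sizeof (demo_state_t) * DEMO_ARRAY_SIZE;
	demo_state_t *ds;
	int i;

	/* Heap allocation instead of "demo_state_t ds[2];" on the stack. */
	ds = umem_alloc(size, UMEM_NOFAIL);

	for (i = 0; i < DEMO_ARRAY_SIZE; i++) {
		if (demo_init(ds + i, id + i) != 0)
			goto out;		/* every early exit still frees */
		(void) printf("object %llu ready\n",
		    (unsigned long long)ds[i].ds_object);
	}
out:
	umem_free(ds, size);
}

int
main(void)
{
	demo_use(0);	/* error path: freed via out: */
	demo_use(1);	/* normal path: freed at the end */
	return (0);
}
```

The `goto out` idiom keeps the allocation and its release visually paired, which is why the patch prefers it in functions with many early returns, while functions with only one or two exit points simply repeat the `umem_free()` call before each `return`.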